2022-07-13 19:16:31 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2022 Nordic Semiconductor ASA
|
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stdio.h>
#include <string.h>

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/iterable_sections.h>

#include "uvb.h"
|
|
|
|
LOG_MODULE_REGISTER(uvb, CONFIG_UVB_LOG_LEVEL);
|
|
|
|
|
|
|
|
static struct k_fifo uvb_queue;
|
|
|
|
static void uvb_work_handler(struct k_work *work);
|
|
|
|
static K_WORK_DEFINE(uvb_work, uvb_work_handler);
|
|
|
|
|
|
|
|
enum uvb_msg_type {
|
|
|
|
UVB_MSG_ADVERT,
|
|
|
|
UVB_MSG_TO_HOST,
|
|
|
|
UVB_MSG_SUBSCRIBE,
|
|
|
|
UVB_MSG_UNSUBSCRIBE,
|
|
|
|
};
|
|
|
|
|
|
|
|
struct uvb_msg {
|
|
|
|
sys_snode_t node;
|
|
|
|
enum uvb_msg_type type;
|
|
|
|
const struct uvb_node *source;
|
|
|
|
union {
|
|
|
|
struct uvb_node *sink;
|
|
|
|
struct {
|
|
|
|
enum uvb_event_type type;
|
|
|
|
const void *data;
|
|
|
|
} event;
|
|
|
|
};
|
|
|
|
};
|
|
|
|
|
|
|
|
K_MEM_SLAB_DEFINE_STATIC(uvb_msg_slab, sizeof(struct uvb_msg),
|
|
|
|
CONFIG_UVB_MAX_MESSAGES, sizeof(void *));
|
|
|
|
|
|
|
|
K_MEM_SLAB_DEFINE_STATIC(uvb_pkt_slab, sizeof(struct uvb_packet),
|
|
|
|
CONFIG_UVB_MAX_MESSAGES, sizeof(void *));
|
|
|
|
|
|
|
|
struct uvb_packet *uvb_alloc_pkt(const enum uvb_request request,
|
|
|
|
const uint8_t addr, const uint8_t ep,
|
|
|
|
uint8_t *const data,
|
|
|
|
const size_t length)
|
|
|
|
{
|
|
|
|
static uint32_t seq;
|
|
|
|
struct uvb_packet *pkt;
|
|
|
|
|
|
|
|
if (k_mem_slab_alloc(&uvb_pkt_slab, (void **)&pkt, K_NO_WAIT)) {
|
|
|
|
LOG_ERR("Failed to allocate packet memory");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
seq++;
|
|
|
|
pkt->seq = seq;
|
|
|
|
pkt->request = request;
|
|
|
|
pkt->reply = UVB_REPLY_TIMEOUT;
|
|
|
|
pkt->addr = addr;
|
|
|
|
pkt->ep = ep;
|
|
|
|
pkt->data = data;
|
|
|
|
pkt->length = length;
|
|
|
|
|
|
|
|
return pkt;
|
|
|
|
}
|
|
|
|
|
|
|
|
void uvb_free_pkt(struct uvb_packet *const pkt)
|
|
|
|
{
|
2023-08-25 14:31:30 +02:00
|
|
|
k_mem_slab_free(&uvb_pkt_slab, (void *)pkt);
|
2022-07-13 19:16:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static ALWAYS_INLINE int submit_new_work(struct uvb_msg *const msg)
|
|
|
|
{
|
|
|
|
k_fifo_put(&uvb_queue, msg);
|
|
|
|
return k_work_submit(&uvb_work);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct uvb_msg *uvb_alloc_msg(const struct uvb_node *const node)
|
|
|
|
{
|
|
|
|
struct uvb_msg *msg;
|
|
|
|
|
|
|
|
if (k_mem_slab_alloc(&uvb_msg_slab, (void **)&msg, K_NO_WAIT)) {
|
|
|
|
LOG_ERR("Failed to allocate msg memory");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(msg, 0, sizeof(struct uvb_msg));
|
|
|
|
msg->source = node;
|
|
|
|
|
|
|
|
return msg;
|
|
|
|
}
|
|
|
|
|
|
|
|
int uvb_advert(const struct uvb_node *const host_node,
|
|
|
|
const enum uvb_event_type type,
|
|
|
|
const struct uvb_packet *const pkt)
|
|
|
|
{
|
|
|
|
struct uvb_msg *msg;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
msg = uvb_alloc_msg(host_node);
|
|
|
|
if (msg == NULL) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
msg->type = UVB_MSG_ADVERT;
|
|
|
|
msg->event.type = type;
|
|
|
|
msg->event.data = (void *)pkt;
|
|
|
|
err = submit_new_work(msg);
|
|
|
|
|
|
|
|
return err < 0 ? err : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int uvb_to_host(const struct uvb_node *const dev_node,
|
|
|
|
const enum uvb_event_type type,
|
|
|
|
const struct uvb_packet *const pkt)
|
|
|
|
{
|
|
|
|
struct uvb_msg *msg;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
msg = uvb_alloc_msg(dev_node);
|
|
|
|
if (msg == NULL) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
msg->type = UVB_MSG_TO_HOST;
|
|
|
|
msg->event.type = type;
|
|
|
|
msg->event.data = (void *)pkt;
|
|
|
|
err = submit_new_work(msg);
|
|
|
|
|
|
|
|
return err < 0 ? err : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int subscribe_msg(const struct uvb_node *const host_node,
|
|
|
|
struct uvb_node *const dev_node,
|
|
|
|
const enum uvb_msg_type type)
|
|
|
|
{
|
|
|
|
struct uvb_msg *msg;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
msg = uvb_alloc_msg(host_node);
|
|
|
|
if (msg == NULL) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
msg->type = type;
|
|
|
|
msg->sink = dev_node;
|
|
|
|
err = submit_new_work(msg);
|
|
|
|
|
|
|
|
return err < 0 ? err : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int unsubscribe_msg(const struct uvb_node *const host_node,
|
|
|
|
struct uvb_node *const dev_node)
|
|
|
|
{
|
|
|
|
struct uvb_msg *msg;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
msg = uvb_alloc_msg(host_node);
|
|
|
|
if (msg == NULL) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
msg->type = UVB_MSG_UNSUBSCRIBE;
|
|
|
|
msg->sink = dev_node;
|
|
|
|
err = submit_new_work(msg);
|
|
|
|
|
|
|
|
return err < 0 ? err : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct uvb_node *find_host_node(const char *name)
|
|
|
|
{
|
|
|
|
if (name == NULL || name[0] == '\0') {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
STRUCT_SECTION_FOREACH(uvb_node, host) {
|
|
|
|
if (strcmp(name, host->name) == 0) {
|
|
|
|
return host;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
int uvb_subscribe(const char *name, struct uvb_node *const dev_node)
|
|
|
|
{
|
|
|
|
const struct uvb_node *host_node;
|
|
|
|
|
|
|
|
host_node = find_host_node(name);
|
|
|
|
if (host_node == NULL) {
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
return subscribe_msg(host_node, dev_node, UVB_MSG_SUBSCRIBE);
|
|
|
|
}
|
|
|
|
|
|
|
|
int uvb_unsubscribe(const char *name, struct uvb_node *const dev_node)
|
|
|
|
{
|
|
|
|
const struct uvb_node *host_node;
|
|
|
|
|
|
|
|
host_node = find_host_node(name);
|
|
|
|
if (host_node == NULL) {
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
return unsubscribe_msg(host_node, dev_node);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ALWAYS_INLINE void handle_msg_subscribe(struct uvb_msg *const msg)
|
|
|
|
{
|
|
|
|
struct uvb_node *host_node;
|
|
|
|
struct uvb_node *dev_node;
|
|
|
|
|
|
|
|
host_node = (struct uvb_node *)msg->source;
|
|
|
|
dev_node = msg->sink;
|
|
|
|
if (atomic_get(&dev_node->subscribed)) {
|
|
|
|
LOG_ERR("%p already subscribed", dev_node);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
LOG_DBG("%p -> %p", dev_node, host_node);
|
|
|
|
sys_dnode_init(&dev_node->node);
|
|
|
|
if (msg->type == UVB_MSG_SUBSCRIBE) {
|
|
|
|
sys_dlist_prepend(&host_node->list, &dev_node->node);
|
|
|
|
}
|
|
|
|
|
|
|
|
atomic_inc(&dev_node->subscribed);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ALWAYS_INLINE void handle_msg_unsubscribe(struct uvb_msg *const msg)
|
|
|
|
{
|
|
|
|
struct uvb_node *dev_node;
|
|
|
|
atomic_t tmp;
|
|
|
|
|
|
|
|
dev_node = msg->sink;
|
|
|
|
tmp = atomic_clear(&dev_node->subscribed);
|
|
|
|
if (tmp) {
|
|
|
|
LOG_DBG("unsubscribe %p", dev_node);
|
|
|
|
sys_dlist_remove(&dev_node->node);
|
|
|
|
} else {
|
|
|
|
LOG_ERR("%p is not subscribed", dev_node);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static ALWAYS_INLINE void handle_msg_event(struct uvb_msg *const msg)
|
|
|
|
{
|
|
|
|
struct uvb_node *host_node;
|
|
|
|
struct uvb_node *dev_node;
|
|
|
|
|
|
|
|
host_node = (struct uvb_node *)msg->source;
|
|
|
|
SYS_DLIST_FOR_EACH_CONTAINER(&host_node->list, dev_node, node) {
|
|
|
|
LOG_DBG("%p from %p to %p", msg, host_node, dev_node);
|
|
|
|
if (dev_node->notify) {
|
|
|
|
dev_node->notify(dev_node->priv,
|
|
|
|
msg->event.type,
|
|
|
|
msg->event.data);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static ALWAYS_INLINE void handle_msg_to_host(struct uvb_msg *const msg)
|
|
|
|
{
|
|
|
|
struct uvb_node *host_node;
|
|
|
|
struct uvb_node *source;
|
|
|
|
|
|
|
|
source = (struct uvb_node *)msg->source;
|
|
|
|
if (source->head) {
|
|
|
|
LOG_ERR("Host may not reply");
|
|
|
|
}
|
|
|
|
|
|
|
|
SYS_DLIST_FOR_EACH_CONTAINER(&source->node, host_node, node) {
|
|
|
|
LOG_DBG("%p from %p to %p", msg, source, host_node);
|
|
|
|
if (host_node->head && host_node->notify) {
|
|
|
|
host_node->notify(host_node->priv,
|
|
|
|
msg->event.type,
|
|
|
|
msg->event.data);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void uvb_work_handler(struct k_work *work)
|
|
|
|
{
|
|
|
|
struct uvb_msg *msg;
|
|
|
|
|
|
|
|
msg = k_fifo_get(&uvb_queue, K_NO_WAIT);
|
|
|
|
if (msg == NULL) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
LOG_DBG("Message %p %s", msg->source, msg->source->name);
|
|
|
|
switch (msg->type) {
|
|
|
|
case UVB_MSG_SUBSCRIBE:
|
|
|
|
handle_msg_subscribe(msg);
|
|
|
|
break;
|
|
|
|
case UVB_MSG_UNSUBSCRIBE:
|
|
|
|
handle_msg_unsubscribe(msg);
|
|
|
|
break;
|
|
|
|
case UVB_MSG_ADVERT:
|
|
|
|
handle_msg_event(msg);
|
|
|
|
break;
|
|
|
|
case UVB_MSG_TO_HOST:
|
|
|
|
handle_msg_to_host(msg);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2023-08-25 14:31:30 +02:00
|
|
|
k_mem_slab_free(&uvb_msg_slab, (void *)msg);
|
2022-07-13 19:16:31 +02:00
|
|
|
if (!k_fifo_is_empty(&uvb_queue)) {
|
|
|
|
(void)k_work_submit(work);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
init: remove the need for a dummy device pointer in SYS_INIT functions
The init infrastructure, found in `init.h`, is currently used by:
- `SYS_INIT`: to call functions before `main`
- `DEVICE_*`: to initialize devices
They are all sorted according to an initialization level + a priority.
`SYS_INIT` calls are really orthogonal to devices, however, the required
function signature requires a `const struct device *dev` as a first
argument. The only reason for that is because the same init machinery is
used by devices, so we have something like:
```c
struct init_entry {
int (*init)(const struct device *dev);
/* only set by DEVICE_*, otherwise NULL */
const struct device *dev;
}
```
As a result, we end up with such weird/ugly pattern:
```c
static int my_init(const struct device *dev)
{
/* always NULL! add ARG_UNUSED to avoid compiler warning */
ARG_UNUSED(dev);
...
}
```
This is really a result of poor internals isolation. This patch proposes
to make init entries more flexible so that they can accept system
initialization calls like this:
```c
static int my_init(void)
{
...
}
```
This is achieved using a union:
```c
union init_function {
/* for SYS_INIT, used when init_entry.dev == NULL */
int (*sys)(void);
/* for DEVICE*, used when init_entry.dev != NULL */
int (*dev)(const struct device *dev);
};
struct init_entry {
/* stores init function (either for SYS_INIT or DEVICE*) */
union init_function init_fn;
/* stores device pointer for DEVICE*, NULL for SYS_INIT. Allows
* to know which union entry to call.
*/
const struct device *dev;
}
```
This solution **does not increase ROM usage**, and allows to offer clean
public APIs for both SYS_INIT and DEVICE*. Note that however, init
machinery keeps a coupling with devices.
**NOTE**: This is a breaking change! All `SYS_INIT` functions will need
to be converted to the new signature. See the script offered in the
following commit.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
init: convert SYS_INIT functions to the new signature
Conversion scripted using scripts/utils/migrate_sys_init.py.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
manifest: update projects for SYS_INIT changes
Update modules with updated SYS_INIT calls:
- hal_ti
- lvgl
- sof
- TraceRecorderSource
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
tests: devicetree: devices: adjust test
Adjust test according to the recently introduced SYS_INIT
infrastructure.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
tests: kernel: threads: adjust SYS_INIT call
Adjust to the new signature: int (*init_fn)(void);
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
2022-10-19 09:33:44 +02:00
|
|
|
static int uvb_init(void)
|
2022-07-13 19:16:31 +02:00
|
|
|
{
|
|
|
|
STRUCT_SECTION_FOREACH(uvb_node, host) {
|
|
|
|
LOG_DBG("Host %p - %s", host, host->name);
|
|
|
|
sys_dlist_init(&host->list);
|
|
|
|
}
|
|
|
|
|
|
|
|
k_fifo_init(&uvb_queue);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
SYS_INIT(uvb_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
|