net/mgmt: Add a function to wait on an event synchronously

Instead of creating a handler and a related callback structure for an
event_mask, net_mgmt_event_wait() can now be used to wait synchronously
on such an event_mask. The core mgmt code seamlessly reuses
struct net_mgmt_event_callback, so the whole internal notification
mechanism runs through the same code path.

Change-Id: I426d782c770e75e5222aa3c5b703172b1f1f2e5e
Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
parent c1ecd8c890
commit 35e5aa8865
4 changed files with 138 additions and 11 deletions
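In practice the new call replaces the register-a-callback-and-handler dance for code that only needs to block until something happens. A minimal usage sketch, assuming the NET_EVENT_IF_UP definition from net_event.h and the include path used by the test below; only net_mgmt_event_wait() itself comes from this commit:

```c
#include <net/net_mgmt.h>

/* Block the calling thread until the interface comes up, instead of
 * registering a struct net_mgmt_event_callback plus a handler.
 * NET_EVENT_IF_UP and the 5-second timeout are illustrative assumptions. */
static int wait_for_iface_up(struct net_if **iface)
{
	uint32_t raised_event;
	int ret;

	ret = net_mgmt_event_wait(NET_EVENT_IF_UP, &raised_event, iface, 5000);
	if (ret == -ETIMEDOUT) {
		/* no matching event was notified within the timeout */
		return ret;
	}

	return ret;
}
```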
@@ -47,3 +47,15 @@
 ^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
 ^.*dns_context.address
 ^[- \t]*\^
+#
+# include/net/net_mgmt.h
+#
+^(?P<filename>[-._/\w]+/doc/api/networking.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
+^[ \t]*
+^[ \t]*\^
+^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
+^[ \t]*
+^[ \t]*\^
+^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
+^.*net_mgmt_event_callback.__unnamed__
+^[- \t]*\^
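This first hunk appears to touch the documentation build's known-issues filter: the anonymous unions introduced in struct net_mgmt_event_callback below make the doxygen/Sphinx pipeline emit "Invalid definition" warnings for include/net/net_mgmt.h (note the net_mgmt_event_callback.__unnamed__ pattern), so matching suppression regexes are whitelisted here.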
@@ -29,11 +29,13 @@ struct net_if;
 #define NET_MGMT_EVENT_MASK		0x80000000
 #define NET_MGMT_ON_IFACE_MASK		0x40000000
 #define NET_MGMT_LAYER_MASK		0x30000000
-#define NET_MGMT_LAYER_CODE_MASK	0x0FFF0000
+#define NET_MGMT_SYNC_EVENT_MASK	0x08000000
+#define NET_MGMT_LAYER_CODE_MASK	0x07FF0000
 #define NET_MGMT_COMMAND_MASK		0x0000FFFF
 
 #define NET_MGMT_EVENT_BIT		BIT(31)
 #define NET_MGMT_IFACE_BIT		BIT(30)
+#define NET_MGMT_SYNC_EVENT_BIT		BIT(27)
 
 #define NET_MGMT_LAYER(_layer)		(_layer << 28)
 #define NET_MGMT_LAYER_CODE(_code)	(_code << 16)

@@ -44,6 +46,9 @@ struct net_if;
 #define NET_MGMT_ON_IFACE(mgmt_request)			\
 	(mgmt_request & NET_MGMT_ON_IFACE_MASK)
 
+#define NET_MGMT_EVENT_SYNCHRONOUS(mgmt_request)	\
+	(mgmt_request & NET_MGMT_SYNC_EVENT_MASK)
+
 #define NET_MGMT_GET_LAYER(mgmt_request)		\
 	((mgmt_request & NET_MGMT_LAYER_MASK) >> 28)
 
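The layer-code field of the header (include/net/net_mgmt.h, per the comment added to the known-issues filter above) shrinks from 12 to 11 bits so that bit 27 can carry the new synchronous-wait flag. A standalone sketch, plain C rather than Zephyr code, that re-declares the masks from the hunk above and only adds two static assertions documenting the resulting bit layout:

```c
/* Bit layout after this change:
 *   bit 31     event vs. request      bits 29-28  layer
 *   bit 30     iface relevant         bit 27      synchronous waiter
 *   bits 26-16 layer code             bits 15-0   command
 */
#define NET_MGMT_EVENT_MASK       0x80000000
#define NET_MGMT_ON_IFACE_MASK    0x40000000
#define NET_MGMT_LAYER_MASK       0x30000000
#define NET_MGMT_SYNC_EVENT_MASK  0x08000000
#define NET_MGMT_LAYER_CODE_MASK  0x07FF0000
#define NET_MGMT_COMMAND_MASK     0x0000FFFF

_Static_assert((NET_MGMT_SYNC_EVENT_MASK & NET_MGMT_LAYER_CODE_MASK) == 0,
	       "sync bit must not overlap the narrowed layer-code field");

_Static_assert((NET_MGMT_EVENT_MASK | NET_MGMT_ON_IFACE_MASK |
		NET_MGMT_LAYER_MASK | NET_MGMT_SYNC_EVENT_MASK |
		NET_MGMT_LAYER_CODE_MASK | NET_MGMT_COMMAND_MASK)
	       == 0xFFFFFFFF,
	       "the six fields should tile the full 32-bit event word");
```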
@@ -115,16 +120,33 @@ struct net_mgmt_event_callback {
 	 */
 	sys_snode_t node;
 
+	union {
 	/** Actual callback function being used to notify the owner
 	 */
 	net_mgmt_event_handler_t handler;
+		/** Semaphore meant to be used internally for the synchronous
+		 * net_mgmt_event_wait() function.
+		 */
+		struct k_sem *sync_call;
+	};
 
 	/** A mask of network events on which the above handler should be
 	 * called in case those events come. Such mask can be modified
 	 * whenever necessary by the owner, and thus will affect the handler
 	 * being called or not.
 	 */
+	union {
+		/** A mask of network events on which the above handler should
+		 * be called in case those events come. Such mask can be
+		 * modified whenever necessary by the owner, and thus will
+		 * affect the handler being called or not.
+		 */
 	uint32_t event_mask;
+		/** Internal placeholder when a synchronous event wait is
+		 * successfully unlocked on an event.
+		 */
+		uint32_t raised_event;
+	};
 };
 
 #ifdef CONFIG_NET_MGMT_EVENT
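With the anonymous unions in place, the same structure backs both paths: an asynchronous listener fills in handler and event_mask as before, while the synchronous path (net_mgmt_event_wait(), further down) stores a semaphore pointer and later reads raised_event. A small illustrative sketch; only the field names and NET_MGMT_SYNC_EVENT_BIT come from this diff, the event value and includes are placeholders:

```c
#include <net/net_mgmt.h>

#define MY_EVENT (NET_MGMT_EVENT_BIT | 1)  /* made-up placeholder event */

/* 1) asynchronous listener: usage is unchanged by this commit */
static void on_event(struct net_mgmt_event_callback *cb,
		     uint32_t mgmt_event, struct net_if *iface)
{
	/* react to mgmt_event on iface */
}

static struct net_mgmt_event_callback async_cb = {
	.handler    = on_event,  /* first union: callback function */
	.event_mask = MY_EVENT,  /* second union: events to listen to */
};

/* 2) what the synchronous path stores instead; net_mgmt_event_wait()
 *    builds this internally, shown here only for illustration */
static struct k_sem wait_sem;
static struct net_mgmt_event_callback sync_cb = {
	.sync_call  = &wait_sem,                          /* first union */
	.event_mask = MY_EVENT | NET_MGMT_SYNC_EVENT_BIT, /* flags it as sync */
};
/* When the event fires, the mgmt thread overwrites the second union with
 * raised_event and gives the semaphore (see the net_mgmt.c hunks below). */
```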
@@ -166,6 +188,28 @@ void net_mgmt_del_event_callback(struct net_mgmt_event_callback *cb);
  */
 void net_mgmt_event_notify(uint32_t mgmt_event, struct net_if *iface);
 
+/**
+ * @brief Used to wait synchronously on an event mask
+ * @param mgmt_event_mask A mask of relevant events to wait on.
+ * @param raised_event A pointer to a uint32_t receiving the event from
+ *        the mask that woke the caller up. Can be NULL if the caller is
+ *        not interested in that information.
+ * @param iface A pointer to a placeholder for the iface the event
+ *        originated from. This is only valid if the event mask has
+ *        NET_MGMT_IFACE_BIT set, depending on the events the caller
+ *        wants to listen to.
+ * @param timeout A delay in milliseconds. K_FOREVER can be used to wait
+ *        indefinitely.
+ *
+ * @return 0 on success, a negative error code otherwise. -ETIMEDOUT is
+ *         returned specifically if the timeout kicks in instead of an
+ *         actual event.
+ */
+int net_mgmt_event_wait(uint32_t mgmt_event_mask,
+			uint32_t *raised_event,
+			struct net_if **iface,
+			int timeout);
+
 /**
  * @brief Used by the core of the network stack to initialize the network
  * event processing.
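As the @param documentation above notes, both output pointers are optional and the timeout can be K_FOREVER. A tiny sketch of that "don't care" form; NET_EVENT_IPV6_ADDR_ADD is an assumption taken from net_event.h, not from this diff:

```c
/* Wait forever for any IPv6 address addition; the caller does not care
 * which exact event or which interface woke it up, so both output
 * pointers are NULL. */
static void wait_for_any_ipv6_addr(void)
{
	(void)net_mgmt_event_wait(NET_EVENT_IPV6_ADDR_ADD, NULL, NULL,
				  K_FOREVER);
}
```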
@@ -176,6 +220,15 @@ void net_mgmt_event_init(void);
 #define net_mgmt_add_event_callback(...)
 #define net_mgmt_event_notify(...)
 #define net_mgmt_event_init(...)
 
+static inline int net_mgmt_event_wait(uint32_t mgmt_event_mask,
+				      uint32_t *raised_event,
+				      struct net_if **iface,
+				      int timeout)
+{
+	return 0;
+}
+
 #endif /* CONFIG_NET_MGMT_EVENT */
 
 /**
@@ -22,6 +22,11 @@ struct mgmt_event_entry {
 	struct net_if *iface;
 };
 
+struct mgmt_event_wait {
+	struct k_sem sync_call;
+	struct net_if *iface;
+};
+
 static struct k_sem network_event;
 NET_STACK_DEFINE(MGMT, mgmt_stack, CONFIG_NET_MGMT_EVENT_STACK_SIZE,
 		 CONFIG_NET_MGMT_EVENT_STACK_SIZE);
@@ -102,6 +107,7 @@ static inline bool mgmt_is_event_handled(uint32_t mgmt_event)
 
 static inline void mgmt_run_callbacks(struct mgmt_event_entry *mgmt_event)
 {
+	sys_snode_t *prev = NULL;
 	struct net_mgmt_event_callback *cb, *tmp;
 
 	NET_DBG("Event layer %u code %u type %u",
@@ -110,10 +116,30 @@ static inline void mgmt_run_callbacks(struct mgmt_event_entry *mgmt_event)
 		NET_MGMT_GET_COMMAND(mgmt_event->event));
 
 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&event_callbacks, cb, tmp, node) {
-		NET_DBG("Running callback %p : %p", cb, cb->handler);
-
 		if ((mgmt_event->event & cb->event_mask) == mgmt_event->event) {
-			cb->handler(cb, mgmt_event->event, mgmt_event->iface);
+			if (NET_MGMT_EVENT_SYNCHRONOUS(cb->event_mask)) {
+				struct mgmt_event_wait *sync_data =
+					CONTAINER_OF(cb->sync_call,
+						     struct mgmt_event_wait,
+						     sync_call);
+
+				NET_DBG("Unlocking %p synchronous call", cb);
+
+				cb->raised_event = mgmt_event->event;
+				sync_data->iface = mgmt_event->iface;
+
+				k_sem_give(cb->sync_call);
+
+				sys_slist_remove(&event_callbacks,
+						 prev, &cb->node);
+			} else {
+				NET_DBG("Running callback %p : %p",
+					cb, cb->handler);
+
+				cb->handler(cb, mgmt_event->event,
+					    mgmt_event->iface);
+				prev = &cb->node;
+			}
 		}
 	}
 
 #ifdef CONFIG_NET_DEBUG_MGMT_EVENT_STACK
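Two details in this hunk are worth spelling out: prev tracks the previous list node so that sys_slist_remove() can unlink the one-shot synchronous callback from the singly linked list, and CONTAINER_OF() recovers the surrounding struct mgmt_event_wait from the bare semaphore pointer stored in cb->sync_call, so its iface field can be filled in before the semaphore is given. A standalone illustration of that CONTAINER_OF step; plain C rather than Zephyr code, all names here are mock-ups:

```c
#include <stddef.h>
#include <stdio.h>

/* same recipe as Zephyr's CONTAINER_OF(): member pointer -> owner pointer */
#define CONTAINER_OF(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sem { int count; };            /* stand-in for struct k_sem */

struct mgmt_event_wait_demo {
	struct sem sync_call;         /* embedded semaphore, as in the patch */
	void *iface;                  /* filled in by the mgmt thread */
};

int main(void)
{
	struct mgmt_event_wait_demo wait = { .iface = NULL };
	struct sem *handle = &wait.sync_call;   /* what cb->sync_call stores */

	struct mgmt_event_wait_demo *owner =
		CONTAINER_OF(handle, struct mgmt_event_wait_demo, sync_call);

	/* owner now points back at 'wait', so owner->iface can be set */
	printf("recovered %p, expected %p\n", (void *)owner, (void *)&wait);
	return 0;
}
```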
@@ -186,6 +212,42 @@ void net_mgmt_event_notify(uint32_t mgmt_event, struct net_if *iface)
 	}
 }
 
+int net_mgmt_event_wait(uint32_t mgmt_event_mask,
+			uint32_t *raised_event,
+			struct net_if **iface,
+			int timeout)
+{
+	struct mgmt_event_wait sync_data = {
+		.sync_call = K_SEM_INITIALIZER(sync_data.sync_call, 0, 1),
+	};
+	struct net_mgmt_event_callback sync = {
+		.sync_call = &sync_data.sync_call,
+		.event_mask = mgmt_event_mask | NET_MGMT_SYNC_EVENT_BIT,
+	};
+	int ret;
+
+	NET_DBG("Synchronous event wait %p", &sync);
+
+	net_mgmt_add_event_callback(&sync);
+
+	ret = k_sem_take(sync.sync_call, timeout);
+	if (ret == -EAGAIN) {
+		ret = -ETIMEDOUT;
+	} else {
+		if (!ret) {
+			if (raised_event) {
+				*raised_event = sync.raised_event;
+			}
+
+			if (iface) {
+				*iface = sync_data.iface;
+			}
+		}
+	}
+
+	return ret;
+}
+
 void net_mgmt_event_init(void)
 {
 	sys_slist_init(&event_callbacks);
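Note the ordering seen in the mgmt thread (previous hunk): the one-shot callback is unlinked from the list before k_sem_give() is called, presumably because both sync and sync_data live on the waiter's stack and must not be touched once net_mgmt_event_wait() returns. The -EAGAIN result of k_sem_take() on timeout is translated to the more conventional -ETIMEDOUT for the caller.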
@@ -13,9 +13,9 @@
 #include <net/net_mgmt.h>
 #include <net/nbuf.h>
 
-#define TEST_MGMT_REQUEST		0x0ABC1234
-#define TEST_MGMT_EVENT			0x8ABC1234
-#define TEST_MGMT_EVENT_UNHANDLED	0x8ABC4321
+#define TEST_MGMT_REQUEST		0x07AB1234
+#define TEST_MGMT_EVENT			0x87AB1234
+#define TEST_MGMT_EVENT_UNHANDLED	0x87AB4321
 
 /* Notifier infra */
 static uint32_t event2throw;
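The test identifiers move from 0xABC to 0x7AB in the layer-code nibbles so that bit 27, now reserved for the synchronous-wait flag, is no longer set by accident. A hedged sketch of how a synchronous-wait test case could be exercised, built only from the names visible above; the thread/timing plumbing, k_sleep() use, and net_if_get_default() are assumptions, not part of this diff:

```c
static void throw_event_later(void)
{
	/* give the waiter time to install its internal callback first */
	k_sleep(100);
	net_mgmt_event_notify(TEST_MGMT_EVENT, net_if_get_default());
}

static int test_sync_event_wait(void)
{
	uint32_t raised_event = 0;
	struct net_if *iface = NULL;
	int ret;

	/* throw_event_later() is assumed to run in a separate thread */
	ret = net_mgmt_event_wait(TEST_MGMT_EVENT, &raised_event, &iface, 1000);
	if (ret < 0 || raised_event != TEST_MGMT_EVENT) {
		return -1;  /* timed out or woke up on the wrong event */
	}

	return 0;
}
```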