poll: Enable multiple threads to use k_poll on the same object
This is necessary in order for k_queue_get to work properly, since queues
are used with buffer pools that may have multiple threads asking for
buffers.

Jira: ZEP-2553

Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
parent 7725fc07e3
commit 7d01c5ecb7

9 changed files with 113 additions and 115 deletions
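To make the motivation concrete, here is a minimal sketch (hypothetical names, not part of the commit) of the pattern this enables: with CONFIG_POLL, k_fifo_get() is implemented on top of k_poll(), so before this change a second consumer blocking on the same buffer pool could collide with -EADDRINUSE; now both simply register as pollers.

/* Sketch only: two threads waiting on one buffer pool. Both consumers
 * now register on the queue's poll_events list instead of fighting
 * over a single poll_event slot.
 */
#include <kernel.h>

static K_FIFO_DEFINE(buf_pool);

static void consumer(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	for (;;) {
		/* blocks in k_poll() when CONFIG_POLL is enabled */
		void *buf = k_fifo_get(&buf_pool, K_FOREVER);

		(void)buf; /* ... use and recycle the buffer ... */
	}
}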
@@ -98,11 +98,11 @@ typedef sys_dlist_t _wait_q_t;
 #endif
 
 #ifdef CONFIG_POLL
-#define _POLL_EVENT_OBJ_INIT \
-	.poll_event = NULL,
-#define _POLL_EVENT struct k_poll_event *poll_event
+#define _POLL_EVENT_OBJ_INIT(obj) \
+	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
+#define _POLL_EVENT sys_dlist_t poll_events
 #else
-#define _POLL_EVENT_OBJ_INIT
+#define _POLL_EVENT_OBJ_INIT(obj)
 #define _POLL_EVENT
 #endif
@@ -1327,7 +1327,7 @@ struct k_queue {
 	{ \
 	.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
 	.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
-	_POLL_EVENT_OBJ_INIT \
+	_POLL_EVENT_OBJ_INIT(obj) \
 	_OBJECT_TRACING_INIT \
 	}
@@ -2360,7 +2360,7 @@ struct k_sem {
 	.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
 	.count = initial_count, \
 	.limit = count_limit, \
-	_POLL_EVENT_OBJ_INIT \
+	_POLL_EVENT_OBJ_INIT(obj) \
 	_OBJECT_TRACING_INIT \
 	}
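Both the static initializers above and the runtime init paths (k_queue_init()/k_sem_init(), changed later in this diff) now produce an empty poll_events list. A small sketch of the two paths, assuming CONFIG_POLL=y:

/* Sketch: the two ways a pollable object's poll_events list is set up.
 * Static definition goes through _POLL_EVENT_OBJ_INIT(obj), i.e.
 * SYS_DLIST_STATIC_INIT; runtime init calls sys_dlist_init().
 */
static K_SEM_DEFINE(static_sem, 0, 1);	/* list statically self-initialized */

void runtime_init_example(void)
{
	static struct k_sem runtime_sem;

	k_sem_init(&runtime_sem, 0, 1);	/* list initialized at run time */
}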
@@ -3544,9 +3544,6 @@ enum _poll_states_bits {
 	/* default state when creating event */
 	_POLL_STATE_NOT_READY,
 
-	/* there was another poller already on the object */
-	_POLL_STATE_EADDRINUSE,
-
 	/* signaled by k_poll_signal() */
 	_POLL_STATE_SIGNALED,
@@ -3601,7 +3598,6 @@ enum k_poll_modes {
 
 /* public - values for k_poll_event.state bitfield */
 #define K_POLL_STATE_NOT_READY 0
-#define K_POLL_STATE_EADDRINUSE _POLL_STATE_BIT(_POLL_STATE_EADDRINUSE)
 #define K_POLL_STATE_SIGNALED _POLL_STATE_BIT(_POLL_STATE_SIGNALED)
 #define K_POLL_STATE_SEM_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
 #define K_POLL_STATE_DATA_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
@@ -3610,7 +3606,7 @@ enum k_poll_modes {
 /* public - poll signal object */
 struct k_poll_signal {
 	/* PRIVATE - DO NOT TOUCH */
-	struct k_poll_event *poll_event;
+	sys_dlist_t poll_events;
 
 	/*
 	 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
@@ -3622,14 +3618,17 @@ struct k_poll_signal {
 	int result;
 };
 
-#define K_POLL_SIGNAL_INITIALIZER() \
+#define K_POLL_SIGNAL_INITIALIZER(obj) \
 	{ \
-	.poll_event = NULL, \
+	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
 	.signaled = 0, \
 	.result = 0, \
 	}
 
 struct k_poll_event {
+	/* PRIVATE - DO NOT TOUCH */
+	sys_dnode_t _node;
+
 	/* PRIVATE - DO NOT TOUCH */
 	struct _poller *poller;
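The initializer now needs the object's own name so the list head can statically point back at itself, which is why it gained the obj parameter; every user in the tree is updated in the hunks below. Usage is simply:

/* The pattern used throughout this commit. */
static struct k_poll_signal my_signal = K_POLL_SIGNAL_INITIALIZER(my_signal);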
@@ -3716,16 +3715,9 @@ extern void k_poll_event_init(struct k_poll_event *event, u32_t type,
 * reason, the k_poll() call is more effective when the objects being polled
 * only have one thread, the polling thread, trying to acquire them.
 *
- * Only one thread can be polling for a particular object at a given time. If
- * another thread tries to poll on it, the k_poll() call returns -EADDRINUSE
- * and returns as soon as it has finished handling the other events. This means
- * that k_poll() can return -EADDRINUSE and have the state value of some events
- * be non-K_POLL_STATE_NOT_READY. When this condition occurs, the @a timeout
- * parameter is ignored.
- *
- * When k_poll() returns 0 or -EADDRINUSE, the caller should loop on all the
- * events that were passed to k_poll() and check the state field for the values
- * that were expected and take the associated actions.
+ * When k_poll() returns 0, the caller should loop on all the events that were
+ * passed to k_poll() and check the state field for the values that were
+ * expected and take the associated actions.
 *
 * Before being reused for another call to k_poll(), the user has to reset the
 * state field to K_POLL_STATE_NOT_READY.
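A sketch of the documented calling pattern (hypothetical helper, semaphore events assumed): loop over the array after k_poll() returns 0, act on the ready events, and reset each state to K_POLL_STATE_NOT_READY before reusing it.

/* Sketch of the usage pattern described above (not from the commit). */
void poll_and_handle(struct k_poll_event *events, int num_events)
{
	int i, rc;

	rc = k_poll(events, num_events, K_MSEC(100));
	if (rc != 0) {
		return;	/* -EAGAIN: timed out, nothing became ready */
	}

	for (i = 0; i < num_events; i++) {
		if (events[i].state == K_POLL_STATE_SEM_AVAILABLE) {
			k_sem_take(events[i].sem, K_NO_WAIT);
		}
		/* mandatory before the next k_poll() call */
		events[i].state = K_POLL_STATE_NOT_READY;
	}
}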
@@ -3736,7 +3728,6 @@ extern void k_poll_event_init(struct k_poll_event *event, u32_t type,
 * or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 One or more events are ready.
- * @retval -EADDRINUSE One or more objects already had a poller.
 * @retval -EAGAIN Waiting period timed out.
 */
@@ -3777,8 +3768,7 @@ extern void k_poll_signal_init(struct k_poll_signal *signal);
 extern int k_poll_signal(struct k_poll_signal *signal, int result);
 
 /* private internal function */
-extern int _handle_obj_poll_event(struct k_poll_event **obj_poll_event,
-				  u32_t state);
+extern int _handle_obj_poll_events(sys_dlist_t *events, u32_t state);
 
 /**
 * @} end defgroup poll_apis
@@ -89,30 +89,46 @@ static inline int is_condition_met(struct k_poll_event *event, u32_t *state)
 	return 0;
 }
 
+static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
+			     struct _poller *poller)
+{
+	struct k_poll_event *pending;
+
+	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
+	if (!pending || _is_t1_higher_prio_than_t2(pending->poller->thread,
+						   poller->thread)) {
+		sys_dlist_append(events, &event->_node);
+		return;
+	}
+
+	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
+		if (_is_t1_higher_prio_than_t2(poller->thread,
+					       pending->poller->thread)) {
+			sys_dlist_insert_before(events, &pending->_node,
+						&event->_node);
+			return;
+		}
+	}
+
+	sys_dlist_append(events, &event->_node);
+}
+
 /* must be called with interrupts locked */
-static inline int register_event(struct k_poll_event *event)
+static inline int register_event(struct k_poll_event *event,
+				 struct _poller *poller)
 {
 	switch (event->type) {
 	case K_POLL_TYPE_SEM_AVAILABLE:
 		__ASSERT(event->sem, "invalid semaphore\n");
-		if (event->sem->poll_event) {
-			return -EADDRINUSE;
-		}
-		event->sem->poll_event = event;
+		add_event(&event->sem->poll_events, event, poller);
 		break;
 	case K_POLL_TYPE_DATA_AVAILABLE:
 		__ASSERT(event->queue, "invalid queue\n");
-		if (event->queue->poll_event) {
-			return -EADDRINUSE;
-		}
-		event->queue->poll_event = event;
+		add_event(&event->queue->poll_events, event, poller);
 		break;
 	case K_POLL_TYPE_SIGNAL:
-		__ASSERT(event->queue, "invalid poll signal\n");
-		if (event->signal->poll_event) {
-			return -EADDRINUSE;
-		}
-		event->signal->poll_event = event;
+		__ASSERT(event->signal, "invalid poll signal\n");
+		add_event(&event->signal->poll_events, event, poller);
 		break;
 	case K_POLL_TYPE_IGNORE:
 		/* nothing to do */
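add_event() keeps each object's poller list ordered by thread priority (with a fast-path append when the current tail already outranks the newcomer), so when the object becomes ready the highest-priority waiter is served first. A usage-level sketch, assuming two threads run this at different priorities:

/* Sketch: several threads may now run this concurrently against the
 * same semaphore. Registration order on shared_sem.poll_events is by
 * thread priority, and each k_sem_give() wakes at most one poller.
 */
static K_SEM_DEFINE(shared_sem, 0, 1);

static void poller_thread(void *p1, void *p2, void *p3)
{
	struct k_poll_event event;

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &shared_sem);

	(void)k_poll(&event, 1, K_FOREVER);
	k_sem_take(&shared_sem, K_NO_WAIT);
}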
@@ -122,6 +138,8 @@ static inline int register_event(struct k_poll_event *event)
 		break;
 	}
 
+	event->poller = poller;
+
 	return 0;
 }
@@ -133,15 +151,15 @@ static inline void clear_event_registration(struct k_poll_event *event)
 	switch (event->type) {
 	case K_POLL_TYPE_SEM_AVAILABLE:
 		__ASSERT(event->sem, "invalid semaphore\n");
-		event->sem->poll_event = NULL;
+		sys_dlist_remove(&event->_node);
 		break;
 	case K_POLL_TYPE_DATA_AVAILABLE:
 		__ASSERT(event->queue, "invalid queue\n");
-		event->queue->poll_event = NULL;
+		sys_dlist_remove(&event->_node);
 		break;
 	case K_POLL_TYPE_SIGNAL:
 		__ASSERT(event->signal, "invalid poll signal\n");
-		event->signal->poll_event = NULL;
+		sys_dlist_remove(&event->_node);
 		break;
 	case K_POLL_TYPE_IGNORE:
 		/* nothing to do */
@@ -176,18 +194,13 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	__ASSERT(events, "NULL events\n");
 	__ASSERT(num_events > 0, "zero events\n");
 
-	int last_registered = -1, in_use = 0, rc;
+	int last_registered = -1, rc;
 	unsigned int key;
 
 	key = irq_lock();
 	set_polling_state(_current);
 	irq_unlock(key);
 
-	/*
-	 * We can get by with one poller structure for all events for now:
-	 * if/when we allow multiple threads to poll on the same object, we
-	 * will need one per poll event associated with an object.
-	 */
 	struct _poller poller = { .thread = _current };
 
 	/* find events whose condition is already fulfilled */
@@ -198,18 +211,10 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 		if (is_condition_met(&events[ii], &state)) {
 			set_event_ready(&events[ii], state);
 			clear_polling_state(_current);
-		} else if (timeout != K_NO_WAIT && is_polling() && !in_use) {
-			rc = register_event(&events[ii]);
+		} else if (timeout != K_NO_WAIT && is_polling()) {
+			rc = register_event(&events[ii], &poller);
 			if (rc == 0) {
-				events[ii].poller = &poller;
 				++last_registered;
-			} else if (rc == -EADDRINUSE) {
-				/* setting in_use also prevents any further
-				 * registrations by the current thread
-				 */
-				in_use = -EADDRINUSE;
-				events[ii].state = K_POLL_STATE_EADDRINUSE;
-				clear_polling_state(_current);
 			} else {
 				__ASSERT(0, "unexpected return code\n");
 			}
@@ -224,16 +229,12 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	 * condition is met, either when looping through the events here or
 	 * because one of the events registered has had its state changed, or
 	 * that one of the objects we wanted to poll on already had a thread
-	 * polling on it. We can remove all registrations and return either
-	 * success or a -EADDRINUSE error. In the case of a -EADDRINUSE error,
-	 * the events that were available are still flagged as such, and it is
-	 * valid for the caller to consider them available, as if this function
-	 * returned success.
+	 * polling on it.
 	 */
 	if (!is_polling()) {
 		clear_event_registrations(events, last_registered, key);
 		irq_unlock(key);
-		return in_use;
+		return 0;
 	}
 
 	clear_polling_state(_current);
@@ -306,20 +307,23 @@ ready_event:
 }
 
 /* returns 1 if a reschedule must take place, 0 otherwise */
-/* *obj_poll_event is guaranteed to not be NULL */
-int _handle_obj_poll_event(struct k_poll_event **obj_poll_event, u32_t state)
+int _handle_obj_poll_events(sys_dlist_t *events, u32_t state)
 {
-	struct k_poll_event *poll_event = *obj_poll_event;
+	struct k_poll_event *poll_event;
 	int must_reschedule;
 
-	*obj_poll_event = NULL;
+	poll_event = (struct k_poll_event *)sys_dlist_get(events);
+	if (!poll_event) {
+		return 0;
+	}
 
 	(void)_signal_poll_event(poll_event, state, &must_reschedule);
 	return must_reschedule;
 }
 
 void k_poll_signal_init(struct k_poll_signal *signal)
 {
-	signal->poll_event = NULL;
+	sys_dlist_init(&signal->poll_events);
 	signal->signaled = 0;
 	/* signal->result is left unitialized */
 }
@@ -327,17 +331,19 @@ void k_poll_signal_init(struct k_poll_signal *signal)
 int k_poll_signal(struct k_poll_signal *signal, int result)
 {
 	unsigned int key = irq_lock();
+	struct k_poll_event *poll_event;
 	int must_reschedule;
 
 	signal->result = result;
 	signal->signaled = 1;
 
-	if (!signal->poll_event) {
+	poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
+	if (!poll_event) {
 		irq_unlock(key);
 		return 0;
 	}
 
-	int rc = _signal_poll_event(signal->poll_event, K_POLL_STATE_SIGNALED,
+	int rc = _signal_poll_event(poll_event, K_POLL_STATE_SIGNALED,
 				    &must_reschedule);
 
 	if (must_reschedule) {
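Behavior worth noting here: k_poll_signal() still wakes at most one waiter per call — sys_dlist_get() pops only the head of the priority-ordered list — while signaled and result stay latched, so a poller that registers afterwards sees the event immediately. The raising side, as a sketch (hypothetical names):

/* Sketch: producer side of a signal. */
static struct k_poll_signal ready_signal =
	K_POLL_SIGNAL_INITIALIZER(ready_signal);

void notify_one_waiter(int result)
{
	/* pops and wakes the highest-priority registered poller, if any;
	 * result stays latched in ready_signal.result for the consumer
	 */
	(void)k_poll_signal(&ready_signal, result);
}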
@@ -51,8 +51,9 @@ void k_queue_init(struct k_queue *queue)
 {
 	sys_slist_init(&queue->data_q);
 	sys_dlist_init(&queue->wait_q);
-
-	_INIT_OBJ_POLL_EVENT(queue);
+#if defined(CONFIG_POLL)
+	sys_dlist_init(&queue->poll_events);
+#endif
 
 	SYS_TRACING_OBJ_INIT(k_queue, queue);
 }
@@ -67,13 +68,12 @@ static void prepare_thread_to_run(struct k_thread *thread, void *data)
 #endif /* CONFIG_POLL */
 
 /* returns 1 if a reschedule must take place, 0 otherwise */
-static inline int handle_poll_event(struct k_queue *queue)
+static inline int handle_poll_events(struct k_queue *queue)
 {
 #ifdef CONFIG_POLL
 	u32_t state = K_POLL_STATE_DATA_AVAILABLE;
 
-	return queue->poll_event ?
-	       _handle_obj_poll_event(&queue->poll_event, state) : 0;
+	return _handle_obj_poll_events(&queue->poll_events, state);
 #else
 	return 0;
 #endif
@@ -95,7 +95,7 @@ void k_queue_cancel_wait(struct k_queue *queue)
 		}
 	}
 #else
-	if (handle_poll_event(queue)) {
+	if (handle_poll_events(queue)) {
 		(void)_Swap(key);
 		return;
 	}
@@ -126,7 +126,7 @@ void k_queue_insert(struct k_queue *queue, void *prev, void *data)
 	sys_slist_insert(&queue->data_q, prev, data);
 
 #if defined(CONFIG_POLL)
-	if (handle_poll_event(queue)) {
+	if (handle_poll_events(queue)) {
 		(void)_Swap(key);
 		return;
 	}
@@ -171,7 +171,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 	}
 #else
 	sys_slist_append_list(&queue->data_q, head, tail);
-	if (handle_poll_event(queue)) {
+	if (handle_poll_events(queue)) {
 		(void)_Swap(key);
 		return;
 	}
@@ -206,11 +206,10 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
 	event.state = K_POLL_STATE_NOT_READY;
 
 	err = k_poll(&event, 1, timeout);
-	if (err == -EAGAIN) {
+	if (err) {
 		return NULL;
 	}
 
-	__ASSERT_NO_MSG(err == 0);
 	__ASSERT_NO_MSG(event.state == K_POLL_STATE_FIFO_DATA_AVAILABLE);
 
 	return sys_slist_get(&queue->data_q);
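Since k_poll() can no longer fail with -EADDRINUSE, any nonzero return here is a timeout and maps to NULL. This is the piece that makes k_queue_get() (and the FIFO/LIFO wrappers built on it) safe for several threads to block on at once, which is what the commit message is after. Consumer-side sketch (hypothetical helper):

/* Sketch: with CONFIG_POLL, this path goes through k_queue_poll()
 * above, and any number of threads may call it on the same queue.
 */
void *try_get(struct k_queue *queue)
{
	return k_queue_get(queue, K_MSEC(500));	/* NULL on timeout */
}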
kernel/sem.c
@@ -60,21 +60,21 @@ void k_sem_init(struct k_sem *sem, unsigned int initial_count,
 	sem->count = initial_count;
 	sem->limit = limit;
 	sys_dlist_init(&sem->wait_q);
-
-	_INIT_OBJ_POLL_EVENT(sem);
+#if defined(CONFIG_POLL)
+	sys_dlist_init(&sem->poll_events);
+#endif
 
 	SYS_TRACING_OBJ_INIT(k_sem, sem);
 }
 
 /* returns 1 if a reschedule must take place, 0 otherwise */
-static inline int handle_poll_event(struct k_sem *sem)
+static inline int handle_poll_events(struct k_sem *sem)
 {
 #ifdef CONFIG_POLL
 	u32_t state = K_POLL_STATE_SEM_AVAILABLE;
 
-	return sem->poll_event ?
-	       _handle_obj_poll_event(&sem->poll_event, state) : 0;
+	return _handle_obj_poll_events(&sem->poll_events, state);
 #else
 	return 0;
 #endif
@@ -92,7 +92,7 @@ static int do_sem_give(struct k_sem *sem)
 
 	if (!thread) {
 		increment_count_up_to_limit(sem);
-		return handle_poll_event(sem);
+		return handle_poll_events(sem);
 	}
 	(void)_abort_thread_timeout(thread);
 	_ready_thread(thread);
@@ -59,7 +59,8 @@ static u32_t rx_ts;
 #endif
 
 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
-static struct k_poll_signal hbuf_signal = K_POLL_SIGNAL_INITIALIZER();
+static struct k_poll_signal hbuf_signal =
+		K_POLL_SIGNAL_INITIALIZER(hbuf_signal);
 static sys_slist_t hbuf_pend;
 static s32_t hbuf_count;
 #endif
@@ -1266,7 +1266,8 @@ static bool send_buf(struct bt_conn *conn, struct net_buf *buf)
 	return send_frag(conn, buf, BT_ACL_CONT, false);
 }
 
-static struct k_poll_signal conn_change = K_POLL_SIGNAL_INITIALIZER();
+static struct k_poll_signal conn_change =
+		K_POLL_SIGNAL_INITIALIZER(conn_change);
 
 static void conn_cleanup(struct bt_conn *conn)
 {
@@ -324,7 +324,7 @@ static int spi_rx_every_4(struct spi_config *spi_conf)
 	return 0;
 }
 
-static struct k_poll_signal async_sig = K_POLL_SIGNAL_INITIALIZER();
+static struct k_poll_signal async_sig = K_POLL_SIGNAL_INITIALIZER(async_sig);
 static struct k_poll_event async_evt =
 	K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
 				 K_POLL_MODE_NOTIFY_ONLY,
@@ -14,7 +14,7 @@
 #include <ztest.h>
 extern void test_poll_no_wait(void);
 extern void test_poll_wait(void);
-extern void test_poll_eaddrinuse(void);
+extern void test_poll_multi(void);
 
 /*test case main entry*/
 void test_main(void)
|
|||
ztest_test_suite(test_poll_api
|
||||
, ztest_unit_test(test_poll_no_wait)
|
||||
, ztest_unit_test(test_poll_wait)
|
||||
, ztest_unit_test(test_poll_eaddrinuse)
|
||||
, ztest_unit_test(test_poll_multi)
|
||||
);
|
||||
ztest_run_test_suite(test_poll_api);
|
||||
}
|
||||
|
|
|
@@ -91,7 +91,8 @@ void test_poll_no_wait(void)
 
 static K_SEM_DEFINE(wait_sem, 0, 1);
 static K_FIFO_DEFINE(wait_fifo);
-static struct k_poll_signal wait_signal = K_POLL_SIGNAL_INITIALIZER();
+static struct k_poll_signal wait_signal =
+	K_POLL_SIGNAL_INITIALIZER(wait_signal);
 
 struct fifo_msg wait_msg = { NULL, FIFO_MSG_VALUE };
@@ -298,30 +299,30 @@ void test_poll_wait(void)
 		wait_signal.signaled = 0;
 }
 
-/* verify -EADDRINUSE return value when object has already a poller */
-static K_SEM_DEFINE(eaddrinuse_sem, 0, 1);
-static K_SEM_DEFINE(eaddrinuse_reply, 0, 1);
+/* verify multiple pollers */
+static K_SEM_DEFINE(multi_sem, 0, 1);
+static K_SEM_DEFINE(multi_reply, 0, 1);
 
-static struct k_thread eaddrinuse_hogger_thread;
-static K_THREAD_STACK_DEFINE(eaddrinuse_hogger_stack, KB(1));
+static struct k_thread multi_thread;
+static K_THREAD_STACK_DEFINE(multi_stack, KB(1));
 
-static void eaddrinuse_hogger(void *p1, void *p2, void *p3)
+static void multi(void *p1, void *p2, void *p3)
 {
 	(void)p1; (void)p2; (void)p3;
 
 	struct k_poll_event event;
 
 	k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
-			  K_POLL_MODE_NOTIFY_ONLY, &eaddrinuse_sem);
+			  K_POLL_MODE_NOTIFY_ONLY, &multi_sem);
 
 	(void)k_poll(&event, 1, K_FOREVER);
-	k_sem_take(&eaddrinuse_sem, K_FOREVER);
-	k_sem_give(&eaddrinuse_reply);
+	k_sem_take(&multi_sem, K_FOREVER);
+	k_sem_give(&multi_reply);
 }
 
-static K_SEM_DEFINE(eaddrinuse_ready_sem, 1, 1);
+static K_SEM_DEFINE(multi_ready_sem, 1, 1);
 
-void test_poll_eaddrinuse(void)
+void test_poll_multi(void)
 {
 	int old_prio = k_thread_priority_get(k_current_get());
 	const int main_low_prio = 10;
@@ -330,29 +331,29 @@ void test_poll_eaddrinuse(void)
 	struct k_poll_event events[] = {
 		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 					 K_POLL_MODE_NOTIFY_ONLY,
-					 &eaddrinuse_sem),
+					 &multi_sem),
 		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 					 K_POLL_MODE_NOTIFY_ONLY,
-					 &eaddrinuse_ready_sem),
+					 &multi_ready_sem),
 	};
 
 	k_thread_priority_set(k_current_get(), main_low_prio);
 
-	k_thread_create(&eaddrinuse_hogger_thread, eaddrinuse_hogger_stack,
-			K_THREAD_STACK_SIZEOF(eaddrinuse_hogger_stack),
-			eaddrinuse_hogger, 0, 0, 0, main_low_prio - 1, 0, 0);
+	k_thread_create(&multi_thread, multi_stack,
+			K_THREAD_STACK_SIZEOF(multi_stack),
+			multi, 0, 0, 0, main_low_prio - 1, 0, 0);
 
 	rc = k_poll(events, ARRAY_SIZE(events), K_SECONDS(1));
 
 	k_thread_priority_set(k_current_get(), old_prio);
 
-	zassert_equal(rc, -EADDRINUSE, "");
-	zassert_equal(events[0].state, K_POLL_STATE_EADDRINUSE, "");
+	zassert_equal(rc, 0, "");
+	zassert_equal(events[0].state, K_POLL_STATE_NOT_READY, "");
 	zassert_equal(events[1].state, K_POLL_STATE_SEM_AVAILABLE, "");
 
-	/* free hogger, ensuring it awoken from k_poll() and got the sem */
-	k_sem_give(&eaddrinuse_sem);
-	rc = k_sem_take(&eaddrinuse_reply, K_SECONDS(1));
+	/* free multi, ensuring it awoken from k_poll() and got the sem */
+	k_sem_give(&multi_sem);
+	rc = k_sem_take(&multi_reply, K_SECONDS(1));
 
 	zassert_equal(rc, 0, "");
 }