poll: Enable multiple threads to use k_poll on the same object

This is necessary for k_queue_get to work properly, since queues are used
by buffer pools that may have multiple threads asking for buffers at the
same time.

Jira: ZEP-2553

Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
Author:    Luiz Augusto von Dentz
Date:      2017-08-21 10:49:29 +03:00
Committed: Anas Nashif
Commit:    7d01c5ecb7

9 changed files with 113 additions and 115 deletions
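
For context, a minimal sketch (not part of this commit) of the pattern the commit message describes: two threads blocking on the same FIFO-backed buffer pool. With CONFIG_POLL enabled, k_queue_get() waits via k_poll(), so both threads end up polling the same object; before this change one of them would have hit -EADDRINUSE. All names (buf_fifo, consumer, stack size, priority) are illustrative.

/* Illustrative only -- not from this commit. Assumes the 2017-era Zephyr
 * k_fifo API (K_FIFO_DEFINE, k_fifo_get/k_fifo_put, s32_t timeouts).
 */
#include <zephyr.h>

#define CONSUMER_STACK_SIZE 1024
#define CONSUMER_PRIO 5

static K_FIFO_DEFINE(buf_fifo);

static void consumer(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	for (;;) {
		/* With CONFIG_POLL=y this blocks inside k_poll() on the
		 * queue backing buf_fifo; both consumers may wait here
		 * at the same time.
		 */
		void *buf = k_fifo_get(&buf_fifo, K_FOREVER);

		/* ... use buf, then return it to the pool ... */
		k_fifo_put(&buf_fifo, buf);
	}
}

K_THREAD_DEFINE(consumer_a, CONSUMER_STACK_SIZE, consumer,
		NULL, NULL, NULL, CONSUMER_PRIO, 0, K_NO_WAIT);
K_THREAD_DEFINE(consumer_b, CONSUMER_STACK_SIZE, consumer,
		NULL, NULL, NULL, CONSUMER_PRIO, 0, K_NO_WAIT);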


@@ -98,11 +98,11 @@ typedef sys_dlist_t _wait_q_t;
 #endif
 
 #ifdef CONFIG_POLL
-#define _POLL_EVENT_OBJ_INIT \
-	.poll_event = NULL,
-#define _POLL_EVENT struct k_poll_event *poll_event
+#define _POLL_EVENT_OBJ_INIT(obj) \
+	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
+#define _POLL_EVENT sys_dlist_t poll_events
 #else
-#define _POLL_EVENT_OBJ_INIT
+#define _POLL_EVENT_OBJ_INIT(obj)
 #define _POLL_EVENT
 #endif
@@ -1327,7 +1327,7 @@ struct k_queue {
 	{ \
 	.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
 	.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
-	_POLL_EVENT_OBJ_INIT \
+	_POLL_EVENT_OBJ_INIT(obj) \
 	_OBJECT_TRACING_INIT \
 	}
@@ -2360,7 +2360,7 @@ struct k_sem {
 	.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
 	.count = initial_count, \
 	.limit = count_limit, \
-	_POLL_EVENT_OBJ_INIT \
+	_POLL_EVENT_OBJ_INIT(obj) \
 	_OBJECT_TRACING_INIT \
 	}
@@ -3544,9 +3544,6 @@ enum _poll_states_bits {
 	/* default state when creating event */
 	_POLL_STATE_NOT_READY,
 
-	/* there was another poller already on the object */
-	_POLL_STATE_EADDRINUSE,
-
 	/* signaled by k_poll_signal() */
 	_POLL_STATE_SIGNALED,
@@ -3601,7 +3598,6 @@ enum k_poll_modes {
 /* public - values for k_poll_event.state bitfield */
 #define K_POLL_STATE_NOT_READY 0
-#define K_POLL_STATE_EADDRINUSE _POLL_STATE_BIT(_POLL_STATE_EADDRINUSE)
 #define K_POLL_STATE_SIGNALED _POLL_STATE_BIT(_POLL_STATE_SIGNALED)
 #define K_POLL_STATE_SEM_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
 #define K_POLL_STATE_DATA_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
@@ -3610,7 +3606,7 @@ enum k_poll_modes {
 /* public - poll signal object */
 struct k_poll_signal {
 	/* PRIVATE - DO NOT TOUCH */
-	struct k_poll_event *poll_event;
+	sys_dlist_t poll_events;
 
 	/*
 	 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
@@ -3622,14 +3618,17 @@ struct k_poll_signal {
 	int result;
 };
 
-#define K_POLL_SIGNAL_INITIALIZER() \
+#define K_POLL_SIGNAL_INITIALIZER(obj) \
 	{ \
-	.poll_event = NULL, \
+	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
 	.signaled = 0, \
 	.result = 0, \
 	}
 
 struct k_poll_event {
+	/* PRIVATE - DO NOT TOUCH */
+	sys_dnode_t _node;
+
 	/* PRIVATE - DO NOT TOUCH */
 	struct _poller *poller;
@@ -3716,16 +3715,9 @@ extern void k_poll_event_init(struct k_poll_event *event, u32_t type,
  * reason, the k_poll() call is more effective when the objects being polled
  * only have one thread, the polling thread, trying to acquire them.
  *
- * Only one thread can be polling for a particular object at a given time. If
- * another thread tries to poll on it, the k_poll() call returns -EADDRINUSE
- * and returns as soon as it has finished handling the other events. This means
- * that k_poll() can return -EADDRINUSE and have the state value of some events
- * be non-K_POLL_STATE_NOT_READY. When this condition occurs, the @a timeout
- * parameter is ignored.
- *
- * When k_poll() returns 0 or -EADDRINUSE, the caller should loop on all the
- * events that were passed to k_poll() and check the state field for the values
- * that were expected and take the associated actions.
+ * When k_poll() returns 0, the caller should loop on all the events that were
+ * passed to k_poll() and check the state field for the values that were
+ * expected and take the associated actions.
 *
 * Before being reused for another call to k_poll(), the user has to reset the
 * state field to K_POLL_STATE_NOT_READY.
@@ -3736,7 +3728,6 @@ extern void k_poll_event_init(struct k_poll_event *event, u32_t type,
  * or one of the special values K_NO_WAIT and K_FOREVER.
  *
  * @retval 0 One or more events are ready.
- * @retval -EADDRINUSE One or more objects already had a poller.
 * @retval -EAGAIN Waiting period timed out.
 */
@@ -3777,8 +3768,7 @@ extern void k_poll_signal_init(struct k_poll_signal *signal);
 extern int k_poll_signal(struct k_poll_signal *signal, int result);
 
 /* private internal function */
-extern int _handle_obj_poll_event(struct k_poll_event **obj_poll_event,
-				  u32_t state);
+extern int _handle_obj_poll_events(sys_dlist_t *events, u32_t state);
 
 /**
  * @} end defgroup poll_apis
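
A minimal caller-side sketch (not from the diff) of the contract described by the updated k_poll() documentation above: on a zero return, loop over every event, act on the ready ones, and reset each state to K_POLL_STATE_NOT_READY before reuse. Object and function names are illustrative; it assumes this era's API (s32_t timeouts, K_POLL_EVENT_INITIALIZER, K_POLL_SIGNAL_INITIALIZER(obj)).

/* Illustrative only -- not part of this commit. */
#include <zephyr.h>

static K_SEM_DEFINE(data_sem, 0, 1);
static struct k_poll_signal ctrl_signal =
	K_POLL_SIGNAL_INITIALIZER(ctrl_signal);

void wait_for_work(void)
{
	struct k_poll_event events[] = {
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
					 K_POLL_MODE_NOTIFY_ONLY,
					 &data_sem),
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
					 K_POLL_MODE_NOTIFY_ONLY,
					 &ctrl_signal),
	};
	int rc, i;

	rc = k_poll(events, ARRAY_SIZE(events), K_SECONDS(1));
	if (rc != 0) {
		return;	/* -EAGAIN on timeout; -EADDRINUSE no longer exists */
	}

	/* k_poll() returned 0: check every event's state field. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (events[i].state == K_POLL_STATE_SEM_AVAILABLE) {
			(void)k_sem_take(events[i].sem, K_NO_WAIT);
		} else if (events[i].state == K_POLL_STATE_SIGNALED) {
			events[i].signal->signaled = 0;
		}

		/* Reset before reusing the event in another k_poll(). */
		events[i].state = K_POLL_STATE_NOT_READY;
	}
}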


@@ -89,30 +89,46 @@ static inline int is_condition_met(struct k_poll_event *event, u32_t *state)
 	return 0;
 }
 
+static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
+			     struct _poller *poller)
+{
+	struct k_poll_event *pending;
+
+	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
+	if (!pending || _is_t1_higher_prio_than_t2(pending->poller->thread,
+						   poller->thread)) {
+		sys_dlist_append(events, &event->_node);
+		return;
+	}
+
+	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
+		if (_is_t1_higher_prio_than_t2(poller->thread,
+					       pending->poller->thread)) {
+			sys_dlist_insert_before(events, &pending->_node,
+						&event->_node);
+			return;
+		}
+	}
+
+	sys_dlist_append(events, &event->_node);
+}
+
 /* must be called with interrupts locked */
-static inline int register_event(struct k_poll_event *event)
+static inline int register_event(struct k_poll_event *event,
+				 struct _poller *poller)
 {
 	switch (event->type) {
 	case K_POLL_TYPE_SEM_AVAILABLE:
 		__ASSERT(event->sem, "invalid semaphore\n");
-		if (event->sem->poll_event) {
-			return -EADDRINUSE;
-		}
-		event->sem->poll_event = event;
+		add_event(&event->sem->poll_events, event, poller);
 		break;
 	case K_POLL_TYPE_DATA_AVAILABLE:
 		__ASSERT(event->queue, "invalid queue\n");
-		if (event->queue->poll_event) {
-			return -EADDRINUSE;
-		}
-		event->queue->poll_event = event;
+		add_event(&event->queue->poll_events, event, poller);
 		break;
 	case K_POLL_TYPE_SIGNAL:
-		__ASSERT(event->queue, "invalid poll signal\n");
-		if (event->signal->poll_event) {
-			return -EADDRINUSE;
-		}
-		event->signal->poll_event = event;
+		__ASSERT(event->signal, "invalid poll signal\n");
+		add_event(&event->signal->poll_events, event, poller);
 		break;
 	case K_POLL_TYPE_IGNORE:
 		/* nothing to do */
@@ -122,6 +138,8 @@ static inline int register_event(struct k_poll_event *event)
 		break;
 	}
 
+	event->poller = poller;
+
 	return 0;
 }
@@ -133,15 +151,15 @@ static inline void clear_event_registration(struct k_poll_event *event)
 	switch (event->type) {
 	case K_POLL_TYPE_SEM_AVAILABLE:
 		__ASSERT(event->sem, "invalid semaphore\n");
-		event->sem->poll_event = NULL;
+		sys_dlist_remove(&event->_node);
 		break;
 	case K_POLL_TYPE_DATA_AVAILABLE:
 		__ASSERT(event->queue, "invalid queue\n");
-		event->queue->poll_event = NULL;
+		sys_dlist_remove(&event->_node);
 		break;
 	case K_POLL_TYPE_SIGNAL:
 		__ASSERT(event->signal, "invalid poll signal\n");
-		event->signal->poll_event = NULL;
+		sys_dlist_remove(&event->_node);
 		break;
 	case K_POLL_TYPE_IGNORE:
 		/* nothing to do */
@@ -176,18 +194,13 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	__ASSERT(events, "NULL events\n");
 	__ASSERT(num_events > 0, "zero events\n");
 
-	int last_registered = -1, in_use = 0, rc;
+	int last_registered = -1, rc;
 	unsigned int key;
 
 	key = irq_lock();
 	set_polling_state(_current);
 	irq_unlock(key);
 
-	/*
-	 * We can get by with one poller structure for all events for now:
-	 * if/when we allow multiple threads to poll on the same object, we
-	 * will need one per poll event associated with an object.
-	 */
 	struct _poller poller = { .thread = _current };
 
 	/* find events whose condition is already fulfilled */
@@ -198,18 +211,10 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 		if (is_condition_met(&events[ii], &state)) {
 			set_event_ready(&events[ii], state);
 			clear_polling_state(_current);
-		} else if (timeout != K_NO_WAIT && is_polling() && !in_use) {
-			rc = register_event(&events[ii]);
+		} else if (timeout != K_NO_WAIT && is_polling()) {
+			rc = register_event(&events[ii], &poller);
 			if (rc == 0) {
-				events[ii].poller = &poller;
 				++last_registered;
-			} else if (rc == -EADDRINUSE) {
-				/* setting in_use also prevents any further
-				 * registrations by the current thread
-				 */
-				in_use = -EADDRINUSE;
-				events[ii].state = K_POLL_STATE_EADDRINUSE;
-				clear_polling_state(_current);
 			} else {
 				__ASSERT(0, "unexpected return code\n");
 			}
@@ -224,16 +229,12 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	 * condition is met, either when looping through the events here or
 	 * because one of the events registered has had its state changed, or
 	 * that one of the objects we wanted to poll on already had a thread
-	 * polling on it. We can remove all registrations and return either
-	 * success or a -EADDRINUSE error. In the case of a -EADDRINUSE error,
-	 * the events that were available are still flagged as such, and it is
-	 * valid for the caller to consider them available, as if this function
-	 * returned success.
+	 * polling on it.
 	 */
 	if (!is_polling()) {
 		clear_event_registrations(events, last_registered, key);
 		irq_unlock(key);
-		return in_use;
+		return 0;
 	}
 
 	clear_polling_state(_current);
@@ -306,20 +307,23 @@ ready_event:
 }
 
 /* returns 1 if a reschedule must take place, 0 otherwise */
-/* *obj_poll_event is guaranteed to not be NULL */
-int _handle_obj_poll_event(struct k_poll_event **obj_poll_event, u32_t state)
+int _handle_obj_poll_events(sys_dlist_t *events, u32_t state)
 {
-	struct k_poll_event *poll_event = *obj_poll_event;
+	struct k_poll_event *poll_event;
 	int must_reschedule;
 
-	*obj_poll_event = NULL;
+	poll_event = (struct k_poll_event *)sys_dlist_get(events);
+	if (!poll_event) {
+		return 0;
+	}
+
 	(void)_signal_poll_event(poll_event, state, &must_reschedule);
 	return must_reschedule;
 }
 
 void k_poll_signal_init(struct k_poll_signal *signal)
 {
-	signal->poll_event = NULL;
+	sys_dlist_init(&signal->poll_events);
 	signal->signaled = 0;
 	/* signal->result is left unitialized */
 }
@@ -327,17 +331,19 @@ void k_poll_signal_init(struct k_poll_signal *signal)
 int k_poll_signal(struct k_poll_signal *signal, int result)
 {
 	unsigned int key = irq_lock();
+	struct k_poll_event *poll_event;
 	int must_reschedule;
 
 	signal->result = result;
 	signal->signaled = 1;
 
-	if (!signal->poll_event) {
+	poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
+	if (!poll_event) {
 		irq_unlock(key);
 		return 0;
 	}
 
-	int rc = _signal_poll_event(signal->poll_event, K_POLL_STATE_SIGNALED,
+	int rc = _signal_poll_event(poll_event, K_POLL_STATE_SIGNALED,
 				    &must_reschedule);
 
 	if (must_reschedule) {


@@ -51,8 +51,9 @@ void k_queue_init(struct k_queue *queue)
 {
 	sys_slist_init(&queue->data_q);
 	sys_dlist_init(&queue->wait_q);
-	_INIT_OBJ_POLL_EVENT(queue);
+#if defined(CONFIG_POLL)
+	sys_dlist_init(&queue->poll_events);
+#endif
 
 	SYS_TRACING_OBJ_INIT(k_queue, queue);
 }
@@ -67,13 +68,12 @@ static void prepare_thread_to_run(struct k_thread *thread, void *data)
 #endif /* CONFIG_POLL */
 
 /* returns 1 if a reschedule must take place, 0 otherwise */
-static inline int handle_poll_event(struct k_queue *queue)
+static inline int handle_poll_events(struct k_queue *queue)
 {
 #ifdef CONFIG_POLL
 	u32_t state = K_POLL_STATE_DATA_AVAILABLE;
 
-	return queue->poll_event ?
-	       _handle_obj_poll_event(&queue->poll_event, state) : 0;
+	return _handle_obj_poll_events(&queue->poll_events, state);
 #else
 	return 0;
 #endif
@@ -95,7 +95,7 @@ void k_queue_cancel_wait(struct k_queue *queue)
 		}
 	}
 #else
-	if (handle_poll_event(queue)) {
+	if (handle_poll_events(queue)) {
 		(void)_Swap(key);
 		return;
 	}
@@ -126,7 +126,7 @@ void k_queue_insert(struct k_queue *queue, void *prev, void *data)
 	sys_slist_insert(&queue->data_q, prev, data);
 
 #if defined(CONFIG_POLL)
-	if (handle_poll_event(queue)) {
+	if (handle_poll_events(queue)) {
 		(void)_Swap(key);
 		return;
 	}
@@ -171,7 +171,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 	}
 #else
 	sys_slist_append_list(&queue->data_q, head, tail);
-	if (handle_poll_event(queue)) {
+	if (handle_poll_events(queue)) {
 		(void)_Swap(key);
 		return;
 	}
@@ -206,11 +206,10 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
 	event.state = K_POLL_STATE_NOT_READY;
 
 	err = k_poll(&event, 1, timeout);
-	if (err == -EAGAIN) {
+	if (err) {
 		return NULL;
 	}
 
-	__ASSERT_NO_MSG(err == 0);
 	__ASSERT_NO_MSG(event.state == K_POLL_STATE_FIFO_DATA_AVAILABLE);
 
 	return sys_slist_get(&queue->data_q);


@@ -60,21 +60,21 @@ void k_sem_init(struct k_sem *sem, unsigned int initial_count,
 	sem->count = initial_count;
 	sem->limit = limit;
 	sys_dlist_init(&sem->wait_q);
-	_INIT_OBJ_POLL_EVENT(sem);
+#if defined(CONFIG_POLL)
+	sys_dlist_init(&sem->poll_events);
+#endif
 
 	SYS_TRACING_OBJ_INIT(k_sem, sem);
 }
 
 /* returns 1 if a reschedule must take place, 0 otherwise */
-static inline int handle_poll_event(struct k_sem *sem)
+static inline int handle_poll_events(struct k_sem *sem)
 {
 #ifdef CONFIG_POLL
 	u32_t state = K_POLL_STATE_SEM_AVAILABLE;
 
-	return sem->poll_event ?
-	       _handle_obj_poll_event(&sem->poll_event, state) : 0;
+	return _handle_obj_poll_events(&sem->poll_events, state);
 #else
 	return 0;
 #endif
@@ -92,7 +92,7 @@ static int do_sem_give(struct k_sem *sem)
 	if (!thread) {
 		increment_count_up_to_limit(sem);
-		return handle_poll_event(sem);
+		return handle_poll_events(sem);
 	}
 
 	(void)_abort_thread_timeout(thread);
 	_ready_thread(thread);


@@ -59,7 +59,8 @@ static u32_t rx_ts;
 #endif
 
 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
-static struct k_poll_signal hbuf_signal = K_POLL_SIGNAL_INITIALIZER();
+static struct k_poll_signal hbuf_signal =
+		K_POLL_SIGNAL_INITIALIZER(hbuf_signal);
 static sys_slist_t hbuf_pend;
 static s32_t hbuf_count;
 #endif


@@ -1266,7 +1266,8 @@ static bool send_buf(struct bt_conn *conn, struct net_buf *buf)
 	return send_frag(conn, buf, BT_ACL_CONT, false);
 }
 
-static struct k_poll_signal conn_change = K_POLL_SIGNAL_INITIALIZER();
+static struct k_poll_signal conn_change =
+		K_POLL_SIGNAL_INITIALIZER(conn_change);
 
 static void conn_cleanup(struct bt_conn *conn)
 {


@@ -324,7 +324,7 @@ static int spi_rx_every_4(struct spi_config *spi_conf)
 	return 0;
 }
 
-static struct k_poll_signal async_sig = K_POLL_SIGNAL_INITIALIZER();
+static struct k_poll_signal async_sig = K_POLL_SIGNAL_INITIALIZER(async_sig);
 static struct k_poll_event async_evt =
 	K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
 				 K_POLL_MODE_NOTIFY_ONLY,


@@ -14,7 +14,7 @@
 #include <ztest.h>
 
 extern void test_poll_no_wait(void);
 extern void test_poll_wait(void);
-extern void test_poll_eaddrinuse(void);
+extern void test_poll_multi(void);
 
 /*test case main entry*/
 void test_main(void)
@@ -22,7 +22,7 @@ void test_main(void)
 	ztest_test_suite(test_poll_api
 			 , ztest_unit_test(test_poll_no_wait)
 			 , ztest_unit_test(test_poll_wait)
-			 , ztest_unit_test(test_poll_eaddrinuse)
+			 , ztest_unit_test(test_poll_multi)
 			 );
 	ztest_run_test_suite(test_poll_api);
 }


@@ -91,7 +91,8 @@ void test_poll_no_wait(void)
 static K_SEM_DEFINE(wait_sem, 0, 1);
 static K_FIFO_DEFINE(wait_fifo);
-static struct k_poll_signal wait_signal = K_POLL_SIGNAL_INITIALIZER();
+static struct k_poll_signal wait_signal =
+	K_POLL_SIGNAL_INITIALIZER(wait_signal);
 
 struct fifo_msg wait_msg = { NULL, FIFO_MSG_VALUE };
@@ -298,30 +299,30 @@ void test_poll_wait(void)
 	wait_signal.signaled = 0;
 }
 
-/* verify -EADDRINUSE return value when object has already a poller */
-static K_SEM_DEFINE(eaddrinuse_sem, 0, 1);
-static K_SEM_DEFINE(eaddrinuse_reply, 0, 1);
+/* verify multiple pollers */
+static K_SEM_DEFINE(multi_sem, 0, 1);
+static K_SEM_DEFINE(multi_reply, 0, 1);
 
-static struct k_thread eaddrinuse_hogger_thread;
-static K_THREAD_STACK_DEFINE(eaddrinuse_hogger_stack, KB(1));
+static struct k_thread multi_thread;
+static K_THREAD_STACK_DEFINE(multi_stack, KB(1));
 
-static void eaddrinuse_hogger(void *p1, void *p2, void *p3)
+static void multi(void *p1, void *p2, void *p3)
 {
 	(void)p1; (void)p2; (void)p3;
 
 	struct k_poll_event event;
 
 	k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
-			  K_POLL_MODE_NOTIFY_ONLY, &eaddrinuse_sem);
+			  K_POLL_MODE_NOTIFY_ONLY, &multi_sem);
 	(void)k_poll(&event, 1, K_FOREVER);
-	k_sem_take(&eaddrinuse_sem, K_FOREVER);
-	k_sem_give(&eaddrinuse_reply);
+	k_sem_take(&multi_sem, K_FOREVER);
+	k_sem_give(&multi_reply);
 }
 
-static K_SEM_DEFINE(eaddrinuse_ready_sem, 1, 1);
+static K_SEM_DEFINE(multi_ready_sem, 1, 1);
 
-void test_poll_eaddrinuse(void)
+void test_poll_multi(void)
 {
 	int old_prio = k_thread_priority_get(k_current_get());
 	const int main_low_prio = 10;
@@ -330,29 +331,29 @@ void test_poll_eaddrinuse(void)
 	struct k_poll_event events[] = {
 		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 					 K_POLL_MODE_NOTIFY_ONLY,
-					 &eaddrinuse_sem),
+					 &multi_sem),
 		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 					 K_POLL_MODE_NOTIFY_ONLY,
-					 &eaddrinuse_ready_sem),
+					 &multi_ready_sem),
 	};
 
 	k_thread_priority_set(k_current_get(), main_low_prio);
 
-	k_thread_create(&eaddrinuse_hogger_thread, eaddrinuse_hogger_stack,
-			K_THREAD_STACK_SIZEOF(eaddrinuse_hogger_stack),
-			eaddrinuse_hogger, 0, 0, 0, main_low_prio - 1, 0, 0);
+	k_thread_create(&multi_thread, multi_stack,
+			K_THREAD_STACK_SIZEOF(multi_stack),
+			multi, 0, 0, 0, main_low_prio - 1, 0, 0);
 
 	rc = k_poll(events, ARRAY_SIZE(events), K_SECONDS(1));
 
 	k_thread_priority_set(k_current_get(), old_prio);
 
-	zassert_equal(rc, -EADDRINUSE, "");
-	zassert_equal(events[0].state, K_POLL_STATE_EADDRINUSE, "");
+	zassert_equal(rc, 0, "");
+	zassert_equal(events[0].state, K_POLL_STATE_NOT_READY, "");
 	zassert_equal(events[1].state, K_POLL_STATE_SEM_AVAILABLE, "");
 
-	/* free hogger, ensuring it awoken from k_poll() and got the sem */
-	k_sem_give(&eaddrinuse_sem);
-	rc = k_sem_take(&eaddrinuse_reply, K_SECONDS(1));
+	/* free multi, ensuring it awoken from k_poll() and got the sem */
+	k_sem_give(&multi_sem);
+	rc = k_sem_take(&multi_reply, K_SECONDS(1));
 	zassert_equal(rc, 0, "");
 }