kernel: add k_poll() API

k_poll() is similar in spirit to the POSIX poll() API in that it allows
a single thread to monitor multiple events without actively polling
them, instead pending until one or more become ready. Such an event can
be a direct event, or a kernel object (currently only semaphores and
fifos).

When a kernel object being polled on is ready, it is not "given" to the
poller: the poller must then acquire it via the regular API for the
object (e.g. k_sem_take()). Only one thread can poll on a particular
object at one time. These restrictions mean that k_poll() is most
effective when a single thread monitors multiple events that are not
subject to contention: for example, being the sole reader on multiple
fifos, or the only thread being signalled by multiple semaphores, or a
combination of both.
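
For illustration, a minimal sketch of this pattern (object names are
placeholders): a sole consumer thread waiting on a semaphore and a fifo
at once.

	K_SEM_DEFINE(my_sem, 0, 1);
	K_FIFO_DEFINE(my_fifo);

	void consumer(void)
	{
		struct k_poll_event events[2];

		k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
				  K_POLL_MODE_NOTIFY_ONLY, &my_sem);
		k_poll_event_init(&events[1], K_POLL_TYPE_FIFO_DATA_AVAILABLE,
				  K_POLL_MODE_NOTIFY_ONLY, &my_fifo);

		for (;;) {
			(void)k_poll(events, 2, K_FOREVER);

			if (events[0].state & K_POLL_STATE_SEM_AVAILABLE) {
				/* the object is not given to us: take it */
				(void)k_sem_take(&my_sem, K_NO_WAIT);
			}
			if (events[1].state & K_POLL_STATE_FIFO_DATA_AVAILABLE) {
				void *data = k_fifo_get(&my_fifo, K_NO_WAIT);

				(void)data; /* consume the data here */
			}

			/* states must be reset before reuse */
			events[0].state = K_POLL_STATE_NOT_READY;
			events[1].state = K_POLL_STATE_NOT_READY;
		}
	}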

Change-Id: I7035a9baf4aa016fb87afc5f5c0f5f8cb216480f
Signed-off-by: Benjamin Walsh <walsh.benj@gmail.com>
commit acc68c1e59
Author: Benjamin Walsh (2017-01-29 18:57:45 -05:00)
Committed by: Anas Nashif
6 changed files with 639 additions and 1 deletion

@@ -96,6 +96,15 @@ typedef sys_dlist_t _wait_q_t;
#define _OBJECT_TRACING_NEXT_PTR(type)
#endif
#ifdef CONFIG_POLL
#define _POLL_EVENT_OBJ_INIT \
.poll_event = NULL,
#define _POLL_EVENT struct k_poll_event *poll_event
#else
#define _POLL_EVENT_OBJ_INIT
#define _POLL_EVENT
#endif
#define tcs k_thread
struct k_thread;
struct k_mutex;
@@ -110,6 +119,8 @@ struct k_stack;
struct k_mem_slab;
struct k_mem_pool;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
typedef struct k_thread *k_tid_t;
@@ -1093,6 +1104,7 @@ extern uint32_t k_cycle_get_32(void);
struct k_fifo {
_wait_q_t wait_q;
sys_slist_t data_q;
_POLL_EVENT;
_OBJECT_TRACING_NEXT_PTR(k_fifo);
};
@@ -1101,6 +1113,7 @@ struct k_fifo {
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
_POLL_EVENT_OBJ_INIT \
_OBJECT_TRACING_INIT \
}
@@ -1834,6 +1847,7 @@ struct k_sem {
_wait_q_t wait_q;
unsigned int count;
unsigned int limit;
_POLL_EVENT;
_OBJECT_TRACING_NEXT_PTR(k_sem);
};
@@ -1843,6 +1857,7 @@ struct k_sem {
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.count = initial_count, \
.limit = count_limit, \
_POLL_EVENT_OBJ_INIT \
_OBJECT_TRACING_INIT \
}
@@ -3087,6 +3102,245 @@ extern void k_free(void *ptr);
* @} end defgroup heap_apis
*/
/* polling API - PRIVATE */
/* private - implementation data created as needed, per-type */
struct _poller {
struct k_thread *thread;
};
/* private - types bit positions */
enum _poll_types_bits {
/* can be used to ignore an event */
_POLL_TYPE_IGNORE,
/* to be signaled by k_poll_signal() */
_POLL_TYPE_SIGNAL,
/* semaphore availability */
_POLL_TYPE_SEM_AVAILABLE,
/* fifo data availability */
_POLL_TYPE_FIFO_DATA_AVAILABLE,
_POLL_NUM_TYPES
};
#define _POLL_TYPE_BIT(type) (1 << ((type) - 1))
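/* the "- 1" accounts for _POLL_TYPE_IGNORE getting no bit: its public
 * value, K_POLL_TYPE_IGNORE, is simply 0, so _POLL_TYPE_SIGNAL lands on
 * bit 0, _POLL_TYPE_SEM_AVAILABLE on bit 1, and so on
 */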
/* private - states bit positions */
enum _poll_states_bits {
/* default state when creating event */
_POLL_STATE_NOT_READY,
/* there was another poller already on the object */
_POLL_STATE_EADDRINUSE,
/* signaled by k_poll_signal() */
_POLL_STATE_SIGNALED,
/* semaphore is available */
_POLL_STATE_SEM_AVAILABLE,
/* data is available to read on fifo */
_POLL_STATE_FIFO_DATA_AVAILABLE,
_POLL_NUM_STATES
};
#define _POLL_STATE_BIT(state) (1 << ((state) - 1))
#define _POLL_EVENT_NUM_UNUSED_BITS \
(32 - (_POLL_NUM_TYPES + _POLL_NUM_STATES + 1 /* modes */))
#if _POLL_EVENT_NUM_UNUSED_BITS < 0
#error overflow of 32-bit word in struct k_poll_event
#endif
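/* currently: 32 - (4 types + 5 states + 1 mode bit) = 22 unused bits */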
/* end of polling API - PRIVATE */
/**
* @defgroup poll_apis Async polling APIs
* @ingroup kernel_apis
* @{
*/
/* Public polling API */
/* public - values for k_poll_event.type bitfield */
#define K_POLL_TYPE_IGNORE 0
#define K_POLL_TYPE_SIGNAL _POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
#define K_POLL_TYPE_SEM_AVAILABLE _POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
#define K_POLL_TYPE_FIFO_DATA_AVAILABLE \
_POLL_TYPE_BIT(_POLL_TYPE_FIFO_DATA_AVAILABLE)
/* public - polling modes */
enum k_poll_modes {
/* polling thread does not take ownership of objects when available */
K_POLL_MODE_NOTIFY_ONLY = 0,
K_POLL_NUM_MODES
};
/* public - values for k_poll_event.state bitfield */
#define K_POLL_STATE_NOT_READY 0
#define K_POLL_STATE_EADDRINUSE _POLL_STATE_BIT(_POLL_STATE_EADDRINUSE)
#define K_POLL_STATE_SIGNALED _POLL_STATE_BIT(_POLL_STATE_SIGNALED)
#define K_POLL_STATE_SEM_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
#define K_POLL_STATE_FIFO_DATA_AVAILABLE \
_POLL_STATE_BIT(_POLL_STATE_FIFO_DATA_AVAILABLE)
/* public - poll signal object */
struct k_poll_signal {
/* PRIVATE - DO NOT TOUCH */
struct k_poll_event *poll_event;
/*
* 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
* user resets it to 0.
*/
unsigned int signaled;
/* custom result value passed to k_poll_signal() if needed */
int result;
};
#define K_POLL_SIGNAL_INITIALIZER() \
{ \
.poll_event = NULL, \
.signaled = 0, \
.result = 0, \
}
struct k_poll_event {
/* PRIVATE - DO NOT TOUCH */
struct _poller *poller;
/* bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
uint32_t type:_POLL_NUM_TYPES;
/* bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
uint32_t state:_POLL_NUM_STATES;
/* mode of operation, from enum k_poll_modes */
uint32_t mode:1;
/* unused bits in 32-bit word */
uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
/* per-type data */
union {
void *obj;
struct k_poll_signal *signal;
struct k_sem *sem;
struct k_fifo *fifo;
};
};
#define K_POLL_EVENT_INITIALIZER(event_type, event_mode, event_data) \
{ \
.poller = NULL, \
.type = event_type, \
.state = K_POLL_STATE_NOT_READY, \
.mode = event_mode, \
.unused = 0, \
{ .obj = event_data }, \
}
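/*
 * Illustrative sketch (my_sem being a placeholder semaphore defined
 * elsewhere): a statically initialized event, ready to be passed to
 * k_poll():
 *
 *	struct k_poll_event event =
 *		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *					 K_POLL_MODE_NOTIFY_ONLY, &my_sem);
 */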
/**
* @brief Initialize one struct k_poll_event instance
*
* After this routine is called on a poll event, the event is ready to be
* placed in an event array to be passed to k_poll().
*
* @param event The event to initialize.
* @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
* values. Only values that apply to the same object being polled
* can be used together. Choosing K_POLL_TYPE_IGNORE disables the
* event.
* @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
* @param obj Kernel object or poll signal.
*
* @return N/A
*/
extern void k_poll_event_init(struct k_poll_event *event, uint32_t type,
int mode, void *obj);
/**
* @brief Wait for one or many of multiple poll events to occur
*
* This routine allows a thread to wait concurrently for one or many of
* multiple poll events to have occurred. Such events can be a kernel object
* being available, like a semaphore, or a poll signal event.
*
* When an event notifies that a kernel object is available, the kernel object
* is not "given" to the thread calling k_poll(): it merely signals the fact
* that the object was available when the k_poll() call was in effect. Also,
* all threads trying to acquire an object the regular way, i.e. by pending on
* the object, have precedence over the thread polling on the object. This
* means that the polling thread will never get the poll event on an object
* until the object becomes available and its pend queue is empty. For this
* reason, the k_poll() call is more effective when the objects being polled
* only have one thread, the polling thread, trying to acquire them.
*
* Only one thread can be polling for a particular object at a given time. If
* another thread tries to poll on it, the k_poll() call returns -EADDRINUSE,
* doing so as soon as it has finished handling the other events. This means
* that k_poll() can return -EADDRINUSE while the state value of some events
* is not K_POLL_STATE_NOT_READY. When this condition occurs, the @a timeout
* parameter is ignored.
*
* When k_poll() returns 0 or -EADDRINUSE, the caller should loop on all the
* events that were passed to k_poll() and check the state field for the values
* that were expected and take the associated actions.
*
* Before being reused for another call to k_poll(), the user has to reset the
* state field to K_POLL_STATE_NOT_READY.
*
* @param events An array of pointers to events to be polled for.
* @param num_events The number of events in the array.
* @param timeout Waiting period for an event to be ready (in milliseconds),
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 One or more events are ready.
* @retval -EADDRINUSE One or more objects already had a poller.
* @retval -EAGAIN Waiting period timed out.
*/
extern int k_poll(struct k_poll_event *events, int num_events,
int32_t timeout);
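/*
 * Sketch of typical result handling (events/num_events as passed to
 * k_poll() above; semaphore events assumed):
 *
 *	rc = k_poll(events, num_events, K_FOREVER);
 *	if (rc == 0 || rc == -EADDRINUSE) {
 *		for (int i = 0; i < num_events; i++) {
 *			if (events[i].state & K_POLL_STATE_SEM_AVAILABLE) {
 *				(void)k_sem_take(events[i].sem, K_NO_WAIT);
 *			}
 *			events[i].state = K_POLL_STATE_NOT_READY;
 *		}
 *	}
 */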
/**
* @brief Signal a poll signal object.
*
* This routine makes ready a poll signal, which is basically a poll event of
* type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
* made ready to run. A @a result value can be specified.
*
* The poll signal contains a 'signaled' field that, when set by
* k_poll_signal(), stays set until the user resets it to 0. It thus has to
* be reset by the user before being passed again to k_poll(), or k_poll()
* will consider it still signaled and return immediately.
*
* @param signal A poll signal.
* @param result The value to store in the result field of the signal.
*
* @retval 0 The signal was delivered successfully.
* @retval -EAGAIN The polling thread's timeout is in the process of expiring.
*/
extern int k_poll_signal(struct k_poll_signal *signal, int result);
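/*
 * Sketch of a signal round-trip (my_signal is a placeholder):
 *
 *	struct k_poll_signal my_signal = K_POLL_SIGNAL_INITIALIZER();
 *	struct k_poll_event event =
 *		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
 *					 K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 *
 * Producer side: k_poll_signal(&my_signal, 42);
 *
 * Poller side, after k_poll(&event, 1, K_FOREVER) returns 0:
 *
 *	if (event.state & K_POLL_STATE_SIGNALED) {
 *		int result = my_signal.result;
 *
 *		my_signal.signaled = 0;	(reset before reusing)
 *		event.state = K_POLL_STATE_NOT_READY;
 *	}
 */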
/* private internal function */
extern int _handle_obj_poll_event(struct k_poll_event **obj_poll_event,
uint32_t state);
/**
* @} end defgroup poll_apis
*/
/**
* @brief Make the CPU idle.
*


@@ -289,6 +289,16 @@ config TIMESLICE_PRIORITY
takes effect; threads having a higher priority than this ceiling are
not subject to time slicing.
config POLL
bool
prompt "async I/O framework"
default n
help
Asynchronous notification framework. Enable the k_poll() and
k_poll_signal() APIs. The former can wait on multiple events
concurrently, which can be either directly triggered or triggered by
the availability of some kernel objects (semaphores and fifos).
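Note that the framework is disabled by default; set CONFIG_POLL=y (for
example in an application's prj.conf) to make these APIs available.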
endmenu
menu "Other Kernel Object Options"


@@ -39,3 +39,4 @@ lib-$(CONFIG_STACK_CANARIES) += compiler_stack_protect.o
lib-$(CONFIG_SYS_CLOCK_EXISTS) += timer.o
lib-$(CONFIG_LEGACY_KERNEL) += legacy_timer.o
lib-$(CONFIG_ATOMIC_OPERATIONS_C) += atomic_c.o
lib-$(CONFIG_POLL) += poll.o


@@ -62,6 +62,19 @@ static void prepare_thread_to_run(struct k_thread *thread, void *data)
_set_thread_return_value_with_data(thread, 0, data);
}
/* returns 1 if a reschedule must take place, 0 otherwise */
static inline int handle_poll_event(struct k_fifo *fifo)
{
#ifdef CONFIG_POLL
uint32_t state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
return fifo->poll_event ?
_handle_obj_poll_event(&fifo->poll_event, state) : 0;
#else
return 0;
#endif
}
void k_fifo_put(struct k_fifo *fifo, void *data)
{
struct k_thread *first_pending_thread;
@@ -79,6 +92,10 @@ void k_fifo_put(struct k_fifo *fifo, void *data)
}
} else {
sys_slist_append(&fifo->data_q, data);
if (handle_poll_event(fifo)) {
(void)_Swap(key);
return;
}
}
irq_unlock(key);

kernel/poll.c (new file, 342 lines)

@@ -0,0 +1,342 @@
/*
* Copyright (c) 2017 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
*
* @brief Kernel asynchronous event polling interface.
*
* This polling mechanism allows waiting on multiple events concurrently,
* either events triggered directly, or from kernel objects or other kernel
* constructs.
*/
#include <kernel.h>
#include <kernel_structs.h>
#include <wait_q.h>
#include <ksched.h>
#include <misc/slist.h>
#include <misc/dlist.h>
#include <misc/__assert.h>
void k_poll_event_init(struct k_poll_event *event, uint32_t type,
int mode, void *obj)
{
__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
"only NOTIFY_ONLY mode is supported\n");
__ASSERT(type < (1 << _POLL_NUM_TYPES), "invalid type\n");
__ASSERT(obj, "must provide an object\n");
event->poller = NULL;
event->type = type;
event->state = K_POLL_STATE_NOT_READY;
event->mode = mode;
event->unused = 0;
event->obj = obj;
}
/* must be called with interrupts locked */
static inline void set_polling_state(struct k_thread *thread)
{
_mark_thread_as_polling(thread);
}
/* must be called with interrupts locked */
static inline void clear_polling_state(struct k_thread *thread)
{
_mark_thread_as_not_polling(thread);
}
/* must be called with interrupts locked */
static inline int is_polling(struct k_thread *thread)
{
return _is_thread_polling(thread);
}
/* must be called with interrupts locked */
static inline int is_condition_met(struct k_poll_event *event, uint32_t *state)
{
switch (event->type) {
case K_POLL_TYPE_SEM_AVAILABLE:
if (k_sem_count_get(event->sem) > 0) {
*state = K_POLL_STATE_SEM_AVAILABLE;
return 1;
}
break;
case K_POLL_TYPE_FIFO_DATA_AVAILABLE:
if (!k_fifo_is_empty(event->fifo)) {
*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
return 1;
}
break;
case K_POLL_TYPE_SIGNAL:
if (event->signal->signaled) {
*state = K_POLL_STATE_SIGNALED;
return 1;
}
break;
case K_POLL_TYPE_IGNORE:
return 0;
default:
__ASSERT(0, "invalid event type (0x%x)\n", event->type);
break;
}
return 0;
}
/* must be called with interrupts locked */
static inline int register_event(struct k_poll_event *event)
{
switch (event->type) {
case K_POLL_TYPE_SEM_AVAILABLE:
__ASSERT(event->sem, "invalid semaphore\n");
if (event->sem->poll_event) {
return -EADDRINUSE;
}
event->sem->poll_event = event;
break;
case K_POLL_TYPE_FIFO_DATA_AVAILABLE:
__ASSERT(event->fifo, "invalid fifo\n");
if (event->fifo->poll_event) {
return -EADDRINUSE;
}
event->fifo->poll_event = event;
break;
case K_POLL_TYPE_SIGNAL:
__ASSERT(event->signal, "invalid poll signal\n");
if (event->signal->poll_event) {
return -EADDRINUSE;
}
event->signal->poll_event = event;
break;
case K_POLL_TYPE_IGNORE:
/* nothing to do */
break;
default:
__ASSERT(0, "invalid event type\n");
break;
}
return 0;
}
/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
event->poller = NULL;
switch (event->type) {
case K_POLL_TYPE_SEM_AVAILABLE:
__ASSERT(event->sem, "invalid semaphore\n");
event->sem->poll_event = NULL;
break;
case K_POLL_TYPE_FIFO_DATA_AVAILABLE:
__ASSERT(event->fifo, "invalid fifo\n");
event->fifo->poll_event = NULL;
break;
case K_POLL_TYPE_SIGNAL:
__ASSERT(event->signal, "invalid poll signal\n");
event->signal->poll_event = NULL;
break;
case K_POLL_TYPE_IGNORE:
/* nothing to do */
break;
default:
__ASSERT(0, "invalid event type\n");
break;
}
}
/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
int last_registered,
unsigned int key)
{
for (; last_registered >= 0; last_registered--) {
clear_event_registration(&events[last_registered]);
irq_unlock(key);
key = irq_lock();
}
}
static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
event->poller = NULL;
event->state |= state;
}
int k_poll(struct k_poll_event *events, int num_events, int32_t timeout)
{
__ASSERT(!_is_in_isr(), "");
__ASSERT(events, "NULL events\n");
__ASSERT(num_events > 0, "zero events\n");
int last_registered = -1, in_use = 0, rc;
unsigned int key;
key = irq_lock();
set_polling_state(_current);
irq_unlock(key);
/*
* We can get by with one poller structure for all events for now:
* if/when we allow multiple threads to poll on the same object, we
* will need one per poll event associated with an object.
*/
struct _poller poller = { .thread = _current };
/* find events whose condition is already fulfilled */
for (int ii = 0; ii < num_events; ii++) {
uint32_t state;
key = irq_lock();
if (is_condition_met(&events[ii], &state)) {
set_event_ready(&events[ii], state);
clear_polling_state(_current);
} else if (timeout != K_NO_WAIT && !in_use) {
rc = register_event(&events[ii]);
if (rc == 0) {
events[ii].poller = &poller;
++last_registered;
} else if (rc == -EADDRINUSE) {
/* setting in_use also prevents any further
* registrations by the current thread
*/
in_use = -EADDRINUSE;
events[ii].state = K_POLL_STATE_EADDRINUSE;
clear_polling_state(_current);
} else {
__ASSERT(0, "unexpected return code\n");
}
}
irq_unlock(key);
}
key = irq_lock();
/*
* If we're not polling anymore, it means that at least one event
* condition is met, either when looping through the events here or
* because one of the events registered has had its state changed, or
* that one of the objects we wanted to poll on already had a thread
* polling on it. We can remove all registrations and return either
* success or a -EADDRINUSE error. In the case of a -EADDRINUSE error,
* the events that were available are still flagged as such, and it is
* valid for the caller to consider them available, as if this function
* returned success.
*/
if (!is_polling(_current)) {
clear_event_registrations(events, last_registered, key);
irq_unlock(key);
return in_use;
}
clear_polling_state(_current);
if (timeout == K_NO_WAIT) {
irq_unlock(key);
return -EAGAIN;
}
_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);
_pend_current_thread(&wait_q, timeout);
int swap_rc = _Swap(key);
/*
* Clear all event registrations. If events happen while we're in this
* loop, and we already had one that triggered, that's OK: they will
* end up in the list of events that are ready; if we timed out and
* events happen in the meantime, that is OK as well since we already
* know the return code (-EAGAIN), and even if they are added to the
* list of events that occurred, the user has to check the return code
* first, which invalidates the whole list of event states.
*/
key = irq_lock();
clear_event_registrations(events, last_registered, key);
irq_unlock(key);
return swap_rc;
}
/* must be called with interrupts locked */
static int _signal_poll_event(struct k_poll_event *event, uint32_t state,
int *must_reschedule)
{
*must_reschedule = 0;
if (!event->poller) {
goto ready_event;
}
struct k_thread *thread = event->poller->thread;
__ASSERT(event->poller->thread, "poller should have a thread\n");
clear_polling_state(thread);
if (!_is_thread_pending(thread)) {
goto ready_event;
}
if (_is_thread_timeout_expired(thread)) {
return -EAGAIN;
}
_unpend_thread(thread);
_abort_thread_timeout(thread);
_set_thread_return_value(thread, 0);
if (!_is_thread_ready(thread)) {
goto ready_event;
}
_add_thread_to_ready_q(thread);
*must_reschedule = !_is_in_isr() && _must_switch_threads();
ready_event:
set_event_ready(event, state);
return 0;
}
/* returns 1 if a reschedule must take place, 0 otherwise */
/* *obj_poll_event is guaranteed to not be NULL */
int _handle_obj_poll_event(struct k_poll_event **obj_poll_event, uint32_t state)
{
struct k_poll_event *poll_event = *obj_poll_event;
int must_reschedule;
*obj_poll_event = NULL;
(void)_signal_poll_event(poll_event, state, &must_reschedule);
return must_reschedule;
}
int k_poll_signal(struct k_poll_signal *signal, int result)
{
unsigned int key = irq_lock();
int must_reschedule;
signal->result = result;
if (!signal->poll_event) {
signal->signaled = 1;
irq_unlock(key);
return 0;
}
int rc = _signal_poll_event(signal->poll_event, K_POLL_STATE_SIGNALED,
&must_reschedule);
if (must_reschedule) {
(void)_Swap(key);
} else {
irq_unlock(key);
}
return rc;
}


@@ -231,6 +231,19 @@ static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
#define handle_sem_group(sem, thread) 0
#endif
/* returns 1 if a reschedule must take place, 0 otherwise */
static inline int handle_poll_event(struct k_sem *sem)
{
#ifdef CONFIG_POLL
uint32_t state = K_POLL_STATE_SEM_AVAILABLE;
return sem->poll_event ?
_handle_obj_poll_event(&sem->poll_event, state) : 0;
#else
return 0;
#endif
}
/**
* @brief Common semaphore give code
*
@@ -248,7 +261,8 @@ static bool sem_give_common(struct k_sem *sem)
* its limit has already been reached.
*/
sem->count += (sem->count != sem->limit);
-	return false;
+	return handle_poll_event(sem);
}
_abort_thread_timeout(thread);