kernel/poll: Make the poll callback a single-byte mode enum

Somewhat weirdly, the poll machinery can do one of two things when its
set of events is signaled: it can wake up a sleeping thread, or it can
submit an unrelated work item to a work queue.  The difference in
behavior is currently captured by a callback, but as there are only
two behaviors, it's cleaner to express this as a "mode" enumerant.
That also shrinks the size of the data such that the poller struct
can be moved somewhere other than the calling stack.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross on 2020-10-30 11:18:53 -07:00; committed by Anas Nashif
commit 0c7af40d61
2 changed files with 22 additions and 14 deletions
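
To make the tradeoff concrete, below is a small compilable sketch of the
idea, with hypothetical names (poller_cb, poller_mode, and the stub signal
functions are illustration only, not the kernel's definitions): the callback
variant spends a pointer-sized field per poller, the mode variant spends one
byte, and the two known behaviors are selected at a single dispatch site
rather than through an indirect call.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct k_poll_event;  /* opaque here */

/* Before: behavior selected via a per-poller function pointer. */
struct poller_cb {
	volatile bool is_polling;
	void *thread;  /* stand-in for struct k_thread * */
	int (*cb)(struct k_poll_event *ev, uint32_t state);
};

/* After: behavior selected via a one-byte mode enum. */
enum poll_mode { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

struct poller_mode {
	volatile bool is_polling;
	void *thread;
	uint8_t mode;  /* one byte instead of a pointer */
};

static int signal_poller(struct k_poll_event *ev, uint32_t state)
{
	(void)ev; (void)state;
	return 0;  /* the real code would wake the sleeping thread */
}

static int signal_triggered_work(struct k_poll_event *ev, uint32_t state)
{
	(void)ev; (void)state;
	return 0;  /* the real code would submit the work item to its queue */
}

/* A single dispatch site replaces the indirect poller->cb() call. */
static int signal_poll_event(struct poller_mode *p,
			     struct k_poll_event *ev, uint32_t state)
{
	int retcode = 0;

	if (p->mode == MODE_POLL) {
		retcode = signal_poller(ev, state);
	} else if (p->mode == MODE_TRIGGERED) {
		retcode = signal_triggered_work(ev, state);
	}
	p->is_polling = false;
	return retcode;
}

int main(void)
{
	/* The field saving is pointer-size minus one byte; struct padding
	 * can mask it in this toy layout depending on the ABI.
	 */
	printf("callback layout: %zu bytes, mode layout: %zu bytes\n",
	       sizeof(struct poller_cb), sizeof(struct poller_mode));

	struct poller_mode p = { .is_polling = true, .mode = MODE_POLL };
	return signal_poll_event(&p, NULL, 0);
}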


@@ -2717,7 +2717,7 @@ typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
 struct _poller {
 	volatile bool is_polling;
 	struct k_thread *thread;
-	_poller_cb_t cb;
+	uint8_t mode;
 };
 
 /**


@@ -34,6 +34,11 @@
  */
 static struct k_spinlock lock;
 
+enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };
+
+static int signal_poller(struct k_poll_event *event, uint32_t state);
+static int signal_triggered_work(struct k_poll_event *event, uint32_t status);
+
 void k_poll_event_init(struct k_poll_event *event, uint32_t type,
 		       int mode, void *obj)
 {
@@ -216,7 +221,7 @@ static inline int register_events(struct k_poll_event *events,
 	return events_registered;
 }
 
-static int k_poll_poller_cb(struct k_poll_event *event, uint32_t state)
+static int signal_poller(struct k_poll_event *event, uint32_t state)
 {
 	struct k_thread *thread = event->poller->thread;
 
@@ -258,7 +263,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events,
 	 */
 	struct _poller poller = { .is_polling = true,
 				  .thread = _current,
-				  .cb = k_poll_poller_cb };
+				  .mode = MODE_POLL };
 
 	__ASSERT(!arch_is_in_isr(), "");
 	__ASSERT(events != NULL, "NULL events\n");
@@ -391,8 +396,10 @@ static int signal_poll_event(struct k_poll_event *event, uint32_t state)
 	int retcode = 0;
 
 	if (poller) {
-		if (poller->cb != NULL) {
-			retcode = poller->cb(event, state);
+		if (poller->mode == MODE_POLL) {
+			retcode = signal_poller(event, state);
+		} else if (poller->mode == MODE_TRIGGERED) {
+			retcode = signal_triggered_work(event, state);
 		}
 
 		poller->is_polling = false;
@@ -500,7 +507,7 @@ static void triggered_work_handler(struct k_work *work)
 	 * If callback is not set, the k_work_poll_submit_to_queue()
 	 * already cleared event registrations.
 	 */
-	if (twork->poller.cb != NULL) {
+	if (twork->poller.mode != MODE_NONE) {
 		k_spinlock_key_t key;
 
 		key = k_spin_lock(&lock);
@@ -528,7 +535,7 @@ static void triggered_work_expiration_handler(struct _timeout *timeout)
 	k_work_submit_to_queue(work_q, &twork->work);
 }
 
-static int triggered_work_poller_cb(struct k_poll_event *event, uint32_t status)
+static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
 {
 	struct _poller *poller = event->poller;
 
@@ -550,7 +557,7 @@ static int triggered_work_cancel(struct k_work_poll *work,
 				 k_spinlock_key_t key)
 {
 	/* Check if the work waits for event. */
-	if (work->poller.is_polling && work->poller.cb != NULL) {
+	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
 		/* Remove timeout associated with the work. */
 		z_abort_timeout(&work->timeout);
 
@@ -558,7 +565,7 @@ static int triggered_work_cancel(struct k_work_poll *work,
 		 * Prevent work execution if event arrives while we will be
 		 * clearing registrations.
 		 */
-		work->poller.cb = NULL;
+		work->poller.mode = MODE_NONE;
 
 		/* Clear registrations and work ownership. */
 		clear_event_registrations(work->events, work->num_events, key);
@@ -619,7 +626,7 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
 
 	work->poller.is_polling = true;
 	work->poller.thread = &work_q->thread;
-	work->poller.cb = NULL;
+	work->poller.mode = MODE_NONE;
 	k_spin_unlock(&lock, key);
 
 	/* Save list of events. */
@@ -654,15 +661,16 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
 		}
 
 		/* From now, any event will result in submitted work. */
-		work->poller.cb = triggered_work_poller_cb;
+		work->poller.mode = MODE_TRIGGERED;
 		k_spin_unlock(&lock, key);
 		return 0;
 	}
 
 	/*
-	 * The K_NO_WAIT timeout was specified or at least one event was ready
-	 * at registration time or changed state since registration. Hopefully,
-	 * the poller->cb was not set, so work was not submitted to workqueue.
+	 * The K_NO_WAIT timeout was specified or at least one event
+	 * was ready at registration time or changed state since
+	 * registration. Hopefully, the poller mode was not set, so
+	 * work was not submitted to workqueue.
 	 */
 
 	/*
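
One detail worth noting in the hunks above: the old NULL-callback test
doubled as an "armed" flag, and MODE_NONE now plays that role in
triggered_work_handler(), triggered_work_cancel() and
k_work_poll_submit_to_queue(). A minimal sketch of that ordering
discipline follows, with hypothetical names; the real code does all of
this under the poll spinlock, which is omitted here.

#include <stdint.h>
#include <stdbool.h>

enum poll_mode { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

struct poller {
	volatile bool is_polling;
	uint8_t mode;
};

/* Arming: the mode is set only after registration has fully succeeded,
 * so a signal arriving earlier sees MODE_NONE and submits nothing.
 */
static void arm_triggered_work(struct poller *p)
{
	p->is_polling = true;
	/* ... register events here ... */
	p->mode = MODE_TRIGGERED;
}

/* Cancelling: clearing the mode first prevents an event that fires
 * mid-teardown from submitting the work item.
 */
static bool cancel_triggered_work(struct poller *p)
{
	if (p->is_polling && p->mode != MODE_NONE) {
		p->mode = MODE_NONE;
		/* ... clear event registrations here ... */
		return true;
	}
	return false;  /* was not armed; nothing to do */
}

int main(void)
{
	struct poller p = { .is_polling = false, .mode = MODE_NONE };

	arm_triggered_work(&p);
	return cancel_triggered_work(&p) ? 0 : 1;
}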