kernel/poll: Make the poll callback a single-byte enum instead
Somewhat weirdly, k_poll() can do one of two things when the set of events is signaled: it can wake up a sleeping thread, or it can submit an unrelated work item to a work queue. The difference in behaviors is currently captured by a callback, but as there are only two it's cleaner to put this into a "mode" enumerant. That also shrinks the size of the data such that the poller struct can be moved somewhere other than the calling stack.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent c80fe8ae69
commit 0c7af40d61

2 changed files with 22 additions and 14 deletions
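As an orientation aid for the diff below, here is a self-contained C sketch of the pattern being applied. The names (poller_with_cb, poller_with_mode, wake_thread, submit_work, signal_via_cb, signal_via_mode) are hypothetical stand-ins, not the actual Zephyr declarations: a poller that dispatches through a stored function pointer is rewritten to store a one-byte mode that a switch interprets, removing the function-pointer member.

/*
 * Illustrative sketch only: simplified stand-ins, not the real Zephyr
 * kernel declarations.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct event;				/* opaque placeholder */

/* Before: the behavior is selected by a stored callback pointer. */
typedef int (*poller_cb_t)(struct event *ev, uint32_t state);

struct poller_with_cb {
	volatile bool is_polling;
	void *thread;
	poller_cb_t cb;			/* pointer-sized member */
};

/* After: the behavior is selected by a one-byte mode enumerant. */
enum poll_mode { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

struct poller_with_mode {
	volatile bool is_polling;
	void *thread;
	uint8_t mode;			/* single-byte member */
};

static int wake_thread(struct event *ev, uint32_t state)
{
	(void)ev; (void)state;
	return 0;			/* stands in for waking the polling thread */
}

static int submit_work(struct event *ev, uint32_t state)
{
	(void)ev; (void)state;
	return 0;			/* stands in for queueing triggered work */
}

/* Old style: indirect call through the stored pointer. */
static int signal_via_cb(struct poller_with_cb *p, struct event *ev, uint32_t state)
{
	return (p->cb != NULL) ? p->cb(ev, state) : 0;
}

/* New style: switch on the stored mode. */
static int signal_via_mode(struct poller_with_mode *p, struct event *ev, uint32_t state)
{
	switch (p->mode) {
	case MODE_POLL:
		return wake_thread(ev, state);
	case MODE_TRIGGERED:
		return submit_work(ev, state);
	default:
		return 0;		/* MODE_NONE: nothing registered */
	}
}

int main(void)
{
	struct poller_with_cb a = { .is_polling = true, .cb = wake_thread };
	struct poller_with_mode b = { .is_polling = true, .mode = MODE_POLL };

	(void)signal_via_cb(&a, NULL, 0U);
	(void)signal_via_mode(&b, NULL, 0U);

	/* Compare layouts: the function pointer is replaced by one byte
	 * (struct padding may hide the difference in raw sizeof on some
	 * targets).
	 */
	printf("callback poller: %zu bytes, mode poller: %zu bytes\n",
	       sizeof(a), sizeof(b));
	return 0;
}

Padding may mask the raw sizeof difference on some targets; the substance of the change is the removal of the function-pointer member, which, as the commit message notes, is what lets the poller struct later move somewhere other than the calling stack.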
@@ -2717,7 +2717,7 @@ typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
 struct _poller {
 	volatile bool is_polling;
 	struct k_thread *thread;
-	_poller_cb_t cb;
+	uint8_t mode;
 };
 
 /**
@@ -34,6 +34,11 @@
  */
 static struct k_spinlock lock;
 
+enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };
+
+static int signal_poller(struct k_poll_event *event, uint32_t state);
+static int signal_triggered_work(struct k_poll_event *event, uint32_t status);
+
 void k_poll_event_init(struct k_poll_event *event, uint32_t type,
 		       int mode, void *obj)
 {
@@ -216,7 +221,7 @@ static inline int register_events(struct k_poll_event *events,
 	return events_registered;
 }
 
-static int k_poll_poller_cb(struct k_poll_event *event, uint32_t state)
+static int signal_poller(struct k_poll_event *event, uint32_t state)
 {
 	struct k_thread *thread = event->poller->thread;
 
@@ -258,7 +263,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events,
 	 */
 	struct _poller poller = { .is_polling = true,
 				  .thread = _current,
-				  .cb = k_poll_poller_cb };
+				  .mode = MODE_POLL };
 
 	__ASSERT(!arch_is_in_isr(), "");
 	__ASSERT(events != NULL, "NULL events\n");
@@ -391,8 +396,10 @@ static int signal_poll_event(struct k_poll_event *event, uint32_t state)
 	int retcode = 0;
 
 	if (poller) {
-		if (poller->cb != NULL) {
-			retcode = poller->cb(event, state);
+		if (poller->mode == MODE_POLL) {
+			retcode = signal_poller(event, state);
+		} else if (poller->mode == MODE_TRIGGERED) {
+			retcode = signal_triggered_work(event, state);
 		}
 
 		poller->is_polling = false;
@@ -500,7 +507,7 @@ static void triggered_work_handler(struct k_work *work)
 	 * If callback is not set, the k_work_poll_submit_to_queue()
 	 * already cleared event registrations.
 	 */
-	if (twork->poller.cb != NULL) {
+	if (twork->poller.mode != MODE_NONE) {
 		k_spinlock_key_t key;
 
 		key = k_spin_lock(&lock);
@@ -528,7 +535,7 @@ static void triggered_work_expiration_handler(struct _timeout *timeout)
 	k_work_submit_to_queue(work_q, &twork->work);
 }
 
-static int triggered_work_poller_cb(struct k_poll_event *event, uint32_t status)
+static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
 {
 	struct _poller *poller = event->poller;
 
@@ -550,7 +557,7 @@ static int triggered_work_cancel(struct k_work_poll *work,
 			      k_spinlock_key_t key)
 {
 	/* Check if the work waits for event. */
-	if (work->poller.is_polling && work->poller.cb != NULL) {
+	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
 		/* Remove timeout associated with the work. */
 		z_abort_timeout(&work->timeout);
 
@@ -558,7 +565,7 @@ static int triggered_work_cancel(struct k_work_poll *work,
 		 * Prevent work execution if event arrives while we will be
 		 * clearing registrations.
 		 */
-		work->poller.cb = NULL;
+		work->poller.mode = MODE_NONE;
 
 		/* Clear registrations and work ownership. */
 		clear_event_registrations(work->events, work->num_events, key);
@@ -619,7 +626,7 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
 
 	work->poller.is_polling = true;
 	work->poller.thread = &work_q->thread;
-	work->poller.cb = NULL;
+	work->poller.mode = MODE_NONE;
 	k_spin_unlock(&lock, key);
 
 	/* Save list of events. */
@@ -654,15 +661,16 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
 		}
 
 		/* From now, any event will result in submitted work. */
-		work->poller.cb = triggered_work_poller_cb;
+		work->poller.mode = MODE_TRIGGERED;
 		k_spin_unlock(&lock, key);
 		return 0;
 	}
 
 	/*
-	 * The K_NO_WAIT timeout was specified or at least one event was ready
-	 * at registration time or changed state since registration. Hopefully,
-	 * the poller->cb was not set, so work was not submitted to workqueue.
+	 * The K_NO_WAIT timeout was specified or at least one event
+	 * was ready at registration time or changed state since
+	 * registration. Hopefully, the poller mode was not set, so
+	 * work was not submitted to workqueue.
 	 */
 
 	/*