kernel: add support for event objects

Threads may wait on an event object; events posted to that event
object wake any waiting thread whose event conditions are satisfied
by the posting.

The configuration option CONFIG_EVENTS is used to control the inclusion
of events in a system as their use increases the size of
'struct k_thread'.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
Commit ae394bff7c, authored by Peter Mitsis on 2021-09-20 14:14:32 -04:00
and committed by Anas Nashif.
13 changed files with 546 additions and 2 deletions
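As an illustration of how the pieces below fit together, here is a minimal
usage sketch. The event bit values, the my_event object, and the
app_init()/consumer()/rx_handler() functions are hypothetical application
code, not part of this commit:

#include <kernel.h>

/* Hypothetical application event bits */
#define EVT_RX_READY 0x01
#define EVT_TX_DONE  0x02

static struct k_event my_event;

void app_init(void)
{
        k_event_init(&my_event);
}

/* Consumer thread: block until either event bit is posted */
void consumer(void)
{
        uint32_t matched = k_event_wait(&my_event, EVT_RX_READY | EVT_TX_DONE,
                                        false, K_FOREVER);

        if ((matched & EVT_RX_READY) != 0) {
                /* handle received data */
        }
}

/* Producer, callable from a thread or an ISR */
void rx_handler(void)
{
        k_event_post(&my_event, EVT_RX_READY);
}
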


@@ -79,6 +79,7 @@ struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;
struct k_event;
enum execution_context_types {
K_ISR = 0,
@@ -2013,6 +2014,142 @@ __syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
/** @} */
#endif
/**
* @defgroup event_apis Event APIs
* @ingroup kernel_apis
* @{
*/
/**
* Event Structure
* @ingroup event_apis
*/
struct k_event {
_wait_q_t wait_q;
uint32_t events;
struct k_spinlock lock;
};
#define Z_EVENT_INITIALIZER(obj) \
{ \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
.events = 0 \
}
/**
* @brief Initialize an event object
*
* This routine initializes an event object, prior to its first use.
*
* @param event Address of the event object.
*
* @return N/A
*/
__syscall void k_event_init(struct k_event *event);
/**
* @brief Post one or more events to an event object
*
* This routine posts one or more events to an event object. All threads waiting
* on the event object @a event whose waiting conditions are met by this
* posting immediately unpend.
*
* Posting differs from setting in that posted events are merged together with
* the current set of events tracked by the event object.
*
* @param event Address of the event object
* @param events Set of events to post to @a event
*
* @return N/A
*/
__syscall void k_event_post(struct k_event *event, uint32_t events);
/**
* @brief Set the events in an event object
*
* This routine sets the events stored in the event object to the specified value.
* All threads waiting on the event object @a event whose waiting conditions
* are met by this operation immediately unpend.
*
* Setting differs from posting in that set events replace the current set of
* events tracked by the event object.
*
* @param event Address of the event object
* @param events Set of events to set in @a event
*
* @return N/A
*/
__syscall void k_event_set(struct k_event *event, uint32_t events);
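To make the post/set distinction concrete, a short hypothetical sketch; the
function name is illustrative, and ev is assumed to be an initialized event
object whose tracked set currently holds 0x01:

void post_vs_set_example(struct k_event *ev)
{
        k_event_post(ev, 0x02);  /* merge: tracked set becomes 0x01 | 0x02 = 0x03 */
        k_event_set(ev, 0x04);   /* replace: tracked set becomes 0x04 */
}
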
/**
* @brief Wait for any of the specified events
*
* This routine waits on event object @a event until any of the specified
* events have been delivered to the event object, or the maximum wait time
* @a timeout has expired. A thread may wait on up to 32 distinctly numbered
* events that are expressed as bits in a single 32-bit word.
*
* @note The caller must be careful when resetting if there are multiple threads
* waiting for the event object @a event.
*
* @param event Address of the event object
* @param events Set of desired events on which to wait
* @param reset If true, clear the set of events tracked by the event object
* before waiting. If false, do not clear the events.
* @param timeout Waiting period for the desired set of events or one of the
* special values K_NO_WAIT and K_FOREVER.
*
* @retval set of matching events upon success
* @retval 0 if matching events were not received within the specified time
*/
__syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout);
/**
* @brief Wait for all of the specified events
*
* This routine waits on event object @a event until all of the specified
* events have been delivered to the event object, or the maximum wait time
* @a timeout has expired. A thread may wait on up to 32 distinctly numbered
* events that are expressed as bits in a single 32-bit word.
*
* @note The caller must be careful when resetting if there are multiple threads
* waiting for the event object @a event.
*
* @param event Address of the event object
* @param events Set of desired events on which to wait
* @param reset If true, clear the set of events tracked by the event object
* before waiting. If false, do not clear the events.
* @param timeout Waiting period for the desired set of events or one of the
* special values K_NO_WAIT and K_FOREVER.
*
* @retval set of matching events upon success
* @retval 0 if matching events were not received within the specified time
*/
__syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout);
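A sketch of the difference between the two wait calls; the function name is
illustrative, and ev is assumed to be an initialized event object to which
only 0x01 has been posted so far:

void wait_vs_wait_all_example(struct k_event *ev)
{
        /* Any-match: returns as soon as any requested event is present,
         * so this returns 0x01 immediately.
         */
        uint32_t any = k_event_wait(ev, 0x01 | 0x02, false, K_NO_WAIT);

        /* All-match: every requested event must be present; with only 0x01
         * posted, this times out after 100 ms and returns 0.
         */
        uint32_t all = k_event_wait_all(ev, 0x01 | 0x02, false, K_MSEC(100));

        (void)any;
        (void)all;
}
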
/**
* @brief Statically define and initialize an event object
*
* The event can be accessed outside the module where it is defined using:
*
* @code extern struct k_event <name>; @endcode
*
* @param name Name of the event object.
*/
#define K_EVENT_DEFINE(name) \
STRUCT_SECTION_ITERABLE(k_event, name) = \
Z_EVENT_INITIALIZER(name);
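For example, a statically defined event object might be shared across modules
as described above; the file names, event bit, and functions are illustrative,
and each file is assumed to include <kernel.h>:

/* producer.c */
K_EVENT_DEFINE(obs_event);

void observation_ready(void)
{
        k_event_post(&obs_event, 0x01);
}

/* consumer.c */
extern struct k_event obs_event;

void wait_for_observation(void)
{
        (void)k_event_wait(&obs_event, 0x01, false, K_FOREVER);
}
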
/** @} */
struct k_fifo {
struct k_queue _queue;
};


@@ -215,6 +215,13 @@ struct k_thread {
struct z_poller poller;
#endif
#if defined(CONFIG_EVENTS)
struct k_thread *next_event_link;
uint32_t events;
uint32_t event_options;
#endif
#if defined(CONFIG_THREAD_MONITOR)
/** thread entry and parameters description */
struct __thread_entry entry;


@@ -85,6 +85,7 @@
ITERABLE_SECTION_RAM_GC_ALLOWED(k_mbox, 4)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_pipe, 4)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_sem, 4)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_event, 4)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_queue, 4)
ITERABLE_SECTION_RAM_GC_ALLOWED(k_condvar, 4)


@@ -1924,6 +1924,65 @@
* @}
*/ /* end of timer_tracing_apis */
/**
* @brief Event Tracing APIs
* @defgroup event_tracing_apis Event Tracing APIs
* @ingroup tracing_apis
* @{
*/
/**
* @brief Trace initialisation of an Event
* @param event Event object
*/
#define sys_port_trace_k_event_init(event)
/**
* @brief Trace posting of an Event call entry
* @param event Event object
* @param events Set of posted events
* @param accumulate True if events accumulate, false otherwise
*/
#define sys_port_trace_k_event_post_enter(event, events, accumulate)
/**
* @brief Trace posting of an Event call exit
* @param event Event object
* @param events Set of posted events
* @param accumulate True if events accumulate, false otherwise
*/
#define sys_port_trace_k_event_post_exit(event, events, accumulate)
/**
* @brief Trace waiting of an Event call entry
* @param event Event object
* @param events Set of events for which to wait
* @param options Event wait options
* @param timeout Timeout period
*/
#define sys_port_trace_k_event_wait_enter(event, events, options, timeout)
/**
* @brief Trace waiting of an Event call blocking
* @param event Event object
* @param events Set of events for which to wait
* @param options Event wait options
* @param timeout Timeout period
*/
#define sys_port_trace_k_event_wait_blocking(event, events, options, timeout)
/**
* @brief Trace waiting of an Event call exit
* @param event Event object
* @param events Set of events for which to wait
* @param ret Set of received events
*/
#define sys_port_trace_k_event_wait_exit(event, events, ret)
/**
* @}
*/ /* end of event_tracing_apis */
#define sys_port_trace_pm_system_suspend_enter(ticks)
#define sys_port_trace_pm_system_suspend_exit(ticks, ret)


@@ -145,7 +145,11 @@
#define sys_port_trace_type_mask_k_timer(trace_call)
#endif
#if defined(CONFIG_TRACING_EVENT)
#define sys_port_trace_type_mask_k_event(trace_call) trace_call
#else
#define sys_port_trace_type_mask_k_event(trace_call)
#endif
/**


@@ -80,11 +80,13 @@ target_sources_ifdef(CONFIG_SYS_CLOCK_EXISTS kernel PRIVATE timeout.c timer
target_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_C kernel PRIVATE atomic_c.c)
target_sources_ifdef(CONFIG_MMU kernel PRIVATE mmu.c)
target_sources_ifdef(CONFIG_POLL kernel PRIVATE poll.c)
target_sources_ifdef(CONFIG_EVENTS kernel PRIVATE events.c)
if(${CONFIG_KERNEL_MEM_POOL})
target_sources(kernel PRIVATE mempool.c)
endif()
# The last 2 files inside the target_sources_ifdef should be
# userspace_handler.c and userspace.c. If not the linker would complain.
# This order has to be maintained. Any new file should be placed


@@ -523,6 +523,16 @@ config NUM_PIPE_ASYNC_MSGS
Setting this option to 0 disables support for asynchronous
pipe messages.
config EVENTS
bool "Enable event objects"
help
This option enables event objects. Threads may wait on event
objects for specific events, but both threads and ISRs may deliver
events to event objects.
Note that setting this option slightly increases the size of the
thread structure.
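An application that uses this API would typically enable the option in its
prj.conf, for example:

# prj.conf (illustrative)
CONFIG_EVENTS=y
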
config KERNEL_MEM_POOL
bool "Use Kernel Memory Pool"
default y

kernel/events.c (new file, 285 lines)

@@ -0,0 +1,285 @@
/*
* Copyright (c) 2021 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file event objects library
*
* Event objects are used to signal one or more threads that a custom set of
* events has occurred. Threads wait on event objects until another thread or
* ISR posts the desired set of events to the event object. Each time events
* are posted to an event object, all threads waiting on that event object are
* processed to determine if there is a match. All threads whose wait
* conditions match the current set of events now belonging to the event object
* are awakened.
*
* Threads waiting on an event object have the option of waking once either
* any or all of the events they desire have been posted to the event object.
*
* @brief Kernel event object
*/
#include <kernel.h>
#include <kernel_structs.h>
#include <toolchain.h>
#include <wait_q.h>
#include <sys/dlist.h>
#include <ksched.h>
#include <init.h>
#include <syscall_handler.h>
#include <tracing/tracing.h>
#include <sys/check.h>
#define K_EVENT_WAIT_ANY 0x00 /* Wait for any events */
#define K_EVENT_WAIT_ALL 0x01 /* Wait for all events */
#define K_EVENT_WAIT_MASK 0x01
#define K_EVENT_WAIT_RESET 0x02 /* Reset events prior to waiting */
void z_impl_k_event_init(struct k_event *event)
{
event->events = 0;
event->lock = (struct k_spinlock) {};
SYS_PORT_TRACING_OBJ_INIT(k_event, event);
z_waitq_init(&event->wait_q);
z_object_init(event);
}
#ifdef CONFIG_USERSPACE
void z_vrfy_k_event_init(struct k_event *event)
{
Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(event, K_OBJ_EVENT));
z_impl_k_event_init(event);
}
#include <syscalls/k_event_init_mrsh.c>
#endif
/**
* @brief Determine if the desired set of events has been satisfied
*
* This routine determines if the current set of events satisfies the desired
* set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then all of the
* desired events must be present to satisfy the request. If @a
* wait_condition is not K_EVENT_WAIT_ALL, it is assumed to be K_EVENT_WAIT_ANY.
* In the K_EVENT_WAIT_ANY case, the request is satisfied when any of the
* desired events are present in the current set of events.
*/
static bool are_wait_conditions_met(uint32_t desired, uint32_t current,
unsigned int wait_condition)
{
uint32_t match = current & desired;
if (wait_condition == K_EVENT_WAIT_ALL) {
return match == desired;
}
/* wait_condition assumed to be K_EVENT_WAIT_ANY */
return match != 0;
}
static void k_event_post_internal(struct k_event *event, uint32_t events,
bool accumulate)
{
k_spinlock_key_t key;
struct k_thread *thread;
unsigned int wait_condition;
struct k_thread *head = NULL;
key = k_spin_lock(&event->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
accumulate);
if (accumulate) {
events |= event->events;
}
event->events = events;
/*
* Posting an event has the potential to wake multiple pended threads.
* It is desirable to unpend all affected threads simultaneously. To
* do so, this must be done in three steps as it is unsafe to unpend
* threads from within the _WAIT_Q_FOR_EACH() loop.
*
* 1. Create a linked list of threads to unpend.
* 2. Unpend each of the threads in the linked list
* 3. Ready each of the threads in the linked list
*/
_WAIT_Q_FOR_EACH(&event->wait_q, thread) {
wait_condition = thread->event_options & K_EVENT_WAIT_MASK;
if (are_wait_conditions_met(thread->events, events,
wait_condition)) {
/*
* The wait conditions have been satisfied. Add this
* thread to the list of threads to unpend.
*/
thread->next_event_link = head;
head = thread;
}
}
if (head != NULL) {
thread = head;
do {
z_unpend_thread(thread);
arch_thread_return_value_set(thread, 0);
thread->events = events;
z_ready_thread(thread);
thread = thread->next_event_link;
} while (thread != NULL);
}
z_reschedule(&event->lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
accumulate);
}
void z_impl_k_event_post(struct k_event *event, uint32_t events)
{
k_event_post_internal(event, events, true);
}
#ifdef CONFIG_USERSPACE
void z_vrfy_k_event_post(struct k_event *event, uint32_t events)
{
Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
z_impl_k_event_post(event, events);
}
#include <syscalls/k_event_post_mrsh.c>
#endif
void z_impl_k_event_set(struct k_event *event, uint32_t events)
{
k_event_post_internal(event, events, false);
}
#ifdef CONFIG_USERSPACE
void z_vrfy_k_event_set(struct k_event *event, uint32_t events)
{
Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
z_impl_k_event_set(event, events);
}
#include <syscalls/k_event_set_mrsh.c>
#endif
static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
unsigned int options, k_timeout_t timeout)
{
uint32_t rv = 0;
unsigned int wait_condition;
struct k_thread *thread;
__ASSERT(((arch_is_in_isr() == false) ||
K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, wait, event, events,
options, timeout);
if (events == 0) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, 0);
return 0;
}
wait_condition = options & K_EVENT_WAIT_MASK;
thread = z_current_get();
k_spinlock_key_t key = k_spin_lock(&event->lock);
if (options & K_EVENT_WAIT_RESET) {
event->events = 0;
}
/* Test if the wait conditions have already been met. */
if (are_wait_conditions_met(events, event->events, wait_condition)) {
rv = event->events;
k_spin_unlock(&event->lock, key);
goto out;
}
/* Match conditions have not been met. */
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&event->lock, key);
goto out;
}
/*
* The caller must pend to wait for the match. Save the desired
* set of events in the k_thread structure.
*/
thread->events = events;
thread->event_options = options;
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_event, wait, event, events,
options, timeout);
if (z_pend_curr(&event->lock, key, &event->wait_q, timeout) == 0) {
/* Retrieve the set of events that woke the thread */
rv = thread->events;
}
out:
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event,
events, rv & events);
return rv & events;
}
/**
* Wait for any of the specified events
*/
uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout)
{
uint32_t options = reset ? K_EVENT_WAIT_RESET : 0;
return k_event_wait_internal(event, events, options, timeout);
}
#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
return z_impl_k_event_wait(event, events, reset, timeout);
}
#include <syscalls/k_event_wait_mrsh.c>
#endif
/**
* Wait for all of the specified events
*/
uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout)
{
uint32_t options = reset ? (K_EVENT_WAIT_RESET | K_EVENT_WAIT_ALL)
: K_EVENT_WAIT_ALL;
return k_event_wait_internal(event, events, options, timeout);
}
#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
return z_impl_k_event_wait_all(event, events, reset, timeout);
}
#include <syscalls/k_event_wait_all_mrsh.c>
#endif


@@ -106,7 +106,8 @@ kobjects = OrderedDict([
("net_if", (None, False, False)),
("sys_mutex", (None, True, False)),
("k_futex", (None, True, False)),
("k_condvar", (None, False, True))
("k_condvar", (None, False, True)),
("k_event", ("CONFIG_EVENTS", False, True))
])
def kobject_to_enum(kobj):


@@ -307,6 +307,12 @@ config TRACING_TIMER
help
Enable tracing Timers.
config TRACING_EVENT
bool "Enable tracing Events"
default y
help
Enable tracing Events.
endmenu # Tracing Configuration
endif


@@ -308,6 +308,14 @@ extern "C" {
#define sys_port_trace_k_timer_status_sync_enter(timer)
#define sys_port_trace_k_timer_status_sync_blocking(timer, timeout)
#define sys_port_trace_k_timer_status_sync_exit(timer, result)
#define sys_port_trace_k_event_init(event)
#define sys_port_trace_k_event_post_enter(event, events, accumulate)
#define sys_port_trace_k_event_post_exit(event, events, accumulate)
#define sys_port_trace_k_event_wait_enter(event, events, options, timeout)
#define sys_port_trace_k_event_wait_blocking(event, events, options, timeout)
#define sys_port_trace_k_event_wait_exit(event, events, ret)
#define sys_port_trace_k_thread_abort_exit(thread)
#define sys_port_trace_k_thread_abort_enter(thread)
#define sys_port_trace_k_thread_resume_exit(thread)
@@ -413,6 +421,7 @@ void sys_trace_k_timer_stop(struct k_timer *timer);
void sys_trace_k_timer_status_sync_blocking(struct k_timer *timer);
void sys_trace_k_timer_status_sync_exit(struct k_timer *timer, uint32_t result);
void sys_trace_k_event_init(struct k_event *event);
#ifdef __cplusplus
}


@@ -408,6 +408,19 @@
sys_trace_k_timer_status_sync_blocking(timer)
#define sys_port_trace_k_timer_status_sync_exit(timer, result) \
sys_trace_k_timer_status_sync_exit(timer, result)
#define sys_port_trace_k_event_init(event) sys_trace_k_event_init(event)
#define sys_port_trace_k_event_post_enter(event, events, accumulate) \
sys_trace_k_event_post_enter(event, events, accumulate)
#define sys_port_trace_k_event_post_exit(event, events, accumulate) \
sys_trace_k_event_post_exit(event, events, accumulate)
#define sys_port_trace_k_event_wait_enter(event, events, options, timeout) \
sys_trace_k_event_wait_enter(event, events, options, timeout)
#define sys_port_trace_k_event_wait_blocking(event, events, options, timeout) \
sys_trace_k_event_wait_blocking(event, events, options, timeout)
#define sys_port_trace_k_event_wait_exit(event, events, ret) \
sys_trace_k_event_wait_exit(event, events, ret)
#define sys_port_trace_k_thread_abort_exit(thread) sys_trace_k_thread_abort_exit(thread)
#define sys_port_trace_k_thread_abort_enter(thread) sys_trace_k_thread_abort_enter(thread)
@@ -660,4 +673,6 @@ void sys_trace_k_timer_stop(struct k_timer *timer);
void sys_trace_k_timer_status_sync_blocking(struct k_timer *timer);
void sys_trace_k_timer_status_sync_exit(struct k_timer *timer, uint32_t result);
void sys_trace_k_event_init(struct k_event *event);
#endif /* ZEPHYR_TRACE_TEST_H */


@@ -289,6 +289,14 @@ void sys_trace_idle(void);
#define sys_port_trace_k_timer_status_sync_enter(timer)
#define sys_port_trace_k_timer_status_sync_blocking(timer, timeout)
#define sys_port_trace_k_timer_status_sync_exit(timer, result)
#define sys_port_trace_k_event_init(event)
#define sys_port_trace_k_event_post_enter(event, events, accumulate)
#define sys_port_trace_k_event_post_exit(event, events, accumulate)
#define sys_port_trace_k_event_wait_enter(event, events, options, timeout)
#define sys_port_trace_k_event_wait_blocking(event, events, options, timeout)
#define sys_port_trace_k_event_wait_exit(event, events, ret)
#define sys_port_trace_k_thread_abort_exit(thread)
#define sys_port_trace_k_thread_abort_enter(thread)
#define sys_port_trace_k_thread_resume_exit(thread)