unified: Add support for semaphore groups

Semaphore groups are enabled by default. Disabling them will both
decrease the footprint and improve the performance of the
k_sem_give() routine.

Change-Id: If6c1b0e2e1f71afd43e620f05f17068039d12b05
Signed-off-by: Peter Mitsis <peter.mitsis@windriver.com>
Author: Peter Mitsis, 2016-09-09 14:24:06 -04:00 (committed by Benjamin Walsh)
Commit: 45403678aa
4 changed files with 295 additions and 19 deletions


@@ -644,10 +644,53 @@ static inline int k_sem_count_get(struct k_sem *sem)
 	return sem->count;
 }
 
-extern struct k_sem *k_sem_group_take(struct k_sem **sem_array,
-				      int32_t timeout);
-extern void k_sem_group_give(struct k_sem **sem_array);
-extern void k_sem_group_reset(struct k_sem **sem_array);
+#ifdef CONFIG_SEMAPHORE_GROUPS
+/**
+ * @brief Take the first available semaphore
+ *
+ * Given a list of semaphore pointers, this routine will attempt to take one
+ * of them, waiting up to a maximum of @a timeout ms to do so. The taken
+ * semaphore is identified by @a sem (set to NULL on error).
+ *
+ * Be aware that the more semaphores specified in the group, the more stack
+ * space is required by the waiting thread.
+ *
+ * @param sem_array Array of semaphore pointers terminated by a K_END entry
+ * @param sem Identifies the semaphore that was taken
+ * @param timeout Maximum number of milliseconds to wait
+ *
+ * @retval 0 A semaphore was successfully taken
+ * @retval -EBUSY No semaphore was available (@a timeout = K_NO_WAIT)
+ * @retval -EAGAIN A timeout occurred while waiting for a semaphore
+ */
+extern int k_sem_group_take(struct k_sem *sem_array[], struct k_sem **sem,
+			    int32_t timeout);
+
+/**
+ * @brief Give all the semaphores in the group
+ *
+ * This routine will give each semaphore in the array of semaphore pointers.
+ *
+ * @param sem_array Array of semaphore pointers terminated by a K_END entry
+ *
+ * @return N/A
+ */
+extern void k_sem_group_give(struct k_sem *sem_array[]);
+
+/**
+ * @brief Reset the count to zero on each semaphore in the array
+ *
+ * This routine resets the count of each semaphore in the group to zero.
+ * Note that it does NOT have any impact on any thread that might have
+ * been previously pending on any of the semaphores.
+ *
+ * @param sem_array Array of semaphore pointers terminated by a K_END entry
+ *
+ * @return N/A
+ */
+extern void k_sem_group_reset(struct k_sem *sem_array[]);
+#endif
 
 #define K_SEM_INITIALIZER(obj, initial_count, count_limit) \
 	{ \
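For illustration, a minimal consumer built on the new API might look like the
following. This sketch is not part of the commit: the semaphore names are
hypothetical, and it assumes the era's K_SEM_DEFINE() macro, the K_FOREVER
timeout value, and the K_END terminator referenced by the documentation above.

	#include <kernel.h>

	K_SEM_DEFINE(data_sem, 0, 1);	/* given when data arrives */
	K_SEM_DEFINE(stop_sem, 0, 1);	/* given on shutdown */

	static struct k_sem *group[] = { &data_sem, &stop_sem, K_END };

	void consumer(void)
	{
		struct k_sem *taken;

		/* Block until one semaphore in the group can be taken */
		if (k_sem_group_take(group, &taken, K_FOREVER) != 0) {
			return;		/* -EBUSY or -EAGAIN: nothing taken */
		}

		if (taken == &stop_sem) {
			/* handle shutdown request */
		} else {
			/* handle incoming data */
		}
	}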


@@ -273,15 +273,21 @@ static inline int task_sem_take(ksem_t sem, int32_t timeout)
 #define task_sem_reset k_sem_reset
 #define task_sem_count_get k_sem_count_get
 
+#ifdef CONFIG_SEMAPHORE_GROUPS
 typedef ksem_t *ksemg_t;
 
 static inline ksem_t task_sem_group_take(ksemg_t group, int32_t timeout)
 {
-	return k_sem_group_take(group, _ticks_to_ms(timeout));
+	struct k_sem *sem;
+
+	(void)k_sem_group_take(group, &sem, _ticks_to_ms(timeout));
+
+	return sem;
 }
 
 #define task_sem_group_give k_sem_group_give
 #define task_sem_group_reset k_sem_group_reset
+#endif
 
 #define DEFINE_SEMAPHORE(name) \
 	K_SEM_DEFINE(_k_sem_obj_##name, 0, UINT_MAX); \
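Note that the wrapper can safely discard the new error code: k_sem_group_take()
sets @a sem to NULL on failure, so returning the pointer directly should
preserve the legacy task_sem_group_take() contract, which reported the taken
semaphore rather than a status value.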


@@ -277,4 +277,14 @@ config TIMESLICE_PRIORITY
 	  not subject to time slicing.
 
 endmenu
+
+config SEMAPHORE_GROUPS
+	bool "Enable semaphore groups"
+	default y
+	help
+	  This option enables support for semaphore groups. Threads that use
+	  semaphore groups require more stack space. Disabling this option
+	  will both decrease the footprint and improve the performance of
+	  the k_sem_give() routine.
+
 endmenu
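In practice, an application that never waits on more than one semaphore at a
time would turn this off (e.g. CONFIG_SEMAPHORE_GROUPS=n in its project
configuration) to drop the group bookkeeping from the k_sem_give() path.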


@@ -35,6 +35,19 @@
 #include <misc/dlist.h>
 #include <sched.h>
 
+#ifdef CONFIG_SEMAPHORE_GROUPS
+struct _sem_desc {
+	sys_dnode_t semg_node;		/* Node in list of semaphores */
+	struct k_thread *thread;	/* Thread waiting for semaphores */
+	struct k_sem *sem;		/* Semaphore on which to wait */
+};
+
+struct _sem_thread {
+	struct tcs_base dummy;
+	struct _sem_desc desc;
+};
+#endif
+
 void k_sem_init(struct k_sem *sem, unsigned int initial_count,
 		unsigned int limit)
 {
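These two structures carry the group-wait state: for each semaphore in the
array, the waiting thread places one _sem_thread on its own stack, pends its
dummy tcs_base entry on that semaphore's wait queue, and links the _sem_desc
back to the real thread through a per-wait list. That per-semaphore, on-stack
allocation is why the kernel.h documentation above warns that stack usage
grows with the size of the group.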
@@ -46,29 +59,233 @@ void k_sem_init(struct k_sem *sem, unsigned int initial_count,
 	SYS_TRACING_OBJ_INIT(nano_sem, sem);
 }
 
-void k_sem_give(struct k_sem *sem)
-{
-	int key = irq_lock();
-	struct tcs *first_pending_thread = _unpend_first_thread(&sem->wait_q);
-
-	if (first_pending_thread) {
-		_timeout_abort(first_pending_thread);
-		_ready_thread(first_pending_thread);
-
-		_set_thread_return_value(first_pending_thread, 0);
-
-		if (!_is_in_isr() && _must_switch_threads()) {
-			_Swap(key);
-			return;
-		}
-	} else {
-		if (likely(sem->count != sem->limit)) {
-			sem->count++;
-		}
-	}
-
-	irq_unlock(key);
-}
+#ifdef CONFIG_SEMAPHORE_GROUPS
+int k_sem_group_take(struct k_sem *sem_array[], struct k_sem **sem,
+		     int32_t timeout)
+{
+	unsigned int key;
+	struct k_sem *item = *sem_array;
+	int num = 0;
+
+	__ASSERT(sem_array[0] != K_END, "Empty semaphore list");
+
+	key = irq_lock();
+
+	do {
+		if (item->count > 0) {
+			item->count--;	/* Available semaphore found */
+			irq_unlock(key);
+			*sem = item;
+			return 0;
+		}
+		num++;
+		item = sem_array[num];
+	} while (item != K_END);
+
+	if (timeout == K_NO_WAIT) {
+		irq_unlock(key);
+		*sem = NULL;
+		return -EBUSY;
+	}
+
+	struct _sem_thread wait_objects[num];
+	int32_t priority = k_thread_priority_get(_current);
+	sys_dlist_t list;
+
+	sys_dlist_init(&list);
+	_current->swap_data = &list;
+
+	for (int i = 0; i < num; i++) {
+		wait_objects[i].dummy.flags = K_DUMMY;
+		wait_objects[i].dummy.prio = priority;
+
+		_timeout_tcs_init((struct k_thread *)&wait_objects[i].dummy);
+
+		sys_dlist_append(&list, &wait_objects[i].desc.semg_node);
+
+		wait_objects[i].desc.thread = _current;
+		wait_objects[i].desc.sem = sem_array[i];
+
+		_pend_thread((struct k_thread *)&wait_objects[i].dummy,
+			     &sem_array[i]->wait_q, timeout);
+	}
+
+	/* Pend the current thread on a dummy wait queue */
+	_wait_q_t wait_q;
+
+	sys_dlist_init(&wait_q);
+	_pend_current_thread(&wait_q, timeout);
+
+	if (_Swap(key) != 0) {
+		*sem = NULL;
+		return -EAGAIN;
+	}
+
+	/* The accepted semaphore is the only one left on the list */
+	struct _sem_desc *desc = (struct _sem_desc *)sys_dlist_get(&list);
+
+	*sem = desc->sem;
+	return 0;
+}
+
+/**
+ * @brief Cancel all but the specified semaphore in the list if the thread
+ *        was pended as part of a semaphore group operation
+ *
+ * Interrupts are locked prior to calling this routine.
+ *
+ * @return 0 if not part of a semaphore group, 1 if it is
+ */
+static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
+{
+	struct _sem_thread *dummy = (struct _sem_thread *)thread;
+	struct _sem_thread *sem_thread;
+	struct _sem_desc *desc = NULL;
+	sys_dlist_t *list;
+	sys_dnode_t *node;
+	sys_dnode_t *next;
+
+	if (!(thread->flags & K_DUMMY)) {
+		/*
+		 * The awakened thread is a real thread and thus was not
+		 * involved in a semaphore group operation.
+		 */
+		return 0;
+	}
+
+	/*
+	 * The awakened thread is a dummy thread and thus was involved
+	 * in a semaphore group operation.
+	 */
+	list = (sys_dlist_t *)dummy->desc.thread->swap_data;
+	node = sys_dlist_peek_head(list);
+
+	__ASSERT(node != NULL, "");
+
+	do {
+		next = sys_dlist_peek_next(list, node);
+
+		desc = (struct _sem_desc *)node;
+
+		if (desc->sem != sem) {
+			sem_thread = CONTAINER_OF(desc, struct _sem_thread,
+						  desc);
+
+			_timeout_abort((struct k_thread *)&sem_thread->dummy);
+			_unpend_thread((struct k_thread *)&sem_thread->dummy);
+
+			sys_dlist_remove(node);
+		}
+		node = next;
+	} while (node != NULL);
+
+	/*
+	 * If 'desc' is NULL, then the user-supplied 'sem_array' had only
+	 * one semaphore in it. This is considered a user error as
+	 * k_sem_give() should have been called instead.
+	 */
+	__ASSERT(desc != NULL, "");
+
+	/*
+	 * As this code may be executed several times by a semaphore group
+	 * give operation, it is important to ensure that the attempt to
+	 * ready the master thread is done only once.
+	 */
+	if (!_is_thread_ready(desc->thread)) {
+		_reset_thread_states(desc->thread, K_PENDING | K_TIMING);
+		_timeout_abort(desc->thread);
+		if (_is_thread_ready(desc->thread)) {
+			_add_thread_to_ready_q(desc->thread);
+		}
+	}
+	_set_thread_return_value(desc->thread, 0);
+
+	return 1;
+}
+#else
+#define handle_sem_group(sem, thread) 0
+#endif
+
+/**
+ * @brief Common semaphore give code
+ *
+ * @return true if _Swap() will need to be invoked; false if not
+ */
+static bool sem_give_common(struct k_sem *sem)
+{
+	struct k_thread *thread;
+
+	thread = _unpend_first_thread(&sem->wait_q);
+	if (!thread) {
+		/*
+		 * No thread is waiting on the semaphore.
+		 * Increment the semaphore's count unless
+		 * its limit has already been reached.
+		 */
+		sem->count += (sem->count != sem->limit);
+		return false;
+	}
+
+	_timeout_abort(thread);
+
+	if (!handle_sem_group(sem, thread)) {
+		/* Handle the non-group case */
+		_ready_thread(thread);
+		_set_thread_return_value(thread, 0);
+	}
+
+	return !_is_in_isr() && _must_switch_threads();
+}
+
+#ifdef CONFIG_SEMAPHORE_GROUPS
+void k_sem_group_give(struct k_sem *sem_array[])
+{
+	unsigned int key;
+	bool swap_needed = false;
+
+	__ASSERT(sem_array[0] != K_END, "Empty semaphore list");
+
+	key = irq_lock();
+
+	for (int i = 0; sem_array[i] != K_END; i++) {
+		swap_needed |= sem_give_common(sem_array[i]);
+	}
+
+	if (swap_needed) {
+		_Swap(key);
+	} else {
+		irq_unlock(key);
+	}
+}
+
+void k_sem_group_reset(struct k_sem *sem_array[])
+{
+	unsigned int key;
+
+	key = irq_lock();
+	for (int i = 0; sem_array[i] != K_END; i++) {
+		sem_array[i]->count = 0;
+	}
+	irq_unlock(key);
+}
+#endif
+
+void k_sem_give(struct k_sem *sem)
+{
+	unsigned int key;
+
+	key = irq_lock();
+
+	if (sem_give_common(sem)) {
+		_Swap(key);
+	} else {
+		irq_unlock(key);
+	}
+}
 
 int k_sem_take(struct k_sem *sem, int32_t timeout)
 {
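To round out the consumer sketch shown earlier, the producer side might look
like the following. Again a hypothetical sketch, not part of the commit,
reusing the group[] array defined in that example:

	/* Give every semaphore in the K_END-terminated array; a thread
	 * blocked in k_sem_group_take() is readied at most once, since
	 * handle_sem_group() cancels its other pending dummy entries.
	 */
	void producer(void)
	{
		k_sem_group_give(group);
	}

	/* Discard pending counts; per the API documentation, threads
	 * already pending on the semaphores are unaffected.
	 */
	void producer_flush(void)
	{
		k_sem_group_reset(group);
	}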