2017-02-21 14:50:42 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2010-2016 Wind River Systems, Inc.
|
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @file
|
|
|
|
*
|
|
|
|
* @brief dynamic-size QUEUE object.
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
#include <kernel.h>
|
|
|
|
#include <kernel_structs.h>
|
|
|
|
#include <debug/object_tracing_common.h>
|
|
|
|
#include <toolchain.h>
|
2017-06-17 11:30:47 -04:00
|
|
|
#include <linker/sections.h>
|
2017-02-21 14:50:42 +02:00
|
|
|
#include <wait_q.h>
|
|
|
|
#include <ksched.h>
|
2018-04-27 13:21:22 -07:00
|
|
|
#include <misc/sflist.h>
|
2017-02-21 14:50:42 +02:00
|
|
|
#include <init.h>
|
2018-04-27 13:21:22 -07:00
|
|
|
#include <syscall_handler.h>
|
2017-02-21 14:50:42 +02:00
|
|
|
|
|
|
|
extern struct k_queue _k_queue_list_start[];
|
|
|
|
extern struct k_queue _k_queue_list_end[];
|
|
|
|
|
2018-04-27 13:21:22 -07:00
|
|
|
/* Wrapper node allocated behind the scenes by the k_queue_alloc_*()
 * family when the caller's data is not linked into the queue directly.
 * The sfnode's flag bits (set to 0x1 at init) mark the node as such a
 * wrapper so z_queue_node_peek() knows to free it and return 'data'.
 */
struct alloc_node {
	sys_sfnode_t node;
	void *data;
};
|
|
|
|
|
|
|
|
/* Extract the user data pointer from a queue list node.
 *
 * If the node's flag bits are set, the enqueue operation for this item
 * did a behind-the-scenes allocation of an alloc_node wrapper; return
 * the wrapped data pointer and, if @a needs_free, free the wrapper.
 * Otherwise the data was placed in the queue directly (first 4 bytes
 * reserved for the linked list; user mode isn't allowed to do this,
 * although it can receive data sent this way) and the node pointer
 * itself is the data.
 */
void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free)
{
	if (!node || !sys_sfnode_flags_get(node)) {
		/* Directly-enqueued item: the node is the data */
		return (void *)node;
	}

	struct alloc_node *anode = CONTAINER_OF(node, struct alloc_node,
						node);
	void *data = anode->data;

	if (needs_free) {
		k_free(anode);
	}

	return data;
}
|
|
|
|
|
2017-02-21 14:50:42 +02:00
|
|
|
#ifdef CONFIG_OBJECT_TRACING
|
|
|
|
|
2017-05-18 12:16:45 +02:00
|
|
|
struct k_queue *_trace_list_k_queue;
|
|
|
|
|
2017-02-21 14:50:42 +02:00
|
|
|
/*
|
|
|
|
* Complete initialization of statically defined queues.
|
|
|
|
*/
|
|
|
|
static int init_queue_module(struct device *dev)
|
|
|
|
{
|
|
|
|
ARG_UNUSED(dev);
|
|
|
|
|
|
|
|
struct k_queue *queue;
|
|
|
|
|
|
|
|
for (queue = _k_queue_list_start; queue < _k_queue_list_end; queue++) {
|
|
|
|
SYS_TRACING_OBJ_INIT(k_queue, queue);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
SYS_INIT(init_queue_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
|
|
|
|
|
|
|
|
#endif /* CONFIG_OBJECT_TRACING */
|
|
|
|
|
2018-04-27 13:21:22 -07:00
|
|
|
/**
 * @brief Initialize a queue object at runtime.
 *
 * Sets up the embedded flagged data list and the wait queue; the poll
 * event list only exists when CONFIG_POLL is enabled.
 */
void _impl_k_queue_init(struct k_queue *queue)
{
	sys_sflist_init(&queue->data_q);
	_waitq_init(&queue->wait_q);
#if defined(CONFIG_POLL)
	sys_dlist_init(&queue->poll_events);
#endif

	SYS_TRACING_OBJ_INIT(k_queue, queue);
	/* register with the kernel object system (userspace permissions) */
	_k_object_init(queue);
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
2018-05-04 15:57:57 -07:00
|
|
|
Z_SYSCALL_HANDLER(k_queue_init, queue_ptr)
|
2018-04-27 13:21:22 -07:00
|
|
|
{
|
|
|
|
struct k_queue *queue = (struct k_queue *)queue_ptr;
|
|
|
|
|
2018-05-04 15:57:57 -07:00
|
|
|
Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE));
|
2018-04-27 13:21:22 -07:00
|
|
|
_impl_k_queue_init(queue);
|
|
|
|
return 0;
|
2017-02-21 14:50:42 +02:00
|
|
|
}
|
2018-04-27 13:21:22 -07:00
|
|
|
#endif
|
2017-02-21 14:50:42 +02:00
|
|
|
|
2017-07-13 12:43:59 +03:00
|
|
|
#if !defined(CONFIG_POLL)
|
2017-02-21 14:50:42 +02:00
|
|
|
/* Make a thread unpended from this queue's wait_q runnable, handing it
 * @a data as the value its blocked get operation will return (return
 * code 0, swap_data = data). Callers in this file invoke it with the
 * irq_lock held.
 */
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
	_ready_thread(thread);
	_set_thread_return_value_with_data(thread, 0, data);
}
|
2017-07-13 12:43:59 +03:00
|
|
|
#endif /* CONFIG_POLL */
|
2017-02-21 14:50:42 +02:00
|
|
|
|
kernel: Scheduler refactoring: use _reschedule_*() always
There was a somewhat promiscuous pattern in the kernel where IPC
mechanisms would do something that might effect the current thread
choice, then check _must_switch_threads() (or occasionally
__must_switch_threads -- don't ask, the distinction is being replaced
by real English words), sometimes _is_in_isr() (but not always, even
in contexts where that looks like it would be a mistake), and then
call _Swap() if everything is OK, otherwise releasing the irq_lock().
Sometimes this was done directly, sometimes via the inverted test,
sometimes (poll, heh) by doing the test when the thread state was
modified and then needlessly passing the result up the call stack to
the point of the _Swap().
And some places were just calling _reschedule_threads(), which did all
this already.
Unify all this madness. The old _reschedule_threads() function has
split into two variants: _reschedule_yield() and
_reschedule_noyield(). The latter is the "normal" one that respects
the cooperative priority of the current thread (i.e. it won't switch
out even if there is a higher priority thread ready -- the current
thread has to pend itself first), the former is used in the handful of
places where code was doing a swap unconditionally, just to preserve
precise behavior across the refactor. I'm not at all convinced it
should exist...
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2018-03-26 10:54:40 -07:00
|
|
|
/* Notify any k_poll() waiters registered on this queue of the new
 * @a state; compiles to an empty function when CONFIG_POLL is off.
 */
static inline void handle_poll_events(struct k_queue *queue, u32_t state)
{
#ifdef CONFIG_POLL
	_handle_obj_poll_events(&queue->poll_events, state);
#endif
}
|
|
|
|
|
2018-04-27 13:21:22 -07:00
|
|
|
/**
 * @brief Cancel the wait of the first thread blocked on @a queue.
 *
 * The woken getter returns NULL, as if its timeout had expired (even
 * K_FOREVER). With CONFIG_POLL, the equivalent is signalling the
 * K_POLL_STATE_NOT_READY state to poll waiters.
 */
void _impl_k_queue_cancel_wait(struct k_queue *queue)
{
	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *first_pending_thread;

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		/* NULL data -> the getter's k_queue_get() returns NULL */
		prepare_thread_to_run(first_pending_thread, NULL);
	}
#else
	handle_poll_events(queue, K_POLL_STATE_NOT_READY);
#endif /* !CONFIG_POLL */

	/* releases the irq_lock, possibly switching threads */
	_reschedule(key);
}
|
|
|
|
|
2018-04-27 13:21:22 -07:00
|
|
|
#ifdef CONFIG_USERSPACE
|
2018-05-04 15:57:57 -07:00
|
|
|
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_queue_cancel_wait, K_OBJ_QUEUE,
|
|
|
|
struct k_queue *);
|
2018-04-27 13:21:22 -07:00
|
|
|
#endif
|
|
|
|
|
|
|
|
static int queue_insert(struct k_queue *queue, void *prev, void *data,
|
|
|
|
bool alloc)
|
2017-02-21 14:50:42 +02:00
|
|
|
{
|
2017-07-13 12:43:59 +03:00
|
|
|
unsigned int key = irq_lock();
|
|
|
|
#if !defined(CONFIG_POLL)
|
2017-02-21 14:50:42 +02:00
|
|
|
struct k_thread *first_pending_thread;
|
|
|
|
|
|
|
|
first_pending_thread = _unpend_first_thread(&queue->wait_q);
|
|
|
|
|
|
|
|
if (first_pending_thread) {
|
|
|
|
prepare_thread_to_run(first_pending_thread, data);
|
2018-04-02 18:40:10 -07:00
|
|
|
_reschedule(key);
|
2018-04-27 13:21:22 -07:00
|
|
|
return 0;
|
2017-02-21 14:50:42 +02:00
|
|
|
}
|
2017-07-13 12:43:59 +03:00
|
|
|
#endif /* !CONFIG_POLL */
|
|
|
|
|
2018-04-27 13:21:22 -07:00
|
|
|
/* Only need to actually allocate if no threads are pending */
|
|
|
|
if (alloc) {
|
|
|
|
struct alloc_node *anode;
|
|
|
|
|
|
|
|
anode = z_thread_malloc(sizeof(*anode));
|
|
|
|
if (!anode) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
anode->data = data;
|
|
|
|
sys_sfnode_init(&anode->node, 0x1);
|
|
|
|
data = anode;
|
|
|
|
} else {
|
|
|
|
sys_sfnode_init(data, 0x0);
|
|
|
|
}
|
|
|
|
sys_sflist_insert(&queue->data_q, prev, data);
|
2017-07-13 12:43:59 +03:00
|
|
|
|
|
|
|
#if defined(CONFIG_POLL)
|
kernel: Scheduler refactoring: use _reschedule_*() always
There was a somewhat promiscuous pattern in the kernel where IPC
mechanisms would do something that might effect the current thread
choice, then check _must_switch_threads() (or occasionally
__must_switch_threads -- don't ask, the distinction is being replaced
by real English words), sometimes _is_in_isr() (but not always, even
in contexts where that looks like it would be a mistake), and then
call _Swap() if everything is OK, otherwise releasing the irq_lock().
Sometimes this was done directly, sometimes via the inverted test,
sometimes (poll, heh) by doing the test when the thread state was
modified and then needlessly passing the result up the call stack to
the point of the _Swap().
And some places were just calling _reschedule_threads(), which did all
this already.
Unify all this madness. The old _reschedule_threads() function has
split into two variants: _reschedule_yield() and
_reschedule_noyield(). The latter is the "normal" one that respects
the cooperative priority of the current thread (i.e. it won't switch
out even if there is a higher priority thread ready -- the current
thread has to pend itself first), the former is used in the handful of
places where code was doing a swap unconditionally, just to preserve
precise behavior across the refactor. I'm not at all convinced it
should exist...
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2018-03-26 10:54:40 -07:00
|
|
|
handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
|
2017-07-13 12:43:59 +03:00
|
|
|
#endif /* CONFIG_POLL */
|
2017-02-21 14:50:42 +02:00
|
|
|
|
2018-04-02 18:40:10 -07:00
|
|
|
_reschedule(key);
|
2018-04-27 13:21:22 -07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
|
|
|
|
{
|
|
|
|
queue_insert(queue, prev, data, false);
|
2017-02-21 14:50:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void k_queue_append(struct k_queue *queue, void *data)
|
|
|
|
{
|
2018-04-27 13:21:22 -07:00
|
|
|
queue_insert(queue, sys_sflist_peek_tail(&queue->data_q), data, false);
|
2017-02-21 14:50:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void k_queue_prepend(struct k_queue *queue, void *data)
|
|
|
|
{
|
2018-04-27 13:21:22 -07:00
|
|
|
queue_insert(queue, NULL, data, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
int _impl_k_queue_alloc_append(struct k_queue *queue, void *data)
|
|
|
|
{
|
|
|
|
return queue_insert(queue, sys_sflist_peek_tail(&queue->data_q), data,
|
|
|
|
true);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
2018-05-04 15:57:57 -07:00
|
|
|
Z_SYSCALL_HANDLER(k_queue_alloc_append, queue, data)
|
2018-04-27 13:21:22 -07:00
|
|
|
{
|
2018-05-04 15:57:57 -07:00
|
|
|
Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
|
2018-04-27 13:21:22 -07:00
|
|
|
|
|
|
|
return _impl_k_queue_alloc_append((struct k_queue *)queue,
|
|
|
|
(void *)data);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
int _impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
|
|
|
|
{
|
|
|
|
return queue_insert(queue, NULL, data, true);
|
2017-02-21 14:50:42 +02:00
|
|
|
}
|
|
|
|
|
2018-04-27 13:21:22 -07:00
|
|
|
#ifdef CONFIG_USERSPACE
|
2018-05-04 15:57:57 -07:00
|
|
|
Z_SYSCALL_HANDLER(k_queue_alloc_prepend, queue, data)
|
2018-04-27 13:21:22 -07:00
|
|
|
{
|
2018-05-04 15:57:57 -07:00
|
|
|
Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
|
2018-04-27 13:21:22 -07:00
|
|
|
|
|
|
|
return _impl_k_queue_alloc_prepend((struct k_queue *)queue,
|
|
|
|
(void *)data);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-02-21 14:50:42 +02:00
|
|
|
/**
 * @brief Append a NULL-terminated singly-linked list to the queue.
 *
 * Each node's first word is assumed to be its 'next' link. In non-poll
 * builds, list elements are handed out to pending threads first; any
 * remainder is appended to the queue's data list in one operation.
 */
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	__ASSERT(head && tail, "invalid head or tail");

	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *thread;

	/* feed waiting threads one element each until either runs out */
	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
		prepare_thread_to_run(thread, head);
		/* advance via the node's first word (the 'next' link) */
		head = *(void **)head;
	}

	if (head) {
		sys_sflist_append_list(&queue->data_q, head, tail);
	}

#else
	sys_sflist_append_list(&queue->data_q, head, tail);
	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* !CONFIG_POLL */

	/* releases the irq_lock, possibly switching threads */
	_reschedule(key);
}
|
|
|
|
|
|
|
|
/**
 * @brief Move an entire slist's contents into the queue.
 *
 * @a list is re-initialized (emptied) afterwards; ownership of every
 * node transfers to the queue.
 */
void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
{
	__ASSERT(!sys_slist_is_empty(list), "list must not be empty");

	/*
	 * note: this works as long as:
	 * - the slist implementation keeps the next pointer as the first
	 *   field of the node object type
	 * - list->tail->next = NULL.
	 * - sflist implementation only differs from slist by stuffing
	 *   flag bytes in the lower order bits of the data pointer
	 * - source list is really an slist and not an sflist with flags set
	 */
	k_queue_append_list(queue, list->head, list->tail);
	sys_slist_init(list);
}
|
|
|
|
|
2017-07-13 12:43:59 +03:00
|
|
|
#if defined(CONFIG_POLL)
|
|
|
|
static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
|
|
|
|
{
|
|
|
|
struct k_poll_event event;
|
|
|
|
int err;
|
2017-10-16 13:36:37 +03:00
|
|
|
unsigned int key;
|
|
|
|
void *val;
|
2017-07-13 12:43:59 +03:00
|
|
|
|
|
|
|
k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
|
|
|
|
K_POLL_MODE_NOTIFY_ONLY, queue);
|
|
|
|
|
2017-10-17 11:34:21 +03:00
|
|
|
do {
|
|
|
|
event.state = K_POLL_STATE_NOT_READY;
|
2017-07-13 12:43:59 +03:00
|
|
|
|
2017-10-17 11:34:21 +03:00
|
|
|
err = k_poll(&event, 1, timeout);
|
|
|
|
if (err) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2017-07-13 12:43:59 +03:00
|
|
|
|
2017-10-17 11:34:21 +03:00
|
|
|
__ASSERT_NO_MSG(event.state ==
|
|
|
|
K_POLL_STATE_FIFO_DATA_AVAILABLE);
|
|
|
|
|
2018-04-27 13:21:22 -07:00
|
|
|
/* sys_sflist_* aren't threadsafe, so must be always protected
|
|
|
|
* by irq_lock.
|
2017-10-17 11:34:21 +03:00
|
|
|
*/
|
|
|
|
key = irq_lock();
|
2018-04-27 13:21:22 -07:00
|
|
|
val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
|
2017-10-17 11:34:21 +03:00
|
|
|
irq_unlock(key);
|
|
|
|
} while (!val && timeout == K_FOREVER);
|
2017-07-13 12:43:59 +03:00
|
|
|
|
2017-10-16 13:36:37 +03:00
|
|
|
return val;
|
2017-07-13 12:43:59 +03:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_POLL */
|
|
|
|
|
2018-04-27 13:21:22 -07:00
|
|
|
/**
 * @brief Remove and return the first element of the queue.
 *
 * Blocks up to @a timeout if the queue is empty (K_NO_WAIT returns
 * NULL immediately). Returns NULL on timeout or cancelled wait.
 */
void *_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
{
	unsigned int key;
	void *data;

	key = irq_lock();

	if (likely(!sys_sflist_is_empty(&queue->data_q))) {
		sys_sfnode_t *node;

		node = sys_sflist_get_not_empty(&queue->data_q);
		/* frees any alloc_node wrapper and yields the payload */
		data = z_queue_node_peek(node, true);
		irq_unlock(key);
		return data;
	}

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return NULL;
	}

#if defined(CONFIG_POLL)
	irq_unlock(key);

	return k_queue_poll(queue, timeout);

#else
	/* _pend_current_thread() consumes the irq_lock key */
	int ret = _pend_current_thread(key, &queue->wait_q, timeout);

	/* nonzero ret => timeout/cancel; else the putter left the data
	 * in our swap_data via prepare_thread_to_run()
	 */
	return ret ? NULL : _current->base.swap_data;
#endif /* CONFIG_POLL */
}
|
2018-04-27 13:21:22 -07:00
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
2018-05-04 15:57:57 -07:00
|
|
|
Z_SYSCALL_HANDLER(k_queue_get, queue, timeout_p)
|
2018-04-27 13:21:22 -07:00
|
|
|
{
|
|
|
|
s32_t timeout = timeout_p;
|
|
|
|
|
2018-05-04 15:57:57 -07:00
|
|
|
Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
|
2018-04-27 13:21:22 -07:00
|
|
|
|
|
|
|
return (u32_t)_impl_k_queue_get((struct k_queue *)queue, timeout);
|
|
|
|
}
|
|
|
|
|
2018-05-04 15:57:57 -07:00
|
|
|
Z_SYSCALL_HANDLER1_SIMPLE(k_queue_is_empty, K_OBJ_QUEUE, struct k_queue *);
|
|
|
|
Z_SYSCALL_HANDLER1_SIMPLE(k_queue_peek_head, K_OBJ_QUEUE, struct k_queue *);
|
|
|
|
Z_SYSCALL_HANDLER1_SIMPLE(k_queue_peek_tail, K_OBJ_QUEUE, struct k_queue *);
|
2018-04-27 13:21:22 -07:00
|
|
|
#endif /* CONFIG_USERSPACE */
|