/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief dynamic-size QUEUE object.
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <debug/object_tracing_common.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <wait_q.h>
#include <ksched.h>
#include <misc/slist.h>
#include <init.h>

extern struct k_queue _k_queue_list_start[];
extern struct k_queue _k_queue_list_end[];

#ifdef CONFIG_OBJECT_TRACING

struct k_queue *_trace_list_k_queue;

/*
 * Complete initialization of statically defined queues.
 */
static int init_queue_module(struct device *dev)
{
	ARG_UNUSED(dev);

	struct k_queue *queue;

	for (queue = _k_queue_list_start; queue < _k_queue_list_end; queue++) {
		SYS_TRACING_OBJ_INIT(k_queue, queue);
	}
	return 0;
}

SYS_INIT(init_queue_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_OBJECT_TRACING */
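
/* Run-time initialization of a queue: empty data list, no pended threads,
 * and (with CONFIG_POLL) no registered poll events.
 */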
void k_queue_init(struct k_queue *queue)
{
	sys_slist_init(&queue->data_q);
	sys_dlist_init(&queue->wait_q);
#if defined(CONFIG_POLL)
	sys_dlist_init(&queue->poll_events);
#endif

	SYS_TRACING_OBJ_INIT(k_queue, queue);
}
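
/* prepare_thread_to_run() hands @a data directly to a thread that had been
 * pended on the queue and makes it runnable again. It is only needed when
 * CONFIG_POLL is disabled, where producers wake consumers directly.
 */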
#if !defined(CONFIG_POLL)
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
	_abort_thread_timeout(thread);
	_ready_thread(thread);
	_set_thread_return_value_with_data(thread, 0, data);
}
#endif /* CONFIG_POLL */
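
/* Notify any k_poll() waiters registered on this queue of the new @a state;
 * this compiles to a no-op when CONFIG_POLL is disabled.
 */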
static inline void handle_poll_events(struct k_queue *queue, u32_t state)
{
#ifdef CONFIG_POLL
	_handle_obj_poll_events(&queue->poll_events, state);
#endif
}
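
/* Cancel a pending wait on the queue: the first thread blocked in
 * k_queue_get() (or, with CONFIG_POLL, a k_poll() waiter) returns NULL as if
 * its timeout had expired, even a K_FOREVER one. This lets a producer signal
 * out-of-band conditions (end of processing, an error, ...) to a consumer.
 */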
void k_queue_cancel_wait(struct k_queue *queue)
{
	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *first_pending_thread;

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		prepare_thread_to_run(first_pending_thread, NULL);
	}
#else
	handle_poll_events(queue, K_POLL_STATE_NOT_READY);
#endif /* !CONFIG_POLL */

	_reschedule_noyield(key);
}
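
/* Insert @a data right after @a prev. When CONFIG_POLL is disabled and a
 * consumer is already pended, the item is handed to that thread directly and
 * never touches the list; otherwise it is linked in and pollers are notified.
 */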
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *first_pending_thread;

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		prepare_thread_to_run(first_pending_thread, data);
		_reschedule_noyield(key);
		return;
	}
#endif /* !CONFIG_POLL */

	sys_slist_insert(&queue->data_q, prev, data);

#if defined(CONFIG_POLL)
	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* CONFIG_POLL */

	_reschedule_noyield(key);
}
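
/* k_queue_append() and k_queue_prepend() are thin wrappers around
 * k_queue_insert(): append links the item after the current tail, prepend
 * passes a NULL predecessor so the item lands at the head.
 */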
void k_queue_append(struct k_queue *queue, void *data)
{
	return k_queue_insert(queue, queue->data_q.tail, data);
}

void k_queue_prepend(struct k_queue *queue, void *data)
{
	return k_queue_insert(queue, NULL, data);
}
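
/* Append a NULL-terminated singly-linked list of items in a single
 * operation; the first word of each item is treated as its "next" pointer.
 * When CONFIG_POLL is disabled, items are first handed out one-per-thread to
 * already-pended consumers, and whatever remains goes onto the data list.
 */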
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	__ASSERT(head && tail, "invalid head or tail");

	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *thread;

	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
		prepare_thread_to_run(thread, head);
		head = *(void **)head;
	}

	if (head) {
		sys_slist_append_list(&queue->data_q, head, tail);
	}

#else
	sys_slist_append_list(&queue->data_q, head, tail);
	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* !CONFIG_POLL */

	_reschedule_noyield(key);
}
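
/* Drain a sys_slist_t into the queue in one shot and reinitialize the source
 * list so the caller can keep using it.
 */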
void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
{
	__ASSERT(!sys_slist_is_empty(list), "list must not be empty");

	/*
	 * note: this works as long as:
	 * - the slist implementation keeps the next pointer as the first
	 *   field of the node object type
	 * - list->tail->next = NULL.
	 */
	k_queue_append_list(queue, list->head, list->tail);
	sys_slist_init(list);
}
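
/* Blocking path of k_queue_get() when CONFIG_POLL is enabled: wait for data
 * with k_poll() and, on a K_FOREVER wait, retry, since another consumer may
 * take the item between the poll wake-up and the list removal.
 */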
#if defined(CONFIG_POLL)
static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
{
	struct k_poll_event event;
	int err;
	unsigned int key;
	void *val;

	k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, queue);

	do {
		event.state = K_POLL_STATE_NOT_READY;

		err = k_poll(&event, 1, timeout);
		if (err) {
			return NULL;
		}

		__ASSERT_NO_MSG(event.state ==
				K_POLL_STATE_FIFO_DATA_AVAILABLE);

		/* sys_slist_* aren't thread safe, so they must always be
		 * protected by irq_lock.
		 */
		key = irq_lock();
		val = sys_slist_get(&queue->data_q);
		irq_unlock(key);
	} while (!val && timeout == K_FOREVER);

	return val;
}
#endif /* CONFIG_POLL */
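
/* Remove and return the first item in the queue, pending for up to
 * @a timeout if it is empty. Returns NULL on timeout or when the wait is
 * cancelled via k_queue_cancel_wait().
 */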
void *k_queue_get(struct k_queue *queue, s32_t timeout)
{
	unsigned int key;
	void *data;

	key = irq_lock();

	if (likely(!sys_slist_is_empty(&queue->data_q))) {
		data = sys_slist_get_not_empty(&queue->data_q);
		irq_unlock(key);
		return data;
	}

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return NULL;
	}

#if defined(CONFIG_POLL)
	irq_unlock(key);

	return k_queue_poll(queue, timeout);

#else
	int ret = _pend_current_thread(key, &queue->wait_q, timeout);

	return ret ? NULL : _current->base.swap_data;
#endif /* CONFIG_POLL */
}