/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief dynamic-size QUEUE object.
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <debug/object_tracing_common.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <wait_q.h>
#include <ksched.h>
#include <sys/sflist.h>
#include <init.h>
#include <syscall_handler.h>
#include <kernel_internal.h>
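
/* Wrapper used by the k_queue_alloc_*() variants: the kernel allocates
 * this node on behalf of the caller so the queued data pointer does not
 * need to reserve its first word for the list link.
 */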
struct alloc_node {
        sys_sfnode_t node;
        void *data;
};
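
/* Return the data pointer carried by a queue node.  If the node's flag
 * bit is set it is an alloc_node wrapper created at enqueue time, so
 * return the wrapped data pointer and free the wrapper when needs_free
 * is true.
 */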
void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free)
{
        void *ret;

        if ((node != NULL) && (sys_sfnode_flags_get(node) != (u8_t)0)) {
                /* If the flag is set, then the enqueue operation for this item
                 * did a behind-the-scenes memory allocation of an alloc_node
                 * struct, which is what got put in the queue. Free it and pass
                 * back the data pointer.
                 */
                struct alloc_node *anode;

                anode = CONTAINER_OF(node, struct alloc_node, node);
                ret = anode->data;
                if (needs_free) {
                        k_free(anode);
                }
        } else {
                /* Data was directly placed in the queue; the first word is
                 * reserved for the linked list. User mode isn't allowed to
                 * do this, although it can get data sent this way.
                 */
                ret = (void *)node;
        }

        return ret;
}

#ifdef CONFIG_OBJECT_TRACING

struct k_queue *_trace_list_k_queue;

/*
 * Complete initialization of statically defined queues.
 */
static int init_queue_module(struct device *dev)
{
        ARG_UNUSED(dev);

        Z_STRUCT_SECTION_FOREACH(k_queue, queue) {
                SYS_TRACING_OBJ_INIT(k_queue, queue);
        }
        return 0;
}

SYS_INIT(init_queue_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_OBJECT_TRACING */
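
/* Runtime initialization of a queue: empty data list, unlocked spinlock,
 * empty wait queue and, with CONFIG_POLL, no registered poll events.
 */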
void z_impl_k_queue_init(struct k_queue *queue)
{
        sys_sflist_init(&queue->data_q);
        queue->lock = (struct k_spinlock) {};
        z_waitq_init(&queue->wait_q);
#if defined(CONFIG_POLL)
        sys_dlist_init(&queue->poll_events);
#endif

        SYS_TRACING_OBJ_INIT(k_queue, queue);
        z_object_init(queue);
}

#ifdef CONFIG_USERSPACE
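/* Syscall verification handler: the object must be a valid k_queue that
 * has never been initialized before it is initialized on behalf of user
 * mode.
 */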
static inline void z_vrfy_k_queue_init(struct k_queue *queue)
{
        Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE));
        z_impl_k_queue_init(queue);
}
#include <syscalls/k_queue_init_mrsh.c>
#endif

#if !defined(CONFIG_POLL)
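/* Hand a data item directly to a thread pended on the queue and make it
 * ready to run.
 */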
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
        z_ready_thread(thread);
        z_set_thread_return_value_with_data(thread, 0, data);
}
#endif /* CONFIG_POLL */

#ifdef CONFIG_POLL
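/* Signal the given state to any k_poll() callers registered on this
 * queue's poll_events list.
 */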
static inline void handle_poll_events(struct k_queue *queue, u32_t state)
{
        z_handle_obj_poll_events(&queue->poll_events, state);
}
#endif
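
/* Cancel a wait on the queue: the first pended getter (or, with
 * CONFIG_POLL, any poller) is woken and its k_queue_get() returns NULL
 * as if its timeout had expired, even for K_FOREVER.
 */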
void z_impl_k_queue_cancel_wait(struct k_queue *queue)
{
        k_spinlock_key_t key = k_spin_lock(&queue->lock);
#if !defined(CONFIG_POLL)
        struct k_thread *first_pending_thread;

        first_pending_thread = z_unpend_first_thread(&queue->wait_q);

        if (first_pending_thread != NULL) {
                prepare_thread_to_run(first_pending_thread, NULL);
        }
#else
        handle_poll_events(queue, K_POLL_STATE_CANCELLED);
#endif /* !CONFIG_POLL */

        z_reschedule(&queue->lock, key);
}

#ifdef CONFIG_USERSPACE
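/* Syscall verification wrapper for k_queue_cancel_wait(). */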
static inline void z_vrfy_k_queue_cancel_wait(struct k_queue *queue)
{
        Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
        z_impl_k_queue_cancel_wait(queue);
}
#include <syscalls/k_queue_cancel_wait_mrsh.c>
#endif
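
/* Common insertion path.  If a getter is already pended (and CONFIG_POLL
 * is disabled) the item is handed to it directly; otherwise it is linked
 * into the data list, behind an alloc_node wrapper when 'alloc' is true.
 * Returns -ENOMEM if that wrapper cannot be allocated.
 */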
static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
                          bool alloc)
{
        k_spinlock_key_t key = k_spin_lock(&queue->lock);
#if !defined(CONFIG_POLL)
        struct k_thread *first_pending_thread;

        first_pending_thread = z_unpend_first_thread(&queue->wait_q);

        if (first_pending_thread != NULL) {
                prepare_thread_to_run(first_pending_thread, data);
                z_reschedule(&queue->lock, key);
                return 0;
        }
#endif /* !CONFIG_POLL */

        /* Only need to actually allocate if no threads are pending */
        if (alloc) {
                struct alloc_node *anode;

                anode = z_thread_malloc(sizeof(*anode));
                if (anode == NULL) {
                        k_spin_unlock(&queue->lock, key);
                        return -ENOMEM;
                }
                anode->data = data;
                sys_sfnode_init(&anode->node, 0x1);
                data = anode;
        } else {
                sys_sfnode_init(data, 0x0);
        }
        sys_sflist_insert(&queue->data_q, prev, data);

#if defined(CONFIG_POLL)
        handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* CONFIG_POLL */

        z_reschedule(&queue->lock, key);
        return 0;
}
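
/* Non-allocating insert/append/prepend: the caller's item must reserve
 * its first word for the list link.
 */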
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
        (void)queue_insert(queue, prev, data, false);
}

void k_queue_append(struct k_queue *queue, void *data)
{
        (void)queue_insert(queue, sys_sflist_peek_tail(&queue->data_q),
                           data, false);
}

void k_queue_prepend(struct k_queue *queue, void *data)
{
        (void)queue_insert(queue, NULL, data, false);
}
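
/* Allocating append/prepend: the kernel allocates the list node itself,
 * which is what makes these variants usable as system calls from user
 * mode.
 */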
s32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
{
        return queue_insert(queue, sys_sflist_peek_tail(&queue->data_q), data,
                            true);
}

#ifdef CONFIG_USERSPACE
static inline s32_t z_vrfy_k_queue_alloc_append(struct k_queue *queue, void *data)
{
        Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
        return z_impl_k_queue_alloc_append(queue, data);
}
#include <syscalls/k_queue_alloc_append_mrsh.c>
#endif

s32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{
        return queue_insert(queue, NULL, data, true);
}

#ifdef CONFIG_USERSPACE
static inline s32_t z_vrfy_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{
        Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
        return z_impl_k_queue_alloc_prepend(queue, data);
}
#include <syscalls/k_queue_alloc_prepend_mrsh.c>
#endif
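
/* Append a NULL-terminated, singly-linked list of items in one operation.
 * Without CONFIG_POLL, pended getters are served directly first; whatever
 * remains is linked onto the end of the data list.
 */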
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
        __ASSERT(head && tail, "invalid head or tail");

        k_spinlock_key_t key = k_spin_lock(&queue->lock);
#if !defined(CONFIG_POLL)
        struct k_thread *thread = NULL;

        if (head != NULL) {
                thread = z_unpend_first_thread(&queue->wait_q);
        }

        while ((head != NULL) && (thread != NULL)) {
                prepare_thread_to_run(thread, head);
                head = *(void **)head;
                thread = z_unpend_first_thread(&queue->wait_q);
        }

        if (head != NULL) {
                sys_sflist_append_list(&queue->data_q, head, tail);
        }

#else
        sys_sflist_append_list(&queue->data_q, head, tail);
        handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* !CONFIG_POLL */

        z_reschedule(&queue->lock, key);
}

void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
{
        __ASSERT(!sys_slist_is_empty(list), "list must not be empty");

        /*
         * note: this works as long as:
         * - the slist implementation keeps the next pointer as the first
         *   field of the node object type
         * - list->tail->next = NULL.
         * - sflist implementation only differs from slist by stuffing
         *   flag bytes in the lower order bits of the data pointer
         * - source list is really an slist and not an sflist with flags set
         */
        k_queue_append_list(queue, list->head, list->tail);
        sys_slist_init(list);
}

#if defined(CONFIG_POLL)
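/* Poll-based blocking get.  The loop re-checks the list and the remaining
 * timeout because another getter can consume the item between k_poll()
 * returning and sys_sflist_get() running, which would otherwise cause a
 * spurious NULL return before the timeout expires.
 */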
static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
{
        struct k_poll_event event;
        int err, elapsed = 0, done = 0;
        k_spinlock_key_t key;
        void *val;
        u32_t start;

        k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
                          K_POLL_MODE_NOTIFY_ONLY, queue);

        if (timeout != K_FOREVER) {
                start = k_uptime_get_32();
        }

        do {
                event.state = K_POLL_STATE_NOT_READY;

                err = k_poll(&event, 1, timeout - elapsed);

                if (err && err != -EAGAIN) {
                        return NULL;
                }

                key = k_spin_lock(&queue->lock);
                val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
                k_spin_unlock(&queue->lock, key);

                if ((val == NULL) && (timeout != K_FOREVER)) {
                        elapsed = k_uptime_get_32() - start;
                        done = elapsed > timeout;
                }
        } while (!val && !done);

        return val;
}
#endif /* CONFIG_POLL */
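
/* Remove and return the first item in the queue, blocking for up to
 * 'timeout' if it is empty.  Returns NULL if the timeout expires or the
 * wait is cancelled.
 */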
void *z_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
{
        k_spinlock_key_t key = k_spin_lock(&queue->lock);
        void *data;

        if (likely(!sys_sflist_is_empty(&queue->data_q))) {
                sys_sfnode_t *node;

                node = sys_sflist_get_not_empty(&queue->data_q);
                data = z_queue_node_peek(node, true);
                k_spin_unlock(&queue->lock, key);
                return data;
        }

        if (timeout == K_NO_WAIT) {
                k_spin_unlock(&queue->lock, key);
                return NULL;
        }

#if defined(CONFIG_POLL)
        k_spin_unlock(&queue->lock, key);

        return k_queue_poll(queue, timeout);

#else
        int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);

        return (ret != 0) ? NULL : _current->base.swap_data;
#endif /* CONFIG_POLL */
}

#ifdef CONFIG_USERSPACE
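/* Syscall verification wrappers for the remaining queue operations. */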
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
static inline void *z_vrfy_k_queue_get(struct k_queue *queue, s32_t timeout)
|
2018-04-27 13:21:22 -07:00
|
|
|
{
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
|
|
|
|
return z_impl_k_queue_get(queue, timeout);
|
|
|
|
}
|
|
|
|
#include <syscalls/k_queue_get_mrsh.c>
static inline int z_vrfy_k_queue_is_empty(struct k_queue *queue)
{
	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_is_empty(queue);
}
#include <syscalls/k_queue_is_empty_mrsh.c>
static inline void *z_vrfy_k_queue_peek_head(struct k_queue *queue)
{
	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_peek_head(queue);
}
#include <syscalls/k_queue_peek_head_mrsh.c>
static inline void *z_vrfy_k_queue_peek_tail(struct k_queue *queue)
{
	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
	return z_impl_k_queue_peek_tail(queue);
}
#include <syscalls/k_queue_peek_tail_mrsh.c>
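/* Illustrative only: a hypothetical user-mode caller, not part of this file.
 * It shows the path by which a user thread reaches the z_vrfy_*() handlers
 * above; "illustrative_queue" and "illustrative_user_fn" are made-up names.
 */
K_QUEUE_DEFINE(illustrative_queue);

static void illustrative_user_fn(void)
{
	/* Each call below traps into the kernel: the generated z_mrsh_*()
	 * stub unmarshals the arguments and invokes the matching z_vrfy_*()
	 * handler, which validates &illustrative_queue before calling the
	 * z_impl_*() implementation.
	 */
	if (!k_queue_is_empty(&illustrative_queue)) {
		void *head = k_queue_peek_head(&illustrative_queue);
		void *tail = k_queue_peek_tail(&illustrative_queue);

		ARG_UNUSED(head);
		ARG_UNUSED(tail);
	}
}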
#endif /* CONFIG_USERSPACE */