/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <spinlock.h>
#include <ksched.h>
#include <timeout_q.h>
#include <syscall_handler.h>
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>

#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
			 __key = k_spin_lock(lck); \
			 __i.key == 0; \
			 k_spin_unlock(lck, __key), __i.key = 1)
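
/*
 * A note on LOCKED(): it wraps a spinlock critical section in a
 * one-shot for() loop.  The loop header takes the lock, the body runs
 * exactly once (while __i.key == 0), and the loop's "increment"
 * expression releases the lock and sets __i.key to terminate.  The
 * functions below use it as:
 *
 *	LOCKED(&timeout_lock) {
 *		... code run with timeout_lock held ...
 *	}
 *
 * Beware that a return or break placed directly in the body would
 * skip the unlock, so the code below always falls off the end.
 */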

static u64_t curr_tick;

static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
		  ? K_FOREVER : INT_MAX)
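
/*
 * MAX_WAIT is the wait handed to the timer driver when no timeout is
 * pending: with SYSTEM_CLOCK_SLOPPY_IDLE the clock may sleep
 * indefinitely (K_FOREVER), otherwise the wait is capped at INT_MAX
 * ticks.
 */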

/* Ticks left to process in the currently-executing z_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_z_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_z_clock_hw_cycles_per_sec_runtime_get();
}
#include <syscalls/z_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}
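
/*
 * timeout_list is a "delta list": each node's dticks is stored
 * relative to the node before it, so only the head ever needs
 * decrementing as ticks are announced.  Illustrative sketch (not data
 * from the source): timeouts due in 3, 5 and 9 ticks from now are
 * stored as dticks values of 3, 2 and 4 respectively.
 */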

static void remove_timeout(struct _timeout *t)
{
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}

static s32_t elapsed(void)
{
	return announce_remaining == 0 ? z_clock_elapsed() : 0;
}
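
/*
 * elapsed() reports ticks the driver has counted since the last
 * announcement.  While z_clock_announce() is executing, deltas in the
 * list are already relative to the tick being processed, so the
 * elapsed count is defined to be zero there to avoid double-counting.
 */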

static s32_t next_timeout(void)
{
	struct _timeout *to = first();
	s32_t ticks_elapsed = elapsed();
	s32_t ret = to == NULL ? MAX_WAIT : MAX(0, to->dticks - ticks_elapsed);

#ifdef CONFIG_TIMESLICING
	if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
		ret = _current_cpu->slice_ticks;
	}
#endif
	return ret;
}
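
/*
 * When CONFIG_TIMESLICING is enabled, next_timeout() also clamps the
 * result to the current time-slice deadline, so the same driver
 * interrupt doubles as the slice expiration.
 */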

void z_add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
{
	__ASSERT(!sys_dnode_is_linked(&to->node), "");

	to->fn = fn;
	ticks = MAX(1, ticks);

	LOCKED(&timeout_lock) {
		struct _timeout *t;

		to->dticks = ticks + elapsed();
		for (t = first(); t != NULL; t = next(t)) {
			__ASSERT(t->dticks >= 0, "");

			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first()) {
			z_clock_set_timeout(next_timeout(), false);
		}
	}
}
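
/*
 * A hypothetical caller, for illustration only (my_timeout and
 * my_expiry_fn are not part of this file):
 *
 *	static void my_expiry_fn(struct _timeout *t) { ... }
 *	static struct _timeout my_timeout;
 *	...
 *	z_add_timeout(&my_timeout, my_expiry_fn, 10);
 *
 * my_expiry_fn() then runs from z_clock_announce() roughly ten ticks
 * later; the MAX(1, ticks) clamp above means it never fires in the
 * current tick.
 */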

int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	LOCKED(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			remove_timeout(to);
			ret = 0;
		}
	}

	return ret;
}

s32_t z_timeout_remaining(struct _timeout *timeout)
{
	s32_t ticks = 0;

	if (z_is_inactive_timeout(timeout)) {
		return 0;
	}

	LOCKED(&timeout_lock) {
		for (struct _timeout *t = first(); t != NULL; t = next(t)) {
			ticks += t->dticks;
			if (timeout == t) {
				break;
			}
		}
	}

	return ticks - elapsed();
}
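
/*
 * In the loop above, the running sum of deltas gives the target's
 * expiry relative to the last announcement; subtracting elapsed()
 * converts that to ticks remaining from the present moment.
 */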

s32_t z_get_next_timeout_expiry(void)
{
	s32_t ret = K_FOREVER;

	LOCKED(&timeout_lock) {
		ret = next_timeout();
	}
	return ret;
}

void z_set_timeout_expiry(s32_t ticks, bool idle)
{
	LOCKED(&timeout_lock) {
		int next = next_timeout();
		bool sooner = (next == K_FOREVER) || (ticks < next);
		bool imminent = next <= 1;

		/* Only set new timeouts when they are sooner than
		 * what we have.  Also don't try to set a timeout when
		 * one is about to expire: drivers have internal logic
		 * that will bump the timeout to the "next" tick if
		 * it's not considered to be settable as directed.
		 * SMP can't use this optimization though: we don't
		 * know when context switches happen until interrupt
		 * exit and so can't get the timeslicing clamp folded
		 * in.
		 */
		if (!imminent && (sooner || IS_ENABLED(CONFIG_SMP))) {
			z_clock_set_timeout(ticks, idle);
		}
	}
}
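
/*
 * z_clock_announce() below pops expired timeouts one at a time,
 * dropping timeout_lock around each callback so handlers may legally
 * add or abort timeouts.  first() is re-read on every iteration
 * because the callbacks can mutate the list.
 */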

void z_clock_announce(s32_t ticks)
{
#ifdef CONFIG_TIMESLICING
	z_time_slice(ticks);
#endif

	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	announce_remaining = ticks;

	while (first() != NULL && first()->dticks <= announce_remaining) {
		struct _timeout *t = first();
		int dt = t->dticks;

		curr_tick += dt;
		announce_remaining -= dt;
		t->dticks = 0;
		remove_timeout(t);

		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
	}

	if (first() != NULL) {
		first()->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	z_clock_set_timeout(next_timeout(), false);

	k_spin_unlock(&timeout_lock, key);
}

s64_t z_tick_get(void)
{
	u64_t t = 0U;

	LOCKED(&timeout_lock) {
		t = curr_tick + z_clock_elapsed();
	}
	return t;
}

u32_t z_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (u32_t)z_tick_get();
#else
	return (u32_t)curr_tick;
#endif
}
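
/*
 * In a non-tickless build the timer interrupt announces every tick,
 * so curr_tick is always current and the truncating read above
 * suffices; tickless builds must fold in z_clock_elapsed() via
 * z_tick_get().
 */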

s64_t z_impl_k_uptime_get(void)
{
	return k_ticks_to_ms_floor64(z_tick_get());
}

#ifdef CONFIG_USERSPACE
static inline s64_t z_vrfy_k_uptime_get(void)
{
	return z_impl_k_uptime_get();
}
#include <syscalls/k_uptime_get_mrsh.c>
#endif