/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * The purpose of this file is to provide essential/minimal kernel structure
 * definitions, so that they can be used without including kernel.h.
 *
 * The following rules must be observed:
 * 1. kernel_structs.h shall not depend on kernel.h, either directly or
 *    indirectly (i.e. it shall not include any header files that include
 *    kernel.h in their dependency chain).
 * 2. kernel.h shall imply kernel_structs.h, such that it shall not be
 *    necessary to include kernel_structs.h explicitly when kernel.h is
 *    included.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_

#if !defined(_ASMLANGUAGE)
#include <zephyr/types.h>
#include <sched_priq.h>
#include <sys/dlist.h>
#include <sys/util.h>
#endif

#define K_NUM_PRIORITIES \
	(CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES + 1)
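
/* number of 32-bit bitmap words needed to hold one bit per priority level */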
#define K_NUM_PRIO_BITMAPS ((K_NUM_PRIORITIES + 31) >> 5)

/*
 * Bitmask definitions for the struct k_thread.thread_state field.
 *
 * Must be before kernel_arch_data.h because it might need them to be already
 * defined.
 */

/* states: common uses low bits, arch-specific uses high bits */

/* Not a real thread */
#define _THREAD_DUMMY (BIT(0))

/* Thread is waiting on an object */
#define _THREAD_PENDING (BIT(1))

/* Thread has not yet started */
#define _THREAD_PRESTART (BIT(2))

/* Thread has terminated */
#define _THREAD_DEAD (BIT(3))

/* Thread is suspended */
#define _THREAD_SUSPENDED (BIT(4))

/* Thread is being aborted (SMP only) */
#define _THREAD_ABORTING (BIT(5))

/* Thread is present in the ready queue */
#define _THREAD_QUEUED (BIT(6))
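
/*
 * Note: multiple state bits may be set at once (e.g. a pending thread can
 * also be marked as aborting); code typically tests individual bits, as in
 * (thread->base.thread_state & _THREAD_DEAD).
 */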

/* end - states */

#ifdef CONFIG_STACK_SENTINEL
/* Magic value in lowest bytes of the stack */
#define STACK_SENTINEL 0xF0F0F0F0
#endif
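
/*
 * _thread_base.preempt bundles the scheduler lock count and the thread
 * priority into a single value: a negative (cooperative) priority or a
 * nonzero lock count pushes it to 0x0080 or above.
 */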

/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080

/* highest value of _thread_base.preempt at which a thread is preemptible */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1)

#if !defined(_ASMLANGUAGE)

struct _ready_q {
#ifndef CONFIG_SMP
	/* always contains next thread to run: cannot be NULL */
	struct k_thread *cache;
#endif

#if defined(CONFIG_SCHED_DUMB)
	sys_dlist_t runq;
#elif defined(CONFIG_SCHED_SCALABLE)
	struct _priq_rb runq;
#elif defined(CONFIG_SCHED_MULTIQ)
	struct _priq_mq runq;
#endif
};
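
/*
 * The runq backend is a Kconfig choice: SCHED_DUMB keeps a simple
 * doubly-linked list (smallest code, fine for few threads), SCHED_SCALABLE
 * uses a red/black tree, and SCHED_MULTIQ keeps one queue per priority.
 */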
typedef struct _ready_q _ready_q_t;

struct _cpu {
	/* nested interrupt count */
	u32_t nested;

	/* interrupt stack pointer base */
	char *irq_stack;

	/* currently scheduled thread */
	struct k_thread *current;

	/* one assigned idle thread per CPU */
	struct k_thread *idle_thread;

#ifdef CONFIG_USERSPACE
	/* current syscall frame pointer */
	void *syscall_frame;
#endif

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	/* Coop thread preempted by current metairq, or NULL */
	struct k_thread *metairq_preempted;
#endif

#ifdef CONFIG_TIMESLICING
	/* number of ticks remaining in current time slice */
	int slice_ticks;
#endif
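
	/* CPU index, from 0 through CONFIG_MP_NUM_CPUS - 1 */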
	u8_t id;

#ifdef CONFIG_SMP
	/* True when _current is allowed to context switch */
	u8_t swap_ok;
#endif
};

typedef struct _cpu _cpu_t;

struct z_kernel {
	/* For compatibility with pre-SMP code, union the first CPU
	 * record with the legacy fields so code can continue to use
	 * the "_kernel.XXX" expressions and assembly offsets.
	 */
	union {
		struct _cpu cpus[CONFIG_MP_NUM_CPUS];
#ifndef CONFIG_SMP
		struct {
			/* nested interrupt count */
			u32_t nested;

			/* interrupt stack pointer base */
			char *irq_stack;

			/* currently scheduled thread */
			struct k_thread *current;
		};
#endif
	};

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* queue of timeouts */
	sys_dlist_t timeout_q;
#endif

#ifdef CONFIG_SYS_POWER_MANAGEMENT
	s32_t idle; /* Number of ticks for kernel idling */
#endif

	/*
	 * ready queue: can be big, keep after small fields, since some
	 * assembly (e.g. ARC) is limited in the encoding of the offset
	 */
	struct _ready_q ready_q;

#ifdef CONFIG_FP_SHARING
	/*
	 * A 'current_sse' field does not exist in addition to the 'current_fp'
	 * field since it's not possible to divide the IA-32 non-integer
	 * registers into 2 distinct blocks owned by differing threads. In
	 * other words, given that the 'fxsave/fxrstor' instructions
	 * save/restore both the X87 FPU and XMM registers, it's not possible
	 * for a thread to only "own" the XMM registers.
	 */

	/* thread that owns the FP regs */
	struct k_thread *current_fp;
#endif

#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *threads; /* singly linked list of ALL threads */
#endif
};

typedef struct z_kernel _kernel_t;
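
/* the single, system-wide instance of kernel state */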
extern struct z_kernel _kernel;

#ifdef CONFIG_SMP
#define _current_cpu (arch_curr_cpu())
#define _current (arch_curr_cpu()->current)
#else
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.current
#endif
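
/*
 * Note: on SMP builds these macros go through arch_curr_cpu(), so their
 * results should not be cached across a point where the current thread
 * could migrate to another CPU.
 */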

#define _timeout_q _kernel.timeout_q

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */