/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _kernel_structs__h_
#define _kernel_structs__h_

#include <kernel.h>

#if !defined(_ASMLANGUAGE)
#include <atomic.h>
#include <misc/dlist.h>
#endif

/*
 * Bitmask definitions for the struct k_thread.thread_state field.
 *
 * Must be before kernel_arch_data.h because it might need them to be already
 * defined.
 */

/* states: common uses low bits, arch-specific use high bits */

/* Not a real thread */
#define _THREAD_DUMMY (1 << 0)

/* Thread is waiting on an object */
#define _THREAD_PENDING (1 << 1)

/* Thread has not yet started */
#define _THREAD_PRESTART (1 << 2)

/* Thread has terminated */
#define _THREAD_DEAD (1 << 3)

/* Thread is suspended */
#define _THREAD_SUSPENDED (1 << 4)

/* Thread is actively looking at events to see if they are ready */
#define _THREAD_POLLING (1 << 5)

/* end - states */
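
/*
 * Usage sketch (hypothetical helper, not defined in this header): the
 * state bits are meant to be tested and combined with plain bitwise
 * operations, e.g.:
 *
 *   static inline int _is_thread_ready_to_run(struct k_thread *t)
 *   {
 *       return !(t->base.thread_state &
 *                (_THREAD_PRESTART | _THREAD_PENDING |
 *                 _THREAD_DEAD | _THREAD_SUSPENDED));
 *   }
 */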

/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080

/* highest value of _thread_base.preempt at which a thread is preemptible */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1)
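
/*
 * Illustrative check (hypothetical helper): since 'prio' and
 * 'sched_locked' are bundled into the 16-bit 'preempt' field of
 * struct _thread_base (see below), preemptibility reduces to a single
 * unsigned comparison:
 *
 *   static inline int _is_preempt(struct k_thread *thread)
 *   {
 *       return thread->base.preempt <= _PREEMPT_THRESHOLD;
 *   }
 */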

#include <kernel_arch_data.h>

#if !defined(_ASMLANGUAGE)

#ifdef CONFIG_THREAD_MONITOR
struct __thread_entry {
	_thread_entry_t pEntry;
	void *parameter1;
	void *parameter2;
	void *parameter3;
};
#endif

/* can be used for creating 'dummy' threads, e.g. for pending on objects */
struct _thread_base {

	/* this thread's entry in a ready/wait queue */
	sys_dnode_t k_q_node;

	/* user facing 'thread options'; values defined in include/kernel.h */
	uint8_t user_options;

	/* thread state */
	uint8_t thread_state;

	/*
	 * scheduler lock count and thread priority
	 *
	 * These two fields control the preemptibility of a thread.
	 *
	 * When the scheduler is locked, sched_locked is decremented, which
	 * means that the scheduler is locked for values from 0xff to 0x01. A
	 * thread is coop if its prio is negative, thus 0x80 to 0xff when
	 * the value is looked at as unsigned.
	 *
	 * By putting them end-to-end, this means that a thread is
	 * non-preemptible if the bundled value is greater than or equal to
	 * 0x0080.
	 */
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			uint8_t sched_locked;
			int8_t prio;
#else /* LITTLE and PDP */
			int8_t prio;
			uint8_t sched_locked;
#endif
		};
		uint16_t preempt;
	};
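
	/*
	 * Worked example of the layout above: 'sched_locked' occupies the
	 * high-order byte of 'preempt' and 'prio' the low-order byte, hence
	 * the endian-dependent member order. On a little-endian target,
	 * prio = -1 (0xff) with sched_locked = 0 gives preempt = 0x00ff
	 * (coop, non-preemptible); prio = 5 with sched_locked = 1 gives
	 * preempt = 0x0105 (locked, non-preemptible); prio = 5 with
	 * sched_locked = 0 gives preempt = 0x0005 (preemptible).
	 */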

	/* data returned by APIs */
	void *swap_data;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* this thread's entry in a timeout queue */
	struct _timeout timeout;
#endif
};

typedef struct _thread_base _thread_base_t;

struct k_thread {

	struct _thread_base base;

	/* defined by the architecture, but all archs need these */
	struct _caller_saved caller_saved;
	struct _callee_saved callee_saved;

	/* static thread init data */
	void *init_data;

	/* abort function */
	void (*fn_abort)(void);

#if defined(CONFIG_THREAD_MONITOR)
	/* thread entry and parameters description */
	struct __thread_entry *entry;

	/* next item in list of all threads */
	struct k_thread *next_thread;
#endif

#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* crude thread-local storage */
	void *custom_data;
#endif

#ifdef CONFIG_ERRNO
	/* per-thread errno variable */
	int errno_var;
#endif

	/* arch-specifics: must always be at the end */
	struct _thread_arch arch;
};

typedef struct k_thread _thread_t;

struct _ready_q {

	/* always contains next thread to run: cannot be NULL */
	struct k_thread *cache;

	/* bitmap of priorities that contain at least one ready thread */
	uint32_t prio_bmap[K_NUM_PRIO_BITMAPS];
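
	/*
	 * Lookup sketch (assumed helper name, single-word bitmap case): the
	 * scheduler can locate the best ready priority by finding the first
	 * set bit, then serve the matching queue:
	 *
	 *   int bit = find_lsb_set(prio_bmap[0]);
	 *   sys_dlist_t *list = &q[bit - 1];
	 */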

	/* ready queues, one per priority */
	sys_dlist_t q[K_NUM_PRIORITIES];
};

typedef struct _ready_q _ready_q_t;

struct _kernel {

	/* nested interrupt count */
	uint32_t nested;

	/* interrupt stack pointer base */
	char *irq_stack;

	/* currently scheduled thread */
	struct k_thread *current;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* queue of timeouts */
	sys_dlist_t timeout_q;
#endif

#ifdef CONFIG_SYS_POWER_MANAGEMENT
	int32_t idle; /* Number of ticks for kernel idling */
#endif

	/*
	 * ready queue: can be big, keep after small fields, since some
	 * assembly (e.g. ARC) is limited in the encoding of the offset
	 */
	struct _ready_q ready_q;

#ifdef CONFIG_FP_SHARING
	/*
	 * A 'current_sse' field does not exist in addition to the
	 * 'current_fp' field since it's not possible to divide the IA-32
	 * non-integer registers into 2 distinct blocks owned by differing
	 * threads. In other words, given that the 'fxsave/fxrstor'
	 * instructions save/restore both the X87 FPU and XMM registers,
	 * it's not possible for a thread to only "own" the XMM registers.
	 */

	/* thread (fiber or task) that owns the FP regs */
	struct k_thread *current_fp;
#endif

#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *threads; /* singly linked list of ALL fiber+tasks */
#endif

	/* arch-specific part of _kernel */
	struct _kernel_arch arch;
};

typedef struct _kernel _kernel_t;

extern struct _kernel _kernel;

#define _current _kernel.current
#define _ready_q _kernel.ready_q
#define _timeout_q _kernel.timeout_q
#define _threads _kernel.threads
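
/*
 * Convenience note: these shorthands let kernel code write, e.g.,
 * '_current->base.prio' or '_ready_q.cache' rather than spelling out
 * the _kernel fields each time.
 */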

#include <kernel_arch_func.h>

static ALWAYS_INLINE void
_set_thread_return_value_with_data(struct k_thread *thread,
				   unsigned int value,
				   void *data)
{
	_set_thread_return_value(thread, value);
	thread->base.swap_data = data;
}
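
/*
 * Usage sketch (hypothetical caller): an object that readies a pending
 * thread can hand it both a return code and a payload in one step:
 *
 *   _set_thread_return_value_with_data(thread, 0, buffer);
 *
 * The thread later resumes with return value 0 and reads the payload
 * back from its base.swap_data field.
 */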

extern void _init_thread_base(struct _thread_base *thread_base,
			      int priority, uint32_t initial_state,
			      unsigned int options);
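
/*
 * Illustrative call (assumed priority constant): setting up a dummy
 * thread that can pend on kernel objects:
 *
 *   struct _thread_base dummy;
 *
 *   _init_thread_base(&dummy, K_LOWEST_THREAD_PRIO, _THREAD_DUMMY, 0);
 */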

#endif /* _ASMLANGUAGE */

#endif /* _kernel_structs__h_ */