/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_THREAD_H_
#define ZEPHYR_INCLUDE_KERNEL_THREAD_H_

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
#include <sys/mem_manage.h>
#endif

#include <kernel/stats.h>

/**
 * @typedef k_thread_entry_t
 * @brief Thread entry point function type.
 *
 * A thread's entry point function is invoked when the thread starts executing.
 * Up to 3 argument values can be passed to the function.
 *
 * The thread terminates execution permanently if the entry point function
 * returns. The thread is responsible for releasing any shared resources
 * it may own (such as mutexes and dynamically allocated memory), prior to
 * returning.
 *
 * @param p1 First argument.
 * @param p2 Second argument.
 * @param p3 Third argument.
 *
 * @return N/A
 */
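
/*
 * Illustrative sketch (not part of this header): a minimal entry function
 * matching k_thread_entry_t. The function and argument names below are
 * hypothetical.
 *
 *	void my_entry(void *p1, void *p2, void *p3)
 *	{
 *		struct my_ctx *ctx = p1;	// unpack up to three arguments
 *		ARG_UNUSED(p2);
 *		ARG_UNUSED(p3);
 *
 *		while (1) {
 *			do_work(ctx);		// returning from this function would
 *		}				// terminate the thread permanently
 *	}
 */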

#ifdef CONFIG_THREAD_MONITOR
struct __thread_entry {
	k_thread_entry_t pEntry;
	void *parameter1;
	void *parameter2;
	void *parameter3;
};
#endif

/* can be used for creating 'dummy' threads, e.g. for pending on objects */
struct _thread_base {

	/* this thread's entry in a ready/wait queue */
	union {
		sys_dnode_t qnode_dlist;
		struct rbnode qnode_rb;
	};

	/* wait queue on which the thread is pended (needed only for
	 * trees, not dumb lists)
	 */
	_wait_q_t *pended_on;

	/* user facing 'thread options'; values defined in include/kernel.h */
	uint8_t user_options;

	/* thread state */
	uint8_t thread_state;

	/*
	 * scheduler lock count and thread priority
	 *
	 * These two fields control the preemptibility of a thread.
	 *
	 * When the scheduler is locked, sched_locked is decremented, which
	 * means that the scheduler is locked for values from 0xff to 0x01. A
	 * thread is cooperative if its priority is negative, i.e. 0x80 to
	 * 0xff when the value is viewed as unsigned.
	 *
	 * By putting them end-to-end, this means that a thread is
	 * non-preemptible if the bundled value is greater than or equal to
	 * 0x0080.
	 */
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			uint8_t sched_locked;
			int8_t prio;
#else /* LITTLE and PDP */
			int8_t prio;
			uint8_t sched_locked;
#endif
		};
		uint16_t preempt;
	};
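
	/*
	 * Worked example of the bundling above (illustrative only): with the
	 * scheduler unlocked (sched_locked == 0x00), a preemptible thread at
	 * prio 5 yields preempt == 0x0005 (< 0x0080, preemptible), while a
	 * cooperative thread at prio -1 stores the byte 0xff and yields
	 * preempt == 0x00ff (>= 0x0080, non-preemptible). A call to
	 * k_sched_lock() decrements sched_locked to 0xff, so either thread
	 * then reads preempt >= 0xff00 and is likewise non-preemptible.
	 */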

#ifdef CONFIG_SCHED_DEADLINE
	int prio_deadline;
#endif

	uint32_t order_key;

#ifdef CONFIG_SMP
	/* True for the per-CPU idle threads */
	uint8_t is_idle;

	/* CPU index on which thread was last run */
	uint8_t cpu;

	/* Recursive count of irq_lock() calls */
	uint8_t global_lock_count;
#endif

#ifdef CONFIG_SCHED_CPU_MASK
	/* "May run on" bits for each CPU */
	uint8_t cpu_mask;
#endif

	/* data returned by APIs */
	void *swap_data;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* this thread's entry in a timeout queue */
	struct _timeout timeout;
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
	struct k_cycle_stats usage; /* Track thread usage statistics */
#endif
};

typedef struct _thread_base _thread_base_t;

#if defined(CONFIG_THREAD_STACK_INFO)
/* Contains the stack information of a thread */
struct _thread_stack_info {
	/* Stack start - Represents the start address of the thread-writable
	 * stack area.
	 */
	uintptr_t start;

	/* Thread-writable stack buffer size. Represents the size of the actual
	 * buffer, starting from the 'start' member, that should be writable by
	 * the thread. This comprises the thread stack area, any area reserved
	 * for local thread data storage, as well as any area left out due to
	 * random adjustments applied to the initial thread stack pointer during
	 * thread initialization.
	 */
	size_t size;

	/* Adjustment value to the size member, removing any storage
	 * used for TLS or random stack base offsets. (start + size - delta)
	 * is the initial stack pointer for a thread. May be 0.
	 */
	size_t delta;
};

typedef struct _thread_stack_info _thread_stack_info_t;
#endif /* CONFIG_THREAD_STACK_INFO */
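
/*
 * Illustrative sketch (not part of this header): recovering the initial
 * stack pointer of a thread from its stack_info, per the definition of
 * 'delta' above. The variable names are hypothetical.
 *
 *	const struct _thread_stack_info *info = &thread->stack_info;
 *	uintptr_t initial_sp = info->start + info->size - info->delta;
 *	size_t writable_bytes = info->size;
 */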

#if defined(CONFIG_USERSPACE)
struct _mem_domain_info {
	/** memory domain queue node */
	sys_dnode_t mem_domain_q_node;
	/** memory domain of the thread */
	struct k_mem_domain *mem_domain;
};
#endif /* CONFIG_USERSPACE */
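
/*
 * Illustrative sketch (not part of this header): mem_domain above tracks the
 * domain a user thread currently belongs to; it is updated when the thread
 * is added to a domain, e.g. (names hypothetical, error handling omitted):
 *
 *	struct k_mem_domain app_domain;
 *
 *	k_mem_domain_init(&app_domain, 0, NULL);
 *	k_mem_domain_add_thread(&app_domain, app_thread_id);
 */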

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
struct _thread_userspace_local_data {
#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS)
	int errno_var;
#endif
};
#endif

typedef struct k_thread_runtime_stats {
#ifdef CONFIG_SCHED_THREAD_USAGE
	uint64_t execution_cycles;
	uint64_t total_cycles;		/* total # of non-idle cycles */
	/*
	 * In the context of thread statistics, [execution_cycles] is the same
	 * as the total # of non-idle cycles. In the context of CPU statistics,
	 * it refers to the sum of non-idle + idle cycles.
	 */
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	/*
	 * For threads, the following fields refer to the time spent executing
	 * as bounded by when the thread was scheduled in and scheduled out.
	 * For CPUs, the same fields refer to the time spent executing
	 * non-idle threads as bounded by the idle thread(s).
	 */

	uint64_t current_cycles;	/* current # of non-idle cycles */
	uint64_t peak_cycles;		/* peak # of non-idle cycles */
	uint64_t average_cycles;	/* average # of non-idle cycles */
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	/*
	 * This field is always zero for individual threads. It only comes
	 * into play when gathering statistics for the CPU. In that case it
	 * represents the total number of cycles spent idling.
	 */

	uint64_t idle_cycles;
#endif
} k_thread_runtime_stats_t;
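
/*
 * Illustrative sketch (not part of this header): reading these statistics
 * for a single thread via the runtime-stats API (assumes
 * CONFIG_SCHED_THREAD_USAGE is enabled; variable names are hypothetical,
 * error handling omitted):
 *
 *	k_thread_runtime_stats_t stats;
 *
 *	k_thread_runtime_stats_get(some_thread_id, &stats);
 *	printk("thread ran for %llu cycles\n", stats.execution_cycles);
 */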

struct z_poller {
	bool is_polling;
	uint8_t mode;
};

/**
 * @ingroup thread_apis
 * Thread Structure
 */
struct k_thread {

	struct _thread_base base;

	/** defined by the architecture, but all archs need these */
	struct _callee_saved callee_saved;

	/** static thread init data */
	void *init_data;

	/** threads waiting in k_thread_join() */
	_wait_q_t join_queue;

#if defined(CONFIG_POLL)
	struct z_poller poller;
#endif

#if defined(CONFIG_EVENTS)
	struct k_thread *next_event_link;

	uint32_t events;
	uint32_t event_options;
#endif

#if defined(CONFIG_THREAD_MONITOR)
	/** thread entry and parameters description */
	struct __thread_entry entry;

	/** next item in list of all threads */
	struct k_thread *next_thread;
#endif

#if defined(CONFIG_THREAD_NAME)
	/** Thread name */
	char name[CONFIG_THREAD_MAX_NAME_LEN];
#endif

#ifdef CONFIG_THREAD_CUSTOM_DATA
	/** crude thread-local storage */
	void *custom_data;
#endif

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	struct _thread_userspace_local_data *userspace_local_data;
#endif

#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS)
#ifndef CONFIG_USERSPACE
	/** per-thread errno variable */
	int errno_var;
#endif
#endif

#if defined(CONFIG_THREAD_STACK_INFO)
	/** Stack Info */
	struct _thread_stack_info stack_info;
#endif /* CONFIG_THREAD_STACK_INFO */

#if defined(CONFIG_USERSPACE)
	/** memory domain info of the thread */
	struct _mem_domain_info mem_domain_info;
	/** Base address of thread stack */
	k_thread_stack_t *stack_obj;
	/** current syscall frame pointer */
	void *syscall_frame;
#endif /* CONFIG_USERSPACE */

#if defined(CONFIG_USE_SWITCH)
	/* When using __switch() a few previously arch-specific items
	 * become part of the core OS
	 */

	/** z_swap() return value */
	int swap_retval;

	/** Context handle returned via arch_switch() */
	void *switch_handle;
#endif

	/** resource pool */
	struct k_heap *resource_pool;

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
	/* Pointer to arch-specific TLS area */
	uintptr_t tls;
#endif /* CONFIG_THREAD_LOCAL_STORAGE */

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
	/** Paging statistics */
	struct k_mem_paging_stats_t paging_stats;
#endif

	/** arch-specifics: must always be at the end */
	struct _thread_arch arch;
};

typedef struct k_thread _thread_t;
typedef struct k_thread *k_tid_t;
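
/*
 * Illustrative sketch (not part of this header): putting the pieces above
 * together -- a k_thread object, a k_thread_entry_t function, and a k_tid_t
 * handle -- to create a thread (names, stack size, and priority are
 * hypothetical):
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	static struct k_thread my_thread;
 *
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      5, 0, K_NO_WAIT);
 */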

#endif /* ZEPHYR_INCLUDE_KERNEL_THREAD_H_ */