zephyr/kernel/thread.c

/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Kernel thread support
*
* This module provides general purpose thread support.
*/
#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys_clock.h>
#include <ksched.h>
#include <kthread.h>
#include <wait_q.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <zephyr/init.h>
#include <zephyr/tracing/tracing.h>
#include <string.h>
#include <stdbool.h>
#include <zephyr/sys/check.h>
#include <zephyr/random/random.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/logging/log.h>
#include <zephyr/llext/symbol.h>
#include <zephyr/sys/iterable_sections.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_OBJ_CORE_THREAD
static struct k_obj_type obj_type_thread;
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
static struct k_obj_core_stats_desc thread_stats_desc = {
.raw_size = sizeof(struct k_cycle_stats),
.query_size = sizeof(struct k_thread_runtime_stats),
.raw = z_thread_stats_raw,
.query = z_thread_stats_query,
.reset = z_thread_stats_reset,
.disable = z_thread_stats_disable,
.enable = z_thread_stats_enable,
};
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
static int init_thread_obj_core_list(void)
{
/* Initialize thread object type */
#ifdef CONFIG_OBJ_CORE_THREAD
z_obj_type_init(&obj_type_thread, K_OBJ_TYPE_THREAD_ID,
offsetof(struct k_thread, obj_core));
#endif /* CONFIG_OBJ_CORE_THREAD */
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
k_obj_type_stats_init(&obj_type_thread, &thread_stats_desc);
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
return 0;
}
SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_THREAD */
#define _FOREACH_STATIC_THREAD(thread_data) \
STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)
bool k_is_in_isr(void)
{
return arch_is_in_isr();
}
EXPORT_SYMBOL(k_is_in_isr);
#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
_current->custom_data = value;
}
#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_custom_data_set(void *data)
{
z_impl_k_thread_custom_data_set(data);
}
#include <syscalls/k_thread_custom_data_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
void *z_impl_k_thread_custom_data_get(void)
{
return _current->custom_data;
}
#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_thread_custom_data_get(void)
{
return z_impl_k_thread_custom_data_get();
}
#include <syscalls/k_thread_custom_data_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
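/* Illustrative usage sketch (not part of this file): with
 * CONFIG_THREAD_CUSTOM_DATA=y a thread can stash one opaque pointer and
 * retrieve it later; the kernel stores the value but never interprets it.
 * "struct my_ctx" is a hypothetical application type:
 *
 *     struct my_ctx ctx;
 *
 *     k_thread_custom_data_set(&ctx);
 *     ...
 *     struct my_ctx *p = k_thread_custom_data_get();
 */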
#endif /* CONFIG_THREAD_CUSTOM_DATA */
int z_impl_k_is_preempt_thread(void)
{
return !arch_is_in_isr() && thread_is_preemptible(_current);
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_is_preempt_thread(void)
{
return z_impl_k_is_preempt_thread();
}
#include <syscalls/k_is_preempt_thread_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_thread_priority_get(k_tid_t thread)
{
return thread->base.prio;
}
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
{
K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
return z_impl_k_thread_priority_get(thread);
}
#include <syscalls/k_thread_priority_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
int z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
{
#ifdef CONFIG_THREAD_NAME
if (thread == NULL) {
thread = _current;
}
strncpy(thread->name, value, CONFIG_THREAD_MAX_NAME_LEN - 1);
thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, 0);
return 0;
#else
ARG_UNUSED(thread);
ARG_UNUSED(value);
SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, -ENOSYS);
return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
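/* Illustrative usage sketch (not part of this file): a NULL thread names
 * the calling thread, and names longer than CONFIG_THREAD_MAX_NAME_LEN - 1
 * are silently truncated:
 *
 *     k_thread_name_set(NULL, "worker");
 */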
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_set(struct k_thread *thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
char name[CONFIG_THREAD_MAX_NAME_LEN];
if (thread != NULL) {
if (K_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
return -EINVAL;
}
}
/* In theory we could copy directly into thread->name, but
* the current z_vrfy / z_impl split does not provide a
* means of doing so.
*/
if (k_usermode_string_copy(name, str, sizeof(name)) != 0) {
return -EFAULT;
}
return z_impl_k_thread_name_set(thread, name);
#else
return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
const char *k_thread_name_get(k_tid_t thread)
{
#ifdef CONFIG_THREAD_NAME
return (const char *)thread->name;
#else
ARG_UNUSED(thread);
return NULL;
#endif /* CONFIG_THREAD_NAME */
}
int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
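/* Note: like strncpy(), this leaves buf without a terminating NUL when
 * the name is at least size bytes long; callers wanting a guaranteed
 * C string must terminate it themselves.
 */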
strncpy(buf, thread->name, size);
return 0;
#else
ARG_UNUSED(thread);
ARG_UNUSED(buf);
ARG_UNUSED(size);
return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
static size_t copy_bytes(char *dest, size_t dest_size, const char *src, size_t src_size)
{
size_t bytes_to_copy;
bytes_to_copy = MIN(dest_size, src_size);
memcpy(dest, src, bytes_to_copy);
return bytes_to_copy;
}
const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
{
size_t off = 0;
uint8_t bit;
uint8_t thread_state = thread_id->base.thread_state;
static const struct {
const char *str;
size_t len;
} state_string[] = {
{ Z_STATE_STR_DUMMY, sizeof(Z_STATE_STR_DUMMY) - 1},
{ Z_STATE_STR_PENDING, sizeof(Z_STATE_STR_PENDING) - 1},
{ Z_STATE_STR_PRESTART, sizeof(Z_STATE_STR_PRESTART) - 1},
{ Z_STATE_STR_DEAD, sizeof(Z_STATE_STR_DEAD) - 1},
{ Z_STATE_STR_SUSPENDED, sizeof(Z_STATE_STR_SUSPENDED) - 1},
{ Z_STATE_STR_ABORTING, sizeof(Z_STATE_STR_ABORTING) - 1},
{ Z_STATE_STR_SUSPENDING, sizeof(Z_STATE_STR_SUSPENDING) - 1},
{ Z_STATE_STR_QUEUED, sizeof(Z_STATE_STR_QUEUED) - 1},
};
if ((buf == NULL) || (buf_size == 0)) {
return "";
}
buf_size--; /* Reserve 1 byte for end-of-string character */
/*
* Loop through each bit in the thread_state. Stop once all have
* been processed. If more than one thread_state bit is set, then
* separate the descriptive strings with a '+'.
*/
for (unsigned int index = 0; thread_state != 0; index++) {
bit = BIT(index);
if ((thread_state & bit) == 0) {
continue;
}
off += copy_bytes(buf + off, buf_size - off,
state_string[index].str,
state_string[index].len);
thread_state &= ~bit;
if (thread_state != 0) {
off += copy_bytes(buf + off, buf_size - off, "+", 1);
}
}
buf[off] = '\0';
return (const char *)buf;
}
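/* Illustrative usage sketch (not part of this file): the caller supplies
 * the output buffer, and multiple set state bits come back joined by '+',
 * e.g. "suspended+queued". Here "tid" is a hypothetical k_tid_t and the
 * buffer size is arbitrary:
 *
 *     char state[32];
 *
 *     printk("%s\n", k_thread_state_str(tid, state, sizeof(state)));
 */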
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
size_t len;
struct k_object *ko = k_object_find(thread);
/* Special case: we allow reading the names of initialized threads
* even if we don't have permission on them
*/
if ((thread == NULL) || (ko == NULL) || (ko->type != K_OBJ_THREAD) ||
((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0)) {
return -EINVAL;
}
if (K_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
return -EFAULT;
}
len = strlen(thread->name);
if ((len + 1) > size) {
return -ENOSPC;
}
return k_usermode_to_copy((void *)buf, thread->name, len + 1);
#else
ARG_UNUSED(thread);
ARG_UNUSED(buf);
ARG_UNUSED(size);
return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_copy_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
*
* The stack sentinel feature writes a magic value to the lowest 4 bytes of
* the thread's stack when the thread is initialized. This value gets checked
* in a few places:
*
* 1) In k_yield() if the current thread is not swapped out
* 2) After servicing a non-nested interrupt
* 3) In z_swap(), check the sentinel in the outgoing thread
*
* Item 2 requires support in arch/ code.
*
* If the check fails, the thread will be terminated appropriately through
* the system fatal error handler.
*/
void z_check_stack_sentinel(void)
{
uint32_t *stack;
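/* Dummy threads have no real stack_info, so there is no sentinel to
 * check for them.
 */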
if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
return;
}
stack = (uint32_t *)_current->stack_info.start;
if (*stack != STACK_SENTINEL) {
/* Restore it so further checks don't trigger this same error */
*stack = STACK_SENTINEL;
z_except_reason(K_ERR_STACK_CHK_FAIL);
}
}
#endif /* CONFIG_STACK_SENTINEL */
void z_impl_k_thread_start(struct k_thread *thread)
{
SYS_PORT_TRACING_OBJ_FUNC(k_thread, start, thread);
z_sched_start(thread);
}
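/* Illustrative usage sketch (hypothetical application code): a thread
 * created with a K_FOREVER delay stays dormant until k_thread_start()
 * releases it. The stack, thread object, and entry function names below
 * are assumptions for the example.
 *
 *   K_THREAD_STACK_DEFINE(worker_stack, 1024);
 *   static struct k_thread worker;
 *
 *   k_tid_t tid = k_thread_create(&worker, worker_stack,
 *                                 K_THREAD_STACK_SIZEOF(worker_stack),
 *                                 worker_entry, NULL, NULL, NULL,
 *                                 K_PRIO_PREEMPT(5), 0, K_FOREVER);
 *   k_thread_start(tid);
 */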
#ifdef CONFIG_USERSPACE
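/* Verification wrapper for user-mode callers: confirm that 'thread' is a
 * valid, accessible kernel object of type K_OBJ_THREAD before forwarding
 * to the z_impl_ variant. The generated unmarshalling code included below
 * invokes this wrapper when the syscall arrives from user mode.
 */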
static inline void z_vrfy_k_thread_start(struct k_thread *thread)
{
K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
return z_impl_k_thread_start(thread);
}
#include <syscalls/k_thread_start_mrsh.c>
#endif /* CONFIG_USERSPACE */
#if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
int z_stack_adjust_initialized;
static size_t random_offset(size_t stack_size)
{
size_t random_val;
if (!z_stack_adjust_initialized) {
z_early_rand_get((uint8_t *)&random_val, sizeof(random_val));
} else {
sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
}
/* Don't need to worry about alignment of the size here,
* arch_new_thread() is required to do it.
*
* FIXME: Not the best way to get a random number in a range.
* See #6493
*/
const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;
if (unlikely(fuzz * 2 > stack_size)) {
return 0;
}
return fuzz;
}
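/* Worked example, assuming CONFIG_STACK_POINTER_RANDOM=64: random_val % 64
 * yields a fuzz in [0, 63], with a slight modulo bias unless 64 evenly
 * divides the RNG's output range (the FIXME above). For a hypothetical
 * 100-byte stack, a fuzz of 51 or more is discarded (51 * 2 = 102 > 100),
 * so randomization never consumes more than half of the usable stack.
 */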
#if defined(CONFIG_STACK_GROWS_UP)
/* This configuration is rare enough that we don't bother supporting it for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */
#endif /* CONFIG_STACK_POINTER_RANDOM */
static char *setup_thread_stack(struct k_thread *new_thread,
k_thread_stack_t *stack, size_t stack_size)
{
size_t stack_obj_size, stack_buf_size;
char *stack_ptr, *stack_buf_start;
size_t delta = 0;
#ifdef CONFIG_USERSPACE
if (z_stack_is_user_capable(stack)) {
stack_obj_size = K_THREAD_STACK_LEN(stack_size);
stack_buf_start = K_THREAD_STACK_BUFFER(stack);
stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
} else
#endif /* CONFIG_USERSPACE */
{
/* Object cannot host a user mode thread */
stack_obj_size = K_KERNEL_STACK_LEN(stack_size);
stack_buf_start = K_KERNEL_STACK_BUFFER(stack);
stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;
/* Zephyr treats stack overflow as an application bug, but this
 * particular overflow is visible to static analysis, so it needs
 * explicit handling.
 */
if (K_KERNEL_STACK_RESERVED > stack_obj_size) {
k_panic();
}
}
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
/* Map the stack into virtual memory and use that as the base to
* calculate the initial stack pointer at the high end of the stack
* object. The stack pointer may be reduced later in this function
* by TLS or random offset.
*
* K_MEM_MAP_UNINIT is used to mimic the behavior of non-mapped
* stack. If CONFIG_INIT_STACKS is enabled, the stack will be
* cleared below.
*/
void *stack_mapped = k_mem_phys_map((uintptr_t)stack, stack_obj_size,
K_MEM_PERM_RW | K_MEM_CACHE_WB | K_MEM_MAP_UNINIT);
__ASSERT_NO_MSG((uintptr_t)stack_mapped != 0);
#ifdef CONFIG_USERSPACE
if (z_stack_is_user_capable(stack)) {
stack_buf_start = K_THREAD_STACK_BUFFER(stack_mapped);
} else
#endif /* CONFIG_USERSPACE */
{
stack_buf_start = K_KERNEL_STACK_BUFFER(stack_mapped);
}
stack_ptr = (char *)stack_mapped + stack_obj_size;
/* Need to store the info on mapped stack so we can remove the mappings
* when the thread ends.
*/
new_thread->stack_info.mapped.addr = stack_mapped;
new_thread->stack_info.mapped.sz = stack_obj_size;
#else /* CONFIG_THREAD_STACK_MEM_MAPPED */
/* Initial stack pointer at the high end of the stack object, may
* be reduced later in this function by TLS or random offset
*/
stack_ptr = (char *)stack + stack_obj_size;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
" buf_size %zu stack_ptr=%p",
stack, new_thread, stack_obj_size, (void *)stack_buf_start,
stack_buf_size, (void *)stack_ptr);
#ifdef CONFIG_INIT_STACKS
memset(stack_buf_start, 0xaa, stack_buf_size);
#endif /* CONFIG_INIT_STACKS */
#ifdef CONFIG_STACK_SENTINEL
/* Put the stack sentinel at the lowest 4 bytes of the stack area.
* We periodically check that it's still present and kill the thread
* if it isn't.
*/
*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* TLS is always last within the stack buffer */
delta += arch_tls_stack_setup(new_thread, stack_ptr);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
size_t tls_size = sizeof(struct _thread_userspace_local_data);
/* reserve space on highest memory of stack buffer for local data */
delta += tls_size;
new_thread->userspace_local_data =
(struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
#if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
delta += random_offset(stack_buf_size);
#endif /* CONFIG_STACK_POINTER_RANDOM */
delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
#ifdef CONFIG_THREAD_STACK_INFO
/* Initial values. Arches which implement MPU guards that "borrow"
* memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
* will need to appropriately update this.
*
* The bounds tracked here correspond to the area of the stack object
* that the thread can access, which includes TLS.
*/
new_thread->stack_info.start = (uintptr_t)stack_buf_start;
new_thread->stack_info.size = stack_buf_size;
new_thread->stack_info.delta = delta;
#endif /* CONFIG_THREAD_STACK_INFO */
stack_ptr -= delta;
return stack_ptr;
}
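/* Rough sketch of the resulting stack object layout (high addresses at
 * the top; exact reserved regions are arch- and config-dependent):
 *
 *   +---------------------------------+ <- top of object (base + obj_size)
 *   | TLS area (if enabled)           |
 *   +---------------------------------+
 *   | userspace local data (if any)   |
 *   +---------------------------------+
 *   | random fuzz (if enabled)        |
 *   +---------------------------------+ <- returned stack_ptr (top - delta,
 *   | usable stack, grows downward,   |    rounded to ARCH_STACK_PTR_ALIGN)
 *   | 0xaa-filled with INIT_STACKS    |
 *   +---------------------------------+
 *   | STACK_SENTINEL (lowest 4 bytes) | <- stack_buf_start
 *   +---------------------------------+
 */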
/*
* The provided stack_size value is presumed to be either the result of
* K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
* of K_THREAD_STACK_DEFINE() which defined 'stack'.
*/
char *z_setup_new_thread(struct k_thread *new_thread,
k_thread_stack_t *stack, size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, uint32_t options, const char *name)
{
char *stack_ptr;
Z_ASSERT_VALID_PRIO(prio, entry);
#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
k_thread_abort_cleanup_check_reuse(new_thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
#ifdef CONFIG_OBJ_CORE_THREAD
k_obj_core_init_and_link(K_OBJ_CORE(new_thread), &obj_type_thread);
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
k_obj_core_stats_register(K_OBJ_CORE(new_thread),
&new_thread->base.usage,
sizeof(new_thread->base.usage));
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
#endif /* CONFIG_OBJ_CORE_THREAD */
#ifdef CONFIG_USERSPACE
__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
"user thread %p with kernel-only stack %p",
new_thread, stack);
k_object_init(new_thread);
k_object_init(stack);
new_thread->stack_obj = stack;
new_thread->syscall_frame = NULL;
/* Any given thread has access to itself */
k_object_access_grant(new_thread, new_thread);
#endif /* CONFIG_USERSPACE */
z_waitq_init(&new_thread->join_queue);
/* Initialize various struct k_thread members */
z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
stack_ptr = setup_thread_stack(new_thread, stack, stack_size);
#ifdef CONFIG_KERNEL_COHERENCE
/* Check that the thread object itself is in coherent (uncached)
 * memory, but that the stack is still cached!
 */
__ASSERT_NO_MSG(arch_mem_coherent(new_thread));
/* When dynamic thread stack is available, the stack may come from
* uncached area.
*/
#ifndef CONFIG_DYNAMIC_THREAD
__ASSERT_NO_MSG(!arch_mem_coherent(stack));
#endif /* CONFIG_DYNAMIC_THREAD */
#endif /* CONFIG_KERNEL_COHERENCE */
arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);
/* static threads overwrite it afterwards with the real value */
new_thread->init_data = NULL;
#ifdef CONFIG_USE_SWITCH
/* switch_handle must be non-null except when inside z_swap()
* for synchronization reasons. Historically some notional
* USE_SWITCH architectures have actually ignored the field
*/
__ASSERT(new_thread->switch_handle != NULL,
"arch layer failed to initialize switch_handle");
#endif /* CONFIG_USE_SWITCH */
#ifdef CONFIG_THREAD_CUSTOM_DATA
/* Initialize custom data field (value is opaque to kernel) */
new_thread->custom_data = NULL;
#endif /* CONFIG_THREAD_CUSTOM_DATA */
#ifdef CONFIG_EVENTS
new_thread->no_wake_on_timeout = false;
#endif /* CONFIG_EVENTS */
#ifdef CONFIG_THREAD_MONITOR
new_thread->entry.pEntry = entry;
new_thread->entry.parameter1 = p1;
new_thread->entry.parameter2 = p2;
new_thread->entry.parameter3 = p3;
k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);
new_thread->next_thread = _kernel.threads;
_kernel.threads = new_thread;
k_spin_unlock(&z_thread_monitor_lock, key);
#endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_THREAD_NAME
if (name != NULL) {
strncpy(new_thread->name, name,
CONFIG_THREAD_MAX_NAME_LEN - 1);
/* Ensure NULL termination, truncate if longer */
new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
} else {
new_thread->name[0] = '\0';
}
#endif /* CONFIG_THREAD_NAME */
#ifdef CONFIG_SCHED_CPU_MASK
if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
new_thread->base.cpu_mask = 1; /* must specify only one cpu */
} else {
new_thread->base.cpu_mask = -1; /* allow all cpus */
}
#endif /* CONFIG_SCHED_CPU_MASK */
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/* _current may be null if the dummy thread is not used */
if (!_current) {
new_thread->resource_pool = NULL;
return stack_ptr;
}
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
#ifdef CONFIG_USERSPACE
z_mem_domain_init_thread(new_thread);
if ((options & K_INHERIT_PERMS) != 0U) {
k_thread_perms_inherit(_current, new_thread);
}
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_SCHED_DEADLINE
new_thread->base.prio_deadline = 0;
#endif /* CONFIG_SCHED_DEADLINE */
new_thread->resource_pool = _current->resource_pool;
#ifdef CONFIG_SMP
z_waitq_init(&new_thread->halt_queue);
#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_THREAD_USAGE
new_thread->base.usage = (struct k_cycle_stats) {};
new_thread->base.usage.track_usage =
CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
#endif /* CONFIG_SCHED_THREAD_USAGE */
SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);
return stack_ptr;
}
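/* The delay argument controls when the new thread first becomes runnable:
 * K_NO_WAIT makes it ready immediately, a finite timeout schedules the
 * start for later, and K_FOREVER leaves the thread dormant until
 * k_thread_start() is called on it.
 */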
k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, uint32_t options, k_timeout_t delay)
{
__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");
z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
prio, options, NULL);
if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
thread_schedule_new(new_thread, delay);
}
return new_thread;
}
#ifdef CONFIG_USERSPACE
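/* A stack object can host a user-mode thread only if it was defined with
 * K_THREAD_STACK_DEFINE() and thus registered in the kernel object table;
 * stacks defined with K_KERNEL_STACK_DEFINE() have no such entry.
 */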
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
return k_object_find(stack) != NULL;
}
k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, uint32_t options, k_timeout_t delay)
{
size_t total_size, stack_obj_size;
struct k_object *stack_object;
/* The thread and stack objects *must* be in an uninitialized state */
K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));
	/* No need to check z_stack_is_user_capable(): a stack that isn't
	 * user capable won't be in the object table at all.
	 */
stack_object = k_object_find(stack);
K_OOPS(K_SYSCALL_VERIFY_MSG(k_object_validation_check(stack_object, stack,
K_OBJ_THREAD_STACK_ELEMENT,
_OBJ_INIT_FALSE) == 0,
"bad stack object"));
/* Verify that the stack size passed in is OK by computing the total
* size and comparing it with the size value in the object metadata
*/
K_OOPS(K_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
stack_size, &total_size),
"stack size overflow (%zu+%zu)",
stack_size,
K_THREAD_STACK_RESERVED));
/* Testing less-than-or-equal since additional room may have been
* allocated for alignment constraints
*/
#ifdef CONFIG_GEN_PRIV_STACKS
stack_obj_size = stack_object->data.stack_data->size;
#else
stack_obj_size = stack_object->data.stack_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
K_OOPS(K_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
"stack size %zu is too big, max is %zu",
total_size, stack_obj_size));
	/* User threads may only create other user threads, and those
	 * threads can't be marked as essential.
	 */
K_OOPS(K_SYSCALL_VERIFY(options & K_USER));
K_OOPS(K_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));
	/* Check validity of prio argument; it must be the same as or worse
	 * than the caller's priority.
	 */
K_OOPS(K_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
K_OOPS(K_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
_current->base.prio)));
z_setup_new_thread(new_thread, stack, stack_size,
entry, p1, p2, p3, prio, options, NULL);
if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
thread_schedule_new(new_thread, delay);
}
return new_thread;
}
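/* Illustrative sketch (not part of the kernel): how a user-mode caller
 * reaches the verification path above. The stack must come from
 * K_THREAD_STACK_DEFINE() so it appears in the kernel object table, and
 * K_USER must be set; child_entry and the sizes below are hypothetical.
 *
 *   K_THREAD_STACK_DEFINE(child_stack, 1024);
 *   struct k_thread child;
 *
 *   k_thread_create(&child, child_stack,
 *                   K_THREAD_STACK_SIZEOF(child_stack),
 *                   child_entry, NULL, NULL, NULL,
 *                   K_PRIO_PREEMPT(8), K_USER, K_NO_WAIT);
 */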
#include <syscalls/k_thread_create_mrsh.c>
#endif /* CONFIG_USERSPACE */
void z_init_thread_base(struct _thread_base *thread_base, int priority,
uint32_t initial_state, unsigned int options)
{
/* k_q_node is initialized upon first insertion in a list */
thread_base->pended_on = NULL;
thread_base->user_options = (uint8_t)options;
thread_base->thread_state = (uint8_t)initial_state;
thread_base->prio = priority;
thread_base->sched_locked = 0U;
#ifdef CONFIG_SMP
thread_base->is_idle = 0;
#endif /* CONFIG_SMP */
#ifdef CONFIG_TIMESLICE_PER_THREAD
thread_base->slice_ticks = 0;
thread_base->slice_expired = NULL;
#endif /* CONFIG_TIMESLICE_PER_THREAD */
/* swap_data does not need to be initialized */
z_init_thread_timeout(thread_base);
}
FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
void *p1, void *p2, void *p3)
{
SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);
_current->base.user_options |= K_USER;
z_thread_essential_clear(_current);
#ifdef CONFIG_THREAD_MONITOR
_current->entry.pEntry = entry;
_current->entry.parameter1 = p1;
_current->entry.parameter2 = p2;
_current->entry.parameter3 = p3;
#endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_USERSPACE
__ASSERT(z_stack_is_user_capable(_current->stack_obj),
"dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
memset(_current->userspace_local_data, 0,
sizeof(struct _thread_userspace_local_data));
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
arch_tls_stack_setup(_current,
(char *)(_current->stack_info.start +
_current->stack_info.size));
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
arch_user_mode_enter(entry, p1, p2, p3);
#else
/* XXX In this case we do not reset the stack */
z_thread_entry(entry, p1, p2, p3);
#endif /* CONFIG_USERSPACE */
}
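/* Illustrative sketch (not part of the kernel): a supervisor thread
 * dropping to user mode once privileged setup is done. enter_user and
 * privileged_setup() are hypothetical; the call never returns.
 *
 *   void supervisor_entry(void *p1, void *p2, void *p3)
 *   {
 *           privileged_setup();
 *           k_thread_user_mode_enter(enter_user, p1, p2, p3);
 *   }
 */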
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
#ifdef CONFIG_STACK_GROWS_UP
#error "Unsupported configuration for stack analysis"
#endif /* CONFIG_STACK_GROWS_UP */
int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr)
{
size_t unused = 0;
const uint8_t *checked_stack = stack_start;
/* Take the address of any local variable as a shallow bound for the
* stack pointer. Addresses above it are guaranteed to be
* accessible.
*/
const uint8_t *stack_pointer = (const uint8_t *)&stack_start;
/* If we are currently running on the stack being analyzed, some
* memory management hardware will generate an exception if we
* read unused stack memory.
*
* This never happens when invoked from user mode, as user mode
* will always run this function on the privilege elevation stack.
*/
if ((stack_pointer > stack_start) && (stack_pointer <= (stack_start + size)) &&
IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
/* TODO: We could add an arch_ API call to temporarily
* disable the stack checking in the CPU, but this would
* need to be properly managed wrt context switches/interrupts
*/
return -ENOTSUP;
}
if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		/* The first 4 bytes of the stack buffer are reserved for the
		 * sentinel value, so they won't read back as 0xAAAAAAAA for
		 * thread stacks.
		 *
		 * FIXME: thread->stack_info.start ought to reflect
		 * this!
		 */
checked_stack += 4;
size -= 4;
}
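	/* CONFIG_INIT_STACKS fills the stack with 0xaa at creation; count
	 * the contiguous 0xaa bytes from the low end (stacks grow down, so
	 * these are the bytes the thread never touched).
	 */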
for (size_t i = 0; i < size; i++) {
		if (checked_stack[i] == 0xaaU) {
unused++;
} else {
break;
}
}
*unused_ptr = unused;
return 0;
}
int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
size_t *unused_ptr)
{
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
if (thread->stack_info.mapped.addr == NULL) {
return -EINVAL;
}
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
return z_stack_space_get((const uint8_t *)thread->stack_info.start,
thread->stack_info.size, unused_ptr);
}
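/* Illustrative sketch (not part of the kernel): typical use of the
 * public k_thread_stack_space_get() wrapper to report a thread's stack
 * headroom; `tid` is a hypothetical thread ID.
 *
 *   size_t unused;
 *
 *   if (k_thread_stack_space_get(tid, &unused) == 0) {
 *           printk("thread %p: %zu bytes never used\n", tid, unused);
 *   }
 */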
#ifdef CONFIG_USERSPACE
int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
size_t *unused_ptr)
{
size_t unused;
int ret;
ret = K_SYSCALL_OBJ(thread, K_OBJ_THREAD);
CHECKIF(ret != 0) {
return ret;
}
ret = z_impl_k_thread_stack_space_get(thread, &unused);
CHECKIF(ret != 0) {
return ret;
}
ret = k_usermode_to_copy(unused_ptr, &unused, sizeof(size_t));
CHECKIF(ret != 0) {
return ret;
}
return 0;
}
#include <syscalls/k_thread_stack_space_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */
#ifdef CONFIG_USERSPACE
static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
const struct k_thread *thread)
{
K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
return z_impl_k_thread_timeout_remaining_ticks(thread);
}
#include <syscalls/k_thread_timeout_remaining_ticks_mrsh.c>
static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
const struct k_thread *thread)
{
K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
return z_impl_k_thread_timeout_expires_ticks(thread);
}
#include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
z_sched_usage_start(_current);
#endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
#ifdef CONFIG_TRACING
SYS_PORT_TRACING_FUNC(k_thread, switched_in);
#endif /* CONFIG_TRACING */
}
void z_thread_mark_switched_out(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
z_sched_usage_stop();
#endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
#ifdef CONFIG_TRACING
#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Dummy thread won't have TLS set up to run arbitrary code */
	if (!_current_cpu->current ||
	    (_current_cpu->current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif /* CONFIG_TRACING */
}
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
int k_thread_runtime_stats_get(k_tid_t thread,
k_thread_runtime_stats_t *stats)
{
if ((thread == NULL) || (stats == NULL)) {
return -EINVAL;
}
#ifdef CONFIG_SCHED_THREAD_USAGE
z_sched_thread_usage(thread, stats);
#else
*stats = (k_thread_runtime_stats_t) {};
#endif /* CONFIG_SCHED_THREAD_USAGE */
return 0;
}
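/* Illustrative sketch (not part of the kernel): reading per-thread
 * usage; execution_cycles is only populated when
 * CONFIG_SCHED_THREAD_USAGE is enabled. `tid` is hypothetical.
 *
 *   k_thread_runtime_stats_t stats;
 *
 *   if (k_thread_runtime_stats_get(tid, &stats) == 0) {
 *           printk("cycles: %llu\n", stats.execution_cycles);
 *   }
 */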
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
k_thread_runtime_stats_t tmp_stats;
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
if (stats == NULL) {
return -EINVAL;
}
*stats = (k_thread_runtime_stats_t) {};
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
/* Retrieve the usage stats for each core and amalgamate them. */
unsigned int num_cpus = arch_num_cpus();
for (uint8_t i = 0; i < num_cpus; i++) {
z_sched_cpu_usage(i, &tmp_stats);
stats->execution_cycles += tmp_stats.execution_cycles;
stats->total_cycles += tmp_stats.total_cycles;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
stats->current_cycles += tmp_stats.current_cycles;
stats->peak_cycles += tmp_stats.peak_cycles;
stats->average_cycles += tmp_stats.average_cycles;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
stats->idle_cycles += tmp_stats.idle_cycles;
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
return 0;
}
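/* Illustrative sketch (not part of the kernel): deriving system-wide
 * CPU load from the aggregated counters above, assuming
 * CONFIG_SCHED_THREAD_USAGE_ALL so total_cycles is populated.
 *
 *   k_thread_runtime_stats_t s;
 *
 *   if ((k_thread_runtime_stats_all_get(&s) == 0) &&
 *       (s.total_cycles != 0U)) {
 *           uint64_t load_pct =
 *                   (s.execution_cycles * 100U) / s.total_cycles;
 *           printk("CPU load: %llu%%\n", load_pct);
 *   }
 */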
#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
/** Pointer to thread which needs to be cleaned up. */
static struct k_thread *thread_to_cleanup;
/** Spinlock for thread abort cleanup. */
static struct k_spinlock thread_cleanup_lock;
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
static void *thread_cleanup_stack_addr;
static size_t thread_cleanup_stack_sz;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
void defer_thread_cleanup(struct k_thread *thread)
{
	/* Note when adding new deferred cleanup steps:
	 * - The thread object may have been overwritten by the time the
	 *   actual cleanup runs (e.g. a thread object allocated on a
	 *   stack), so stash here any data the cleanup steps will need.
	 */
thread_to_cleanup = thread;
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
	/* Note that user thread access to the stack should already have
	 * been stripped, as the thread has exited its memory domain;
	 * k_thread_abort() takes care of that.
	 */
/* Stash the address and size so the region can be unmapped
* later.
*/
thread_cleanup_stack_addr = thread->stack_info.mapped.addr;
thread_cleanup_stack_sz = thread->stack_info.mapped.sz;
	/* The stack is now considered unusable. Clearing these fields
	 * prevents functions that are aware of memory-mapped stacks
	 * (e.g. z_stack_space_get()) from looking directly into the
	 * unmapped region.
	 */
thread->stack_info.mapped.addr = NULL;
thread->stack_info.mapped.sz = 0;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
}
void do_thread_cleanup(struct k_thread *thread)
{
/* Note when adding new actual cleanup steps:
* - The thread object may have been overwritten when this is
* called. So avoid using any data from the thread object.
*/
ARG_UNUSED(thread);
#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
if (thread_cleanup_stack_addr != NULL) {
k_mem_phys_unmap(thread_cleanup_stack_addr,
thread_cleanup_stack_sz);
thread_cleanup_stack_addr = NULL;
}
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
}
void k_thread_abort_cleanup(struct k_thread *thread)
{
K_SPINLOCK(&thread_cleanup_lock) {
if (thread_to_cleanup != NULL) {
/* Finish the pending one first. */
do_thread_cleanup(thread_to_cleanup);
thread_to_cleanup = NULL;
}
if (thread == _current) {
			/* Cleanup must be deferred for the currently running
			 * thread, as unmapping its stack mid-abort could
			 * trigger an exception. The actual cleanup is done
			 * the next time k_thread_abort() is called, or at
			 * thread creation if the same thread object is being
			 * reused. This ensures the cleanup code no longer
			 * needs this thread's stack. It is not exactly ideal,
			 * as the stack may remain memory-mapped for a while,
			 * but it is a simple solution that avoids (a) working
			 * around the scheduler lock during k_thread_abort(),
			 * (b) creating another thread to perform the cleanup,
			 * and (c) requiring architecture support (e.g. via an
			 * exception).
			 */
defer_thread_cleanup(thread);
} else {
/* Not the current running thread, so we are safe to do
* cleanups.
*/
do_thread_cleanup(thread);
}
}
}
void k_thread_abort_cleanup_check_reuse(struct k_thread *thread)
{
K_SPINLOCK(&thread_cleanup_lock) {
		/* Guard reuse of the same thread object: any pending cleanup
		 * of it must finish before the object can be reused.
		 */
if (thread_to_cleanup == thread) {
do_thread_cleanup(thread_to_cleanup);
thread_to_cleanup = NULL;
}
}
}
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */