System call arguments, at the arch layer, are single words. So passing wider values requires splitting them into two registers at call time. This gets even more complicated for values (e.g. k_timeout_t) that may have different sizes depending on configuration. This patch adds a feature to gen_syscalls.py to detect functions with wide arguments and automatically generates code to split/unsplit them. Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't work with functions like this, because for N arguments (our current maximum N is 10) there are 2^N possible configurations of argument widths. So this generates the complete functions for each handler and wrapper, effectively doing in Python what was originally done in the preprocessor. Another complexity is that traditionally the z_hdlr_*() function for a system call has taken the raw list of word arguments, which does not work when some of those arguments must be 64 bit types. So instead of using a single Z_SYSCALL_HANDLER macro, this splits the job of z_hdlr_*() into two steps: an automatically-generated unmarshalling function, z_mrsh_*(), which then calls a user-supplied verification function z_vrfy_*(). The verification function is typesafe, and is a simple C function with exactly the same argument and return signature as the syscall impl function. It is also not responsible for validating the pointers to the extra parameter array or a wide return value; that code gets automatically generated. This commit includes new vrfy/mrsh handling for all syscalls invoked during CI runs. Future commits will port the less testable code. Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
60 lines
1.6 KiB
C
60 lines
1.6 KiB
C
/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Primitive for aborting a thread when an arch-specific one is not
 * needed.
 */
|
|
|
|
#include <kernel.h>
|
|
#include <kernel_structs.h>
|
|
#include <kernel_internal.h>
|
|
#include <kswap.h>
|
|
#include <string.h>
|
|
#include <toolchain.h>
|
|
#include <linker/sections.h>
|
|
#include <wait_q.h>
|
|
#include <ksched.h>
|
|
#include <sys/__assert.h>
|
|
#include <syscall_handler.h>
|
|
|
|
/* Per-thread teardown step, defined elsewhere in the kernel; this file
 * only consumes it from the generic abort path below.
 */
extern void z_thread_single_abort(struct k_thread *thread);
#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
/**
 * @brief Generic thread abort, used when the arch provides no override.
 *
 * Tears down @p thread via z_thread_single_abort() and removes it from
 * the thread monitor list, then forces a scheduling point.  When the
 * caller aborts itself (and is not in an ISR), control is handed off
 * with z_swap() instead of z_reschedule().
 *
 * @param thread Thread to abort (asserts it is not K_ESSENTIAL).
 */
void z_impl_k_thread_abort(k_tid_t thread)
{
	/* We aren't trying to synchronize data access here (these
	 * APIs are internally synchronized). The original lock seems
	 * to have been in place to prevent the thread from waking up
	 * due to a delivered interrupt. Leave a dummy spinlock in
	 * place to do that. This API should be revisited though, it
	 * doesn't look SMP-safe as it stands.
	 */
	struct k_spinlock lock = {};
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* System-critical threads must never be aborted. */
	__ASSERT((thread->base.user_options & K_ESSENTIAL) == 0U,
		 "essential thread aborted");

	z_thread_single_abort(thread);
	z_thread_monitor_exit(thread);

	if (thread == _current && !z_is_in_isr()) {
		/* Aborting ourselves: context-switch away, handing the
		 * (dummy) lock and key to z_swap() for release.
		 */
		z_swap(&lock, key);
	} else {
		/* Really, there's no good reason for this to be a
		 * scheduling point if we aren't aborting _current (by
		 * definition, no higher priority thread is runnable,
		 * because we're running!). But it always has been
		 * and is thus part of our API, and we have tests that
		 * rely on k_thread_abort() scheduling out of
		 * cooperative threads.
		 */
		z_reschedule(&lock, key);
	}
}
#endif
|