/*
 * Copyright (c) 2015-2016 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
|
#include <string.h>
|
|
|
|
#include <device.h>
|
2019-06-25 12:25:32 -04:00
|
|
|
#include <sys/atomic.h>
|
2018-11-12 10:25:12 -08:00
|
|
|
#include <syscall_handler.h>
|
2016-10-05 12:01:54 -05:00
|
|
|
|
/* Linker-provided boundaries of the statically-defined init entries.
 * Entries are grouped by init level; each __init_<LEVEL>_start symbol
 * marks where that level's entries begin, and __init_end marks the end
 * of the final level.
 */
extern const struct init_entry __init_start[];
extern const struct init_entry __init_PRE_KERNEL_1_start[];
extern const struct init_entry __init_PRE_KERNEL_2_start[];
extern const struct init_entry __init_POST_KERNEL_start[];
extern const struct init_entry __init_APPLICATION_start[];
extern const struct init_entry __init_end[];

#ifdef CONFIG_SMP
/* SMP init level only exists when CONFIG_SMP is enabled. */
extern const struct init_entry __init_SMP_start[];
#endif

/* Linker-provided first and last-plus-one entries of the static
 * device array.
 */
extern const struct device __device_start[];
extern const struct device __device_end[];

/* Linker-provided start of per-device initialization status storage.
 * NOTE(review): declared but not referenced anywhere in this file —
 * confirm it is still required.
 */
extern uint32_t __device_init_status_start[];
/**
|
|
|
|
* @brief Initialize state for all static devices.
|
|
|
|
*
|
|
|
|
* The state object is always zero-initialized, but this may not be
|
|
|
|
* sufficient.
|
|
|
|
*/
|
|
|
|
void z_device_state_init(void)
|
|
|
|
{
|
|
|
|
const struct device *dev = __device_start;
|
|
|
|
|
|
|
|
while (dev < __device_end) {
|
|
|
|
z_object_init(dev);
|
|
|
|
++dev;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-05 12:01:54 -05:00
|
|
|
/**
|
2020-03-09 11:02:20 +01:00
|
|
|
* @brief Execute all the init entry initialization functions at a given level
|
2016-10-05 12:01:54 -05:00
|
|
|
*
|
2020-03-09 11:02:20 +01:00
|
|
|
* @details Invokes the initialization routine for each init entry object
|
|
|
|
* created by the INIT_ENTRY_DEFINE() macro using the specified level.
|
|
|
|
* The linker script places the init entry objects in memory in the order
|
2016-10-05 12:01:54 -05:00
|
|
|
* they need to be invoked, with symbols indicating where one level leaves
|
|
|
|
* off and the next one begins.
|
|
|
|
*
|
|
|
|
* @param level init level to run.
|
|
|
|
*/
|
2020-05-27 11:26:57 -05:00
|
|
|
void z_sys_init_run_level(int32_t level)
|
2016-10-05 12:01:54 -05:00
|
|
|
{
|
2020-03-09 11:02:20 +01:00
|
|
|
static const struct init_entry *levels[] = {
|
|
|
|
__init_PRE_KERNEL_1_start,
|
|
|
|
__init_PRE_KERNEL_2_start,
|
|
|
|
__init_POST_KERNEL_start,
|
|
|
|
__init_APPLICATION_start,
|
2020-01-15 08:57:29 -08:00
|
|
|
#ifdef CONFIG_SMP
|
2020-03-09 11:02:20 +01:00
|
|
|
__init_SMP_start,
|
2020-01-15 08:57:29 -08:00
|
|
|
#endif
|
2018-11-01 17:42:07 -07:00
|
|
|
/* End marker */
|
2020-03-09 11:02:20 +01:00
|
|
|
__init_end,
|
2018-11-01 17:42:07 -07:00
|
|
|
};
|
2020-03-09 11:02:20 +01:00
|
|
|
const struct init_entry *entry;
|
2016-10-05 12:01:54 -05:00
|
|
|
|
2020-03-09 11:02:20 +01:00
|
|
|
for (entry = levels[level]; entry < levels[level+1]; entry++) {
|
2020-04-30 20:33:38 +02:00
|
|
|
const struct device *dev = entry->dev;
|
2021-02-02 10:23:55 -06:00
|
|
|
int rc = entry->init(dev);
|
2016-10-05 12:01:54 -05:00
|
|
|
|
2021-02-02 10:23:55 -06:00
|
|
|
if (dev != NULL) {
|
|
|
|
/* Mark device initialized. If initialization
|
|
|
|
* failed, record the error condition.
|
2020-04-30 11:49:39 +02:00
|
|
|
*/
|
2021-02-02 10:23:55 -06:00
|
|
|
if (rc != 0) {
|
|
|
|
if (rc < 0) {
|
|
|
|
rc = -rc;
|
|
|
|
}
|
|
|
|
if (rc > UINT8_MAX) {
|
|
|
|
rc = UINT8_MAX;
|
|
|
|
}
|
|
|
|
dev->state->init_res = rc;
|
|
|
|
}
|
|
|
|
dev->state->initialized = true;
|
2018-12-07 13:12:21 -08:00
|
|
|
}
|
2016-10-05 12:01:54 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
const struct device *z_impl_device_get_binding(const char *name)
|
2016-10-05 12:01:54 -05:00
|
|
|
{
|
2020-04-30 20:33:38 +02:00
|
|
|
const struct device *dev;
|
2016-10-05 12:01:54 -05:00
|
|
|
|
2021-02-12 06:19:08 -06:00
|
|
|
/* A null string identifies no device. So does an empty
|
|
|
|
* string.
|
|
|
|
*/
|
2021-03-29 17:13:18 -04:00
|
|
|
if ((name == NULL) || (name[0] == '\0')) {
|
2021-02-12 06:19:08 -06:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-02-14 14:47:11 -08:00
|
|
|
/* Split the search into two loops: in the common scenario, where
|
|
|
|
* device names are stored in ROM (and are referenced by the user
|
|
|
|
* with CONFIG_* macros), only cheap pointer comparisons will be
|
2020-03-09 11:02:20 +01:00
|
|
|
* performed. Reserve string comparisons for a fallback.
|
2018-02-14 14:47:11 -08:00
|
|
|
*/
|
2020-03-09 11:02:20 +01:00
|
|
|
for (dev = __device_start; dev != __device_end; dev++) {
|
2021-12-23 17:18:36 +01:00
|
|
|
if (z_device_is_ready(dev) && (dev->name == name)) {
|
2020-03-09 11:02:20 +01:00
|
|
|
return dev;
|
2018-02-14 14:47:11 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-09 11:02:20 +01:00
|
|
|
for (dev = __device_start; dev != __device_end; dev++) {
|
2021-12-23 17:18:36 +01:00
|
|
|
if (z_device_is_ready(dev) && (strcmp(name, dev->name) == 0)) {
|
2020-03-09 11:02:20 +01:00
|
|
|
return dev;
|
2016-10-05 12:01:54 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-11-12 10:25:12 -08:00
|
|
|
#ifdef CONFIG_USERSPACE
|
2020-04-30 20:33:38 +02:00
|
|
|
static inline const struct device *z_vrfy_device_get_binding(const char *name)
|
2018-11-12 10:25:12 -08:00
|
|
|
{
|
|
|
|
char name_copy[Z_DEVICE_MAX_NAME_LEN];
|
|
|
|
|
|
|
|
if (z_user_string_copy(name_copy, (char *)name, sizeof(name_copy))
|
|
|
|
!= 0) {
|
2021-04-27 11:39:33 -07:00
|
|
|
return NULL;
|
2018-11-12 10:25:12 -08:00
|
|
|
}
|
|
|
|
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
return z_impl_device_get_binding(name_copy);
|
2018-11-12 10:25:12 -08:00
|
|
|
}
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
#include <syscalls/device_get_binding_mrsh.c>
|
2021-03-12 16:25:59 +01:00
|
|
|
|
|
|
|
/* Syscall verification wrapper: confirm the caller passed a valid
 * kernel object of any type before delegating to the impl.
 */
static inline int z_vrfy_device_usable_check(const struct device *dev)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(dev, K_OBJ_ANY));

	return z_impl_device_usable_check(dev);
}
#include <syscalls/device_usable_check_mrsh.c>
|
2018-11-12 10:25:12 -08:00
|
|
|
#endif /* CONFIG_USERSPACE */
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
size_t z_device_get_all_static(struct device const **devices)
|
2020-06-22 08:55:37 -05:00
|
|
|
{
|
|
|
|
*devices = __device_start;
|
|
|
|
return __device_end - __device_start;
|
2020-04-30 11:49:39 +02:00
|
|
|
}
|
|
|
|
|
2021-12-23 17:18:36 +01:00
|
|
|
bool z_device_is_ready(const struct device *dev)
|
2020-04-30 11:49:39 +02:00
|
|
|
{
|
2021-05-13 18:24:25 +02:00
|
|
|
/*
|
|
|
|
* if an invalid device pointer is passed as argument, this call
|
|
|
|
* reports the `device` as not ready for usage.
|
|
|
|
*/
|
|
|
|
if (dev == NULL) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-03-29 10:03:49 -04:00
|
|
|
return dev->state->initialized && (dev->state->init_res == 0U);
|
2020-06-22 08:55:37 -05:00
|
|
|
}
|
2021-02-22 11:42:08 -06:00
|
|
|
|
2021-08-20 19:29:39 +10:00
|
|
|
static int device_visitor(const device_handle_t *handles,
|
|
|
|
size_t handle_count,
|
|
|
|
device_visitor_callback_t visitor_cb,
|
|
|
|
void *context)
|
2021-02-22 11:42:08 -06:00
|
|
|
{
|
|
|
|
/* Iterate over fixed devices */
|
|
|
|
for (size_t i = 0; i < handle_count; ++i) {
|
|
|
|
device_handle_t dh = handles[i];
|
|
|
|
const struct device *rdev = device_from_handle(dh);
|
|
|
|
int rc = visitor_cb(rdev, context);
|
|
|
|
|
|
|
|
if (rc < 0) {
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return handle_count;
|
|
|
|
}
|
2021-08-20 19:29:39 +10:00
|
|
|
|
|
|
|
int device_required_foreach(const struct device *dev,
|
|
|
|
device_visitor_callback_t visitor_cb,
|
|
|
|
void *context)
|
|
|
|
{
|
|
|
|
size_t handle_count = 0;
|
|
|
|
const device_handle_t *handles = device_required_handles_get(dev, &handle_count);
|
|
|
|
|
|
|
|
return device_visitor(handles, handle_count, visitor_cb, context);
|
|
|
|
}
|
|
|
|
|
|
|
|
int device_supported_foreach(const struct device *dev,
|
|
|
|
device_visitor_callback_t visitor_cb,
|
|
|
|
void *context)
|
|
|
|
{
|
|
|
|
size_t handle_count = 0;
|
|
|
|
const device_handle_t *handles = device_supported_handles_get(dev, &handle_count);
|
|
|
|
|
|
|
|
return device_visitor(handles, handle_count, visitor_cb, context);
|
|
|
|
}
|