kernel: delete separate logic for priv stacks

Privilege mode stacks never needed to be placed in a separate gperf
table. They can be generated by the main gen_kobject_list.py logic,
which is what this patch does.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie 2020-03-11 10:56:19 -07:00 committed by Andrew Boie
commit 28be793cb6
21 changed files with 124 additions and 474 deletions


@ -351,9 +351,6 @@ endif()
if(CONFIG_USERSPACE)
set(APP_SMEM_ALIGNED_DEP app_smem_aligned_linker)
set(APP_SMEM_UNALIGNED_DEP app_smem_unaligned_linker)
if(CONFIG_ARM)
set(PRIV_STACK_DEP priv_stacks_prebuilt)
endif()
endif()
get_property(TOPT GLOBAL PROPERTY TOPT)
@ -706,7 +703,6 @@ endif() # CONFIG_CODE_DATA_RELOCATION
configure_linker_script(
linker.cmd
""
${PRIV_STACK_DEP}
${APP_SMEM_ALIGNED_DEP}
${CODE_RELOCATION_DEP}
zephyr_generated_headers
@ -786,147 +782,7 @@ if(CONFIG_USERSPACE)
get_property(compile_definitions_interface TARGET zephyr_interface
PROPERTY INTERFACE_COMPILE_DEFINITIONS)
endif()
# Warning most of this gperf code is duplicated below for
# gen_kobject_list.py / output_lib
if(CONFIG_ARM AND CONFIG_USERSPACE)
set(GEN_PRIV_STACKS $ENV{ZEPHYR_BASE}/scripts/gen_priv_stacks.py)
set(PROCESS_PRIV_STACKS_GPERF $ENV{ZEPHYR_BASE}/scripts/process_gperf.py)
set(PRIV_STACKS priv_stacks_hash.gperf)
set(PRIV_STACKS_OUTPUT_SRC_PRE priv_stacks_hash_preprocessed.c)
set(PRIV_STACKS_OUTPUT_SRC priv_stacks_hash.c)
set(PRIV_STACKS_OUTPUT_OBJ priv_stacks_hash.c.obj)
set(PRIV_STACKS_OUTPUT_OBJ_RENAMED priv_stacks_hash_renamed.o)
# Essentially what we are doing here is extracting some information
# out of the nearly finished elf file, generating the source code
# for a hash table based on that information, and then compiling and
# linking the hash table back into a now even more nearly finished
# elf file.
# Use the script GEN_PRIV_STACKS to scan the kernel binary's
# (${ZEPHYR_PREBUILT_EXECUTABLE}) DWARF information to produce a table of kernel
# objects (PRIV_STACKS) which we will then pass to gperf
add_custom_command(
OUTPUT ${PRIV_STACKS}
COMMAND
${PYTHON_EXECUTABLE}
${GEN_PRIV_STACKS}
--kernel $<TARGET_FILE:priv_stacks_prebuilt>
--output ${PRIV_STACKS}
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:--verbose>
DEPENDS priv_stacks_prebuilt
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
add_custom_target(priv_stacks DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${PRIV_STACKS})
if(${GPERF} STREQUAL GPERF-NOTFOUND)
message(FATAL_ERROR "Unable to find gperf")
endif()
# Use gperf to generate C code (PRIV_STACKS_OUTPUT_SRC_PRE) which implements a
# perfect hashtable based on PRIV_STACKS
add_custom_command(
OUTPUT ${PRIV_STACKS_OUTPUT_SRC_PRE}
COMMAND
${GPERF} -C
--output-file ${PRIV_STACKS_OUTPUT_SRC_PRE}
${PRIV_STACKS}
DEPENDS priv_stacks ${PRIV_STACKS}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
add_custom_target(priv_stacks_output_src_pre DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${PRIV_STACKS_OUTPUT_SRC_PRE})
# For our purposes the code/data generated by gperf is not optimal.
#
# The script PROCESS_GPERF creates a new c file OUTPUT_SRC based on
# OUTPUT_SRC_PRE to greatly reduce the amount of code/data generated
# since we know we are always working with pointer values
add_custom_command(
OUTPUT ${PRIV_STACKS_OUTPUT_SRC}
COMMAND
${PYTHON_EXECUTABLE}
${PROCESS_PRIV_STACKS_GPERF}
-i ${PRIV_STACKS_OUTPUT_SRC_PRE}
-o ${PRIV_STACKS_OUTPUT_SRC}
-p "struct _k_priv_stack_map"
$<$<BOOL:${CMAKE_VERBOSE_MAKEFILE}>:--verbose>
DEPENDS priv_stacks_output_src_pre ${PRIV_STACKS_OUTPUT_SRC_PRE}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
add_custom_target(priv_stacks_output_src DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${PRIV_STACKS_OUTPUT_SRC})
set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/${PRIV_STACKS_OUTPUT_SRC}
PROPERTIES COMPILE_DEFINITIONS "${compile_definitions_interface}")
set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/${PRIV_STACKS_OUTPUT_SRC}
PROPERTIES COMPILE_FLAGS
"${NO_COVERAGE_FLAGS} -fno-function-sections -fno-data-sections ")
# We need precise control of where generated text/data ends up in the final
# kernel image. Disable function/data sections and use objcopy to move
# generated data into special section names
add_library(priv_stacks_output_lib STATIC
${CMAKE_CURRENT_BINARY_DIR}/${PRIV_STACKS_OUTPUT_SRC}
)
# Turn off -ffunction-sections, etc.
# NB: Using a library instead of target_compile_options(priv_stacks_output_lib
# [...]) because a library's options have precedence
add_library(priv_stacks_output_lib_interface INTERFACE)
foreach(incl ${include_dir_in_interface})
target_include_directories(priv_stacks_output_lib_interface INTERFACE ${incl})
endforeach()
foreach(incl ${sys_include_dir_in_interface})
target_include_directories(priv_stacks_output_lib_interface SYSTEM INTERFACE ${incl})
endforeach()
target_link_libraries(priv_stacks_output_lib priv_stacks_output_lib_interface)
set(PRIV_STACKS_OUTPUT_OBJ_PATH ${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/priv_stacks_output_lib.dir/${PRIV_STACKS_OUTPUT_OBJ})
set(obj_copy_cmd "")
set(obj_copy_sections_rename
.bss=.priv_stacks.noinit
.data=.priv_stacks.data
.text=.priv_stacks.text
.rodata=.priv_stacks.rodata
)
bintools_objcopy(
RESULT_CMD_LIST obj_copy_cmd
SECTION_RENAME ${obj_copy_sections_rename}
FILE_INPUT ${PRIV_STACKS_OUTPUT_OBJ_PATH}
FILE_OUTPUT ${PRIV_STACKS_OUTPUT_OBJ_RENAMED}
)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${PRIV_STACKS_OUTPUT_OBJ_RENAMED}
${obj_copy_cmd}
DEPENDS priv_stacks_output_lib
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
add_custom_target(priv_stacks_output_obj_renamed DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${PRIV_STACKS_OUTPUT_OBJ_RENAMED})
add_library(priv_stacks_output_obj_renamed_lib STATIC IMPORTED GLOBAL)
set_property(
TARGET priv_stacks_output_obj_renamed_lib
PROPERTY
IMPORTED_LOCATION ${CMAKE_CURRENT_BINARY_DIR}/${PRIV_STACKS_OUTPUT_OBJ_RENAMED}
)
add_dependencies(
priv_stacks_output_obj_renamed_lib
priv_stacks_output_obj_renamed
)
set_property(GLOBAL APPEND PROPERTY GENERATED_KERNEL_OBJECT_FILES priv_stacks_output_obj_renamed_lib)
endif()
# Warning: most of this gperf code is duplicated above for
# gen_priv_stacks.py / priv_stacks_output_lib
if(CONFIG_USERSPACE)
set(GEN_KOBJ_LIST ${ZEPHYR_BASE}/scripts/gen_kobject_list.py)
set(PROCESS_GPERF ${ZEPHYR_BASE}/scripts/process_gperf.py)
@ -1168,42 +1024,6 @@ if(CONFIG_USERSPACE)
)
endif()
if(CONFIG_USERSPACE AND CONFIG_ARM)
configure_linker_script(
linker_priv_stacks.cmd
""
${CODE_RELOCATION_DEP}
${APP_SMEM_ALIGNED_DEP}
${APP_SMEM_ALIGNED_LD}
zephyr_generated_headers
)
add_custom_target(
linker_priv_stacks_script
DEPENDS
linker_priv_stacks.cmd
)
set_property(TARGET
linker_priv_stacks_script
PROPERTY INCLUDE_DIRECTORIES
${ZEPHYR_INCLUDE_DIRS}
)
set(PRIV_STACK_LIB priv_stacks_output_obj_renamed_lib)
add_executable( priv_stacks_prebuilt misc/empty_file.c)
toolchain_ld_link_elf(
TARGET_ELF priv_stacks_prebuilt
OUTPUT_MAP ${PROJECT_BINARY_DIR}/priv_stacks_prebuilt.map
LIBRARIES_PRE_SCRIPT ""
LINKER_SCRIPT ${PROJECT_BINARY_DIR}/linker_priv_stacks.cmd
LIBRARIES_POST_SCRIPT ""
DEPENDENCIES ${CODE_RELOCATION_DEP}
)
set_property(TARGET priv_stacks_prebuilt PROPERTY LINK_DEPENDS ${PROJECT_BINARY_DIR}/linker_priv_stacks.cmd)
add_dependencies( priv_stacks_prebuilt linker_priv_stacks_script ${OFFSETS_LIB})
endif()
# FIXME: Is there any way to get rid of empty_file.c?
add_executable( ${ZEPHYR_PREBUILT_EXECUTABLE} misc/empty_file.c)
toolchain_ld_link_elf(
@ -1211,11 +1031,10 @@ toolchain_ld_link_elf(
OUTPUT_MAP ${PROJECT_BINARY_DIR}/${ZEPHYR_PREBUILT_EXECUTABLE}.map
LIBRARIES_PRE_SCRIPT ""
LINKER_SCRIPT ${PROJECT_BINARY_DIR}/linker.cmd
LIBRARIES_POST_SCRIPT ${PRIV_STACK_LIB}
DEPENDENCIES ${CODE_RELOCATION_DEP}
)
set_property(TARGET ${ZEPHYR_PREBUILT_EXECUTABLE} PROPERTY LINK_DEPENDS ${PROJECT_BINARY_DIR}/linker.cmd)
add_dependencies( ${ZEPHYR_PREBUILT_EXECUTABLE} ${PRIV_STACK_DEP} ${LINKER_SCRIPT_TARGET} ${OFFSETS_LIB})
add_dependencies( ${ZEPHYR_PREBUILT_EXECUTABLE} ${LINKER_SCRIPT_TARGET} ${OFFSETS_LIB})
set(generated_kernel_files ${GKSF} ${GKOF})
@ -1230,7 +1049,6 @@ else()
configure_linker_script(
linker_pass_final.cmd
"-DLINKER_PASS2"
${PRIV_STACK_DEP}
${CODE_RELOCATION_DEP}
${ZEPHYR_PREBUILT_EXECUTABLE}
zephyr_generated_headers
@ -1258,7 +1076,7 @@ else()
DEPENDENCIES ${CODE_RELOCATION_DEP}
)
set_property(TARGET ${ZEPHYR_FINAL_EXECUTABLE} PROPERTY LINK_DEPENDS ${PROJECT_BINARY_DIR}/linker_pass_final.cmd)
add_dependencies( ${ZEPHYR_FINAL_EXECUTABLE} ${PRIV_STACK_DEP} ${LINKER_PASS_FINAL_SCRIPT_TARGET})
add_dependencies( ${ZEPHYR_FINAL_EXECUTABLE} ${LINKER_PASS_FINAL_SCRIPT_TARGET})
# Use the pass2 elf as the final elf
set(logical_target_for_zephyr_elf ${ZEPHYR_FINAL_EXECUTABLE})


@ -383,7 +383,6 @@
/arch/x86/gen_gdt.py @andrewboie
/arch/x86/gen_idt.py @andrewboie
/scripts/gen_kobject_list.py @andrewboie
/scripts/gen_priv_stacks.py @andrewboie @agross-oss @ioannisg
/scripts/gen_syscalls.py @andrewboie
/scripts/net/ @jukkar @pfl
/scripts/process_gperf.py @andrewboie


@ -27,6 +27,9 @@ config ARM
bool
select ARCH_IS_SET
select HAS_DTS
# FIXME: current state of the code for all ARM requires this, but
# is really only necessary for Cortex-M with ARM MPU!
select GEN_PRIV_STACKS
help
ARM architecture
@ -218,15 +221,8 @@ config PRIVILEGED_STACK_SIZE
This option sets the privileged stack region size that will be used
in addition to the user mode thread stack. During normal execution,
this region will be inaccessible from user mode. During system calls,
this region will be utilized by the system call.
config PRIVILEGED_STACK_TEXT_AREA
int "Privileged stacks text area"
default 512 if COVERAGE_GCOV
default 256
depends on ARCH_HAS_USERSPACE
help
Stack text area size for privileged stacks.
this region will be utilized by the system call. This value must be
a multiple of the minimum stack alignment.
config KOBJECT_TEXT_AREA
int "Size if kobject text area"
@ -237,6 +233,17 @@ config KOBJECT_TEXT_AREA
help
Size of kernel object text area. Used in linker script.
config GEN_PRIV_STACKS
bool
help
Selected if the architecture requires that privilege elevation stacks
be allocated in a separate memory area. This is typical of arches
whose MPUs require regions to be power-of-two aligned/sized.
FIXME: This should be removed and replaced with checks against
CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT, but both ARM and ARC
changes will be necessary for this.
config STACK_GROWS_UP
bool "Stack grows towards higher memory addresses"
help


@ -16,10 +16,6 @@
#include <ksched.h>
#include <wait_q.h>
#ifdef CONFIG_USERSPACE
extern u8_t *z_priv_stack_find(void *obj);
#endif
/* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
* end of the stack, and thus reusable by the stack when not needed anymore.
*


@ -230,15 +230,6 @@ The following is a detailed description of the scripts used during the build pro
:start-after: """
:end-before: """
.. _gen_priv_stacks.py:
:zephyr_file:`scripts/gen_priv_stacks.py`
==========================================
.. include:: ../../../scripts/gen_priv_stacks.py
:start-after: """
:end-before: """
.. _gen_idt.py:
:zephyr_file:`arch/x86/gen_idt.py`


@ -8,7 +8,7 @@ set of stacks. These stacks exist in a 1:1 relationship with each thread stack
defined in the system. The privileged stacks are created as a part of the
build process.
A post-build script :ref:`gen_priv_stacks.py` scans the generated
A post-build script :ref:`gen_kobject_list.py` scans the generated
ELF file and finds all of the thread stack objects. A set of privileged
stacks, a lookup table, and a set of helper functions are created and added
to the image.
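
As a rough, hedged sketch (counts and sizes are illustrative; the exact text is emitted by write_gperf_table() in gen_kobject_list.py), the pieces added to the image look approximately like this:

/* Illustrative only: one privilege stack buffer per thread stack object
 * found in the prebuilt ELF, placed in the .priv_stacks.noinit section.
 */
static u8_t Z_GENERIC_SECTION(.priv_stacks.noinit)
	__aligned(Z_PRIVILEGE_STACK_ALIGN)
	priv_stacks[2][CONFIG_PRIVILEGED_STACK_SIZE];

/* Illustrative only: per-stack metadata referenced from the kernel
 * object hash table; z_priv_stack_find() returns the priv member.
 */
static struct z_stack_data stack_data[2] = {
	{ 2048, (u8_t *)(&priv_stacks[0]) },
	{ 4096, (u8_t *)(&priv_stacks[1]) }
};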


@ -156,7 +156,6 @@ SECTIONS
{
_image_text_start = .;
#include <linker/priv_stacks-text.ld>
#include <linker/kobject-text.ld>
*(.text)
@ -221,7 +220,6 @@ SECTIONS
#include <custom-rodata.ld>
#endif
#include <linker/priv_stacks-rom.ld>
#include <linker/kobject-rom.ld>
/*
@ -373,10 +371,8 @@ SECTIONS
__data_rom_start = LOADADDR(_DATA_SECTION_NAME);
#include <linker/common-ram.ld>
#include <linker/priv_stacks.ld>
#include <linker/kobject.ld>
#include <linker/priv_stacks-noinit.ld>
#include <linker/cplusplus-ram.ld>
__data_ram_end = .;


@ -143,7 +143,6 @@ SECTIONS
*/
*(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
#include <linker/priv_stacks-text.ld>
#include <linker/kobject-text.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
@ -198,7 +197,6 @@ SECTIONS
#include <custom-rodata.ld>
#endif
#include <linker/priv_stacks-rom.ld>
#include <linker/kobject-rom.ld>
/*
@ -333,11 +331,7 @@ SECTIONS
__data_rom_start = LOADADDR(_DATA_SECTION_NAME);
#include <linker/common-ram.ld>
#include <linker/priv_stacks.ld>
#include <linker/kobject.ld>
#include <linker/priv_stacks-noinit.ld>
#include <linker/cplusplus-ram.ld>
__data_ram_end = .;


@ -132,7 +132,6 @@ SECTIONS
*/
*(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
#include <linker/priv_stacks-text.ld>
#include <linker/kobject-text.ld>
MMU_ALIGN;
@ -194,7 +193,6 @@ SECTIONS
#include <custom-rodata.ld>
#endif
#include <linker/priv_stacks-rom.ld>
#include <linker/kobject-rom.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
@ -319,11 +317,7 @@ SECTIONS
__data_rom_start = LOADADDR(_DATA_SECTION_NAME);
#include <linker/common-ram.ld>
#include <linker/priv_stacks.ld>
#include <linker/kobject.ld>
#include <linker/priv_stacks-noinit.ld>
#include <linker/cplusplus-ram.ld>
__data_ram_end = .;


@ -166,6 +166,17 @@ enum k_objects {
*/
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_GEN_PRIV_STACKS
/* Metadata struct for K_OBJ_THREAD_STACK_ELEMENT */
struct z_stack_data {
/* Size of the entire stack object, including reserved areas */
size_t size;
/* Stack buffer for privilege mode elevations */
u8_t *priv;
};
#endif /* CONFIG_GEN_PRIV_STACKS */
/* Object extra data. Only some objects use this, determined by object type */
union z_object_data {
/* Backing mutex for K_OBJ_SYS_MUTEX */
@ -174,8 +185,13 @@ union z_object_data {
/* Numerical thread ID for K_OBJ_THREAD */
unsigned int thread_id;
#ifdef CONFIG_GEN_PRIV_STACKS
/* Metadata for K_OBJ_THREAD_STACK_ELEMENT */
struct z_stack_data *stack_data;
#else
/* Stack buffer size for K_OBJ_THREAD_STACK_ELEMENT */
size_t stack_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
/* Futex wait queue and spinlock for K_OBJ_FUTEX */
struct z_futex_data *futex_data;
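
A minimal, hypothetical helper (not part of this change) showing how the two configurations are consumed when the full size of a stack object is needed:

/* Hypothetical illustration: fetch the size recorded for a thread stack
 * kernel object under either configuration.
 */
static inline size_t stack_object_size(struct z_object *obj)
{
#ifdef CONFIG_GEN_PRIV_STACKS
	/* Size lives in the generated z_stack_data entry */
	return obj->data.stack_data->size;
#else
	/* Size is stored directly in the union */
	return obj->data.stack_size;
#endif
}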


@ -39,5 +39,13 @@
*(".kobject_data.rodata*")
#endif
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_GEN_PRIV_STACKS
SECTION_DATA_PROLOGUE(priv_stacks_noinit,,)
{
z_priv_stacks_ram_start = .;
*(".priv_stacks.noinit")
z_priv_stacks_ram_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_GEN_PRIV_STACKS */
#endif /* CONFIG_USERSPACE */
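
The z_priv_stacks_ram_start/end symbols bound the area holding all of the generated privilege stacks. A hedged sketch of how such linker symbols can be consumed from C (the actual users are architecture-specific and not part of this diff):

/* Illustrative only: linker-provided bounds of the privilege stack area,
 * e.g. for programming a memory protection region that covers all of them.
 */
extern char z_priv_stacks_ram_start[];
extern char z_priv_stacks_ram_end[];

static inline size_t priv_stacks_area_size(void)
{
	return (size_t)(z_priv_stacks_ram_end - z_priv_stacks_ram_start);
}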


@ -1,12 +0,0 @@
/*
* Copyright (c) 2017 Linaro Limited.
*
* SPDX-License-Identifier: Apache-2.0
*/
SECTION_DATA_PROLOGUE(priv_stacks_noinit,,)
{
z_priv_stacks_ram_start = .;
*(".priv_stacks.noinit")
z_priv_stacks_ram_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)


@ -1,13 +0,0 @@
/*
* Copyright (c) 2017 Linaro Limited.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifdef CONFIG_USERSPACE
/* Kept in RAM on non-XIP */
#ifdef CONFIG_XIP
*(".priv_stacks.rodata*")
#endif
#endif /* CONFIG_USERSPACE */


@ -1,38 +0,0 @@
/*
* Copyright (c) 2017 Linaro Limited.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifdef CONFIG_USERSPACE
/* We need to reserve room for the gperf generated hash functions.
* Fortunately, unlike the data tables, the size of the code is
* reasonably predictable.
*/
_priv_stacks_text_area_start = .;
*(".priv_stacks.text*")
_priv_stacks_text_area_end = .;
_priv_stacks_text_area_used = _priv_stacks_text_area_end - _priv_stacks_text_area_start;
#ifndef LINKER_PASS2
PROVIDE(z_priv_stack_find = .);
#endif
/* In a valid build the MAX function will always evaluate to the
second argument below, but to give the user a good error message
when the area overflows we need to temporarily corrupt the
location counter, and then detect the overflow with an assertion
later on. */
. = MAX(., _priv_stacks_text_area_start + CONFIG_PRIVILEGED_STACK_TEXT_AREA);
ASSERT(
CONFIG_PRIVILEGED_STACK_TEXT_AREA >= _priv_stacks_text_area_used,
"The configuration system has incorrectly set
'CONFIG_PRIVILEGED_STACK_TEXT_AREA' to
CONFIG_PRIVILEGED_STACK_TEXT_AREA, which is not big enough. You must
through Kconfig either disable 'CONFIG_USERSPACE', or set
'CONFIG_PRIVILEGED_STACK_TEXT_AREA' to a value larger than
CONFIG_PRIVILEGED_STACK_TEXT_AREA."
);
#endif /* CONFIG_USERSPACE */


@ -1,36 +0,0 @@
/*
* Copyright (c) 2017 Linaro Limited
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifdef CONFIG_USERSPACE
/* Constraints:
*
* - changes to the size of this section between build phases
* *must not* shift the memory address of any kernel objects,
* since it contains a hashtable of the memory addresses of those
* kernel objects
*
* - It is OK if this section itself is shifted in between builds; for
* example some arches may precede this section with generated MMU
* page tables which are also unpredictable in size.
*
* The size of the
* gperf tables is both a function of the number of kernel objects,
* *and* the specific memory addresses being hashed. It is not something
* that can be predicted without actually building and compiling it.
*/
SECTION_DATA_PROLOGUE(priv_stacks,,)
{
*(".priv_stacks.data*")
/* This is also unpredictable in size, and has the same constraints.
* On XIP systems this will get put at the very end of ROM.
*/
#ifndef CONFIG_XIP
*(".priv_stacks.rodata*")
#endif
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif /* CONFIG_USERSPACE */


@ -124,6 +124,10 @@ extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
extern K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
CONFIG_ISR_STACK_SIZE);
#ifdef CONFIG_GEN_PRIV_STACKS
extern u8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif
#ifdef __cplusplus
}
#endif


@ -641,7 +641,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
void *p1, void *p2, void *p3,
int prio, u32_t options, s32_t delay)
{
size_t total_size;
size_t total_size, stack_obj_size;
struct z_object *stack_object;
/* The thread and stack objects *must* be in an uninitialized state */
@ -664,9 +664,14 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
/* Testing less-than-or-equal since additional room may have been
* allocated for alignment constraints
*/
Z_OOPS(Z_SYSCALL_VERIFY_MSG(total_size <= stack_object->data.stack_size,
#ifdef CONFIG_GEN_PRIV_STACKS
stack_obj_size = stack_object->data.stack_data->size;
#else
stack_obj_size = stack_object->data.stack_size;
#endif
Z_OOPS(Z_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
"stack size %zu is too big, max is %zu",
total_size, stack_object->data.stack_size));
total_size, stack_obj_size));
/* User threads may only create other user threads and they can't
* be marked as essential


@ -90,6 +90,25 @@ struct perm_ctx {
struct k_thread *parent;
};
#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/gen_kobject_list.py. The privilege
* mode stacks are allocated as an array. The base of the array is
* aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
*/
BUILD_ASSERT(CONFIG_PRIVILEGED_STACK_SIZE % Z_PRIVILEGE_STACK_ALIGN == 0);
u8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
struct z_object *obj = z_object_find(stack);
__ASSERT(obj != NULL, "stack object not found");
__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
"bad stack object");
return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */
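
For context, a hedged sketch of how architecture code might use this lookup when setting up a user thread (the real call sites are arch-specific and unchanged by this patch):

/* Illustrative only: locate the top of the privilege elevation stack
 * associated with a user thread's stack object. Privilege stacks are
 * CONFIG_PRIVILEGED_STACK_SIZE bytes each and grow downward.
 */
static inline u8_t *demo_priv_stack_top(struct k_thread *thread)
{
	u8_t *start = z_priv_stack_find(thread->stack_obj);

	return start + CONFIG_PRIVILEGED_STACK_SIZE;
}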
#ifdef CONFIG_DYNAMIC_OBJECTS
struct dyn_obj {
struct z_object kobj;


@ -38,6 +38,7 @@ STACK_TYPE = "z_thread_stack_element"
thread_counter = 0
sys_mutex_counter = 0
futex_counter = 0
stack_counter = 0
# Global type environment. Populated by pass 1.
type_env = {}
@ -57,6 +58,7 @@ class KobjectInstance:
global thread_counter
global sys_mutex_counter
global futex_counter
global stack_counter
self.addr = addr
self.type_obj = type_obj
@ -76,6 +78,8 @@ class KobjectInstance:
elif self.type_obj.name == "k_futex":
self.data = "&futex_data[%d]" % futex_counter
futex_counter += 1
elif self.type_obj.name == STACK_TYPE:
stack_counter += 1
else:
self.data = 0
@ -604,3 +608,8 @@ class ElfHelper:
@staticmethod
def get_futex_counter():
return futex_counter
@staticmethod
def get_stack_counter():
return stack_counter


@ -144,14 +144,10 @@ void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
#endif
"""
metadata_names = {
"K_OBJ_THREAD" : "thread_id",
"K_OBJ_THREAD_STACK_ELEMENT" : "stack_size",
"K_OBJ_SYS_MUTEX" : "mutex",
"K_OBJ_FUTEX" : "futex_data"
}
def write_gperf_table(fp, eh, objs, static_begin, static_end):
syms = eh.get_symbols()
fp.write(header)
num_mutexes = eh.get_sys_mutex_counter()
if num_mutexes != 0:
@ -171,9 +167,46 @@ def write_gperf_table(fp, eh, objs, static_begin, static_end):
fp.write(", ")
fp.write("};\n")
metadata_names = {
"K_OBJ_THREAD" : "thread_id",
"K_OBJ_SYS_MUTEX" : "mutex",
"K_OBJ_FUTEX" : "futex_data"
}
if "CONFIG_GEN_PRIV_STACKS" in syms:
metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_data"
num_stack = eh.get_stack_counter()
if num_stack != 0:
fp.write("static u8_t Z_GENERIC_SECTION(.priv_stacks.noinit) "
" __aligned(Z_PRIVILEGE_STACK_ALIGN)"
" priv_stacks[%d][CONFIG_PRIVILEGED_STACK_SIZE];\n"
% num_stack);
fp.write("static struct z_stack_data stack_data[%d] = {\n"
% num_stack)
counter = 0
for _, ko in objs.items():
if ko.type_name != "K_OBJ_THREAD_STACK_ELEMENT":
continue
# ko.data currently has the stack size. fetch the value to
# populate the appropriate entry in stack_data, and put
# a reference to the entry in stack_data into the data value
# instead
size = ko.data
ko.data = "&stack_data[%d]" % counter
fp.write("\t{ %d, (u8_t *)(&priv_stacks[%d]) }"
% (size, counter))
if counter != (num_stack - 1):
fp.write(",")
fp.write("\n")
counter += 1
fp.write("};\n")
else:
metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_size"
fp.write("%%\n")
# Setup variables for mapping thread indexes
syms = eh.get_symbols()
thread_max_bytes = syms["CONFIG_MAX_THREAD_BYTES"]
thread_idx_map = {}


@ -1,140 +0,0 @@
#!/usr/bin/env python3
#
# Copyright (c) 2017 Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to generate gperf tables mapping threads to their privileged mode stacks
Some MPU devices require that memory region definitions be aligned to their
own size, which must be a power of two. This introduces difficulties in
reserving memory for the thread's supervisor mode stack inline with the
K_THREAD_STACK_DEFINE() macro.
Instead, the stack used when a user thread elevates privileges is allocated
elsewhere in memory, and a gperf table is created to be able to quickly
determine where the supervisor mode stack is in memory. This is accomplished
by scanning the DWARF debug information in zephyr_prebuilt.elf, identifying
instances of 'struct k_thread', and emitting a gperf configuration file which
allocates memory for each thread's privileged stack and creates the table
mapping thread addresses to these stacks.
"""
import sys
import argparse
import struct
from elf_helper import ElfHelper
kobjects = {
"z_thread_stack_element": (None, False)
}
header = """%compare-lengths
%define lookup-function-name z_priv_stack_map_lookup
%language=ANSI-C
%global-table
%struct-type
"""
# Each privilege stack buffer needs to respect the alignment
# constraints as specified in arm/arch.h.
priv_stack_decl_temp = ("static u8_t __used"
" __aligned(Z_PRIVILEGE_STACK_ALIGN)"
" priv_stack_%x[CONFIG_PRIVILEGED_STACK_SIZE];\n")
includes = """#include <kernel.h>
#include <string.h>
"""
structure = """struct _k_priv_stack_map {
char *name;
u8_t *priv_stack_addr;
};
%%
"""
# Different versions of gperf have different prototypes for the lookup
# function, best to implement the wrapper here. The pointer value itself is
# turned into a string, we told gperf to expect binary strings that are not
# NULL-terminated.
footer = """%%
u8_t *z_priv_stack_find(void *obj)
{
const struct _k_priv_stack_map *map =
z_priv_stack_map_lookup((const char *)obj, sizeof(void *));
return map->priv_stack_addr;
}
"""
def write_gperf_table(fp, eh, objs):
fp.write(header)
# priv stack declarations
fp.write("%{\n")
fp.write(includes)
for obj_addr in objs:
fp.write(priv_stack_decl_temp % (obj_addr))
fp.write("%}\n")
# structure declaration
fp.write(structure)
for obj_addr in objs:
byte_str = struct.pack("<I" if eh.little_endian else ">I", obj_addr)
fp.write("\"")
for byte in byte_str:
val = "\\x%02x" % byte
fp.write(val)
fp.write("\",priv_stack_%x\n" % obj_addr)
fp.write(footer)
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-k", "--kernel", required=True,
help="Input zephyr ELF binary")
parser.add_argument(
"-o", "--output", required=True,
help="Output list of kernel object addresses for gperf use")
parser.add_argument("-v", "--verbose", action="store_true",
help="Print extra debugging information")
args = parser.parse_args()
def main():
parse_args()
eh = ElfHelper(args.kernel, args.verbose, kobjects, [])
syms = eh.get_symbols()
max_threads = syms["CONFIG_MAX_THREAD_BYTES"] * 8
objs = eh.find_kobjects(syms)
if not objs:
sys.stderr.write("WARNING: zero kobject found in %s\n"
% args.kernel)
thread_counter = eh.get_thread_counter()
if thread_counter > max_threads:
sys.exit("Too many thread objects ({})\n"
"Increase CONFIG_MAX_THREAD_BYTES to {}"
.format(thread_counter, -(-thread_counter // 8)))
with open(args.output, "w") as fp:
write_gperf_table(fp, eh, objs)
if __name__ == "__main__":
main()