x86: add support for thread local storage

Adds the necessary bits to initialize TLS in the stack
area and sets up CPU registers during context switch.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Authored by Daniel Leung on 2020-09-29 15:32:35 -07:00; committed by Andrew Boie
commit 4b38392ded
9 changed files with 91 additions and 2 deletions
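For background on why the context-switch path below has to touch %gs at all: on IA-32, compiler-generated TLS accesses are segment-relative, so each thread needs its own %gs base pointing at its TLS area in the stack. A minimal, hypothetical C sketch (editor's illustration, not part of this commit; the variable and function names are made up) of the kind of variable this machinery enables:

/*
 * Editor's sketch (hypothetical, not from the commit): a __thread variable
 * as application code would use it once TLS support is in place. On IA-32
 * targets using the GS-based TLS model, each access to this variable is
 * emitted as a %gs-relative load/store, which is why the swap code further
 * down must repoint %gs at the incoming thread's TLS area.
 */
#include <stdio.h>

static __thread int per_thread_counter;	/* one copy per thread */

void bump(void)
{
	per_thread_counter++;		/* %gs-relative access on IA-32 */
	printf("counter = %d\n", per_thread_counter);
}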

View file

@@ -46,7 +46,7 @@ config X86
 	select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE
 	select ARCH_HAS_GDBSTUB if !X86_64
 	select ARCH_HAS_TIMING_FUNCTIONS
-	select ARCH_HAS_THREAD_LOCAL_STORAGE if X86_64
+	select ARCH_HAS_THREAD_LOCAL_STORAGE
 	help
 	  x86 architecture

View file

@@ -215,4 +215,12 @@ config X86_EXCEPTION_STACK_TRACE
 	help
 	  Internal config to enable runtime stack traces on fatal exceptions.
 
+config X86_USE_THREAD_LOCAL_STORAGE
+	bool
+	default y if THREAD_LOCAL_STORAGE
+	select SET_GDT
+	select GDT_DYNAMIC
+	help
+	  Internal config to enable thread local storage.
+
 endif # !X86_64

View file

@@ -26,5 +26,10 @@ zephyr_library_sources_ifdef(CONFIG_GDBSTUB ia32/gdbstub.c)
 zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP ia32/coredump.c)
 
+zephyr_library_sources_ifdef(
+  CONFIG_X86_USE_THREAD_LOCAL_STORAGE
+  ia32/tls.c
+)
+
 # Last since we declare default exception handlers here
 zephyr_library_sources(ia32/fatal.c)

View file

@@ -303,6 +303,18 @@ CROHandlingDone:
 	movl	%eax, _kernel_offset_to_current(%edi)
 
+#if defined(CONFIG_X86_USE_THREAD_LOCAL_STORAGE)
+	pushl	%eax
+	call	z_x86_tls_update_gdt
+
+	/* The segment descriptor has changed, so GS must be reloaded */
+	movw	$GS_TLS_SEG, %ax
+	movw	%ax, %gs
+
+	popl	%eax
+#endif
+
 	/* recover thread stack pointer from k_thread */
 	movl	_thread_offset_to_esp(%eax), %esp
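The explicit movw into %gs is needed because a segment register caches its descriptor (the hidden part) at load time; rewriting the GDT entry alone does not change the base the CPU is already using. A rough C/inline-assembly equivalent of that reload step, shown purely as an editor's illustration (GCC syntax, IA-32; reload_gs is a made-up helper name):

/*
 * Illustrative only: reloading %gs so the CPU re-reads the just-rewritten
 * GDT descriptor. The selector value would be GS_TLS_SEG, as in the
 * assembly above.
 */
static inline void reload_gs(unsigned short selector)
{
	__asm__ volatile("movw %0, %%gs" : : "r" (selector) : "memory");
}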

arch/x86/core/ia32/tls.c (new file, +30 lines)
View file

@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2020 Intel Corporation.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <kernel_internal.h>
+#include <arch/x86/ia32/arch.h>
+#include <arch/x86/ia32/segmentation.h>
+
+#define ENTRY_NUM	(GS_TLS_SEG >> 3)
+
+void z_x86_tls_update_gdt(struct k_thread *thread)
+{
+	/*
+	 * GS is used for thread local storage and points to
+	 * the TLS storage area in the thread's stack. Here we
+	 * update the corresponding descriptor so that GS gets
+	 * the new base address.
+	 *
+	 * The reloading of the descriptor into GS is taken care
+	 * of inside the assembly swap code, just before swapping
+	 * into the new thread.
+	 */
+	struct segment_descriptor *sd = &_gdt.entries[ENTRY_NUM];
+
+	sd->base_low = thread->tls & 0xFFFFU;
+	sd->base_mid = (thread->tls >> 16) & 0xFFU;
+	sd->base_hi = (thread->tls >> 24) & 0xFFU;
+}
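The three assignments above scatter the 32-bit TLS pointer across the descriptor's split base fields; the CPU stitches them back together when the selector is loaded. A standalone sketch (editor's illustration, not Zephyr code; it assumes the 16/8/8 field widths implied by the masks above) that checks the round trip:

/*
 * Editor's sketch, not Zephyr code: split a 32-bit base the way
 * z_x86_tls_update_gdt() does, then reassemble it as the CPU would.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t tls = 0x12345678U;			/* example thread->tls value */

	uint16_t base_low = tls & 0xFFFFU;		/* bits  0..15 */
	uint8_t  base_mid = (tls >> 16) & 0xFFU;	/* bits 16..23 */
	uint8_t  base_hi  = (tls >> 24) & 0xFFU;	/* bits 24..31 */

	uint32_t base = (uint32_t)base_low |
			((uint32_t)base_mid << 16) |
			((uint32_t)base_hi << 24);

	assert(base == tls);
	return 0;
}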

View file

@@ -180,6 +180,13 @@ def main():
     else:
         num_entries = 3
 
+    use_tls = False
+    if ("CONFIG_THREAD_LOCAL_STORAGE" in syms) and ("CONFIG_X86_64" not in syms):
+        use_tls = True
+
+        # x86_64 does not use a GDT descriptor for thread local storage
+        num_entries += 1
+
     gdt_base = syms["_gdt"]
 
     with open(args.output_gdt, "wb") as fp:
@@ -205,7 +212,7 @@
         # Selector 0x20: double-fault TSS
         fp.write(create_tss_entry(df_tss, 0x67, 0))
 
-        if num_entries == 7:
+        if num_entries >= 7:
             # Selector 0x28: code descriptor, dpl = 3
             fp.write(create_code_data_entry(0, 0xFFFFF, 3,
                                             FLAGS_GRAN, ACCESS_EX | ACCESS_RW))
@@ -214,6 +221,15 @@
             fp.write(create_code_data_entry(0, 0xFFFFF, 3,
                                             FLAGS_GRAN, ACCESS_RW))
 
+        if use_tls:
+            # Selector 0x18, 0x28 or 0x38 (depending on the entries above):
+            # data descriptor, dpl = 3
+            #
+            # Used for thread local storage; its base address is rewritten
+            # at runtime for each thread.
+            fp.write(create_code_data_entry(0, 0xFFFFF, 3,
+                                            FLAGS_GRAN, ACCESS_RW))
+
 if __name__ == "__main__":
     main()
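For reference, create_code_data_entry(0, 0xFFFFF, 3, FLAGS_GRAN, ACCESS_RW) describes a flat, page-granular, DPL-3 read/write data segment. A hypothetical C encoder for such a descriptor, following the standard IA-32 GDT layout (this is the editor's sketch of what the script is assumed to emit, not code from gen_gdt.py):

#include <stdint.h>
#include <stdio.h>

/* Pack a flat read/write data descriptor: base, limit (in 4K pages when
 * granularity is set), and DPL, per the standard IA-32 GDT layout.
 */
static uint64_t data_descriptor(uint32_t base, uint32_t limit, unsigned int dpl)
{
	uint64_t d = 0;

	d |= limit & 0xFFFFU;				/* limit 15..0   */
	d |= (uint64_t)(base & 0xFFFFFFU) << 16;	/* base 23..0    */
	d |= (uint64_t)0x12 << 40;			/* S=1, data, RW */
	d |= (uint64_t)(dpl & 0x3U) << 45;		/* DPL           */
	d |= (uint64_t)1 << 47;				/* present       */
	d |= (uint64_t)((limit >> 16) & 0xFU) << 48;	/* limit 19..16  */
	d |= (uint64_t)1 << 54;				/* D/B: 32-bit   */
	d |= (uint64_t)1 << 55;				/* G: 4K pages   */
	d |= (uint64_t)((base >> 24) & 0xFFU) << 56;	/* base 31..24   */

	return d;
}

int main(void)
{
	/* Flat 4 GiB, DPL-3, read/write data segment: the TLS descriptor
	 * template whose base is later patched by z_x86_tls_update_gdt().
	 */
	printf("0x%016llx\n", (unsigned long long)data_descriptor(0, 0xFFFFF, 3));
	return 0;
}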

View file

@@ -63,6 +63,10 @@ extern void z_x86_thread_entry_wrapper(k_thread_entry_t entry,
 					void *p1, void *p2, void *p3);
 #endif /* _THREAD_WRAPPER_REQUIRED */
 
+#ifdef CONFIG_THREAD_LOCAL_STORAGE
+extern void z_x86_tls_update_gdt(struct k_thread *thread);
+#endif
+
 #ifdef __cplusplus
 }
 #endif

View file

@@ -37,6 +37,19 @@
 #define MAIN_TSS	0x18
 #define DF_TSS		0x20
 
+/*
+ * Segment selector used for thread local storage.
+ * Keep these values in sync with gen_gdt.py.
+ * The OR with 0x03 sets the requested privilege level (RPL) to 3,
+ * limiting the privilege of the selector.
+ */
+#if defined(CONFIG_USERSPACE)
+#define GS_TLS_SEG	(0x38 | 0x03)
+#elif defined(CONFIG_HW_STACK_PROTECTION)
+#define GS_TLS_SEG	(0x28 | 0x03)
+#else
+#define GS_TLS_SEG	(0x18 | 0x03)
+#endif
+
 /**
  * Macro used internally by NANO_CPU_INT_REGISTER and NANO_CPU_INT_REGISTER_ASM.
  * Not meant to be used explicitly by platform, driver or application code.
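These selector values follow the usual x86 encoding: bits 3 and up are the GDT index, bit 2 selects GDT versus LDT, and the low two bits are the requested privilege level. A small standalone check (editor's sketch, not code from the tree) of how the three GS_TLS_SEG variants decompose:

/* Editor's sketch: decompose the GS_TLS_SEG selector values above. */
#include <assert.h>

#define SEL_INDEX(sel)	((sel) >> 3)	/* GDT entry index */
#define SEL_RPL(sel)	((sel) & 0x3)	/* requested privilege level */

int main(void)
{
	assert(SEL_INDEX(0x18 | 0x03) == 3);	/* no extra descriptors   */
	assert(SEL_INDEX(0x28 | 0x03) == 5);	/* HW stack protection    */
	assert(SEL_INDEX(0x38 | 0x03) == 7);	/* userspace enabled      */
	assert(SEL_RPL(0x18 | 0x03) == 3);	/* RPL 3: usable at CPL 3 */
	return 0;
}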

View file

@@ -120,6 +120,7 @@ SECTIONS
 	_image_rodata_start = .;
 
 	#include <linker/common-rom.ld>
+	#include <linker/thread-local-storage.ld>
 
 	SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
 	{