/*
 * Copyright (c) 2010-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel swapper code for IA-32
 *
 * This module implements the arch_swap() routine for the IA-32 architecture.
 */

#include <zephyr/arch/x86/ia32/asm.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <kernel_arch_data.h>
#include <offsets_short.h>

	/* exports (internal APIs) */

	GTEXT(arch_swap)
	GTEXT(z_x86_thread_entry_wrapper)
	GTEXT(_x86_user_thread_entry_wrapper)

	/* externs */
#if !defined(CONFIG_X86_KPTI) && defined(CONFIG_X86_USERSPACE)
	GTEXT(z_x86_swap_update_page_tables)
#endif
	GDATA(_k_neg_eagain)

/*
 * Given that arch_swap() is called to effect a cooperative context switch,
 * only the non-volatile integer registers need to be saved in the TCS of the
 * outgoing thread. The restoration of the integer registers of the incoming
 * thread depends on whether that thread was preemptively context switched out.
 * The X86_THREAD_FLAG_INT and _EXC bits in the k_thread->arch.flags field will
 * signify that the thread was preemptively context switched out, and thus both
 * the volatile and non-volatile integer registers need to be restored.
 *
 * The non-volatile registers need to be scrubbed to ensure they contain no
 * sensitive information that could compromise system security. This is to
 * make sure that information will not be leaked from one application to
 * another via these volatile registers.
 *
 * Here, the integer registers (EAX, ECX, EDX) have been scrubbed. Any changes
 * to this routine that alter the values of these registers MUST be reviewed
 * for potential security impacts.
 *
 * Floating point registers are handled using a lazy save/restore mechanism
 * since it's expected relatively few threads will be created with the
 * K_FP_REGS or K_SSE_REGS option bits. The kernel data structure maintains a
 * 'current_fp' field to keep track of the thread that "owns" the floating
 * point registers. Floating point registers consist of ST0->ST7 (x87 FPU and
 * MMX registers) and XMM0 -> XMM7.
 *
 * All floating point registers are considered 'volatile' thus they will only
 * be saved/restored when a preemptive context switch occurs.
 *
 * Floating point registers are currently NOT scrubbed, and are subject to
 * potential security leaks.
 *
 * C function prototype:
 *
 * unsigned int arch_swap (unsigned int eflags);
 */
SECTION_FUNC(PINNED_TEXT, arch_swap)
#if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
	pushl %eax
	call z_thread_mark_switched_out
	popl %eax
#endif
	/*
	 * Push all non-volatile registers onto the stack; do not copy
	 * any of these registers into the k_thread. Only the 'esp' register
	 * (after all the pushes have been performed) will be stored in the
	 * k_thread.
	 */

	pushl %edi
	movl $_kernel, %edi

	pushl %esi
	pushl %ebx
	pushl %ebp

	/*
	 * Carve space for the return value. Setting it to a default of
	 * -EAGAIN eliminates the need for the timeout code to set it.
	 * If another value is ever needed, it can be modified with
	 * arch_thread_return_value_set().
	 */

	pushl _k_neg_eagain

	/* save esp into k_thread structure */

	movl _kernel_offset_to_current(%edi), %edx
	movl %esp, _thread_offset_to_esp(%edx)

	/* the thread to run next is always in the ready queue cache */

	movl _kernel_offset_to_ready_q_cache(%edi), %eax

	/*
	 * At this point, the %eax register contains the 'k_thread *' of the
	 * thread to be swapped in, and %edi still contains &_kernel. %edx
	 * has the pointer to the outgoing thread.
	 */
#if defined(CONFIG_X86_USERSPACE) && !defined(CONFIG_X86_KPTI)

	push %eax
	call z_x86_swap_update_page_tables
	pop %eax

	/* Page tables updated. All memory access after this point needs to be
	 * to memory that has the same mappings and access attributes wrt
	 * supervisor mode!
	 */
#endif

#ifdef CONFIG_EAGER_FPU_SHARING
	/* Eager floating point state restore logic
	 *
	 * Addresses CVE-2018-3665
	 * Used as an alternate to CONFIG_LAZY_FPU_SHARING if there is any
	 * sensitive data in the floating point/SIMD registers in a system
	 * with untrusted threads.
	 *
	 * Unconditionally save/restore floating point registers on context
	 * switch.
	 */
	/* Save outgoing thread context */
#ifdef CONFIG_X86_SSE
	fxsave _thread_offset_to_preempFloatReg(%edx)
	fninit
#else
	fnsave _thread_offset_to_preempFloatReg(%edx)
#endif
	/* Restore incoming thread context */
#ifdef CONFIG_X86_SSE
	fxrstor _thread_offset_to_preempFloatReg(%eax)
#else
	frstor _thread_offset_to_preempFloatReg(%eax)
#endif /* CONFIG_X86_SSE */
#elif defined(CONFIG_LAZY_FPU_SHARING)
	/*
	 * Clear the CR0[TS] bit (in the event the current thread
	 * doesn't have floating point enabled) to prevent the "device not
	 * available" exception when executing the subsequent fxsave/fnsave
	 * and/or fxrstor/frstor instructions.
	 *
	 * Indeed, it's possible that none of the aforementioned instructions
	 * need to be executed, for example, the incoming thread doesn't
	 * utilize floating point operations. However, the code responsible
	 * for setting the CR0[TS] bit appropriately for the incoming thread
	 * (just after the 'restoreContext_NoFloatSwap' label) will leverage
	 * the fact that the following 'clts' was performed already.
	 */

	clts


	/*
	 * Determine whether the incoming thread utilizes floating point regs
	 * _and_ whether the thread was context switched out preemptively.
	 */

	testb $_FP_USER_MASK, _thread_offset_to_user_options(%eax)
	je restoreContext_NoFloatSwap


	/*
	 * The incoming thread uses floating point registers:
	 * Was it the last thread to use floating point registers?
	 * If so, there is no need to restore the floating point context.
	 */

	movl _kernel_offset_to_current_fp(%edi), %ebx
	cmpl %ebx, %eax
	je restoreContext_NoFloatSwap


	/*
	 * The incoming thread uses floating point registers and it was _not_
	 * the last thread to use those registers:
	 * Check whether the current FP context actually needs to be saved
	 * before swapping in the context of the incoming thread.
	 */

	testl %ebx, %ebx
	jz restoreContext_NoFloatSave


	/*
	 * The incoming thread uses floating point registers and it was _not_
	 * the last thread to use those registers _and_ the current FP context
	 * needs to be saved.
	 *
	 * Given that the ST[0] -> ST[7] and XMM0 -> XMM7 registers are all
	 * 'volatile', only save the registers if the "current FP context"
	 * was preemptively context switched.
	 */

	testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%ebx)
	je restoreContext_NoFloatSave


#ifdef CONFIG_X86_SSE
	testb $K_SSE_REGS, _thread_offset_to_user_options(%ebx)
	je x87FloatSave

	/*
	 * 'fxsave' does NOT perform an implicit 'fninit', therefore issue an
	 * 'fninit' to ensure a "clean" FPU state for the incoming thread
	 * (for the case when the fxrstor is not executed).
	 */

	fxsave _thread_offset_to_preempFloatReg(%ebx)
	fninit
	jmp floatSaveDone

x87FloatSave:
#endif /* CONFIG_X86_SSE */

	/* 'fnsave' performs an implicit 'fninit' after saving state! */

	fnsave _thread_offset_to_preempFloatReg(%ebx)

	/* fall through to 'floatSaveDone' */

floatSaveDone:
restoreContext_NoFloatSave:

	/*********************************************************
	 * Restore floating point context of the incoming thread.
	 *********************************************************/

	/*
	 * Again, given that the ST[0] -> ST[7] and XMM0 -> XMM7 registers are
	 * all 'volatile', only restore the registers if the incoming thread
	 * was previously preemptively context switched out.
	 */

	testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%eax)
	je restoreContext_NoFloatRestore

#ifdef CONFIG_X86_SSE
	testb $K_SSE_REGS, _thread_offset_to_user_options(%eax)
	je x87FloatRestore

	fxrstor _thread_offset_to_preempFloatReg(%eax)
	jmp floatRestoreDone

x87FloatRestore:

#endif /* CONFIG_X86_SSE */

	frstor _thread_offset_to_preempFloatReg(%eax)

	/* fall through to 'floatRestoreDone' */

floatRestoreDone:
restoreContext_NoFloatRestore:

	/* record that the incoming thread "owns" the floating point registers */

	movl %eax, _kernel_offset_to_current_fp(%edi)

	/*
	 * Branch point when none of the floating point registers need to be
	 * swapped because: a) the incoming thread does not use them OR
	 * b) the incoming thread is the last thread that used those registers.
	 */

restoreContext_NoFloatSwap:

	/*
	 * Leave CR0[TS] clear if incoming thread utilizes the floating point
	 * registers
	 */

	testb $_FP_USER_MASK, _thread_offset_to_user_options(%eax)
	jne CROHandlingDone

	/*
	 * The incoming thread does NOT currently utilize the floating point
	 * registers, so set CR0[TS] to ensure the "device not available"
	 * exception occurs on the first attempt to access a x87 FPU, MMX,
	 * or XMM register.
	 */

	movl %cr0, %edx
	orl $0x8, %edx
	movl %edx, %cr0

CROHandlingDone:

#endif /* CONFIG_LAZY_FPU_SHARING */

	/* update _kernel.current to reflect incoming thread */

	movl %eax, _kernel_offset_to_current(%edi)

#if defined(CONFIG_X86_USE_THREAD_LOCAL_STORAGE)
	pushl %eax

	call z_x86_tls_update_gdt

	/* Since segment descriptor has changed, need to reload */
	movw $GS_TLS_SEG, %ax
	movw %ax, %gs

	popl %eax
#endif

	/* recover thread stack pointer from k_thread */

	movl _thread_offset_to_esp(%eax), %esp

	/* load return value from a possible arch_thread_return_value_set() */

	popl %eax

	/* pop the non-volatile registers from the stack */

	popl %ebp
	popl %ebx
	popl %esi
	popl %edi

	/*
	 * %eax may contain one of these values:
	 *
	 * - the return value for arch_swap() that was set up by a call to
	 *   arch_thread_return_value_set()
	 * - -EINVAL
	 */

	/* Utilize the 'eflags' parameter to arch_swap() */

	pushl 4(%esp)
	popfl

#if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
	pushl %eax
	call z_thread_mark_switched_in
	popl %eax
#endif
	ret
|
2017-08-30 23:06:30 +02:00
|
|
|
|
2018-03-16 21:01:33 +01:00
|
|
|
#ifdef _THREAD_WRAPPER_REQUIRED
/**
 *
 * @brief Adjust stack/parameters before invoking thread entry function
 *
 * This function adjusts the initial stack frame created by arch_new_thread()
 * such that the GDB stack frame unwinders recognize it as the outermost frame
 * in the thread's stack.
 *
 * GDB normally stops unwinding a stack when it detects that it has
 * reached a function called main(). Kernel threads, however, do not have
 * a main() function, and there does not appear to be a simple way of stopping
 * the unwinding of the stack.
 *
 * Given the initial thread created by arch_new_thread(), GDB expects to find
 * a return address on the stack immediately above the thread entry routine
 * z_thread_entry, in the location occupied by the initial EFLAGS. GDB
 * attempts to examine the memory at this return address, which typically
 * results in an invalid access to page 0 of memory.
 *
 * This function overwrites the initial EFLAGS with zero. When GDB subsequently
 * attempts to examine memory at address zero, the PeekPoke driver detects
 * an invalid access to address zero and returns an error, which causes the
 * GDB stack unwinder to stop somewhat gracefully.
 *
 * The initial EFLAGS cannot be overwritten until after z_swap() has swapped in
 * the new thread for the first time. This routine is called by z_swap() the
 * first time that the new thread is swapped in, and it jumps to
 * z_thread_entry after it has done its work.
 *
 *       __________________
 *      |      param3      | <------ Top of the stack
 *      |__________________|
 *      |      param2      |           Stack Grows Down
 *      |__________________|                  |
 *      |      param1      |                  V
 *      |__________________|
 *      |      pEntry      |
 *      |__________________|
 *      | initial EFLAGS   | <---- ESP when invoked by z_swap()
 *      |__________________|       (Zeroed by this routine)
 *
 * The address of the thread entry function needs to be in %edi when this is
 * invoked. It will either be z_thread_entry, or if userspace is enabled,
 * _arch_drop_to_user_mode if this is a user thread.
 *
 * @return this routine does NOT return.
 */
SECTION_FUNC(PINNED_TEXT, z_x86_thread_entry_wrapper)
	movl $0, (%esp)		/* zero the initial EFLAGS slot for GDB */
	jmp *%edi		/* tail-jump to the thread entry point */
#endif /* _THREAD_WRAPPER_REQUIRED */
|