kernel: rename __swap

This is part of the core kernel -> architecture API and
has been renamed to z_arch_swap().

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie 2019-09-21 18:05:17 -07:00 committed by Anas Nashif
commit 2c1fb971e0
15 changed files with 59 additions and 58 deletions
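For orientation: the rename changes the symbol, not the contract. A minimal sketch of the call pattern the per-architecture comments below describe (illustrative only; not an excerpt from this commit):

/* The caller locks interrupts and hands the lock key to the renamed
 * primitive. The call returns -EAGAIN once this thread runs again,
 * unless another thread overrode the value with
 * z_arch_thread_return_value_set().
 */
unsigned int key = irq_lock();  /* disable interrupts, keep the key */
int ret = z_arch_swap(key);     /* cooperative switch; resumes here later */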

View file

@@ -17,13 +17,13 @@ extern const int _k_neg_eagain;
*
* @brief Initiate a cooperative context switch
*
* The __swap() routine is invoked by various kernel services to effect
* a cooperative context switch. Prior to invoking __swap(), the caller
* The z_arch_swap() routine is invoked by various kernel services to effect
* a cooperative context switch. Prior to invoking z_arch_swap(), the caller
* disables interrupts via irq_lock() and the return 'key' is passed as a
* parameter to __swap(). The 'key' actually represents the BASEPRI register
* parameter to z_arch_swap(). The 'key' actually represents the BASEPRI register
* prior to disabling interrupts via the BASEPRI mechanism.
*
* __swap() itself does not do much.
* z_arch_swap() itself does not do much.
*
* It simply stores the intlock key (the BASEPRI value) parameter into
* current->basepri, and then triggers a PendSV exception, which does
@@ -33,7 +33,7 @@ extern const int _k_neg_eagain;
* __pendsv all come from handling an interrupt, which means we know the
* interrupts were not locked: in that case the BASEPRI value is 0.
*
* Given that __swap() is called to effect a cooperative context switch,
* Given that z_arch_swap() is called to effect a cooperative context switch,
* only the caller-saved integer registers need to be saved for the
* outgoing thread. This is all performed by the hardware, which stores them in
* its exception stack frame, created when handling the __pendsv exception.
@@ -45,7 +45,7 @@ extern const int _k_neg_eagain;
* z_arch_thread_return_value_set()
*
*/
int __swap(int key)
int z_arch_swap(unsigned int key)
{
#ifdef CONFIG_EXECUTION_BENCHMARKING
read_timer_start_of_swap();
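A hedged sketch of the flow the long comment above describes, assuming the kernel's _current alias for the running thread and reusing the arch.basepri and arch.swap_return_value fields that the ARM test at the end of this commit also touches (greatly simplified; not the literal implementation):

int z_arch_swap(unsigned int key)
{
	/* stash the BASEPRI 'key' so the context switch code can
	 * restore the caller's interrupt lock state later
	 */
	_current->arch.basepri = key;

	/* trigger PendSV; hardware plus the __pendsv handler perform
	 * the actual register save/restore and thread switch
	 */
	SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;

	/* execution resumes here once this thread is swapped back in:
	 * -EAGAIN unless overridden via z_arch_thread_return_value_set()
	 */
	return _current->arch.swap_return_value;
}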

View file

@@ -118,7 +118,7 @@ out_fp_endif:
isb /* Make the effect of disabling interrupts be realized immediately */
#elif defined(CONFIG_ARMV7_R)
/*
* Interrupts are still disabled from __swap so empty clause
* Interrupts are still disabled from z_arch_swap so empty clause
* here to avoid the preprocessor error below
*/
#else

View file

@@ -12,7 +12,7 @@ GTEXT(_exception)
/* import */
GTEXT(_Fault)
GTEXT(__swap)
GTEXT(z_arch_swap)
#ifdef CONFIG_IRQ_OFFLOAD
GTEXT(z_irq_do_offload)
GTEXT(_offload_routine)
@@ -126,7 +126,7 @@ on_irq_stack:
/*
* A context reschedule is required: keep the volatile registers of
* the interrupted thread on the context's stack. Utilize
* the existing __swap() primitive to save the remaining
* the existing z_arch_swap() primitive to save the remaining
* thread's registers (including floating point) and perform
* a switch to the new thread.
*/
@@ -143,7 +143,7 @@ on_irq_stack:
*/
mov r4, et
call __swap
call z_arch_swap
jmpi _exception_exit
#else
jmpi no_reschedule

View file

@@ -8,18 +8,18 @@
#include <offsets_short.h>
/* exports */
GTEXT(__swap)
GTEXT(z_arch_swap)
GTEXT(z_thread_entry_wrapper)
/* imports */
GTEXT(sys_trace_thread_switched_in)
GTEXT(_k_neg_eagain)
/* unsigned int __swap(unsigned int key)
/* unsigned int z_arch_swap(unsigned int key)
*
* Always called with interrupts locked
*/
SECTION_FUNC(exception.other, __swap)
SECTION_FUNC(exception.other, z_arch_swap)
#ifdef CONFIG_EXECUTION_BENCHMARKING
/* Get a reference to _kernel in r10 */
@@ -56,7 +56,7 @@ SECTION_FUNC(exception.other, __swap)
ldw r11, _kernel_offset_to_current(r10)
/* Store all the callee saved registers. We either got here via
* an exception or from a cooperative invocation of __swap() from C
* an exception or from a cooperative invocation of z_arch_swap() from C
* domain, so all the caller-saved registers have already been
* saved by the exception asm or the calling C code already.
*/
@@ -114,7 +114,7 @@ SECTION_FUNC(exception.other, __swap)
ldw sp, _thread_offset_to_sp(r2)
/* We need to irq_unlock(current->coopReg.key);
* key was supplied as argument to __swap(). Fetch it.
* key was supplied as argument to z_arch_swap(). Fetch it.
*/
ldw r3, _thread_offset_to_key(r2)

View file

@@ -187,7 +187,7 @@ static void posix_preexit_cleanup(void)
/**
* Let the ready thread run and block this thread until it is allowed again
*
* called from __swap() which does the picking from the kernel structures
* called from z_arch_swap() which does the picking from the kernel structures
*/
void posix_swap(int next_allowed_thread_nbr, int this_th_nbr)
{
@@ -256,7 +256,7 @@ static void posix_cleanup_handler(void *arg)
/**
* Helper function to start a Zephyr thread as a POSIX thread:
* It will block the thread until a __swap() is called for it
* It will block the thread until a z_arch_swap() is called for it
*
* Spawned from posix_new_thread() below
*/

View file

@@ -9,7 +9,7 @@
* @file
* @brief Kernel swapper code for POSIX
*
* This module implements the __swap() routine for the POSIX architecture.
* This module implements the z_arch_swap() routine for the POSIX architecture.
*
*/
@@ -23,10 +23,10 @@
*
* @brief Initiate a cooperative context switch
*
* The __swap() routine is invoked by various kernel services to effect
* a cooperative context switch. Prior to invoking __swap(), the
* The z_arch_swap() routine is invoked by various kernel services to effect
* a cooperative context switch. Prior to invoking z_arch_swap(), the
* caller disables interrupts (via irq_lock) and the return 'key'
* is passed as a parameter to __swap().
* is passed as a parameter to z_arch_swap().
*
*
* @return -EAGAIN, or a return value set by a call to
@@ -34,7 +34,7 @@
*
*/
int __swap(unsigned int key)
int z_arch_swap(unsigned int key)
{
/*
* struct k_thread * _kernel.current is the currently running thread
@@ -80,7 +80,7 @@ int __swap(unsigned int key)
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/**
* This is just a version of __swap() in which we do not save anything about the
* This is just a version of z_arch_swap() in which we do not save anything about the
* current thread.
*
* Note that we will never come back to this thread:

View file

@@ -9,18 +9,18 @@
#include <offsets_short.h>
/* exports */
GTEXT(__swap)
GTEXT(z_arch_swap)
GTEXT(z_thread_entry_wrapper)
/* Use ABI name of registers for the sake of simplicity */
/*
* unsigned int __swap(unsigned int key)
* unsigned int z_arch_swap(unsigned int key)
*
* Always called with interrupts locked
* key is stored in a0 register
*/
SECTION_FUNC(exception.other, __swap)
SECTION_FUNC(exception.other, z_arch_swap)
/* Make a system call to perform context switch */
#ifdef CONFIG_EXECUTION_BENCHMARKING
@@ -76,7 +76,7 @@ SECTION_FUNC(exception.other, __swap)
* Restored register a0 contains IRQ lock state of thread.
*
* Prior to unlocking irq, load return value of
* __swap to temp register t2 (from
* z_arch_swap to temp register t2 (from
* _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN,
* unless someone has previously called z_arch_thread_return_value_set(..).
*/
@@ -85,7 +85,7 @@ SECTION_FUNC(exception.other, __swap)
/* Get pointer to _kernel.current */
RV_OP_LOADREG t1, _kernel_offset_to_current(t0)
/* Load return value of __swap function in temp register t2 */
/* Load return value of z_arch_swap function in temp register t2 */
lw t2, _thread_offset_to_swap_return_value(t1)
/*
@@ -108,7 +108,7 @@ SECTION_FUNC(exception.other, __swap)
SECTION_FUNC(TEXT, z_thread_entry_wrapper)
/*
* z_thread_entry_wrapper is called for every new thread upon the return
* of __swap or ISR. Its address, as well as its input function
* of z_arch_swap or ISR. Its address, as well as its input function
* arguments thread_entry_t, void *, void *, void * are restored from
* the thread stack (initialized via function _thread).
* In this case, thread_entry_t, void *, void * and void * are stored

View file

@@ -30,7 +30,7 @@
/* externs */
GTEXT(__swap)
GTEXT(z_arch_swap)
#ifdef CONFIG_SYS_POWER_MANAGEMENT
GTEXT(z_sys_power_save_idle_exit)
@@ -126,7 +126,7 @@ SECTION_FUNC(TEXT, _interrupt_enter)
/* Push EDI as we will use it for scratch space.
* Rest of the callee-saved regs get saved by invocation of C
* functions (isr handler, __swap(), etc)
* functions (isr handler, z_arch_swap(), etc)
*/
pushl %edi
@@ -228,7 +228,7 @@ alreadyOnIntStack:
/*
* Set X86_THREAD_FLAG_INT bit in k_thread to allow the upcoming call
* to __swap() to determine whether non-floating registers need to be
* to z_arch_swap() to determine whether non-floating registers need to be
* preserved using the lazy save/restore algorithm, or to indicate to
* debug tools that a preemptive context switch has occurred.
*/
@@ -240,7 +240,7 @@ alreadyOnIntStack:
/*
* A context reschedule is required: keep the volatile registers of
* the interrupted thread on the context's stack. Utilize
* the existing __swap() primitive to save the remaining
* the existing z_arch_swap() primitive to save the remaining
* thread's registers (including floating point) and perform
* a switch to the new thread.
*/
@@ -251,12 +251,12 @@ alreadyOnIntStack:
call z_check_stack_sentinel
#endif
pushfl /* push KERNEL_LOCK_KEY argument */
call __swap
call z_arch_swap
addl $4, %esp /* pop KERNEL_LOCK_KEY argument */
/*
* The interrupted thread has now been scheduled,
* as the result of a _later_ invocation of __swap().
* as the result of a _later_ invocation of z_arch_swap().
*
* Now need to restore the interrupted thread's environment before
* returning control to it at the point where it was interrupted ...
@@ -264,7 +264,7 @@ alreadyOnIntStack:
#if defined(CONFIG_LAZY_FP_SHARING)
/*
* __swap() has restored the floating point registers, if needed.
* z_arch_swap() has restored the floating point registers, if needed.
* Clear X86_THREAD_FLAG_INT in the interrupted thread's state
* since it has served its purpose.
*/

View file

@@ -8,7 +8,7 @@
* @file
* @brief Kernel swapper code for IA-32
*
* This module implements the __swap() routine for the IA-32 architecture.
* This module implements the z_arch_swap() routine for the IA-32 architecture.
*/
#include <kernel_structs.h>
@@ -17,7 +17,7 @@
/* exports (internal APIs) */
GTEXT(__swap)
GTEXT(z_arch_swap)
GTEXT(z_x86_thread_entry_wrapper)
GTEXT(_x86_user_thread_entry_wrapper)
@@ -31,13 +31,13 @@
*
* @brief Initiate a cooperative context switch
*
* The __swap() routine is invoked by various kernel services to effect
* a cooperative context switch. Prior to invoking __swap(), the
* The z_arch_swap() routine is invoked by various kernel services to effect
* a cooperative context switch. Prior to invoking z_arch_swap(), the
* caller disables interrupts (via irq_lock) and the return 'key'
* is passed as a parameter to __swap(). The 'key' actually represents
* is passed as a parameter to z_arch_swap(). The 'key' actually represents
* the EFLAGS register prior to disabling interrupts via a 'cli' instruction.
*
* Given that __swap() is called to effect a cooperative context switch, only
* Given that z_arch_swap() is called to effect a cooperative context switch, only
* the non-volatile integer registers need to be saved in the TCS of the
* outgoing thread. The restoration of the integer registers of the incoming
* thread depends on whether that thread was preemptively context switched out.
@@ -72,7 +72,7 @@
*
* C function prototype:
*
* unsigned int __swap (unsigned int eflags);
* unsigned int z_arch_swap (unsigned int eflags);
*
*/
@@ -85,7 +85,7 @@
pop %edx
pop %eax
.endm
SECTION_FUNC(TEXT, __swap)
SECTION_FUNC(TEXT, z_arch_swap)
#ifdef CONFIG_EXECUTION_BENCHMARKING
/* Save the eax and edx registers before reading the time stamp
* once done pop the values.
@@ -356,12 +356,12 @@ CROHandlingDone:
/*
* %eax may contain one of these values:
*
* - the return value for __swap() that was set up by a call to
* - the return value for z_arch_swap() that was set up by a call to
* z_arch_thread_return_value_set()
* - -EINVAL
*/
/* Utilize the 'eflags' parameter to __swap() */
/* Utilize the 'eflags' parameter to z_arch_swap() */
pushl 4(%esp)
popfl
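The %eax description a few lines up is the consumer side of a simple producer: a hedged sketch of how a kernel service can pre-load the value that a blocked thread's pending z_arch_swap() call will return ('waiter' is an illustrative variable, not from this commit):

/* make the waiter's z_arch_swap() return 0 instead of -EAGAIN */
z_arch_thread_return_value_set(waiter, 0);
/* ...then hand 'waiter' back to the scheduler as usual */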

View file

@@ -695,13 +695,13 @@ config MAX_DOMAIN_PARTITIONS
menu "SMP Options"
config USE_SWITCH
bool "Use new-style _arch_switch instead of __swap"
bool "Use new-style _arch_switch instead of z_arch_swap"
depends on USE_SWITCH_SUPPORTED
help
The _arch_switch() API is a lower level context switching
primitive than the original __swap mechanism. It is required
primitive than the original z_arch_swap mechanism. It is required
for an SMP-aware scheduler, or if the architecture does not
provide __swap. In uniprocessor situations where the
provide z_arch_swap. In uniprocessor situations where the
architecture provides both, _arch_switch incurs somewhat more
overhead and may be slower.
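The either/or nature of this option shows up directly in a kernel header later in this diff; schematically (the z_arch_switch prototype is an assumption for illustration, not text from this commit):

#ifdef CONFIG_USE_SWITCH
/* newer primitive: assumed prototype, for illustration only */
extern void z_arch_switch(void *switch_to, void **switched_from);
#else
/* original primitive, as renamed by this commit */
extern int z_arch_swap(unsigned int key);
#endif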

View file

@@ -275,6 +275,8 @@ extern int z_stack_adjust_initialized;
extern void z_arch_busy_wait(u32_t usec_to_wait);
#endif
int z_arch_swap(unsigned int key);
/**
* TODO: document
*/

View file

@@ -113,7 +113,7 @@ static inline void z_swap_unlocked(void)
#else /* !CONFIG_USE_SWITCH */
extern int __swap(unsigned int key);
extern int z_arch_swap(unsigned int key);
static inline int z_swap_irqlock(unsigned int key)
{
@@ -123,7 +123,7 @@ static inline int z_swap_irqlock(unsigned int key)
#ifndef CONFIG_ARM
sys_trace_thread_switched_out();
#endif
ret = __swap(key);
ret = z_arch_swap(key);
#ifndef CONFIG_ARM
sys_trace_thread_switched_in();
#endif

View file

@@ -14,7 +14,7 @@
*
* The HW models raising an interrupt will "awake the cpu" by calling
posix_interrupt_raised() which will transfer control to the irq handler,
which will run inside SW/Zephyr context, after which a __swap() to whatever
which will run inside SW/Zephyr context, after which a z_arch_swap() to whatever
* Zephyr thread may follow.
* Again, once Zephyr is done, control is given back to the HW models.
*
@@ -143,7 +143,7 @@ void posix_halt_cpu(void)
* => let the "irq handler" check if/what interrupt was raised
* and call the appropriate irq handler.
*
* Note that the interrupt handling may trigger a __swap() to another
* Note that the interrupt handling may trigger a z_arch_swap() to another
* Zephyr thread. When posix_irq_handler() returns, the Zephyr
* kernel has swapped back to this thread again
*/

View file

@@ -20,7 +20,7 @@ Notes:
The test verifies the correct behavior of the thread context switch,
when it is triggered indirectly (by setting the PendSV interrupt
to pending state), as well as when the thread itself triggers its
swap-out (by calling __swap(.)).
swap-out (by calling z_arch_swap(.)).
The test is currently supported only in ARMv7-M and ARMv8-M Mainline
targets.
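The "indirect" trigger the notes mention amounts to pending PendSV by hand. A minimal sketch using standard CMSIS names (assumed here, since the relevant test body is not part of this hunk):

/* pend the PendSV exception: the context switch happens as soon as
 * interrupt priority allows, with no explicit z_arch_swap() call
 */
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
__DSB(); /* make sure the write completes */
__ISB(); /* ...before subsequent instructions execute */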

View file

@@ -9,7 +9,7 @@
#include <arch/arm/cortex_m/cmsis.h>
#include <kernel_structs.h>
#include <offsets_short_arch.h>
#include <ksched.h>
#if !defined(__GNUC__)
#error __FILE__ goes only with Cortex-M GCC
@@ -20,7 +20,6 @@
#define BASEPRI_MODIFIED_2 0x40
#define SWAP_RETVAL 0x1234
extern int __swap(unsigned int key);
extern void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
static struct k_thread alt_thread;
@@ -200,7 +199,7 @@ static void alt_thread_entry(void)
zassert_true(p_ztest_thread->arch.basepri == BASEPRI_MODIFIED_1,
"ztest thread basepri not preserved in swap-out\n");
/* Verify original swap return value (set by __swap()) */
/* Verify original swap return value (set by z_arch_swap()) */
zassert_true(p_ztest_thread->arch.swap_return_value == -EAGAIN,
"ztest thread swap-return-value not preserved in swap-out\n");
#endif
@@ -420,7 +419,7 @@ void test_arm_thread_swap(void)
* This will be verified by the alternative test thread.
*/
register int swap_return_val __asm__("r0") =
__swap(BASEPRI_MODIFIED_1);
z_arch_swap(BASEPRI_MODIFIED_1);
#endif /* CONFIG_NO_OPTIMIZATIONS */