kernel: tickless: Rename _Swap to allow creation of macro
Future tickless kernel patches will insert code before the call to swap. To enable this, a macro named _Swap (the current function name) will be created; it will first call the tickless kernel code and then call the real __swap().

Jira: ZEP-339
Change-Id: Id778bfcee4f88982c958fcf22d7f04deb4bd572f
Signed-off-by: Ramesh Thomas <ramesh.thomas@intel.com>
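The rename itself is mechanical; the compatibility shim added in the final hunk keeps existing callers of _Swap() compiling unchanged:

    extern unsigned int __swap(unsigned int key);
    #define _Swap(x) __swap(x)

A follow-up tickless patch could then hook its bookkeeping into the macro. A minimal sketch, assuming a hypothetical helper name that is not part of this commit:

    /* hypothetical future form: run tickless accounting, then do the real swap */
    #define _Swap(x) (_update_time_slice_before_swap(), __swap(x))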
This commit is contained in:
parent 0b3322ecae
commit 62eea121b3
9 changed files with 70 additions and 68 deletions
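For context when reading the hunks below: the comment blocks being renamed describe the calling convention. The caller locks interrupts and passes the resulting key, and the swap returns -EAGAIN by default unless _set_thread_return_value() overrides it. A minimal sketch of that caller pattern in C (the wait-queue helper is hypothetical, named here only for illustration):

    unsigned int key = irq_lock();        /* lock interrupts, keep the key */
    _pend_current_thread(&wait_q, ticks); /* hypothetical: queue the thread */
    int ret = _Swap(key);                 /* switch away; resumes later with
                                           * -EAGAIN on timeout, or the value
                                           * set via _set_thread_return_value() */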
@@ -21,7 +21,7 @@
 #include <v2/irq.h>
 #include <swap_macros.h>

-GTEXT(_Swap)
+GTEXT(__swap)
 GDATA(_k_neg_eagain)
 GDATA(_kernel)

@@ -29,25 +29,25 @@ GDATA(_kernel)
  *
  * @brief Initiate a cooperative context switch
  *
- * The _Swap() routine is invoked by various kernel services to effect
- * a cooperative context switch. Prior to invoking _Swap(), the caller
+ * The __swap() routine is invoked by various kernel services to effect
+ * a cooperative context switch. Prior to invoking __swap(), the caller
  * disables interrupts via irq_lock() and the return 'key' is passed as a
- * parameter to _Swap(). The key is in fact the value stored in the register
+ * parameter to __swap(). The key is in fact the value stored in the register
  * operand of a CLRI instruction.
  *
  * It stores the intlock key parameter into current->intlock_key.
  *
- * Given that _Swap() is called to effect a cooperative context switch,
+ * Given that __swap() is called to effect a cooperative context switch,
  * the caller-saved integer registers are saved on the stack by the function
- * call preamble to _Swap(). This creates a custom stack frame that will be
- * popped when returning from _Swap(), but is not suitable for handling a return
- * from an exception. Thus, the fact that the thread is pending because of a
- * cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in
- * the relinquish_cause of the thread's k_thread structure. The
+ * call preamble to __swap(). This creates a custom stack frame that will be
+ * popped when returning from __swap(), but is not suitable for handling a
+ * return from an exception. Thus, the fact that the thread is pending because
+ * of a cooperative call to __swap() has to be recorded via the _CAUSE_COOP code
+ * in the relinquish_cause of the thread's k_thread structure. The
  * _IrqExit()/_FirqExit() code will take care of doing the right thing to
  * restore the thread status.
  *
- * When _Swap() is invoked, we know the decision to perform a context switch or
+ * When __swap() is invoked, we know the decision to perform a context switch or
  * not has already been taken and a context switch must happen.
  *
  * @return may contain a return value setup by a call to
@@ -55,11 +55,11 @@ GDATA(_kernel)
  *
  * C function prototype:
  *
- * unsigned int _Swap (unsigned int key);
+ * unsigned int __swap (unsigned int key);
  *
  */

-SECTION_FUNC(TEXT, _Swap)
+SECTION_FUNC(TEXT, __swap)

         /* interrupts are locked, interrupt key is in r0 */

@@ -19,7 +19,7 @@

 _ASM_FILE_PROLOGUE

-GTEXT(_Swap)
+GTEXT(__swap)
 #if defined(CONFIG_ARMV6_M)
 #elif defined(CONFIG_ARMV7_M)
 GTEXT(__svc)
@@ -186,7 +186,7 @@ _thread_irq_disabled:
  *
  * @brief Service call handler
  *
- * The service call (svc) is only used in _Swap() to enter handler mode so we
+ * The service call (svc) is only used in __swap() to enter handler mode so we
  * can go through the PendSV exception to perform a context switch.
  *
  * @return N/A
@@ -257,13 +257,13 @@ _oops:
  *
  * @brief Initiate a cooperative context switch
  *
- * The _Swap() routine is invoked by various kernel services to effect
- * a cooperative context context switch. Prior to invoking _Swap(), the caller
+ * The __swap() routine is invoked by various kernel services to effect
+ * a cooperative context context switch. Prior to invoking __swap(), the caller
  * disables interrupts via irq_lock() and the return 'key' is passed as a
- * parameter to _Swap(). The 'key' actually represents the BASEPRI register
+ * parameter to __swap(). The 'key' actually represents the BASEPRI register
  * prior to disabling interrupts via the BASEPRI mechanism.
  *
- * _Swap() itself does not do much.
+ * __swap() itself does not do much.
  *
  * It simply stores the intlock key (the BASEPRI value) parameter into
  * current->basepri, and then triggers a service call exception (svc) to setup
@@ -273,7 +273,7 @@ _oops:
  * __pendsv all come from handling an interrupt, which means we know the
  * interrupts were not locked: in that case the BASEPRI value is 0.
  *
- * Given that _Swap() is called to effect a cooperative context switch,
+ * Given that __swap() is called to effect a cooperative context switch,
  * only the caller-saved integer registers need to be saved in the thread of the
  * outgoing thread. This is all performed by the hardware, which stores it in
  * its exception stack frame, created when handling the svc exception.
@@ -286,18 +286,18 @@ _oops:
  *
  * C function prototype:
  *
- * unsigned int _Swap (unsigned int basepri);
+ * unsigned int __swap (unsigned int basepri);
  *
  */

-SECTION_FUNC(TEXT, _Swap)
+SECTION_FUNC(TEXT, __swap)

         ldr r1, =_kernel
         ldr r2, [r1, #_kernel_offset_to_current]
         str r0, [r2, #_thread_offset_to_basepri]

         /*
-         * Set _Swap()'s default return code to -EAGAIN. This eliminates the need
+         * Set __swap()'s default return code to -EAGAIN. This eliminates the need
          * for the timeout code to set it itself.
          */
         ldr r1, =_k_neg_eagain

@@ -13,7 +13,7 @@ GTEXT(_exception)

 /* import */
 GTEXT(_Fault)
-GTEXT(_Swap)
+GTEXT(__swap)
 #ifdef CONFIG_IRQ_OFFLOAD
 GTEXT(_irq_do_offload)
 GTEXT(_offload_routine)
@@ -135,7 +135,7 @@ on_irq_stack:
         /*
          * A context reschedule is required: keep the volatile registers of
          * the interrupted thread on the context's stack. Utilize
-         * the existing _Swap() primitive to save the remaining
+         * the existing __swap() primitive to save the remaining
          * thread's registers (including floating point) and perform
          * a switch to the new thread.
          */
@@ -152,7 +152,7 @@ on_irq_stack:
          */
         mov r4, et

-        call _Swap
+        call __swap
         jmpi _exception_exit
 #else
         jmpi no_reschedule

@@ -9,18 +9,18 @@
 #include <offsets_short.h>

 /* exports */
-GTEXT(_Swap)
+GTEXT(__swap)
 GTEXT(_thread_entry_wrapper)

 /* imports */
 GTEXT(_sys_k_event_logger_context_switch)
 GTEXT(_k_neg_eagain)

-/* unsigned int _Swap(unsigned int key)
+/* unsigned int __swap(unsigned int key)
  *
  * Always called with interrupts locked
  */
-SECTION_FUNC(exception.other, _Swap)
+SECTION_FUNC(exception.other, __swap)

         /* Get a reference to _kernel in r10 */
         movhi r10, %hi(_kernel)
@@ -30,7 +30,7 @@ SECTION_FUNC(exception.other, _Swap)
         ldw r11, _kernel_offset_to_current(r10)

         /* Store all the callee saved registers. We either got here via
-         * an exception or from a cooperative invocation of _Swap() from C
+         * an exception or from a cooperative invocation of __swap() from C
          * domain, so all the caller-saved registers have already been
          * saved by the exception asm or the calling C code already.
          */
@@ -93,7 +93,7 @@ SECTION_FUNC(exception.other, _Swap)
         ldw sp, _thread_offset_to_sp(r2)

         /* We need to irq_unlock(current->coopReg.key);
-         * key was supplied as argument to _Swap(). Fetch it.
+         * key was supplied as argument to __swap(). Fetch it.
          */
         ldw r3, _thread_offset_to_key(r2)

@@ -9,18 +9,18 @@
 #include <offsets_short.h>

 /* exports */
-GTEXT(_Swap)
+GTEXT(__swap)
 GTEXT(_thread_entry_wrapper)

 /* Use ABI name of registers for the sake of simplicity */

 /*
- * unsigned int _Swap(unsigned int key)
+ * unsigned int __swap(unsigned int key)
  *
  * Always called with interrupts locked
  * key is stored in a0 register
  */
-SECTION_FUNC(exception.other, _Swap)
+SECTION_FUNC(exception.other, __swap)

         /* Make a system call to perform context switch */
         ecall
@@ -30,16 +30,16 @@ SECTION_FUNC(exception.other, _Swap)
          * Restored register a0 contains IRQ lock state of thread.
          *
          * Prior to unlocking irq, load return value of
-         * _Swap to temp register t2 (from _thread_offset_to_swap_return_value).
-         * Normally, it should be -EAGAIN, unless someone has previously
-         * called _set_thread_return_value(..).
+         * __swap to temp register t2 (from
+         * _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN,
+         * unless someone has previously called _set_thread_return_value(..).
          */
         la t0, _kernel

         /* Get pointer to _kernel.current */
         lw t1, _kernel_offset_to_current(t0)

-        /* Load return value of _Swap function in temp register t2 */
+        /* Load return value of __swap function in temp register t2 */
         lw t2, _thread_offset_to_swap_return_value(t1)

         /*
@@ -62,9 +62,9 @@ SECTION_FUNC(exception.other, _Swap)
 SECTION_FUNC(TEXT, _thread_entry_wrapper)
         /*
          * _thread_entry_wrapper is called for every new thread upon the return
-         * of _Swap or ISR. Its address, as well as its input function arguments
-         * thread_entry_t, void *, void *, void * are restored from the thread
-         * stack (initialized via function _thread).
+         * of __swap or ISR. Its address, as well as its input function
+         * arguments thread_entry_t, void *, void *, void * are restored from
+         * the thread stack (initialized via function _thread).
          * In this case, thread_entry_t, * void *, void * and void * are stored
          * in registers a0, a1, a2 and a3. These registers are used as arguments
          * to function _thread_entry. Hence, just call _thread_entry with

@@ -29,7 +29,7 @@

         /* externs */

-        GTEXT(_Swap)
+        GTEXT(__swap)

 #ifdef CONFIG_SYS_POWER_MANAGEMENT
         GTEXT(_sys_power_save_idle_exit)
@@ -123,7 +123,7 @@ SECTION_FUNC(TEXT, _interrupt_enter)

         /* Push EDI as we will use it for scratch space.
          * Rest of the callee-saved regs get saved by invocation of C
-         * functions (isr handler, _Swap(), etc)
+         * functions (isr handler, __swap(), etc)
          */
         pushl %edi

@@ -289,7 +289,7 @@ alreadyOnIntStack:

         /*
          * Set the _INT_ACTIVE bit in the k_thread to allow the upcoming call to
-         * _Swap() to determine whether non-floating registers need to be
+         * __swap() to determine whether non-floating registers need to be
          * preserved using the lazy save/restore algorithm, or to indicate to
          * debug tools that a preemptive context switch has occurred.
          */
@@ -301,7 +301,7 @@ alreadyOnIntStack:
         /*
          * A context reschedule is required: keep the volatile registers of
          * the interrupted thread on the context's stack. Utilize
-         * the existing _Swap() primitive to save the remaining
+         * the existing __swap() primitive to save the remaining
          * thread's registers (including floating point) and perform
          * a switch to the new thread.
          */
@@ -321,14 +321,14 @@ alreadyOnIntStack:
          */
         popl %eax
 #endif
-        call _Swap
+        call __swap

 #ifndef CONFIG_X86_IAMCU
         addl $4, %esp /* pop KERNEL_LOCK_KEY argument */
 #endif
         /*
          * The interrupted thread has now been scheduled,
-         * as the result of a _later_ invocation of _Swap().
+         * as the result of a _later_ invocation of __swap().
          *
          * Now need to restore the interrupted thread's environment before
          * returning control to it at the point where it was interrupted ...
@@ -337,7 +337,7 @@ alreadyOnIntStack:
 #if ( defined(CONFIG_FP_SHARING) || \
       defined(CONFIG_GDB_INFO) )
         /*
-         * _Swap() has restored the floating point registers, if needed.
+         * __swap() has restored the floating point registers, if needed.
          * Clear the _INT_ACTIVE bit in the interrupted thread's state
          * since it has served its purpose.
          */

@@ -8,10 +8,10 @@
  * @file
  * @brief Kernel swapper code for IA-32
  *
- * This module implements the _Swap() routine for the IA-32 architecture.
+ * This module implements the __swap() routine for the IA-32 architecture.
  *
  * Note that the file arch/x86/include/swapstk.h defines
- * a representation of the save stack frame generated by _Swap() in order
+ * a representation of the save stack frame generated by __swap() in order
  * to generate offsets (in the form of absolute symbols) for consumption by
  * host tools. Please update swapstk.h if changing the structure of the
  * save frame on the stack.
@@ -23,7 +23,7 @@

         /* exports (internal APIs) */

-        GTEXT(_Swap)
+        GTEXT(__swap)

         /* externs */
         GDATA(_k_neg_eagain)
@@ -32,13 +32,13 @@
  *
  * @brief Initiate a cooperative context switch
  *
- * The _Swap() routine is invoked by various kernel services to effect
- * a cooperative context switch. Prior to invoking _Swap(), the
+ * The __swap() routine is invoked by various kernel services to effect
+ * a cooperative context switch. Prior to invoking __swap(), the
  * caller disables interrupts (via irq_lock) and the return 'key'
- * is passed as a parameter to _Swap(). The 'key' actually represents
+ * is passed as a parameter to __swap(). The 'key' actually represents
  * the EFLAGS register prior to disabling interrupts via a 'cli' instruction.
  *
- * Given that _Swap() is called to effect a cooperative context switch, only
+ * Given that __swap() is called to effect a cooperative context switch, only
  * the non-volatile integer registers need to be saved in the TCS of the
  * outgoing thread. The restoration of the integer registers of the incoming
  * thread depends on whether that thread was preemptively context switched out.
@@ -73,11 +73,11 @@
  *
  * C function prototype:
  *
- * unsigned int _Swap (unsigned int eflags);
+ * unsigned int __swap (unsigned int eflags);
  *
  */

-SECTION_FUNC(TEXT, _Swap)
+SECTION_FUNC(TEXT, __swap)
 #ifdef CONFIG_X86_IAMCU
         /* save EFLAGS on stack right before return address, just as SYSV would
          * have done
@@ -305,24 +305,24 @@ CROHandlingDone:
         /*
          * %eax may contain one of these values:
          *
-         * - the return value for _Swap() that was set up by a call to
+         * - the return value for __swap() that was set up by a call to
          *   _set_thread_return_value()
          * - in legacy kernel, same value as %ebx, which is non-volatile
          * - in unified kernel, -EINVAL
          */

-        /* Utilize the 'eflags' parameter to _Swap() */
+        /* Utilize the 'eflags' parameter to __swap() */

         pushl 4(%esp)
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
         testl $0x200, (%esp)
         jz skipIntLatencyStop

-        /* save %eax since it used as the return value for _Swap */
+        /* save %eax since it used as the return value for __swap */
         pushl %eax
         /* interrupts are being reenabled, stop accumulating time */
         call _int_latency_stop
-        /* restore _Swap's %eax */
+        /* restore __swap's %eax */
         popl %eax

 skipIntLatencyStop:

@@ -7,7 +7,7 @@
  * @file
  * @brief kernel swapper code for Xtensa
  *
- * This module implements the _Swap() routine for the Xtensa architecture.
+ * This module implements the __swap() routine for the Xtensa architecture.
  */

 #include <xtensa_context.h>
@@ -15,11 +15,11 @@
 #include <offsets_short.h>

 .extern _kernel
-/* unsigned int _Swap (unsigned int basepri); */
-.globl _Swap
-.type _Swap,@function
+/* unsigned int __swap (unsigned int basepri); */
+.globl __swap
+.type __swap,@function
 .align 4
-_Swap:
+__swap:
 #ifdef __XTENSA_CALL0_ABI__
         addi sp, sp, -XT_SOL_FRMSZ
 #else
@@ -61,7 +61,7 @@ _Swap:
         s32i a3, sp, XT_SOL_exit /* 0 to flag as solicited frame */
         s32i sp, a4, THREAD_OFFSET(sp) /* current->arch.topOfStack := sp */
         /*
-         * Set _Swap()'s default return code to -EAGAIN. This eliminates the
+         * Set __swap()'s default return code to -EAGAIN. This eliminates the
          * need for the timeout code to set it itself.
          */
         movi a3, -11 /* a3 := -EAGAIN. TODO: Use a macro here insted of 11 */

@@ -50,7 +50,9 @@ extern void _new_thread(char *pStack, size_t stackSize,

 /* context switching and scheduling-related routines */

-extern unsigned int _Swap(unsigned int);
+extern unsigned int __swap(unsigned int key);

+#define _Swap(x) __swap(x)
+
 /* set and clear essential fiber/task flag */
