From 62eea121b357eae64fe00d996e2193d2b4638d1e Mon Sep 17 00:00:00 2001 From: Ramesh Thomas Date: Thu, 6 Apr 2017 15:30:27 -0700 Subject: [PATCH] kernel: tickless: Rename _Swap to allow creation of macro Future tickless kernel patches would be inserting some code before the call to Swap. To enable this it will create a macro named the same as the current _Swap, which would first call the tickless kernel code and then call the real __swap(). Jira: ZEP-339 Change-Id: Id778bfcee4f88982c958fcf22d7f04deb4bd572f Signed-off-by: Ramesh Thomas --- arch/arc/core/swap.S | 26 +++++++++++++------------- arch/arm/core/swap.S | 20 ++++++++++---------- arch/nios2/core/exception.S | 6 +++--- arch/nios2/core/swap.S | 10 +++++----- arch/riscv32/core/swap.S | 20 ++++++++++---------- arch/x86/core/intstub.S | 14 +++++++------- arch/x86/core/swap.S | 26 +++++++++++++------------- arch/xtensa/core/swap.S | 12 ++++++------ kernel/include/nano_internal.h | 4 +++- 9 files changed, 70 insertions(+), 68 deletions(-) diff --git a/arch/arc/core/swap.S b/arch/arc/core/swap.S index 63278b1cf97..ca775406710 100644 --- a/arch/arc/core/swap.S +++ b/arch/arc/core/swap.S @@ -21,7 +21,7 @@ #include #include -GTEXT(_Swap) +GTEXT(__swap) GDATA(_k_neg_eagain) GDATA(_kernel) @@ -29,25 +29,25 @@ GDATA(_kernel) * * @brief Initiate a cooperative context switch * - * The _Swap() routine is invoked by various kernel services to effect - * a cooperative context switch. Prior to invoking _Swap(), the caller + * The __swap() routine is invoked by various kernel services to effect - * a cooperative context switch. Prior to invoking __swap(), the caller * disables interrupts via irq_lock() and the return 'key' is passed as a - * parameter to _Swap(). The key is in fact the value stored in the register + * parameter to __swap(). The key is in fact the value stored in the register * operand of a CLRI instruction. * * It stores the intlock key parameter into current->intlock_key.
- * Given that _Swap() is called to effect a cooperative context switch, + * Given that __swap() is called to effect a cooperative context switch, * the caller-saved integer registers are saved on the stack by the function - * call preamble to _Swap(). This creates a custom stack frame that will be - * popped when returning from _Swap(), but is not suitable for handling a return - * from an exception. Thus, the fact that the thread is pending because of a - * cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in - * the relinquish_cause of the thread's k_thread structure. The + * call preamble to __swap(). This creates a custom stack frame that will be + * popped when returning from __swap(), but is not suitable for handling a + * return from an exception. Thus, the fact that the thread is pending because + * of a cooperative call to __swap() has to be recorded via the _CAUSE_COOP code + * in the relinquish_cause of the thread's k_thread structure. The * _IrqExit()/_FirqExit() code will take care of doing the right thing to * restore the thread status. * - * When _Swap() is invoked, we know the decision to perform a context switch or + * When __swap() is invoked, we know the decision to perform a context switch or * not has already been taken and a context switch must happen. 
* * @return may contain a return value setup by a call to @@ -55,11 +55,11 @@ GDATA(_kernel) * * C function prototype: * - * unsigned int _Swap (unsigned int key); + * unsigned int __swap (unsigned int key); * */ -SECTION_FUNC(TEXT, _Swap) +SECTION_FUNC(TEXT, __swap) /* interrupts are locked, interrupt key is in r0 */ diff --git a/arch/arm/core/swap.S b/arch/arm/core/swap.S index 1ecf7877164..e24a53d6b57 100644 --- a/arch/arm/core/swap.S +++ b/arch/arm/core/swap.S @@ -19,7 +19,7 @@ _ASM_FILE_PROLOGUE -GTEXT(_Swap) +GTEXT(__swap) #if defined(CONFIG_ARMV6_M) #elif defined(CONFIG_ARMV7_M) GTEXT(__svc) @@ -186,7 +186,7 @@ _thread_irq_disabled: * * @brief Service call handler * - * The service call (svc) is only used in _Swap() to enter handler mode so we + * The service call (svc) is only used in __swap() to enter handler mode so we * can go through the PendSV exception to perform a context switch. * * @return N/A @@ -257,13 +257,13 @@ _oops: * * @brief Initiate a cooperative context switch * - * The _Swap() routine is invoked by various kernel services to effect - * a cooperative context context switch. Prior to invoking _Swap(), the caller + * The __swap() routine is invoked by various kernel services to effect + * a cooperative context context switch. Prior to invoking __swap(), the caller * disables interrupts via irq_lock() and the return 'key' is passed as a - * parameter to _Swap(). The 'key' actually represents the BASEPRI register + * parameter to __swap(). The 'key' actually represents the BASEPRI register * prior to disabling interrupts via the BASEPRI mechanism. * - * _Swap() itself does not do much. + * __swap() itself does not do much. * * It simply stores the intlock key (the BASEPRI value) parameter into * current->basepri, and then triggers a service call exception (svc) to setup @@ -273,7 +273,7 @@ _oops: * __pendsv all come from handling an interrupt, which means we know the * interrupts were not locked: in that case the BASEPRI value is 0. 
* - * Given that _Swap() is called to effect a cooperative context switch, + * Given that __swap() is called to effect a cooperative context switch, * only the caller-saved integer registers need to be saved in the thread of the * outgoing thread. This is all performed by the hardware, which stores it in * its exception stack frame, created when handling the svc exception. @@ -286,18 +286,18 @@ _oops: * * C function prototype: * - * unsigned int _Swap (unsigned int basepri); + * unsigned int __swap (unsigned int basepri); * */ -SECTION_FUNC(TEXT, _Swap) +SECTION_FUNC(TEXT, __swap) ldr r1, =_kernel ldr r2, [r1, #_kernel_offset_to_current] str r0, [r2, #_thread_offset_to_basepri] /* - * Set _Swap()'s default return code to -EAGAIN. This eliminates the need + * Set __swap()'s default return code to -EAGAIN. This eliminates the need * for the timeout code to set it itself. */ ldr r1, =_k_neg_eagain diff --git a/arch/nios2/core/exception.S b/arch/nios2/core/exception.S index 58edec60115..9945da9308d 100644 --- a/arch/nios2/core/exception.S +++ b/arch/nios2/core/exception.S @@ -13,7 +13,7 @@ GTEXT(_exception) /* import */ GTEXT(_Fault) -GTEXT(_Swap) +GTEXT(__swap) #ifdef CONFIG_IRQ_OFFLOAD GTEXT(_irq_do_offload) GTEXT(_offload_routine) @@ -135,7 +135,7 @@ on_irq_stack: /* * A context reschedule is required: keep the volatile registers of * the interrupted thread on the context's stack. Utilize - * the existing _Swap() primitive to save the remaining + * the existing __swap() primitive to save the remaining * thread's registers (including floating point) and perform * a switch to the new thread. 
*/ @@ -152,7 +152,7 @@ on_irq_stack: */ mov r4, et - call _Swap + call __swap jmpi _exception_exit #else jmpi no_reschedule diff --git a/arch/nios2/core/swap.S b/arch/nios2/core/swap.S index f3e126fad68..dcc2f0e1776 100644 --- a/arch/nios2/core/swap.S +++ b/arch/nios2/core/swap.S @@ -9,18 +9,18 @@ #include /* exports */ -GTEXT(_Swap) +GTEXT(__swap) GTEXT(_thread_entry_wrapper) /* imports */ GTEXT(_sys_k_event_logger_context_switch) GTEXT(_k_neg_eagain) -/* unsigned int _Swap(unsigned int key) +/* unsigned int __swap(unsigned int key) * * Always called with interrupts locked */ -SECTION_FUNC(exception.other, _Swap) +SECTION_FUNC(exception.other, __swap) /* Get a reference to _kernel in r10 */ movhi r10, %hi(_kernel) @@ -30,7 +30,7 @@ SECTION_FUNC(exception.other, _Swap) ldw r11, _kernel_offset_to_current(r10) /* Store all the callee saved registers. We either got here via - * an exception or from a cooperative invocation of _Swap() from C + * an exception or from a cooperative invocation of __swap() from C * domain, so all the caller-saved registers have already been * saved by the exception asm or the calling C code already. */ @@ -93,7 +93,7 @@ SECTION_FUNC(exception.other, _Swap) ldw sp, _thread_offset_to_sp(r2) /* We need to irq_unlock(current->coopReg.key); - * key was supplied as argument to _Swap(). Fetch it. + * key was supplied as argument to __swap(). Fetch it. 
*/ ldw r3, _thread_offset_to_key(r2) diff --git a/arch/riscv32/core/swap.S b/arch/riscv32/core/swap.S index 1d18f811fb5..fc6742a7db0 100644 --- a/arch/riscv32/core/swap.S +++ b/arch/riscv32/core/swap.S @@ -9,18 +9,18 @@ #include /* exports */ -GTEXT(_Swap) +GTEXT(__swap) GTEXT(_thread_entry_wrapper) /* Use ABI name of registers for the sake of simplicity */ /* - * unsigned int _Swap(unsigned int key) + * unsigned int __swap(unsigned int key) * * Always called with interrupts locked * key is stored in a0 register */ -SECTION_FUNC(exception.other, _Swap) +SECTION_FUNC(exception.other, __swap) /* Make a system call to perform context switch */ ecall @@ -30,16 +30,16 @@ SECTION_FUNC(exception.other, _Swap) * Restored register a0 contains IRQ lock state of thread. * * Prior to unlocking irq, load return value of - * _Swap to temp register t2 (from _thread_offset_to_swap_return_value). - * Normally, it should be -EAGAIN, unless someone has previously - * called _set_thread_return_value(..). + * __swap to temp register t2 (from + * _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN, + * unless someone has previously called _set_thread_return_value(..). */ la t0, _kernel /* Get pointer to _kernel.current */ lw t1, _kernel_offset_to_current(t0) - /* Load return value of _Swap function in temp register t2 */ + /* Load return value of __swap function in temp register t2 */ lw t2, _thread_offset_to_swap_return_value(t1) /* @@ -62,9 +62,9 @@ SECTION_FUNC(exception.other, _Swap) SECTION_FUNC(TEXT, _thread_entry_wrapper) /* * _thread_entry_wrapper is called for every new thread upon the return - * of _Swap or ISR. Its address, as well as its input function arguments - * thread_entry_t, void *, void *, void * are restored from the thread - * stack (initialized via function _thread). + * of __swap or ISR. 
Its address, as well as its input function + * arguments thread_entry_t, void *, void *, void * are restored from + * the thread stack (initialized via function _thread). * In this case, thread_entry_t, * void *, void * and void * are stored * in registers a0, a1, a2 and a3. These registers are used as arguments * to function _thread_entry. Hence, just call _thread_entry with diff --git a/arch/x86/core/intstub.S b/arch/x86/core/intstub.S index 74c1661a315..717056363ce 100644 --- a/arch/x86/core/intstub.S +++ b/arch/x86/core/intstub.S @@ -29,7 +29,7 @@ /* externs */ - GTEXT(_Swap) + GTEXT(__swap) #ifdef CONFIG_SYS_POWER_MANAGEMENT GTEXT(_sys_power_save_idle_exit) @@ -123,7 +123,7 @@ SECTION_FUNC(TEXT, _interrupt_enter) /* Push EDI as we will use it for scratch space. * Rest of the callee-saved regs get saved by invocation of C - * functions (isr handler, _Swap(), etc) + * functions (isr handler, __swap(), etc) */ pushl %edi @@ -289,7 +289,7 @@ alreadyOnIntStack: /* * Set the _INT_ACTIVE bit in the k_thread to allow the upcoming call to - * _Swap() to determine whether non-floating registers need to be + * __swap() to determine whether non-floating registers need to be * preserved using the lazy save/restore algorithm, or to indicate to * debug tools that a preemptive context switch has occurred. */ @@ -301,7 +301,7 @@ alreadyOnIntStack: /* * A context reschedule is required: keep the volatile registers of * the interrupted thread on the context's stack. Utilize - * the existing _Swap() primitive to save the remaining + * the existing __swap() primitive to save the remaining * thread's registers (including floating point) and perform * a switch to the new thread. */ @@ -321,14 +321,14 @@ alreadyOnIntStack: */ popl %eax #endif - call _Swap + call __swap #ifndef CONFIG_X86_IAMCU addl $4, %esp /* pop KERNEL_LOCK_KEY argument */ #endif /* * The interrupted thread has now been scheduled, - * as the result of a _later_ invocation of _Swap(). 
+ * as the result of a _later_ invocation of __swap(). * * Now need to restore the interrupted thread's environment before * returning control to it at the point where it was interrupted ... @@ -337,7 +337,7 @@ alreadyOnIntStack: #if ( defined(CONFIG_FP_SHARING) || \ defined(CONFIG_GDB_INFO) ) /* - * _Swap() has restored the floating point registers, if needed. + * __swap() has restored the floating point registers, if needed. * Clear the _INT_ACTIVE bit in the interrupted thread's state * since it has served its purpose. */ diff --git a/arch/x86/core/swap.S b/arch/x86/core/swap.S index 06d66b1b963..6c931637382 100644 --- a/arch/x86/core/swap.S +++ b/arch/x86/core/swap.S @@ -8,10 +8,10 @@ * @file * @brief Kernel swapper code for IA-32 * - * This module implements the _Swap() routine for the IA-32 architecture. + * This module implements the __swap() routine for the IA-32 architecture. * * Note that the file arch/x86/include/swapstk.h defines - * a representation of the save stack frame generated by _Swap() in order + * a representation of the save stack frame generated by __swap() in order * to generate offsets (in the form of absolute symbols) for consumption by * host tools. Please update swapstk.h if changing the structure of the * save frame on the stack. @@ -23,7 +23,7 @@ /* exports (internal APIs) */ - GTEXT(_Swap) + GTEXT(__swap) /* externs */ GDATA(_k_neg_eagain) @@ -32,13 +32,13 @@ * * @brief Initiate a cooperative context switch * - * The _Swap() routine is invoked by various kernel services to effect - * a cooperative context switch. Prior to invoking _Swap(), the + * The __swap() routine is invoked by various kernel services to effect + * a cooperative context switch. Prior to invoking __swap(), the * caller disables interrupts (via irq_lock) and the return 'key' - * is passed as a parameter to _Swap(). The 'key' actually represents + * is passed as a parameter to __swap(). 
The 'key' actually represents * the EFLAGS register prior to disabling interrupts via a 'cli' instruction. * - * Given that _Swap() is called to effect a cooperative context switch, only + * Given that __swap() is called to effect a cooperative context switch, only * the non-volatile integer registers need to be saved in the TCS of the * outgoing thread. The restoration of the integer registers of the incoming * thread depends on whether that thread was preemptively context switched out. @@ -73,11 +73,11 @@ * * C function prototype: * - * unsigned int _Swap (unsigned int eflags); + * unsigned int __swap (unsigned int eflags); * */ -SECTION_FUNC(TEXT, _Swap) +SECTION_FUNC(TEXT, __swap) #ifdef CONFIG_X86_IAMCU /* save EFLAGS on stack right before return address, just as SYSV would * have done @@ -305,24 +305,24 @@ CROHandlingDone: /* * %eax may contain one of these values: * - * - the return value for _Swap() that was set up by a call to + * - the return value for __swap() that was set up by a call to * _set_thread_return_value() * - in legacy kernel, same value as %ebx, which is non-volatile * - in unified kernel, -EINVAL */ - /* Utilize the 'eflags' parameter to _Swap() */ + /* Utilize the 'eflags' parameter to __swap() */ pushl 4(%esp) #ifdef CONFIG_INT_LATENCY_BENCHMARK testl $0x200, (%esp) jz skipIntLatencyStop - /* save %eax since it used as the return value for _Swap */ + /* save %eax since it used as the return value for __swap */ pushl %eax /* interrupts are being reenabled, stop accumulating time */ call _int_latency_stop - /* restore _Swap's %eax */ + /* restore __swap's %eax */ popl %eax skipIntLatencyStop: diff --git a/arch/xtensa/core/swap.S b/arch/xtensa/core/swap.S index 5992c7bcd01..319248c6a9e 100644 --- a/arch/xtensa/core/swap.S +++ b/arch/xtensa/core/swap.S @@ -7,7 +7,7 @@ * @file * @brief kernel swapper code for Xtensa * - * This module implements the _Swap() routine for the Xtensa architecture. 
+ * This module implements the __swap() routine for the Xtensa architecture. */ #include @@ -15,11 +15,11 @@ #include .extern _kernel -/* unsigned int _Swap (unsigned int basepri); */ - .globl _Swap - .type _Swap,@function +/* unsigned int __swap (unsigned int basepri); */ + .globl __swap + .type __swap,@function .align 4 -_Swap: +__swap: #ifdef __XTENSA_CALL0_ABI__ addi sp, sp, -XT_SOL_FRMSZ #else @@ -61,7 +61,7 @@ _Swap: s32i a3, sp, XT_SOL_exit /* 0 to flag as solicited frame */ s32i sp, a4, THREAD_OFFSET(sp) /* current->arch.topOfStack := sp */ /* - * Set _Swap()'s default return code to -EAGAIN. This eliminates the + * Set __swap()'s default return code to -EAGAIN. This eliminates the * need for the timeout code to set it itself. */ movi a3, -11 /* a3 := -EAGAIN. TODO: Use a macro here insted of 11 */ diff --git a/kernel/include/nano_internal.h b/kernel/include/nano_internal.h index 89cc0421ecc..a8b306cfa6f 100644 --- a/kernel/include/nano_internal.h +++ b/kernel/include/nano_internal.h @@ -50,7 +50,9 @@ extern void _new_thread(char *pStack, size_t stackSize, /* context switching and scheduling-related routines */ -extern unsigned int _Swap(unsigned int); +extern unsigned int __swap(unsigned int key); + +#define _Swap(x) __swap(x) /* set and clear essential fiber/task flag */