From 2c5086cc65f1745e3fe8aca8eefb5a79b5ce7ec6 Mon Sep 17 00:00:00 2001 From: Benjamin Walsh Date: Wed, 12 Aug 2015 18:31:41 -0400 Subject: [PATCH] irq: remove non-inline irq_lock/unlock The inline versions are renamed to remove the _inline suffix, and the non-inline versions are removed from the code base. Change-Id: I7314b96c42835f15df4c537ec11ab7961d4ee60f Signed-off-by: Benjamin Walsh --- arch/arc/core/Makefile | 2 +- arch/arc/core/irq_lock.S | 97 ----------------------- arch/arm/core/Makefile | 2 +- arch/arm/core/basepri.S | 95 ---------------------- arch/x86/core/intstub.S | 66 --------------- include/arch/arc/v2/irq.h | 47 +++++++---- include/arch/arm/CortexM/asm_inline_gcc.h | 30 ++++--- include/arch/arm/CortexM/irq.h | 5 -- include/arch/x86/arch.h | 37 +++------ include/arch/x86/asm_inline_gcc.h | 40 +++------- 10 files changed, 73 insertions(+), 348 deletions(-) delete mode 100644 arch/arc/core/irq_lock.S delete mode 100644 arch/arm/core/basepri.S diff --git a/arch/arc/core/Makefile b/arch/arc/core/Makefile index 2fcd434398a..4ff8c06d79d 100644 --- a/arch/arc/core/Makefile +++ b/arch/arc/core/Makefile @@ -3,6 +3,6 @@ ccflags-y += -I$(srctree)/kernel/microkernel/include obj-y += atomic.o context.o context_wrapper.o \ cpu_idle.o fast_irq.o fatal.o fault.o \ - fault_s.o ffs.o irq_lock.o irq_manage.o \ + fault_s.o ffs.o irq_manage.o \ isr_wrapper.o regular_irq.o swap_macros.h swap.o \ sys_fatal_error_handler.o diff --git a/arch/arc/core/irq_lock.S b/arch/arc/core/irq_lock.S deleted file mode 100644 index fa40e5b8f6e..00000000000 --- a/arch/arc/core/irq_lock.S +++ /dev/null @@ -1,97 +0,0 @@ -/* irq_lock.S - interrupt locking */ - -/* - * Copyright (c) 2014 Wind River Systems, Inc. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1) Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2) Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3) Neither the name of Wind River Systems nor the names of its contributors - * may be used to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _IRQ_LOCK__H_ -#define _IRQ_LOCK__H_ - -#define _ASMLANGUAGE - -#include -#include -#include -#include - -/** - * - * @brief Disable all interrupts on the local CPU - * - * This routine disables interrupts. It can be called from either interrupt, - * task or fiber level. 
This routine returns an architecture-dependent - * lock-out key representing the "interrupt disable state" prior to the call; - * this key can be passed to irq_unlock() to re-enable interrupts. - * - * The lock-out key should only be used as the argument to the - * irq_unlock() API. It should never be used to manually re-enable - * interrupts or to inspect or manipulate the contents of the source register. - * - * WARNINGS - * Invoking a kernel routine with interrupts locked may result in - * interrupts being re-enabled for an unspecified period of time. If the - * called routine blocks, interrupts will be re-enabled while another - * context executes, or while the system is idle. - * - * The "interrupt disable state" is an attribute of a context. Thus, if a - * fiber or task disables interrupts and subsequently invokes a kernel - * routine that causes the calling context to block, the interrupt - * disable state will be restored when the context is later rescheduled - * for execution. - * - * @return An architecture-dependent lock-out key representing the - * "interrupt disable state" prior to the call. - * - * \NOMANUAL - */ - -SECTION_FUNC(TEXT, irq_lock) - j_s.d [blink] - clri r0 - -/** - * - * @brief Enable all interrupts on the local CPU - * - * This routine re-enables interrupts on the local CPU. The parameter - * is an architecture-dependent lock-out key that is returned by a previous - * invocation of irq_lock(). - * - * This routine can be called from either interrupt, task or fiber level. 
- * - * @return N/A - * - * \NOMANUAL - */ - -SECTION_FUNC(TEXT, irq_unlock) - j_s.d [blink] - seti r0 - -#endif /* _IRQ_LOCK__H_ */ diff --git a/arch/arm/core/Makefile b/arch/arm/core/Makefile index b8651f4d7d5..f95e3019f6c 100644 --- a/arch/arm/core/Makefile +++ b/arch/arm/core/Makefile @@ -4,7 +4,7 @@ ccflags-y += -I$(srctree)/kernel/microkernel/include asflags-y := ${ccflags-y} obj-y = atomic.o exc_exit.o ffs.o irq_init.o \ - fiber_abort.o swap.o basepri.o \ + fiber_abort.o swap.o \ fault.o gdb_stub_irq_vector_table.o \ irq_manage.o context.o cpu_idle.o \ fault_s.o gdb_stub.o isr_wrapper.o \ diff --git a/arch/arm/core/basepri.S b/arch/arm/core/basepri.S deleted file mode 100644 index 5fe6c6b9d84..00000000000 --- a/arch/arm/core/basepri.S +++ /dev/null @@ -1,95 +0,0 @@ -/* basepri.S - ARM Cortex-M interrupt locking via BASEPRI */ - -/* - * Copyright (c) 2013-2014 Wind River Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1) Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2) Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3) Neither the name of Wind River Systems nor the names of its contributors - * may be used to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* -DESCRIPTION - -Provide irq_lock() and irq_unlock() via the BASEPRI register. This -allows locking up to a certain interrupt priority. Kernel locks out priorities -2 and lower (higher numbered priorities), in essence leaving priorities 0 and 1 -unlocked. This achieves two purposes: - -1. The service call exception is installed at priority 0, allowing it to be - invoked with interrupts locked. This is needed since 'svc #0' is the - implementation of _Swap(), which is invoked with interrupts locked in the - common implementation of nanokernel objects. - -2. Zero Interrupt Latency (ZLI) is achievable via this by allowing certain - interrupts to set their priority to 1, thus being allowed in when interrupts - are locked for regular interrupts. - */ - -#define _ASMLANGUAGE - -#include -#include -#include - -_ASM_FILE_PROLOGUE - -GTEXT(irq_lock) -GTEXT(irq_unlock) - -/** - * - * @brief Lock interrupts - * - * Prevent exceptions of priority lower than to the two highest priorities from - * interrupting the CPU. - * - * This function can be called recursively: it will return a key to return the - * state of interrupt locking to the previous level. 
- * - * @return a key to return to the previous interrupt locking level - */ - -SECTION_FUNC(TEXT,irq_lock) - movs.n r1, #_EXC_IRQ_DEFAULT_PRIO - mrs r0, BASEPRI - msr BASEPRI, r1 - bx lr - -/** - * - * @brief Unlock interrupts - * - * Return the state of interrupt locking to a previous level, passed in via the - * parameter, obtained from a previous call to irq_lock(). - * - * @return N/A - */ - -SECTION_FUNC(TEXT,irq_unlock) - msr BASEPRI, r0 - bx lr - - .end diff --git a/arch/x86/core/intstub.S b/arch/x86/core/intstub.S index b1974a1f44b..7c33b92f771 100644 --- a/arch/x86/core/intstub.S +++ b/arch/x86/core/intstub.S @@ -56,11 +56,6 @@ entering and exiting a C interrupt handler. GTEXT(_SpuriousIntNoErrCodeHandler) GTEXT(_SpuriousIntHandler) - /* exports (public APIs) */ - - GTEXT(irq_lock) - GTEXT(irq_unlock) - /* externs */ GTEXT(_Swap) @@ -460,65 +455,4 @@ BRANCH_LABEL(callFatalHandler) jmp callFatalHandler - -/** - * - * @brief Disable interrupts on the local CPU - * - * This routine disables interrupts. It can be called from either interrupt - * or context level. This routine returns an architecture-dependent - * lock-out key representing the "interrupt disable state" prior to the call; - * this key can be passed to fiber_enable_ints() to re-enable interrupts. - * - * The lock-out key should only be used as the argument to the - * fiber_enable_ints() API. It should never be used to manually re-enable - * interrupts or to inspect or manipulate the contents of the source register. - * - * WARNINGS - * Invoking a kernel routine with interrupts locked may result in - * interrupts being re-enabled for an unspecified period of time. If the - * called routine blocks, interrupts will be re-enabled while another - * context executes, or while the system is idle. - * - * The "interrupt disable state" is an attribute of a context, i.e. it's part - * of the context context. 
Thus, if a context disables interrupts and - * subsequently invokes a kernel routine that causes the calling context - * to block, the interrupt disable state will be restored when the context is - * later rescheduled for execution. - * - * @return An architecture-dependent lock-out key representing the - * "interrupt disable state" prior to the call. - */ - -SECTION_FUNC(TEXT, irq_lock) - pushfl - cli -#ifdef CONFIG_INT_LATENCY_BENCHMARK - call _int_latency_start -#endif - popl %eax - ret - - -/** - * - * @brief Enable interrupts on the local CPU - * - * This routine re-enables interrupts on the local CPU. The parameter - * is an architecture-dependent lock-out key that is returned by a previous - * invocation of irq_lock(). - * - * This routine can be called from either a context or ISR context. - */ - -SECTION_FUNC(TEXT, irq_unlock) - testl $0x200, SP_ARG1(%esp) - jz skipIntEnable -#ifdef CONFIG_INT_LATENCY_BENCHMARK - call _int_latency_stop -#endif - sti -BRANCH_LABEL(skipIntEnable) - ret - #endif /* CONFIG_NO_ISRS */ diff --git a/include/arch/arc/v2/irq.h b/include/arch/arc/v2/irq.h index ec03ebb5a37..5d1227f6589 100644 --- a/include/arch/arc/v2/irq.h +++ b/include/arch/arc/v2/irq.h @@ -42,15 +42,10 @@ #ifdef _ASMLANGUAGE GTEXT(_irq_exit); -GTEXT(irq_lock) -GTEXT(irq_unlock) GTEXT(irq_connect) GTEXT(irq_enable) GTEXT(irq_disable) #else -extern int irq_lock(void); -extern void irq_unlock(int key); - extern int irq_connect(unsigned int irq, unsigned int prio, void (*isr)(void *arg), @@ -63,17 +58,37 @@ extern void _irq_exit(void); /** * - * @brief Disable all interrupts on the CPU (inline) + * @brief Disable all interrupts on the local CPU * - * See irq_lock() for full description + * This routine disables interrupts. It can be called from either interrupt, + * task or fiber level. 
This routine returns an architecture-dependent + * lock-out key representing the "interrupt disable state" prior to the call; + * this key can be passed to irq_unlock() to re-enable interrupts. + * + * The lock-out key should only be used as the argument to the + * irq_unlock() API. It should never be used to manually re-enable + * interrupts or to inspect or manipulate the contents of the source register. + * + * This function can be called recursively: it will return a key to return the + * state of interrupt locking to the previous level. + * + * WARNINGS + * Invoking a kernel routine with interrupts locked may result in + * interrupts being re-enabled for an unspecified period of time. If the + * called routine blocks, interrupts will be re-enabled while another + * context executes, or while the system is idle. + * + * The "interrupt disable state" is an attribute of a context. Thus, if a + * fiber or task disables interrupts and subsequently invokes a kernel + * routine that causes the calling context to block, the interrupt + * disable state will be restored when the context is later rescheduled + * for execution. * * @return An architecture-dependent lock-out key representing the * "interrupt disable state" prior to the call. - * - * \NOMANUAL */ -static ALWAYS_INLINE unsigned int irq_lock_inline(void) +static ALWAYS_INLINE unsigned int irq_lock(void) { unsigned int key; @@ -83,16 +98,18 @@ static ALWAYS_INLINE unsigned int irq_lock_inline(void) /** * - * @brief Enable all interrupts on the CPU (inline) + * @brief Enable all interrupts on the local CPU * - * See irq_unlock() for full description + * This routine re-enables interrupts on the local CPU. The parameter + * is an architecture-dependent lock-out key that is returned by a previous + * invocation of irq_lock(). + * + * This routine can be called from either interrupt, task or fiber level. 
* * @return N/A - * - * \NOMANUAL */ -static ALWAYS_INLINE void irq_unlock_inline(unsigned int key) +static ALWAYS_INLINE void irq_unlock(unsigned int key) { __asm__ volatile("seti %0" : : "ir"(key)); } diff --git a/include/arch/arm/CortexM/asm_inline_gcc.h b/include/arch/arm/CortexM/asm_inline_gcc.h index 8cd3006420b..f272a75f670 100644 --- a/include/arch/arm/CortexM/asm_inline_gcc.h +++ b/include/arch/arm/CortexM/asm_inline_gcc.h @@ -111,16 +111,19 @@ static ALWAYS_INLINE unsigned int find_first_set_inline(unsigned int op) /** * - * @brief Disable all interrupts on the CPU (inline) + * @brief Disable all interrupts on the CPU * * This routine disables interrupts. It can be called from either interrupt, * task or fiber level. This routine returns an architecture-dependent * lock-out key representing the "interrupt disable state" prior to the call; - * this key can be passed to irq_unlock_inline() to re-enable interrupts. + * this key can be passed to irq_unlock() to re-enable interrupts. * - * The lock-out key should only be used as the argument to the - * irq_unlock_inline() API. It should never be used to manually re-enable - * interrupts or to inspect or manipulate the contents of the source register. + * The lock-out key should only be used as the argument to the irq_unlock() + * API. It should never be used to manually re-enable interrupts or to inspect + * or manipulate the contents of the source register. + * + * This function can be called recursively: it will return a key to return the + * state of interrupt locking to the previous level. * * WARNINGS * Invoking a kernel routine with interrupts locked may result in @@ -137,10 +140,13 @@ static ALWAYS_INLINE unsigned int find_first_set_inline(unsigned int op) * @return An architecture-dependent lock-out key representing the * "interrupt disable state" prior to the call. 
* - * \NOMANUAL + * @internal + * + * On Cortex-M3/M4, this function prevents exceptions of priority lower than + * the two highest priorities from interrupting the CPU. */ -static ALWAYS_INLINE unsigned int irq_lock_inline(void) +static ALWAYS_INLINE unsigned int irq_lock(void) { unsigned int key; @@ -160,18 +166,16 @@ static ALWAYS_INLINE unsigned int irq_lock_inline(void) * * @brief Enable all interrupts on the CPU (inline) * - * This routine re-enables interrupts on the CPU. The parameter - * is an architecture-dependent lock-out key that is returned by a previous - * invocation of irq_lock_inline(). + * This routine re-enables interrupts on the CPU. The parameter is an + * architecture-dependent lock-out key that is returned by a previous + * invocation of irq_lock(). * * This routine can be called from either interrupt, task or fiber level. * * @return N/A - * - * \NOMANUAL */ -static ALWAYS_INLINE void irq_unlock_inline(unsigned int key) +static ALWAYS_INLINE void irq_unlock(unsigned int key) { __asm__ volatile("msr BASEPRI, %0;\n\t" : : "r"(key)); } diff --git a/include/arch/arm/CortexM/irq.h b/include/arch/arm/CortexM/irq.h index d0e7d6d3a0b..66ab07eb6a0 100644 --- a/include/arch/arm/CortexM/irq.h +++ b/include/arch/arm/CortexM/irq.h @@ -43,15 +43,10 @@ ARM-specific nanokernel interrupt handling interface. Included by ARM/arch.h. 
#ifdef _ASMLANGUAGE GTEXT(_IntExit); -GTEXT(irq_lock) -GTEXT(irq_unlock) GTEXT(irq_connect) GTEXT(irq_enable) GTEXT(irq_disable) #else -extern int irq_lock(void); -extern void irq_unlock(int key); - extern int irq_connect(unsigned int irq, unsigned int prio, void (*isr)(void *arg), diff --git a/include/arch/x86/arch.h b/include/arch/x86/arch.h index 02b0130f866..777c97f98af 100644 --- a/include/arch/x86/arch.h +++ b/include/arch/x86/arch.h @@ -286,8 +286,6 @@ typedef struct nanoIsf { static inline unsigned int irq_lock(void) {return 1;} static inline void irq_unlock(unsigned int key) {} -#define irq_lock_inline irq_lock -#define irq_unlock_inline irq_unlock #else /* CONFIG_NO_ISRS */ @@ -302,11 +300,14 @@ void _int_latency_stop(void); * This routine disables interrupts. It can be called from either interrupt, * task or fiber level. This routine returns an architecture-dependent * lock-out key representing the "interrupt disable state" prior to the call; - * this key can be passed to irq_unlock_inline() to re-enable interrupts. + * this key can be passed to irq_unlock() to re-enable interrupts. * - * The lock-out key should only be used as the argument to the - * irq_unlock_inline() API. It should never be used to manually re-enable - * interrupts or to inspect or manipulate the contents of the source register. + * The lock-out key should only be used as the argument to the irq_unlock() + * API. It should never be used to manually re-enable interrupts or to inspect + * or manipulate the contents of the source register. + * + * This function can be called recursively: it will return a key to return the + * state of interrupt locking to the previous level. 
* * WARNINGS * Invoking a kernel routine with interrupts locked may result in @@ -326,10 +327,9 @@ void _int_latency_stop(void); * \NOMANUAL */ -static inline __attribute__((always_inline)) - unsigned int irq_lock_inline(void) +static inline __attribute__((always_inline)) unsigned int irq_lock(void) { - unsigned int key = _do_irq_lock_inline(); + unsigned int key = _do_irq_lock(); #ifdef CONFIG_INT_LATENCY_BENCHMARK _int_latency_start(); @@ -345,7 +345,7 @@ static inline __attribute__((always_inline)) * * This routine re-enables interrupts on the CPU. The parameter * is an architecture-dependent lock-out key that is returned by a previous - * invocation of irq_lock_inline(). + * invocation of irq_lock(). * * This routine can be called from either interrupt, task or fiber level. * @@ -354,8 +354,7 @@ static inline __attribute__((always_inline)) * \NOMANUAL */ -static inline __attribute__((always_inline)) - void irq_unlock_inline(unsigned int key) +static inline __attribute__((always_inline)) void irq_unlock(unsigned int key) { if (!(key & 0x200)) { return; @@ -363,7 +362,7 @@ static inline __attribute__((always_inline)) #ifdef CONFIG_INT_LATENCY_BENCHMARK _int_latency_stop(); #endif - _do_irq_unlock_inline(); + _do_irq_unlock(); return; } #endif /* CONFIG_NO_ISRS */ @@ -408,18 +407,6 @@ extern void irq_enable(unsigned int irq); */ extern void irq_disable(unsigned int irq); -#ifndef CONFIG_NO_ISRS -/** - * @brief Lock out all interrupts - */ -extern int irq_lock(void); - -/** - * @brief Unlock all interrupts - */ -extern void irq_unlock(int key); -#endif /* CONFIG_NO_ISRS */ - #ifdef CONFIG_FP_SHARING /** * @brief Enable floating point hardware resources sharing diff --git a/include/arch/x86/asm_inline_gcc.h b/include/arch/x86/asm_inline_gcc.h index d69568ecf2f..a918df15971 100644 --- a/include/arch/x86/asm_inline_gcc.h +++ b/include/arch/x86/asm_inline_gcc.h @@ -46,37 +46,18 @@ /** * - * @brief Disable all interrupts on the CPU (inline) + * @internal * - * This 
routine disables interrupts. It can be called from either interrupt, - task or fiber level. This routine returns an architecture-dependent - lock-out key representing the "interrupt disable state" prior to the call; - this key can be passed to irq_unlock_inline() to re-enable interrupts. + * @brief Disable all interrupts on the CPU - * The lock-out key should only be used as the argument to the - * irq_unlock_inline() API. It should never be used to manually re-enable - * interrupts or to inspect or manipulate the contents of the source register. - * - * WARNINGS - * Invoking a kernel routine with interrupts locked may result in - * interrupts being re-enabled for an unspecified period of time. If the - * called routine blocks, interrupts will be re-enabled while another - * context executes, or while the system is idle. - * - * The "interrupt disable state" is an attribute of a context. Thus, if a - * fiber or task disables interrupts and subsequently invokes a kernel - * routine that causes the calling context to block, the interrupt - * disable state will be restored when the context is later rescheduled - * for execution. + * GCC assembly internals of irq_lock(). See irq_lock() for a complete + * description. * * @return An architecture-dependent lock-out key representing the * "interrupt disable state" prior to the call. - * - * \NOMANUAL */ -static inline __attribute__((always_inline)) - unsigned int _do_irq_lock_inline(void) +static inline __attribute__((always_inline)) unsigned int _do_irq_lock(void) { unsigned int key; @@ -94,19 +75,18 @@ static inline __attribute__((always_inline)) /** + * + * @internal * * @brief Enable all interrupts on the CPU (inline) * - * This routine can be called from either interrupt, task or fiber level. - * Invoked by kernel or by irq_unlock_inline() + * GCC assembly internals of irq_unlock(). See irq_unlock() for a + * complete description.
* * @return N/A - * - * \NOMANUAL */ -static inline __attribute__((always_inline)) - void _do_irq_unlock_inline(void) +static inline __attribute__((always_inline)) void _do_irq_unlock(void) { __asm__ volatile ( "sti;\n\t"