irq: remove non-inline irq_lock/unlock
The inline versions are renamed to remove the _inline suffix, and the non-inline versions are removed from the code base. Change-Id: I7314b96c42835f15df4c537ec11ab7961d4ee60f Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
This commit is contained in:
parent
6469e578cb
commit
2c5086cc65
10 changed files with 73 additions and 348 deletions
|
@ -3,6 +3,6 @@ ccflags-y += -I$(srctree)/kernel/microkernel/include
|
|||
|
||||
obj-y += atomic.o context.o context_wrapper.o \
|
||||
cpu_idle.o fast_irq.o fatal.o fault.o \
|
||||
fault_s.o ffs.o irq_lock.o irq_manage.o \
|
||||
fault_s.o ffs.o irq_manage.o \
|
||||
isr_wrapper.o regular_irq.o swap_macros.h swap.o \
|
||||
sys_fatal_error_handler.o
|
||||
|
|
|
@ -1,97 +0,0 @@
|
|||
/* irq_lock.S - interrupt locking */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2014 Wind River Systems, Inc.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1) Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2) Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* 3) Neither the name of Wind River Systems nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this software without
|
||||
* specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef _IRQ_LOCK__H_
|
||||
#define _IRQ_LOCK__H_
|
||||
|
||||
#define _ASMLANGUAGE
|
||||
|
||||
#include <nano_private.h>
|
||||
#include <offsets.h>
|
||||
#include <toolchain.h>
|
||||
#include <arch/cpu.h>
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Disable all interrupts on the local CPU
|
||||
*
|
||||
* This routine disables interrupts. It can be called from either interrupt,
|
||||
* task or fiber level. This routine returns an architecture-dependent
|
||||
* lock-out key representing the "interrupt disable state" prior to the call;
|
||||
* this key can be passed to irq_unlock() to re-enable interrupts.
|
||||
*
|
||||
* The lock-out key should only be used as the argument to the
|
||||
* irq_unlock() API. It should never be used to manually re-enable
|
||||
* interrupts or to inspect or manipulate the contents of the source register.
|
||||
*
|
||||
* WARNINGS
|
||||
* Invoking a kernel routine with interrupts locked may result in
|
||||
* interrupts being re-enabled for an unspecified period of time. If the
|
||||
* called routine blocks, interrupts will be re-enabled while another
|
||||
* context executes, or while the system is idle.
|
||||
*
|
||||
* The "interrupt disable state" is an attribute of a context. Thus, if a
|
||||
* fiber or task disables interrupts and subsequently invokes a kernel
|
||||
* routine that causes the calling context to block, the interrupt
|
||||
* disable state will be restored when the context is later rescheduled
|
||||
* for execution.
|
||||
*
|
||||
* @return An architecture-dependent lock-out key representing the
|
||||
* "interrupt disable state" prior to the call.
|
||||
*
|
||||
* \NOMANUAL
|
||||
*/
|
||||
|
||||
SECTION_FUNC(TEXT, irq_lock)
|
||||
j_s.d [blink]
|
||||
clri r0
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Enable all interrupts on the local CPU
|
||||
*
|
||||
* This routine re-enables interrupts on the local CPU. The <key> parameter
|
||||
* is an architecture-dependent lock-out key that is returned by a previous
|
||||
* invocation of irq_lock().
|
||||
*
|
||||
* This routine can be called from either interrupt, task or fiber level.
|
||||
*
|
||||
* @return N/A
|
||||
*
|
||||
* \NOMANUAL
|
||||
*/
|
||||
|
||||
SECTION_FUNC(TEXT, irq_unlock)
|
||||
j_s.d [blink]
|
||||
seti r0
|
||||
|
||||
#endif /* _IRQ_LOCK__H_ */
|
|
@ -4,7 +4,7 @@ ccflags-y += -I$(srctree)/kernel/microkernel/include
|
|||
asflags-y := ${ccflags-y}
|
||||
|
||||
obj-y = atomic.o exc_exit.o ffs.o irq_init.o \
|
||||
fiber_abort.o swap.o basepri.o \
|
||||
fiber_abort.o swap.o \
|
||||
fault.o gdb_stub_irq_vector_table.o \
|
||||
irq_manage.o context.o cpu_idle.o \
|
||||
fault_s.o gdb_stub.o isr_wrapper.o \
|
||||
|
|
|
@ -1,95 +0,0 @@
|
|||
/* basepri.S - ARM Cortex-M interrupt locking via BASEPRI */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2013-2014 Wind River Systems, Inc.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1) Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2) Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* 3) Neither the name of Wind River Systems nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this software without
|
||||
* specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/*
|
||||
DESCRIPTION
|
||||
|
||||
Provide irq_lock() and irq_unlock() via the BASEPRI register. This
|
||||
allows locking up to a certain interrupt priority. Kernel locks out priorities
|
||||
2 and lower (higher numbered priorities), in essence leaving priorities 0 and 1
|
||||
unlocked. This achieves two purposes:
|
||||
|
||||
1. The service call exception is installed at priority 0, allowing it to be
|
||||
invoked with interrupts locked. This is needed since 'svc #0' is the
|
||||
implementation of _Swap(), which is invoked with interrupts locked in the
|
||||
common implementation of nanokernel objects.
|
||||
|
||||
2. Zero Interrupt Latency (ZLI) is achievable via this by allowing certain
|
||||
interrupts to set their priority to 1, thus being allowed in when interrupts
|
||||
are locked for regular interrupts.
|
||||
*/
|
||||
|
||||
#define _ASMLANGUAGE
|
||||
|
||||
#include <toolchain.h>
|
||||
#include <sections.h>
|
||||
#include <arch/cpu.h>
|
||||
|
||||
_ASM_FILE_PROLOGUE
|
||||
|
||||
GTEXT(irq_lock)
|
||||
GTEXT(irq_unlock)
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Lock interrupts
|
||||
*
|
||||
* Prevent exceptions of priority lower than to the two highest priorities from
|
||||
* interrupting the CPU.
|
||||
*
|
||||
* This function can be called recursively: it will return a key to return the
|
||||
* state of interrupt locking to the previous level.
|
||||
*
|
||||
* @return a key to return to the previous interrupt locking level
|
||||
*/
|
||||
|
||||
SECTION_FUNC(TEXT,irq_lock)
|
||||
movs.n r1, #_EXC_IRQ_DEFAULT_PRIO
|
||||
mrs r0, BASEPRI
|
||||
msr BASEPRI, r1
|
||||
bx lr
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Unlock interrupts
|
||||
*
|
||||
* Return the state of interrupt locking to a previous level, passed in via the
|
||||
* <key> parameter, obtained from a previous call to irq_lock().
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
|
||||
SECTION_FUNC(TEXT,irq_unlock)
|
||||
msr BASEPRI, r0
|
||||
bx lr
|
||||
|
||||
.end
|
|
@ -56,11 +56,6 @@ entering and exiting a C interrupt handler.
|
|||
GTEXT(_SpuriousIntNoErrCodeHandler)
|
||||
GTEXT(_SpuriousIntHandler)
|
||||
|
||||
/* exports (public APIs) */
|
||||
|
||||
GTEXT(irq_lock)
|
||||
GTEXT(irq_unlock)
|
||||
|
||||
/* externs */
|
||||
|
||||
GTEXT(_Swap)
|
||||
|
@ -460,65 +455,4 @@ BRANCH_LABEL(callFatalHandler)
|
|||
|
||||
jmp callFatalHandler
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Disable interrupts on the local CPU
|
||||
*
|
||||
* This routine disables interrupts. It can be called from either interrupt
|
||||
* or context level. This routine returns an architecture-dependent
|
||||
* lock-out key representing the "interrupt disable state" prior to the call;
|
||||
* this key can be passed to fiber_enable_ints() to re-enable interrupts.
|
||||
*
|
||||
* The lock-out key should only be used as the argument to the
|
||||
* fiber_enable_ints() API. It should never be used to manually re-enable
|
||||
* interrupts or to inspect or manipulate the contents of the source register.
|
||||
*
|
||||
* WARNINGS
|
||||
* Invoking a kernel routine with interrupts locked may result in
|
||||
* interrupts being re-enabled for an unspecified period of time. If the
|
||||
* called routine blocks, interrupts will be re-enabled while another
|
||||
* context executes, or while the system is idle.
|
||||
*
|
||||
* The "interrupt disable state" is an attribute of a context, i.e. it's part
|
||||
* of the context context. Thus, if a context disables interrupts and
|
||||
* subsequently invokes a kernel routine that causes the calling context
|
||||
* to block, the interrupt disable state will be restored when the context is
|
||||
* later rescheduled for execution.
|
||||
*
|
||||
* @return An architecture-dependent lock-out key representing the
|
||||
* "interrupt disable state" prior to the call.
|
||||
*/
|
||||
|
||||
SECTION_FUNC(TEXT, irq_lock)
|
||||
pushfl
|
||||
cli
|
||||
#ifdef CONFIG_INT_LATENCY_BENCHMARK
|
||||
call _int_latency_start
|
||||
#endif
|
||||
popl %eax
|
||||
ret
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Enable interrupts on the local CPU
|
||||
*
|
||||
* This routine re-enables interrupts on the local CPU. The <key> parameter
|
||||
* is an architecture-dependent lock-out key that is returned by a previous
|
||||
* invocation of irq_lock().
|
||||
*
|
||||
* This routine can be called from either a context or ISR context.
|
||||
*/
|
||||
|
||||
SECTION_FUNC(TEXT, irq_unlock)
|
||||
testl $0x200, SP_ARG1(%esp)
|
||||
jz skipIntEnable
|
||||
#ifdef CONFIG_INT_LATENCY_BENCHMARK
|
||||
call _int_latency_stop
|
||||
#endif
|
||||
sti
|
||||
BRANCH_LABEL(skipIntEnable)
|
||||
ret
|
||||
|
||||
#endif /* CONFIG_NO_ISRS */
|
||||
|
|
|
@ -42,15 +42,10 @@
|
|||
|
||||
#ifdef _ASMLANGUAGE
|
||||
GTEXT(_irq_exit);
|
||||
GTEXT(irq_lock)
|
||||
GTEXT(irq_unlock)
|
||||
GTEXT(irq_connect)
|
||||
GTEXT(irq_enable)
|
||||
GTEXT(irq_disable)
|
||||
#else
|
||||
extern int irq_lock(void);
|
||||
extern void irq_unlock(int key);
|
||||
|
||||
extern int irq_connect(unsigned int irq,
|
||||
unsigned int prio,
|
||||
void (*isr)(void *arg),
|
||||
|
@ -63,17 +58,37 @@ extern void _irq_exit(void);
|
|||
|
||||
/**
|
||||
*
|
||||
* @brief Disable all interrupts on the CPU (inline)
|
||||
* @brief Disable all interrupts on the local CPU
|
||||
*
|
||||
* See irq_lock() for full description
|
||||
* This routine disables interrupts. It can be called from either interrupt,
|
||||
* task or fiber level. This routine returns an architecture-dependent
|
||||
* lock-out key representing the "interrupt disable state" prior to the call;
|
||||
* this key can be passed to irq_unlock() to re-enable interrupts.
|
||||
*
|
||||
* The lock-out key should only be used as the argument to the
|
||||
* irq_unlock() API. It should never be used to manually re-enable
|
||||
* interrupts or to inspect or manipulate the contents of the source register.
|
||||
*
|
||||
* This function can be called recursively: it will return a key to return the
|
||||
* state of interrupt locking to the previous level.
|
||||
*
|
||||
* WARNINGS
|
||||
* Invoking a kernel routine with interrupts locked may result in
|
||||
* interrupts being re-enabled for an unspecified period of time. If the
|
||||
* called routine blocks, interrupts will be re-enabled while another
|
||||
* context executes, or while the system is idle.
|
||||
*
|
||||
* The "interrupt disable state" is an attribute of a context. Thus, if a
|
||||
* fiber or task disables interrupts and subsequently invokes a kernel
|
||||
* routine that causes the calling context to block, the interrupt
|
||||
* disable state will be restored when the context is later rescheduled
|
||||
* for execution.
|
||||
*
|
||||
* @return An architecture-dependent lock-out key representing the
|
||||
* "interrupt disable state" prior to the call.
|
||||
*
|
||||
* \NOMANUAL
|
||||
*/
|
||||
|
||||
static ALWAYS_INLINE unsigned int irq_lock_inline(void)
|
||||
static ALWAYS_INLINE unsigned int irq_lock(void)
|
||||
{
|
||||
unsigned int key;
|
||||
|
||||
|
@ -83,16 +98,18 @@ static ALWAYS_INLINE unsigned int irq_lock_inline(void)
|
|||
|
||||
/**
|
||||
*
|
||||
* @brief Enable all interrupts on the CPU (inline)
|
||||
* @brief Enable all interrupts on the local CPU
|
||||
*
|
||||
* See irq_unlock() for full description
|
||||
* This routine re-enables interrupts on the local CPU. The <key> parameter
|
||||
* is an architecture-dependent lock-out key that is returned by a previous
|
||||
* invocation of irq_lock().
|
||||
*
|
||||
* This routine can be called from either interrupt, task or fiber level.
|
||||
*
|
||||
* @return N/A
|
||||
*
|
||||
* \NOMANUAL
|
||||
*/
|
||||
|
||||
static ALWAYS_INLINE void irq_unlock_inline(unsigned int key)
|
||||
static ALWAYS_INLINE void irq_unlock(unsigned int key)
|
||||
{
|
||||
__asm__ volatile("seti %0" : : "ir"(key));
|
||||
}
|
||||
|
|
|
@ -111,16 +111,19 @@ static ALWAYS_INLINE unsigned int find_first_set_inline(unsigned int op)
|
|||
|
||||
/**
|
||||
*
|
||||
* @brief Disable all interrupts on the CPU (inline)
|
||||
* @brief Disable all interrupts on the CPU
|
||||
*
|
||||
* This routine disables interrupts. It can be called from either interrupt,
|
||||
* task or fiber level. This routine returns an architecture-dependent
|
||||
* lock-out key representing the "interrupt disable state" prior to the call;
|
||||
* this key can be passed to irq_unlock_inline() to re-enable interrupts.
|
||||
* this key can be passed to irq_unlock() to re-enable interrupts.
|
||||
*
|
||||
* The lock-out key should only be used as the argument to the
|
||||
* irq_unlock_inline() API. It should never be used to manually re-enable
|
||||
* interrupts or to inspect or manipulate the contents of the source register.
|
||||
* The lock-out key should only be used as the argument to the irq_unlock()
|
||||
* API. It should never be used to manually re-enable interrupts or to inspect
|
||||
* or manipulate the contents of the source register.
|
||||
*
|
||||
* This function can be called recursively: it will return a key to return the
|
||||
* state of interrupt locking to the previous level.
|
||||
*
|
||||
* WARNINGS
|
||||
* Invoking a kernel routine with interrupts locked may result in
|
||||
|
@ -137,10 +140,13 @@ static ALWAYS_INLINE unsigned int find_first_set_inline(unsigned int op)
|
|||
* @return An architecture-dependent lock-out key representing the
|
||||
* "interrupt disable state" prior to the call.
|
||||
*
|
||||
* \NOMANUAL
|
||||
* @internal
|
||||
*
|
||||
* On Cortex-M3/M4, this function prevents exceptions of priority lower than
|
||||
* the two highest priorities from interrupting the CPU.
|
||||
*/
|
||||
|
||||
static ALWAYS_INLINE unsigned int irq_lock_inline(void)
|
||||
static ALWAYS_INLINE unsigned int irq_lock(void)
|
||||
{
|
||||
unsigned int key;
|
||||
|
||||
|
@ -160,18 +166,16 @@ static ALWAYS_INLINE unsigned int irq_lock_inline(void)
|
|||
*
|
||||
* @brief Enable all interrupts on the CPU (inline)
|
||||
*
|
||||
* This routine re-enables interrupts on the CPU. The <key> parameter
|
||||
* is an architecture-dependent lock-out key that is returned by a previous
|
||||
* invocation of irq_lock_inline().
|
||||
* This routine re-enables interrupts on the CPU. The <key> parameter is an
|
||||
* architecture-dependent lock-out key that is returned by a previous
|
||||
* invocation of irq_lock().
|
||||
*
|
||||
* This routine can be called from either interrupt, task or fiber level.
|
||||
*
|
||||
* @return N/A
|
||||
*
|
||||
* \NOMANUAL
|
||||
*/
|
||||
|
||||
static ALWAYS_INLINE void irq_unlock_inline(unsigned int key)
|
||||
static ALWAYS_INLINE void irq_unlock(unsigned int key)
|
||||
{
|
||||
__asm__ volatile("msr BASEPRI, %0;\n\t" : : "r"(key));
|
||||
}
|
||||
|
|
|
@ -43,15 +43,10 @@ ARM-specific nanokernel interrupt handling interface. Included by ARM/arch.h.
|
|||
|
||||
#ifdef _ASMLANGUAGE
|
||||
GTEXT(_IntExit);
|
||||
GTEXT(irq_lock)
|
||||
GTEXT(irq_unlock)
|
||||
GTEXT(irq_connect)
|
||||
GTEXT(irq_enable)
|
||||
GTEXT(irq_disable)
|
||||
#else
|
||||
extern int irq_lock(void);
|
||||
extern void irq_unlock(int key);
|
||||
|
||||
extern int irq_connect(unsigned int irq,
|
||||
unsigned int prio,
|
||||
void (*isr)(void *arg),
|
||||
|
|
|
@ -286,8 +286,6 @@ typedef struct nanoIsf {
|
|||
|
||||
static inline unsigned int irq_lock(void) {return 1;}
|
||||
static inline void irq_unlock(unsigned int key) {}
|
||||
#define irq_lock_inline irq_lock
|
||||
#define irq_unlock_inline irq_unlock
|
||||
|
||||
#else /* CONFIG_NO_ISRS */
|
||||
|
||||
|
@ -302,11 +300,14 @@ void _int_latency_stop(void);
|
|||
* This routine disables interrupts. It can be called from either interrupt,
|
||||
* task or fiber level. This routine returns an architecture-dependent
|
||||
* lock-out key representing the "interrupt disable state" prior to the call;
|
||||
* this key can be passed to irq_unlock_inline() to re-enable interrupts.
|
||||
* this key can be passed to irq_unlock() to re-enable interrupts.
|
||||
*
|
||||
* The lock-out key should only be used as the argument to the
|
||||
* irq_unlock_inline() API. It should never be used to manually re-enable
|
||||
* interrupts or to inspect or manipulate the contents of the source register.
|
||||
* The lock-out key should only be used as the argument to the irq_unlock()
|
||||
* API. It should never be used to manually re-enable interrupts or to inspect
|
||||
* or manipulate the contents of the source register.
|
||||
*
|
||||
* This function can be called recursively: it will return a key to return the
|
||||
* state of interrupt locking to the previous level.
|
||||
*
|
||||
* WARNINGS
|
||||
* Invoking a kernel routine with interrupts locked may result in
|
||||
|
@ -326,10 +327,9 @@ void _int_latency_stop(void);
|
|||
* \NOMANUAL
|
||||
*/
|
||||
|
||||
static inline __attribute__((always_inline))
|
||||
unsigned int irq_lock_inline(void)
|
||||
static inline __attribute__((always_inline)) unsigned int irq_lock(void)
|
||||
{
|
||||
unsigned int key = _do_irq_lock_inline();
|
||||
unsigned int key = _do_irq_lock();
|
||||
|
||||
#ifdef CONFIG_INT_LATENCY_BENCHMARK
|
||||
_int_latency_start();
|
||||
|
@ -345,7 +345,7 @@ static inline __attribute__((always_inline))
|
|||
*
|
||||
* This routine re-enables interrupts on the CPU. The <key> parameter
|
||||
* is an architecture-dependent lock-out key that is returned by a previous
|
||||
* invocation of irq_lock_inline().
|
||||
* invocation of irq_lock().
|
||||
*
|
||||
* This routine can be called from either interrupt, task or fiber level.
|
||||
*
|
||||
|
@ -354,8 +354,7 @@ static inline __attribute__((always_inline))
|
|||
* \NOMANUAL
|
||||
*/
|
||||
|
||||
static inline __attribute__((always_inline))
|
||||
void irq_unlock_inline(unsigned int key)
|
||||
static inline __attribute__((always_inline)) void irq_unlock(unsigned int key)
|
||||
{
|
||||
if (!(key & 0x200)) {
|
||||
return;
|
||||
|
@ -363,7 +362,7 @@ static inline __attribute__((always_inline))
|
|||
#ifdef CONFIG_INT_LATENCY_BENCHMARK
|
||||
_int_latency_stop();
|
||||
#endif
|
||||
_do_irq_unlock_inline();
|
||||
_do_irq_unlock();
|
||||
return;
|
||||
}
|
||||
#endif /* CONFIG_NO_ISRS */
|
||||
|
@ -408,18 +407,6 @@ extern void irq_enable(unsigned int irq);
|
|||
*/
|
||||
extern void irq_disable(unsigned int irq);
|
||||
|
||||
#ifndef CONFIG_NO_ISRS
|
||||
/**
|
||||
* @brief Lock out all interrupts
|
||||
*/
|
||||
extern int irq_lock(void);
|
||||
|
||||
/**
|
||||
* @brief Unlock all interrupts
|
||||
*/
|
||||
extern void irq_unlock(int key);
|
||||
#endif /* CONFIG_NO_ISRS */
|
||||
|
||||
#ifdef CONFIG_FP_SHARING
|
||||
/**
|
||||
* @brief Enable floating point hardware resources sharing
|
||||
|
|
|
@ -46,37 +46,18 @@
|
|||
|
||||
/**
|
||||
*
|
||||
* @brief Disable all interrupts on the CPU (inline)
|
||||
* @internal
|
||||
*
|
||||
* This routine disables interrupts. It can be called from either interrupt,
|
||||
* task or fiber level. This routine returns an architecture-dependent
|
||||
* lock-out key representing the "interrupt disable state" prior to the call;
|
||||
* this key can be passed to irq_unlock_inline() to re-enable interrupts.
|
||||
* @brief Disable all interrupts on the CPU
|
||||
*
|
||||
* The lock-out key should only be used as the argument to the
|
||||
* irq_unlock_inline() API. It should never be used to manually re-enable
|
||||
* interrupts or to inspect or manipulate the contents of the source register.
|
||||
*
|
||||
* WARNINGS
|
||||
* Invoking a kernel routine with interrupts locked may result in
|
||||
* interrupts being re-enabled for an unspecified period of time. If the
|
||||
* called routine blocks, interrupts will be re-enabled while another
|
||||
* context executes, or while the system is idle.
|
||||
*
|
||||
* The "interrupt disable state" is an attribute of a context. Thus, if a
|
||||
* fiber or task disables interrupts and subsequently invokes a kernel
|
||||
* routine that causes the calling context to block, the interrupt
|
||||
* disable state will be restored when the context is later rescheduled
|
||||
* for execution.
|
||||
* GCC assembly internals of irq_lock(). See irq_lock() for a complete
|
||||
* description.
|
||||
*
|
||||
* @return An architecture-dependent lock-out key representing the
|
||||
* "interrupt disable state" prior to the call.
|
||||
*
|
||||
* \NOMANUAL
|
||||
*/
|
||||
|
||||
static inline __attribute__((always_inline))
|
||||
unsigned int _do_irq_lock_inline(void)
|
||||
static inline __attribute__((always_inline)) unsigned int _do_irq_lock(void)
|
||||
{
|
||||
unsigned int key;
|
||||
|
||||
|
@ -94,19 +75,18 @@ static inline __attribute__((always_inline))
|
|||
|
||||
|
||||
/**
|
||||
*
|
||||
* @internal
|
||||
*
|
||||
* @brief Enable all interrupts on the CPU (inline)
|
||||
*
|
||||
* This routine can be called from either interrupt, task or fiber level.
|
||||
* Invoked by kernel or by irq_unlock_inline()
|
||||
* GCC assembly internals of irq_unlock(). See irq_unlock() for a
|
||||
* complete description.
|
||||
*
|
||||
* @return N/A
|
||||
*
|
||||
* \NOMANUAL
|
||||
*/
|
||||
|
||||
static inline __attribute__((always_inline))
|
||||
void _do_irq_unlock_inline(void)
|
||||
static inline __attribute__((always_inline)) void _do_irq_unlock(void)
|
||||
{
|
||||
__asm__ volatile (
|
||||
"sti;\n\t"
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue