doxygen: change comment style to match javadoc
The change replaces the run of asterisks at the start of each comment
block with "**" and adds a space before the asterisk at the beginning
of each comment line.

Change-Id: I7656bde3bf4d9a31e38941e43b580520432dabc1
Signed-off-by: Anas Nashif <anas.nashif@intel.com>

parent 20764a2e8d
commit ea0d0b220c

305 changed files with 11249 additions and 11249 deletions
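For illustration, the transformation applied across the tree, shown on a
made-up routine (the name my_func is hypothetical, not from the commit).
Old style:

    /*******************************************************************************
    *
    * my_func - do something
    *
    * RETURNS: N/A
    */

New style:

    /**
     *
     * my_func - do something
     *
     * RETURNS: N/A
     */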
@@ -35,7 +35,7 @@ DESCRIPTION
 This library provides routines to perform a number of atomic operations
 on a memory location: add, subtract, increment, decrement, bitwise OR,
 bitwise NOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap.
-*/
+ */

 #define _ASMLANGUAGE

@@ -59,49 +59,49 @@ GTEXT(atomic_inc)
 GTEXT(atomic_sub)
 GTEXT(atomic_cas)

-/*******************************************************************************
-*
-* atomic_clear - atomically clear a memory location
-*
-* This routine atomically clears the contents of <target> and returns the old
-* value that was in <target>.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_clear
-* (
-* atomic_t *target /@ memory location to clear @/
-* )
-*/
+/**
+ *
+ * atomic_clear - atomically clear a memory location
+ *
+ * This routine atomically clears the contents of <target> and returns the old
+ * value that was in <target>.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_clear
+ * (
+ * atomic_t *target /@ memory location to clear @/
+ * )
+ */

 SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear)
     MOV r1, #0
     /* fall through into atomic_set */

-/*******************************************************************************
-*
-* atomic_set - atomically set a memory location
-*
-* This routine atomically sets the contents of <target> to <value> and returns
-* the old value that was in <target>.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_set
-* (
-* atomic_t *target, /@ memory location to set @/
-* atomic_val_t value /@ set with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_set - atomically set a memory location
+ *
+ * This routine atomically sets the contents of <target> to <value> and returns
+ * the old value that was in <target>.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_set
+ * (
+ * atomic_t *target, /@ memory location to set @/
+ * atomic_val_t value /@ set with this value @/
+ * )
+ *
+ */

 SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)

@@ -114,72 +114,72 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
     MOV r0, r2 /* return old value */
     MOV pc, lr

-/******************************************************************************
-*
-* atomic_get - Get the value of a shared memory atomically
-*
-* This routine atomically retrieves the value in *target
-*
-* long atomic_get
-* (
-* atomic_t * target /@ address of atom to be retrieved @/
-* )
-*
-* RETURN: value read from address target.
-*
-*/
+/**
+ *
+ * atomic_get - Get the value of a shared memory atomically
+ *
+ * This routine atomically retrieves the value in *target
+ *
+ * long atomic_get
+ * (
+ * atomic_t * target /@ address of atom to be retrieved @/
+ * )
+ *
+ * RETURN: value read from address target.
+ *
+ */

 SECTION_FUNC(TEXT, atomic_get)
     LDR r0, [r0]
     MOV pc, lr

-/*******************************************************************************
-*
-* atomic_inc - atomically increment a memory location
-*
-* This routine atomically increments the value in <target>. The operation is
-* done using unsigned integer arithmetic. Various CPU architectures may impose
-* restrictions with regards to the alignment and cache attributes of the
-* atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_inc
-* (
-* atomic_t *target, /@ memory location to increment @/
-* )
-*
-*/
+/**
+ *
+ * atomic_inc - atomically increment a memory location
+ *
+ * This routine atomically increments the value in <target>. The operation is
+ * done using unsigned integer arithmetic. Various CPU architectures may impose
+ * restrictions with regards to the alignment and cache attributes of the
+ * atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_inc
+ * (
+ * atomic_t *target, /@ memory location to increment @/
+ * )
+ *
+ */

 SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc)
     MOV r1, #1
     /* fall through into atomic_add */

-/*******************************************************************************
-*
-* atomic_add - atomically add a value to a memory location
-*
-* This routine atomically adds the contents of <target> and <value>, placing
-* the result in <target>. The operation is done using signed integer arithmetic.
-* Various CPU architectures may impose restrictions with regards to the
-* alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_add
-* (
-* atomic_t *target, /@ memory location to add to @/
-* atomic_val_t value /@ value to add @/
-* )
-*/
+/**
+ *
+ * atomic_add - atomically add a value to a memory location
+ *
+ * This routine atomically adds the contents of <target> and <value>, placing
+ * the result in <target>. The operation is done using signed integer arithmetic.
+ * Various CPU architectures may impose restrictions with regards to the
+ * alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_add
+ * (
+ * atomic_t *target, /@ memory location to add to @/
+ * atomic_val_t value /@ value to add @/
+ * )
+ */

 SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)

@@ -193,54 +193,54 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
     MOV r0, r2 /* return old value */
     MOV pc, lr

-/*******************************************************************************
-*
-* atomic_dec - atomically decrement a memory location
-*
-* This routine atomically decrements the value in <target>. The operation is
-* done using unsigned integer arithmetic. Various CPU architectures may impose
-* restrictions with regards to the alignment and cache attributes of the
-* atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_dec
-* (
-* atomic_t *target, /@ memory location to decrement @/
-* )
-*
-*/
+/**
+ *
+ * atomic_dec - atomically decrement a memory location
+ *
+ * This routine atomically decrements the value in <target>. The operation is
+ * done using unsigned integer arithmetic. Various CPU architectures may impose
+ * restrictions with regards to the alignment and cache attributes of the
+ * atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_dec
+ * (
+ * atomic_t *target, /@ memory location to decrement @/
+ * )
+ *
+ */

 SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_dec)
     MOV r1, #1
     /* fall through into atomic_sub */

-/*******************************************************************************
-*
-* atomic_sub - atomically subtract a value from a memory location
-*
-* This routine atomically subtracts <value> from the contents of <target>,
-* placing the result in <target>. The operation is done using signed integer
-* arithmetic. Various CPU architectures may impose restrictions with regards to
-* the alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_sub
-* (
-* atomic_t *target, /@ memory location to subtract from @/
-* atomic_val_t value /@ value to subtract @/
-* )
-*
-*/
+/**
+ *
+ * atomic_sub - atomically subtract a value from a memory location
+ *
+ * This routine atomically subtracts <value> from the contents of <target>,
+ * placing the result in <target>. The operation is done using signed integer
+ * arithmetic. Various CPU architectures may impose restrictions with regards to
+ * the alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_sub
+ * (
+ * atomic_t *target, /@ memory location to subtract from @/
+ * atomic_val_t value /@ value to subtract @/
+ * )
+ *
+ */

 SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub)

@@ -253,28 +253,28 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub)
     MOV r0, r2 /* return old value */
     MOV pc, lr

-/******************************************************************************
-*
-* atomic_nand - atomically perform a bitwise NAND on a memory location
-*
-* This routine atomically performs a bitwise NAND operation of the contents of
-* <target> and <value>, placing the result in <target>.
-* Various CPU architectures may impose restrictions with regards to the
-* alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_nand
-* (
-* atomic_t *target, /@ memory location to NAND @/
-* atomic_val_t value /@ NAND with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_nand - atomically perform a bitwise NAND on a memory location
+ *
+ * This routine atomically performs a bitwise NAND operation of the contents of
+ * <target> and <value>, placing the result in <target>.
+ * Various CPU architectures may impose restrictions with regards to the
+ * alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_nand
+ * (
+ * atomic_t *target, /@ memory location to NAND @/
+ * atomic_val_t value /@ NAND with this value @/
+ * )
+ *
+ */

 SECTION_FUNC(TEXT, atomic_nand)

@@ -288,28 +288,28 @@ SECTION_FUNC(TEXT, atomic_nand)
     MOV r0, r2 /* return old value */
     MOV pc, lr

-/******************************************************************************
-*
-* atomic_and - atomically perform a bitwise AND on a memory location
-*
-* This routine atomically performs a bitwise AND operation of the contents of
-* <target> and <value>, placing the result in <target>.
-* Various CPU architectures may impose restrictions with regards to the
-* alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_and
-* (
-* atomic_t *target, /@ memory location to AND @/
-* atomic_val_t value /@ AND with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_and - atomically perform a bitwise AND on a memory location
+ *
+ * This routine atomically performs a bitwise AND operation of the contents of
+ * <target> and <value>, placing the result in <target>.
+ * Various CPU architectures may impose restrictions with regards to the
+ * alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_and
+ * (
+ * atomic_t *target, /@ memory location to AND @/
+ * atomic_val_t value /@ AND with this value @/
+ * )
+ *
+ */

 SECTION_FUNC(TEXT, atomic_and)

@@ -322,28 +322,28 @@ SECTION_FUNC(TEXT, atomic_and)
     MOV r0, r2 /* return old value */
     MOV pc, lr

-/*******************************************************************************
-*
-* atomic_or - atomically perform a bitwise OR on memory location
-*
-* This routine atomically performs a bitwise OR operation of the contents of
-* <target> and <value>, placing the result in <target>.
-* Various CPU architectures may impose restrictions with regards to the
-* alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_or
-* (
-* atomic_t *target, /@ memory location to OR @/
-* atomic_val_t value /@ OR with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_or - atomically perform a bitwise OR on memory location
+ *
+ * This routine atomically performs a bitwise OR operation of the contents of
+ * <target> and <value>, placing the result in <target>.
+ * Various CPU architectures may impose restrictions with regards to the
+ * alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_or
+ * (
+ * atomic_t *target, /@ memory location to OR @/
+ * atomic_val_t value /@ OR with this value @/
+ * )
+ *
+ */

 SECTION_FUNC(TEXT, atomic_or)

@@ -356,28 +356,28 @@ SECTION_FUNC(TEXT, atomic_or)
     MOV r0, r2 /* return old value */
     MOV pc, lr

-/*******************************************************************************
-*
-* atomic_xor - atomically perform a bitwise XOR on a memory location
-*
-* This routine atomically performs a bitwise XOR operation of the contents of
-* <target> and <value>, placing the result in <target>.
-* Various CPU architectures may impose restrictions with regards to the
-* alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_xor
-* (
-* atomic_t *target, /@ memory location to XOR @/
-* atomic_val_t value /@ XOR with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_xor - atomically perform a bitwise XOR on a memory location
+ *
+ * This routine atomically performs a bitwise XOR operation of the contents of
+ * <target> and <value>, placing the result in <target>.
+ * Various CPU architectures may impose restrictions with regards to the
+ * alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_xor
+ * (
+ * atomic_t *target, /@ memory location to XOR @/
+ * atomic_val_t value /@ XOR with this value @/
+ * )
+ *
+ */

 SECTION_FUNC(TEXT, atomic_xor)

@@ -390,29 +390,29 @@ SECTION_FUNC(TEXT, atomic_xor)
     MOV r0, r2 /* return old value */
     MOV pc, lr

-/*******************************************************************************
-*
-* atomic_cas - atomically compare-and-swap the contents of a memory location
-*
-* This routine performs an atomic compare-and-swap. testing that the contents of
-* <target> contains <oldValue>, and if it does, setting the value of <target>
-* to <newValue>. Various CPU architectures may impose restrictions with regards
-* to the alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: 1 if the swap is actually executed, 0 otherwise.
-*
-* ERRNO: N/A
-*
-* int atomic_cas
-* (
-* atomic_t *target, /@ memory location to compare-and-swap @/
-* atomic_val_t oldValue, /@ compare to this value @/
-* atomic_val_t newValue, /@ swap with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_cas - atomically compare-and-swap the contents of a memory location
+ *
+ * This routine performs an atomic compare-and-swap. testing that the contents of
+ * <target> contains <oldValue>, and if it does, setting the value of <target>
+ * to <newValue>. Various CPU architectures may impose restrictions with regards
+ * to the alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: 1 if the swap is actually executed, 0 otherwise.
+ *
+ * ERRNO: N/A
+ *
+ * int atomic_cas
+ * (
+ * atomic_t *target, /@ memory location to compare-and-swap @/
+ * atomic_val_t oldValue, /@ compare to this value @/
+ * atomic_val_t newValue, /@ swap with this value @/
+ * )
+ *
+ */

 SECTION_FUNC(TEXT, atomic_cas)

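As a companion to the prototypes quoted in the comments above, a minimal C
usage sketch of this atomic API (illustrative only; it assumes the
declarations shown in the comments, and the variable names are made up):

    #include <atomic.h>        /* assumed header for atomic_t/atomic_val_t */

    static atomic_t flag;      /* hypothetical flag, initially 0 */

    void example(void)
    {
        /* returns the contents of the target *before* the operation */
        atomic_val_t old = atomic_inc(&flag);

        /* compare-and-swap: returns 1 if the swap executed, 0 otherwise */
        if (atomic_cas(&flag, 0, 1)) {
            /* we transitioned flag from 0 to 1 */
        }
        (void)old;
    }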
@@ -46,7 +46,7 @@ unlocked. This achieves two purposes:
 2. Zero Interrupt Latency (ZLI) is achievable via this by allowing certain
 interrupts to set their priority to 1, thus being allowed in when interrupts
 are locked for regular interrupts.
-*/
+ */

 #define _ASMLANGUAGE

@@ -59,18 +59,18 @@ _ASM_FILE_PROLOGUE
 GTEXT(irq_lock)
 GTEXT(irq_unlock)

-/*******************************************************************************
-*
-* irq_lock - lock interrupts
-*
-* Prevent exceptions of priority lower than to the two highest priorities from
-* interrupting the CPU.
-*
-* This function can be called recursively: it will return a key to return the
-* state of interrupt locking to the previous level.
-*
-* RETURNS: a key to return to the previous interrupt locking level
-*/
+/**
+ *
+ * irq_lock - lock interrupts
+ *
+ * Prevent exceptions of priority lower than to the two highest priorities from
+ * interrupting the CPU.
+ *
+ * This function can be called recursively: it will return a key to return the
+ * state of interrupt locking to the previous level.
+ *
+ * RETURNS: a key to return to the previous interrupt locking level
+ */

 SECTION_FUNC(TEXT,irq_lock)
     movs.n r1, #_EXC_IRQ_DEFAULT_PRIO
@@ -78,15 +78,15 @@ SECTION_FUNC(TEXT,irq_lock)
     msr BASEPRI, r1
     bx lr

-/*******************************************************************************
-*
-* irq_unlock - unlock interrupts
-*
-* Return the state of interrupt locking to a previous level, passed in via the
-* <key> parameter, obtained from a previous call to irq_lock().
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * irq_unlock - unlock interrupts
+ *
+ * Return the state of interrupt locking to a previous level, passed in via the
+ * <key> parameter, obtained from a previous call to irq_lock().
+ *
+ * RETURNS: N/A
+ */

 SECTION_FUNC(TEXT,irq_unlock)
     msr BASEPRI, r0
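The recursive contract documented for irq_lock() above implies the usual
nested save/restore pattern (a sketch, not code from this commit):

    unsigned int key = irq_lock();      /* key records the previous state */
    /* ... critical section ... */
    unsigned int key2 = irq_lock();     /* nesting is allowed */
    /* ... inner critical section ... */
    irq_unlock(key2);                   /* back to the outer lock level */
    irq_unlock(key);                    /* back to the original state */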
@@ -34,7 +34,7 @@
 DESCRIPTION
 Core nanokernel fiber related primitives for the ARM Cortex-M processor
 architecture.
-*/
+ */

 #include <nanokernel.h>
 #include <arch/cpu.h>
@@ -53,14 +53,14 @@ tNANO _nanokernel = {0};
 #endif

 #if defined(CONFIG_CONTEXT_MONITOR)
-/*******************************************************************************
-*
-* _context_monitor_init - initialize context monitoring support
-*
-* Currently only inserts the new context in the list of active contexts.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _context_monitor_init - initialize context monitoring support
+ *
+ * Currently only inserts the new context in the list of active contexts.
+ *
+ * RETURNS: N/A
+ */

 static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */
     )
@@ -81,26 +81,26 @@ static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */
 }
 #endif /* CONFIG_CONTEXT_MONITOR */

-/*******************************************************************************
-*
-* _NewContext - intialize a new context (thread) from its stack space
-*
-* The control structure (CCS) is put at the lower address of the stack. An
-* initial context, to be "restored" by __pendsv(), is put at the other end of
-* the stack, and thus reusable by the stack when not needed anymore.
-*
-* The initial context is an exception stack frame (ESF) since exiting the
-* PendSV exception will want to pop an ESF. Interestingly, even if the lsb of
-* an instruction address to jump to must always be set since the CPU always
-* runs in thumb mode, the ESF expects the real address of the instruction,
-* with the lsb *not* set (instructions are always aligned on 16 bit halfwords).
-* Since the compiler automatically sets the lsb of function addresses, we have
-* to unset it manually before storing it in the 'pc' field of the ESF.
-*
-* <options> is currently unused.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _NewContext - intialize a new context (thread) from its stack space
+ *
+ * The control structure (CCS) is put at the lower address of the stack. An
+ * initial context, to be "restored" by __pendsv(), is put at the other end of
+ * the stack, and thus reusable by the stack when not needed anymore.
+ *
+ * The initial context is an exception stack frame (ESF) since exiting the
+ * PendSV exception will want to pop an ESF. Interestingly, even if the lsb of
+ * an instruction address to jump to must always be set since the CPU always
+ * runs in thumb mode, the ESF expects the real address of the instruction,
+ * with the lsb *not* set (instructions are always aligned on 16 bit halfwords).
+ * Since the compiler automatically sets the lsb of function addresses, we have
+ * to unset it manually before storing it in the 'pc' field of the ESF.
+ *
+ * <options> is currently unused.
+ *
+ * RETURNS: N/A
+ */

 void _NewContext(
     char *pStackMem, /* aligned stack memory */
@@ -32,7 +32,7 @@

 /*
 DESCRIPTION
-*/
+ */

 #define _ASMLANGUAGE

@@ -56,19 +56,19 @@ GTEXT(nano_cpu_atomic_idle)

 #define _SCR_INIT_BITS _SCB_SCR_SEVONPEND

-/*******************************************************************************
-*
-* _CpuIdleInit - initialization of CPU idle
-*
-* Only called by nanoArchInit(). Sets SEVONPEND bit once for the system's
-* duration.
-*
-* RETURNS: N/A
-*
-* C function prototype:
-*
-* void _CpuIdleInit (void);
-*/
+/**
+ *
+ * _CpuIdleInit - initialization of CPU idle
+ *
+ * Only called by nanoArchInit(). Sets SEVONPEND bit once for the system's
+ * duration.
+ *
+ * RETURNS: N/A
+ *
+ * C function prototype:
+ *
+ * void _CpuIdleInit (void);
+ */

 SECTION_FUNC(TEXT, _CpuIdleInit)
     ldr r1, =_SCB_SCR
@@ -78,36 +78,36 @@ SECTION_FUNC(TEXT, _CpuIdleInit)

 #ifdef CONFIG_ADVANCED_POWER_MANAGEMENT

-/*******************************************************************************
-*
-* _NanoIdleValGet - get the kernel idle setting
-*
-* Returns the nanokernel idle setting, in ticks. Only called by __systick().
-*
-* RETURNS: the requested number of ticks for the kernel to be idle
-*
-* C function prototype:
-*
-* int32_t _NanoIdleValGet (void);
-*/
+/**
+ *
+ * _NanoIdleValGet - get the kernel idle setting
+ *
+ * Returns the nanokernel idle setting, in ticks. Only called by __systick().
+ *
+ * RETURNS: the requested number of ticks for the kernel to be idle
+ *
+ * C function prototype:
+ *
+ * int32_t _NanoIdleValGet (void);
+ */

 SECTION_FUNC(TEXT, _NanoIdleValGet)
     ldr r0, =_nanokernel
     ldr r0, [r0, #__tNANO_idle_OFFSET]
     bx lr

-/*******************************************************************************
-*
-* _NanoIdleValClear - clear the kernel idle setting
-*
-* Sets the nanokernel idle setting to 0. Only called by __systick().
-*
-* RETURNS: N/A
-*
-* C function prototype:
-*
-* void _NanoIdleValClear (void);
-*/
+/**
+ *
+ * _NanoIdleValClear - clear the kernel idle setting
+ *
+ * Sets the nanokernel idle setting to 0. Only called by __systick().
+ *
+ * RETURNS: N/A
+ *
+ * C function prototype:
+ *
+ * void _NanoIdleValClear (void);
+ */

 SECTION_FUNC(TEXT, _NanoIdleValClear)
     ldr r0, =_nanokernel
@@ -117,21 +117,21 @@ SECTION_FUNC(TEXT, _NanoIdleValClear)

 #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */

-/*******************************************************************************
-*
-* nano_cpu_idle - power save idle routine for ARM Cortex-M
-*
-* This function will be called by the nanokernel idle loop or possibly within
-* an implementation of _sys_power_save_idle in the microkernel when the
-* '_sys_power_save_flag' variable is non-zero. The ARM 'wfi' instruction
-* will be issued, causing a low-power consumption sleep mode.
-*
-* RETURNS: N/A
-*
-* C function prototype:
-*
-* void nano_cpu_idle (void);
-*/
+/**
+ *
+ * nano_cpu_idle - power save idle routine for ARM Cortex-M
+ *
+ * This function will be called by the nanokernel idle loop or possibly within
+ * an implementation of _sys_power_save_idle in the microkernel when the
+ * '_sys_power_save_flag' variable is non-zero. The ARM 'wfi' instruction
+ * will be issued, causing a low-power consumption sleep mode.
+ *
+ * RETURNS: N/A
+ *
+ * C function prototype:
+ *
+ * void nano_cpu_idle (void);
+ */

 SECTION_FUNC(TEXT, nano_cpu_idle)
     /* clear BASEPRI so wfi is awakened by incoming interrupts */
@@ -142,31 +142,31 @@ SECTION_FUNC(TEXT, nano_cpu_idle)

     bx lr

-/*******************************************************************************
-*
-* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
-*
-* This function is utilized by the nanokernel object "wait" APIs for task
-* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
-* and nano_task_fifo_get_wait().
-*
-* INTERNAL
-* The requirements for nano_cpu_atomic_idle() are as follows:
-* 1) The enablement of interrupts and entering a low-power mode needs to be
-* atomic, i.e. there should be no period of time where interrupts are
-* enabled before the processor enters a low-power mode. See the comments
-* in nano_task_lifo_get_wait(), for example, of the race condition that occurs
-* if this requirement is not met.
-*
-* 2) After waking up from the low-power mode, the interrupt lockout state
-* must be restored as indicated in the 'imask' input parameter.
-*
-* RETURNS: N/A
-*
-* C function prototype:
-*
-* void nano_cpu_atomic_idle (unsigned int imask);
-*/
+/**
+ *
+ * nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
+ *
+ * This function is utilized by the nanokernel object "wait" APIs for task
+ * contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
+ * and nano_task_fifo_get_wait().
+ *
+ * INTERNAL
+ * The requirements for nano_cpu_atomic_idle() are as follows:
+ * 1) The enablement of interrupts and entering a low-power mode needs to be
+ * atomic, i.e. there should be no period of time where interrupts are
+ * enabled before the processor enters a low-power mode. See the comments
+ * in nano_task_lifo_get_wait(), for example, of the race condition that occurs
+ * if this requirement is not met.
+ *
+ * 2) After waking up from the low-power mode, the interrupt lockout state
+ * must be restored as indicated in the 'imask' input parameter.
+ *
+ * RETURNS: N/A
+ *
+ * C function prototype:
+ *
+ * void nano_cpu_atomic_idle (unsigned int imask);
+ */

 SECTION_FUNC(TEXT, nano_cpu_atomic_idle)

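The atomicity requirement spelled out for nano_cpu_atomic_idle() corresponds
to a caller-side wait loop roughly like the following (an illustrative C
sketch; condition() is a hypothetical predicate, not an API from this tree):

    unsigned int imask = irq_lock();
    while (!condition()) {
        /* re-enable interrupts and sleep as one atomic step; on wakeup
         * the lockout state indicated by imask is restored */
        nano_cpu_atomic_idle(imask);
        imask = irq_lock();
    }
    irq_unlock(imask);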
@@ -36,7 +36,7 @@ DESCRIPTION
 Provides functions for performing kernel handling when exiting exceptions or
 interrupts that are installed directly in the vector table (i.e. that are not
 wrapped around by _isr_wrapper()).
-*/
+ */

 #define _ASMLANGUAGE

@@ -61,46 +61,46 @@ GDATA(_nanokernel)
 #endif
 #define _EXIT_EXC_IF_FIBER_NOT_READY _EXIT_EXC_IF_FIBER_PREEMPTED

-/*******************************************************************************
-*
-* _IntExit - kernel housekeeping when exiting interrupt handler installed
-* directly in vector table
-*
-* Kernel allows installing interrupt handlers (ISRs) directly into the vector
-* table to get the lowest interrupt latency possible. This allows the ISR to be
-* invoked directly without going through a software interrupt table. However,
-* upon exiting the ISR, some kernel work must still be performed, namely
-* possible context switching. While ISRs connected in the software interrupt
-* table do this automatically via a wrapper, ISRs connected directly in the
-* vector table must invoke _IntExit() as the *very last* action before
-* returning.
-*
-* e.g.
-*
-* void myISR(void)
-* {
-* printk("in %s\n", __FUNCTION__);
-* doStuff();
-* _IntExit();
-* }
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _IntExit - kernel housekeeping when exiting interrupt handler installed
+ * directly in vector table
+ *
+ * Kernel allows installing interrupt handlers (ISRs) directly into the vector
+ * table to get the lowest interrupt latency possible. This allows the ISR to be
+ * invoked directly without going through a software interrupt table. However,
+ * upon exiting the ISR, some kernel work must still be performed, namely
+ * possible context switching. While ISRs connected in the software interrupt
+ * table do this automatically via a wrapper, ISRs connected directly in the
+ * vector table must invoke _IntExit() as the *very last* action before
+ * returning.
+ *
+ * e.g.
+ *
+ * void myISR(void)
+ * {
+ * printk("in %s\n", __FUNCTION__);
+ * doStuff();
+ * _IntExit();
+ * }
+ *
+ * RETURNS: N/A
+ */

 SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)

 /* _IntExit falls through to _ExcExit (they are aliases of each other) */

-/*******************************************************************************
-*
-* _ExcExit - kernel housekeeping when exiting exception handler installed
-* directly in vector table
-*
-* See _IntExit().
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _ExcExit - kernel housekeeping when exiting exception handler installed
+ * directly in vector table
+ *
+ * See _IntExit().
+ *
+ * RETURNS: N/A
+ */

 SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)

@@ -33,7 +33,7 @@

 /*
 DESCRIPTION
 This module provides the _NanoFatalErrorHandler() routine for ARM Cortex-M.
-*/
+ */

 #include <toolchain.h>
 #include <sections.h>
@@ -62,23 +62,23 @@ const NANO_ESF _default_esf = {0xdeaddead, /* a1 */
     0xdeaddead, /* xpsr */
 };

-/*******************************************************************************
-*
-* _NanoFatalErrorHandler - nanokernel fatal error handler
-*
-* This routine is called when fatal error conditions are detected by software
-* and is responsible only for reporting the error. Once reported, it then
-* invokes the user provided routine _SysFatalErrorHandler() which is
-* responsible for implementing the error handling policy.
-*
-* The caller is expected to always provide a usable ESF. In the event that the
-* fatal error does not have a hardware generated ESF, the caller should either
-* create its own or use a pointer to the global default ESF <_default_esf>.
-*
-* RETURNS: This function does not return.
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _NanoFatalErrorHandler - nanokernel fatal error handler
+ *
+ * This routine is called when fatal error conditions are detected by software
+ * and is responsible only for reporting the error. Once reported, it then
+ * invokes the user provided routine _SysFatalErrorHandler() which is
+ * responsible for implementing the error handling policy.
+ *
+ * The caller is expected to always provide a usable ESF. In the event that the
+ * fatal error does not have a hardware generated ESF, the caller should either
+ * create its own or use a pointer to the global default ESF <_default_esf>.
+ *
+ * RETURNS: This function does not return.
+ *
+ * \NOMANUAL
+ */

 FUNC_NORETURN void _NanoFatalErrorHandler(
     unsigned int reason, /* reason that handler was called */
@@ -33,7 +33,7 @@

 /*
 DESCRIPTION
 Common fault handler for ARM Cortex-M processors.
-*/
+ */

 #include <toolchain.h>
 #include <sections.h>
@@ -59,24 +59,24 @@ Common fault handler for ARM Cortex-M processors.
 #endif

 #if (CONFIG_FAULT_DUMP == 1)
-/*******************************************************************************
-*
-* _FaultDump - dump information regarding fault (FAULT_DUMP == 1)
-*
-* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
-* (short form).
-*
-* eg. (precise bus error escalated to hard fault):
-*
-* Fault! EXC #3, Thread: 0x200000dc, instr: 0x000011d3
-* HARD FAULT: Escalation (see below)!
-* MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
-* BFAR: 0xff001234
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _FaultDump - dump information regarding fault (FAULT_DUMP == 1)
+ *
+ * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
+ * (short form).
+ *
+ * eg. (precise bus error escalated to hard fault):
+ *
+ * Fault! EXC #3, Thread: 0x200000dc, instr: 0x000011d3
+ * HARD FAULT: Escalation (see below)!
+ * MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
+ * BFAR: 0xff001234
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */

 void _FaultDump(const NANO_ESF *esf, int fault)
 {
@@ -118,16 +118,16 @@ void _FaultDump(const NANO_ESF *esf, int fault)
 #endif

 #if (CONFIG_FAULT_DUMP == 2)
-/*******************************************************************************
-*
-* _FaultContextShow - dump context information
-*
-* See _FaultDump() for example.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _FaultContextShow - dump context information
+ *
+ * See _FaultDump() for example.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */

 static void _FaultContextShow(const NANO_ESF *esf)
 {
@@ -137,16 +137,16 @@ static void _FaultContextShow(const NANO_ESF *esf)
     esf->pc);
 }

-/*******************************************************************************
-*
-* _MpuFault - dump MPU fault information
-*
-* See _FaultDump() for example.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _MpuFault - dump MPU fault information
+ *
+ * See _FaultDump() for example.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */

 static void _MpuFault(const NANO_ESF *esf,
     int fromHardFault)
@@ -172,16 +172,16 @@ static void _MpuFault(const NANO_ESF *esf,
     }
 }

-/*******************************************************************************
-*
-* _BusFault - dump bus fault information
-*
-* See _FaultDump() for example.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _BusFault - dump bus fault information
+ *
+ * See _FaultDump() for example.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */

 static void _BusFault(const NANO_ESF *esf,
     int fromHardFault)
@@ -213,16 +213,16 @@ static void _BusFault(const NANO_ESF *esf,
     }
 }

-/*******************************************************************************
-*
-* _UsageFault - dump usage fault information
-*
-* See _FaultDump() for example.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _UsageFault - dump usage fault information
+ *
+ * See _FaultDump() for example.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */

 static void _UsageFault(const NANO_ESF *esf)
 {
@@ -253,16 +253,16 @@ static void _UsageFault(const NANO_ESF *esf)
     _ScbUsageFaultAllFaultsReset();
 }

-/*******************************************************************************
-*
-* _HardFault - dump hard fault information
-*
-* See _FaultDump() for example.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _HardFault - dump hard fault information
+ *
+ * See _FaultDump() for example.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */

 static void _HardFault(const NANO_ESF *esf)
 {
@@ -281,32 +281,32 @@ static void _HardFault(const NANO_ESF *esf)
     }
 }

-/*******************************************************************************
-*
-* _DebugMonitor - dump debug monitor exception information
-*
-* See _FaultDump() for example.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _DebugMonitor - dump debug monitor exception information
+ *
+ * See _FaultDump() for example.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */

 static void _DebugMonitor(const NANO_ESF *esf)
 {
     PR_EXC("***** Debug monitor exception (not implemented) *****\n");
 }

-/*******************************************************************************
-*
-* _ReservedException - dump reserved exception information
-*
-* See _FaultDump() for example.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _ReservedException - dump reserved exception information
+ *
+ * See _FaultDump() for example.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */

 static void _ReservedException(const NANO_ESF *esf,
     int fault)
@@ -316,27 +316,27 @@ static void _ReservedException(const NANO_ESF *esf,
     fault - 16);
 }

-/*******************************************************************************
-*
-* _FaultDump - dump information regarding fault (FAULT_DUMP == 2)
-*
-* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
-* (long form).
-*
-* eg. (precise bus error escalated to hard fault):
-*
-* Executing context ID (thread): 0x200000dc
-* Faulting instruction address: 0x000011d3
-* ***** HARD FAULT *****
-* Fault escalation (see below)
-* ***** BUS FAULT *****
-* Precise data bus error
-* Address: 0xff001234
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _FaultDump - dump information regarding fault (FAULT_DUMP == 2)
+ *
+ * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
+ * (long form).
+ *
+ * eg. (precise bus error escalated to hard fault):
+ *
+ * Executing context ID (thread): 0x200000dc
+ * Faulting instruction address: 0x000011d3
+ * ***** HARD FAULT *****
+ * Fault escalation (see below)
+ * ***** BUS FAULT *****
+ * Precise data bus error
+ * Address: 0xff001234
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */

 static void _FaultDump(const NANO_ESF *esf, int fault)
 {
@@ -363,23 +363,23 @@ static void _FaultDump(const NANO_ESF *esf, int fault)
 }
 #endif /* FAULT_DUMP == 2 */

-/*******************************************************************************
-*
-* _Fault - fault handler
-*
-* This routine is called when fatal error conditions are detected by hardware
-* and is responsible only for reporting the error. Once reported, it then
-* invokes the user provided routine _SysFatalErrorHandler() which is
-* responsible for implementing the error handling policy.
-*
-* Since the ESF can be either on the MSP or PSP depending if an exception or
-* interrupt was already being handled, it is passed a pointer to both and has
-* to find out on which the ESP is present.
-*
-* RETURNS: This function does not return.
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _Fault - fault handler
+ *
+ * This routine is called when fatal error conditions are detected by hardware
+ * and is responsible only for reporting the error. Once reported, it then
+ * invokes the user provided routine _SysFatalErrorHandler() which is
+ * responsible for implementing the error handling policy.
+ *
+ * Since the ESF can be either on the MSP or PSP depending if an exception or
+ * interrupt was already being handled, it is passed a pointer to both and has
+ * to find out on which the ESP is present.
+ *
+ * RETURNS: This function does not return.
+ *
+ * \NOMANUAL
+ */

 void _Fault(
     const NANO_ESF *msp, /* pointer to potential ESF on MSP */
@@ -394,16 +394,16 @@ void _Fault(
     _SysFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
 }

-/*******************************************************************************
-*
-* _FaultInit - initialization of fault handling
-*
-* Turns on the desired hardware faults.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _FaultInit - initialization of fault handling
+ *
+ * Turns on the desired hardware faults.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */

 void _FaultInit(void)
 {
@@ -33,7 +33,7 @@

 /*
 DESCRIPTION
 Fault handlers for ARM Cortex-M processors.
-*/
+ */

 #define _ASMLANGUAGE

@@ -52,28 +52,28 @@ GTEXT(__usage_fault)
 GTEXT(__debug_monitor)
 GTEXT(__reserved)

-/*******************************************************************************
-*
-* __fault - fault handler installed in the fault and reserved vectors
-*
-* Entry point for the hard fault, MPU fault, bus fault, usage fault, debug
-* monitor and reserved exceptions.
-*
-* Save the values of the MSP and PSP in r0 and r1 respectively, so the first
-* and second parameters to the _Fault() C function that will handle the rest.
-* This has to be done because at this point we do not know if the fault
-* happened while handling an exception or not, and thus the ESF could be on
-* either stack. _Fault() will find out where the ESF resides.
-*
-* Provides these symbols:
-*
-* __hard_fault
-* __mpu_fault
-* __bus_fault
-* __usage_fault
-* __debug_monitor
-* __reserved
-*/
+/**
+ *
+ * __fault - fault handler installed in the fault and reserved vectors
+ *
+ * Entry point for the hard fault, MPU fault, bus fault, usage fault, debug
+ * monitor and reserved exceptions.
+ *
+ * Save the values of the MSP and PSP in r0 and r1 respectively, so the first
+ * and second parameters to the _Fault() C function that will handle the rest.
+ * This has to be done because at this point we do not know if the fault
+ * happened while handling an exception or not, and thus the ESF could be on
+ * either stack. _Fault() will find out where the ESF resides.
+ *
+ * Provides these symbols:
+ *
+ * __hard_fault
+ * __mpu_fault
+ * __bus_fault
+ * __usage_fault
+ * __debug_monitor
+ * __reserved
+ */

 SECTION_SUBSEC_FUNC(TEXT,__fault,__hard_fault)
 SECTION_SUBSEC_FUNC(TEXT,__fault,__mpu_fault)
@@ -34,7 +34,7 @@
 DESCRIPTION
 This library implements find_last_set() and find_first_set() which returns the
 most and least significant bit set respectively.
-*/
+ */

 #define _ASMLANGUAGE

@@ -48,17 +48,17 @@ _ASM_FILE_PROLOGUE
 GTEXT(find_last_set)
 GTEXT(find_first_set)

-/*******************************************************************************
-*
-* find_last_set - find first set bit (searching from the most significant bit)
-*
-* This routine finds the first bit set in the argument passed it and
-* returns the index of that bit. Bits are numbered starting
-* at 1 from the least significant bit. A return value of zero indicates that
-* the value passed is zero.
-*
-* RETURNS: most significant bit set
-*/
+/**
+ *
+ * find_last_set - find first set bit (searching from the most significant bit)
+ *
+ * This routine finds the first bit set in the argument passed it and
+ * returns the index of that bit. Bits are numbered starting
+ * at 1 from the least significant bit. A return value of zero indicates that
+ * the value passed is zero.
+ *
+ * RETURNS: most significant bit set
+ */

 SECTION_FUNC(TEXT, find_last_set)

@@ -69,17 +69,17 @@ SECTION_FUNC(TEXT, find_last_set)

     mov pc, lr

-/*******************************************************************************
-*
-* find_first_set - find first set bit (searching from the least significant bit)
-*
-* This routine finds the first bit set in the argument passed it and
-* returns the index of that bit. Bits are numbered starting
-* at 1 from the least significant bit. A return value of zero indicates that
-* the value passed is zero.
-*
-* RETURNS: least significant bit set
-*/
+/**
+ *
+ * find_first_set - find first set bit (searching from the least significant bit)
+ *
+ * This routine finds the first bit set in the argument passed it and
+ * returns the index of that bit. Bits are numbered starting
+ * at 1 from the least significant bit. A return value of zero indicates that
+ * the value passed is zero.
+ *
+ * RETURNS: least significant bit set
+ */

 SECTION_FUNC(TEXT, find_first_set)

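To make the 1-based bit numbering described above concrete (illustrative
values derived from the comments, not measured output):

    find_first_set(0x00000000) == 0    /* zero input: no bit set */
    find_first_set(0x00000001) == 1    /* least significant bit is bit 1 */
    find_first_set(0x00000008) == 4
    find_last_set(0x80000000)  == 32   /* most significant bit is bit 32 */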
@@ -38,7 +38,7 @@ point returns or when it aborts itself, the CPU is in thread mode and must
 call _Swap() (which triggers a service call), but when in handler mode, the
 CPU must exit handler mode to cause the context switch, and thus must queue
 the PendSV exception.
-*/
+ */

 #ifdef CONFIG_MICROKERNEL
 #include <microkernel.h>
@@ -51,18 +51,18 @@ the PendSV exception.
 #include <nanokernel.h>
 #include <arch/cpu.h>

-/*******************************************************************************
-*
-* fiber_abort - abort the currently executing fiber
-*
-* Possible reasons for a fiber aborting:
-*
-* - the fiber explicitly aborts itself by calling this routine
-* - the fiber implicitly aborts by returning from its entry point
-* - the fiber encounters a fatal exception
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * fiber_abort - abort the currently executing fiber
+ *
+ * Possible reasons for a fiber aborting:
+ *
+ * - the fiber explicitly aborts itself by calling this routine
+ * - the fiber implicitly aborts by returning from its entry point
+ * - the fiber encounters a fatal exception
+ *
+ * RETURNS: N/A
+ */

 void fiber_abort(void)
 {
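The abort paths enumerated in the fiber_abort() comment, as a minimal C
sketch (illustrative; the entry-point name my_entry and its signature are
assumptions, not taken from this commit):

    static void my_entry(int arg1, int arg2)
    {
        if (arg1 < 0) {
            fiber_abort();  /* 1: the fiber explicitly aborts itself */
        }
        /* 2: returning from the entry point aborts implicitly */
    }
    /* 3: a fatal exception in the fiber also ends in an abort */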
@@ -39,7 +39,7 @@ that we are running in an exception.

 Upon exception exit, it must be recorded that the task is not in an exception
 anymore.
-*/
+ */

 #define _ASMLANGUAGE

@@ -51,27 +51,27 @@ anymore.

 _ASM_FILE_PROLOGUE

-/*******************************************************************************
-*
-* _GdbStubExcEntry - exception entry extra work when GDB_INFO is enabled
-*
-* During normal system operation, the callee-saved registers are saved lazily
-* only when a context switch is required. To allow looking at the current
-* threads registers while debugging an exception/interrupt, they must be saved
-* upon entry since the handler could be using them: thus, looking at the CPU
-* registers would show the current system state and not the current *thread*'s
-* state.
-*
-* Also, record the fact that the thread is currently interrupted so that VQEMU
-* looks into the CCS and not the CPU registers to obtain the current thread's
-* register values.
-*
-* NOTE:
-* - must be called with interrupts locked
-* - cannot use r0 without saving it first
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _GdbStubExcEntry - exception entry extra work when GDB_INFO is enabled
+ *
+ * During normal system operation, the callee-saved registers are saved lazily
+ * only when a context switch is required. To allow looking at the current
+ * threads registers while debugging an exception/interrupt, they must be saved
+ * upon entry since the handler could be using them: thus, looking at the CPU
+ * registers would show the current system state and not the current *thread*'s
+ * state.
+ *
+ * Also, record the fact that the thread is currently interrupted so that VQEMU
+ * looks into the CCS and not the CPU registers to obtain the current thread's
+ * register values.
+ *
+ * NOTE:
+ * - must be called with interrupts locked
+ * - cannot use r0 without saving it first
+ *
+ * RETURNS: N/A
+ */

 SECTION_FUNC(TEXT, _GdbStubExcEntry)

@ -95,20 +95,20 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
	bx lr

/*******************************************************************************
*
* _GdbStubExcExit - exception exit extra clean up when GDB_INFO is enabled
*
* Record the fact that the thread is not interrupted anymore so that VQEMU
* looks at the CPU registers and not into the CCS to obtain the current
* thread's register values. Only do this if this is not a nested exception.
*
* NOTE:
* - must be called with interrupts locked
* - cannot use r0 without saving it first
*
* RETURNS: N/A
*/
/**
 *
 * _GdbStubExcExit - exception exit extra clean up when GDB_INFO is enabled
 *
 * Record the fact that the thread is not interrupted anymore so that VQEMU
 * looks at the CPU registers and not into the CCS to obtain the current
 * thread's register values. Only do this if this is not a nested exception.
 *
 * NOTE:
 * - must be called with interrupts locked
 * - cannot use r0 without saving it first
 *
 * RETURNS: N/A
 */

SECTION_FUNC(TEXT, _GdbStubExcExit)
@@ -129,24 +129,24 @@ SECTION_FUNC(TEXT, _GdbStubExcExit)
	bx lr

/*******************************************************************************
*
* _irq_vector_table_entry_with_gdb_stub - stub for ISRs installed directly in
* vector table
*
* The kernel on Cortex-M3/4 can be configured so that ISRs
* are installed directly in the vector table for maximum efficiency.
*
* When OS-awareness is enabled, a stub must be inserted to invoke
* _GdbStubExcEntry() before the user ISR runs, to save the current task's
* registers. This stub thus gets inserted in the vector table instead of the
* user's ISR. The user's IRQ vector table gets pushed after the vector table
* automatically by the linker script: this is all transparent to the user.
* This stub must also act as a demuxer that finds the running exception and
* invokes the user's real ISR.
*
* RETURNS: N/A
*/
/**
 *
 * _irq_vector_table_entry_with_gdb_stub - stub for ISRs installed directly in
 * vector table
 *
 * The kernel on Cortex-M3/4 can be configured so that ISRs
 * are installed directly in the vector table for maximum efficiency.
 *
 * When OS-awareness is enabled, a stub must be inserted to invoke
 * _GdbStubExcEntry() before the user ISR runs, to save the current task's
 * registers. This stub thus gets inserted in the vector table instead of the
 * user's ISR. The user's IRQ vector table gets pushed after the vector table
 * automatically by the linker script: this is all transparent to the user.
 * This stub must also act as a demuxer that finds the running exception and
 * invokes the user's real ISR.
 *
 * RETURNS: N/A
 */

SECTION_FUNC(TEXT, _irq_vector_table_entry_with_gdb_stub)
@@ -35,7 +35,7 @@ DESCRIPTION
When GDB is enabled, the static IRQ vector table needs to install the
_irq_vector_table_entry_with_gdb_stub stub to do some work before calling the
user-installed ISRs.
*/
 */

#include <toolchain.h>
#include <sections.h>
@@ -38,23 +38,23 @@ point returns or when it aborts itself, the CPU is in thread mode and must
call _Swap() (which triggers a service call), but when in handler mode, the
CPU must exit handler mode to cause the context switch, and thus must queue
the PendSV exception.
*/
 */

#include <toolchain.h>
#include <sections.h>
#include <nanokernel.h>
#include <arch/cpu.h>
/*******************************************************************************
*
* _IntLibInit - initialize interrupts
*
* Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO and
* not 0, their value coming out of reset. This ensures that interrupt locking
* via BASEPRI works as expected.
*
* RETURNS: N/A
*/
/**
 *
 * _IntLibInit - initialize interrupts
 *
 * Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO and
 * not 0, their value coming out of reset. This ensures that interrupt locking
 * via BASEPRI works as expected.
 *
 * RETURNS: N/A
 */

void _IntLibInit(void)
{
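
A first-approximation sketch of the initialization just described. The loop
bound CONFIG_NUM_IRQS and the raw NVIC priority setter are assumed stand-ins
for the kernel's internal symbols, not the real implementation.

	#define CONFIG_NUM_IRQS 64 /* placeholder line count */

	extern void nvic_irq_prio_set(unsigned int irq, unsigned int prio); /* assumed */

	static void int_lib_init_sketch(void)
	{
		/* move every line off priority 0 (the reset value) so that
		 * BASEPRI-based interrupt locking can mask all of them;
		 * _EXC_IRQ_DEFAULT_PRIO comes from the kernel's headers */
		for (unsigned int irq = 0; irq < CONFIG_NUM_IRQS; irq++) {
			nvic_irq_prio_set(irq, _EXC_IRQ_DEFAULT_PRIO);
		}
	}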
@@ -35,7 +35,7 @@ DESCRIPTION
Interrupt management: enabling/disabling and dynamic ISR connecting/replacing.
SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime.
*/
 */

#include <nanokernel.h>
#include <arch/cpu.h>
@@ -46,18 +46,18 @@ SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime.
extern void __reserved(void);

/*******************************************************************************
*
* irq_handler_set - replace an interrupt handler by another
*
* An interrupt's ISR can be replaced at runtime. Care must be taken that the
* interrupt is disabled before doing this.
*
* This routine will hang if <old> is not found in the table and ASSERT_ON is
* enabled.
*
* RETURNS: N/A
*/
/**
 *
 * irq_handler_set - replace an interrupt handler by another
 *
 * An interrupt's ISR can be replaced at runtime. Care must be taken that the
 * interrupt is disabled before doing this.
 *
 * This routine will hang if <old> is not found in the table and ASSERT_ON is
 * enabled.
 *
 * RETURNS: N/A
 */

void irq_handler_set(unsigned int irq,
			void (*old)(void *arg),
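
A usage sketch. The declaration above is cut off after <old>, so the trailing
parameters (the new handler and its argument) are an assumption here, as are
the handler names.

	extern void old_uart_isr(void *arg); /* assumed existing handler */
	extern void new_uart_isr(void *arg); /* assumed replacement */

	void uart_swap_isr(unsigned int uart_irq)
	{
		irq_disable(uart_irq); /* the interrupt must be disabled first */
		irq_handler_set(uart_irq, old_uart_isr, new_uart_isr, NULL);
		irq_enable(uart_irq);
	}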
@@ -76,16 +76,16 @@ void irq_handler_set(unsigned int irq,
	irq_unlock_inline(key);
}

/*******************************************************************************
*
* irq_enable - enable an interrupt line
*
* Clear possible pending interrupts on the line, and enable the interrupt
* line. After this call, the CPU will receive interrupts for the specified
* <irq>.
*
* RETURNS: N/A
*/
/**
 *
 * irq_enable - enable an interrupt line
 *
 * Clear possible pending interrupts on the line, and enable the interrupt
 * line. After this call, the CPU will receive interrupts for the specified
 * <irq>.
 *
 * RETURNS: N/A
 */

void irq_enable(unsigned int irq)
{
@@ -94,35 +94,35 @@ void irq_enable(unsigned int irq)
	_NvicIrqEnable(irq);
}

/*******************************************************************************
*
* irq_disable - disable an interrupt line
*
* Disable an interrupt line. After this call, the CPU will stop receiving
* interrupts for the specified <irq>.
*
* RETURNS: N/A
*/
/**
 *
 * irq_disable - disable an interrupt line
 *
 * Disable an interrupt line. After this call, the CPU will stop receiving
 * interrupts for the specified <irq>.
 *
 * RETURNS: N/A
 */

void irq_disable(unsigned int irq)
{
	_NvicIrqDisable(irq);
}
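
A short sketch pairing the two calls above; UART_IRQ is a placeholder line
number, not a real kernel symbol.

	#define UART_IRQ 5 /* illustrative IRQ line */

	void uart_rx_pause(void)
	{
		irq_disable(UART_IRQ); /* CPU stops taking UART interrupts */
	}

	void uart_rx_resume(void)
	{
		irq_enable(UART_IRQ); /* pending state cleared, line re-enabled */
	}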
/*******************************************************************************
*
* irq_priority_set - set an interrupt's priority
*
* Valid values are from 1 to 255. Interrupts of priority 1 are not masked when
* interrupts are locked system-wide, so care must be taken when using them.
* ISRs installed on priority 1 interrupts cannot make kernel calls.
*
* Priority 0 is reserved for kernel usage and cannot be used.
*
* The priority is verified if ASSERT_ON is enabled.
*
* RETURNS: N/A
*/
/**
 *
 * irq_priority_set - set an interrupt's priority
 *
 * Valid values are from 1 to 255. Interrupts of priority 1 are not masked when
 * interrupts are locked system-wide, so care must be taken when using them.
 * ISRs installed on priority 1 interrupts cannot make kernel calls.
 *
 * Priority 0 is reserved for kernel usage and cannot be used.
 *
 * The priority is verified if ASSERT_ON is enabled.
 *
 * RETURNS: N/A
 */

void irq_priority_set(unsigned int irq,
			unsigned int prio)
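
A sketch of the constraints above: 0 is off limits, 1 is unmaskable and
restricted, 2 and up behave normally. UART_IRQ is the placeholder from the
earlier sketch.

	void uart_set_prio(void)
	{
		/* 2 is the lowest priority that is both maskable by interrupt
		 * locking and allowed to make kernel calls, unlike priority 1 */
		irq_priority_set(UART_IRQ, 2);
	}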
@@ -131,17 +131,17 @@ void irq_priority_set(unsigned int irq,
	_NvicIrqPrioSet(irq, _EXC_PRIO(prio));
}

/*******************************************************************************
*
* _irq_spurious - spurious interrupt handler
*
* Installed in all dynamic interrupt slots at boot time. Throws an error if
* called.
*
* See __reserved().
*
* RETURNS: N/A
*/
/**
 *
 * _irq_spurious - spurious interrupt handler
 *
 * Installed in all dynamic interrupt slots at boot time. Throws an error if
 * called.
 *
 * See __reserved().
 *
 * RETURNS: N/A
 */

void _irq_spurious(void *unused)
{
@@ -149,18 +149,18 @@ void _irq_spurious(void *unused)
	__reserved();
}

/*******************************************************************************
*
* irq_connect - connect an ISR to an interrupt line
*
* <isr> is connected to interrupt line <irq> (exception #<irq>+16). No prior
* ISR can have been connected on <irq> interrupt line since the system booted.
*
* This routine will hang if another ISR was connected for interrupt line <irq>
* and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently.
*
* RETURNS: the interrupt line number
*/
/**
 *
 * irq_connect - connect an ISR to an interrupt line
 *
 * <isr> is connected to interrupt line <irq> (exception #<irq>+16). No prior
 * ISR can have been connected on <irq> interrupt line since the system booted.
 *
 * This routine will hang if another ISR was connected for interrupt line <irq>
 * and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently.
 *
 * RETURNS: the interrupt line number
 */

int irq_connect(unsigned int irq,
		unsigned int prio,
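
A connect-then-enable sketch. The declaration above is truncated after <prio>;
the ISR pointer and its argument are assumed trailing parameters, and uart_isr
is a placeholder.

	extern void uart_isr(void *arg); /* assumed user ISR */

	void uart_irq_init(void)
	{
		int line = irq_connect(UART_IRQ, 2, uart_isr, NULL);

		irq_enable(line); /* irq_connect returns the interrupt line */
	}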
@@ -172,16 +172,16 @@ int irq_connect(unsigned int irq,
	return irq;
}

/*******************************************************************************
*
* irq_disconnect - disconnect an ISR from an interrupt line
*
* Interrupt line <irq> (exception #<irq>+16) is disconnected from its ISR and
* the latter is replaced by _irq_spurious(). irq_disable() should have
* been called before invoking this routine.
*
* RETURNS: N/A
*/
/**
 *
 * irq_disconnect - disconnect an ISR from an interrupt line
 *
 * Interrupt line <irq> (exception #<irq>+16) is disconnected from its ISR and
 * the latter is replaced by _irq_spurious(). irq_disable() should have
 * been called before invoking this routine.
 *
 * RETURNS: N/A
 */

void irq_disconnect(unsigned int irq)
{
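
The teardown counterpart to the sketches above, following the documented
disable-before-disconnect order.

	void uart_irq_shutdown(void)
	{
		irq_disable(UART_IRQ);    /* documented prerequisite */
		irq_disconnect(UART_IRQ); /* slot reverts to _irq_spurious() */
	}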
@@ -35,7 +35,7 @@ DESCRIPTION
Wrapper installed in vector table for handling dynamic interrupts that accept
a parameter.
*/
 */

#define _ASMLANGUAGE
@@ -53,19 +53,19 @@ GDATA(_sw_isr_table)
GTEXT(_isr_wrapper)
GTEXT(_IntExit)

/*******************************************************************************
*
* _isr_wrapper - wrapper around ISRs when inserted in software ISR table
*
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table using
* the running interrupt number as the index, and invokes the registered ISR
* with its corresponding argument. When returning from the ISR, it determines
* if a context switch needs to happen (see documentation for __pendsv()) and
* pends the PendSV exception if so: the latter will perform the context switch
* itself.
*
* RETURNS: N/A
*/
/**
 *
 * _isr_wrapper - wrapper around ISRs when inserted in software ISR table
 *
 * When inserted in the vector table, _isr_wrapper() demuxes the ISR table using
 * the running interrupt number as the index, and invokes the registered ISR
 * with its corresponding argument. When returning from the ISR, it determines
 * if a context switch needs to happen (see documentation for __pendsv()) and
 * pends the PendSV exception if so: the latter will perform the context switch
 * itself.
 *
 * RETURNS: N/A
 */
SECTION_FUNC(TEXT, _isr_wrapper)

	_GDB_STUB_EXC_ENTRY
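
A C model of the demux step just described. The entry layout and table name
mirror the software ISR table as documented here, but the function itself is
illustrative, not the real assembly.

	#include <stdint.h>

	struct isr_entry {
		void *arg;
		void (*isr)(void *arg);
	};

	extern struct isr_entry _sw_isr_table[]; /* assumed layout */

	static void isr_demux_sketch(uint32_t exc_num)
	{
		uint32_t irq = exc_num - 16; /* exception # maps to IRQ line */

		_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
		/* the real wrapper then pends PendSV on the way out if a
		 * context switch is needed */
	}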
@@ -46,7 +46,7 @@ Typically, only those members that are accessed by assembly language routines
are defined; however, it doesn't hurt to define all fields for the sake of
completeness.

*/
 */

#include <gen_offset.h>
#include <nano_private.h>
@@ -34,7 +34,7 @@
DESCRIPTION
This module implements the routines necessary for thread context switching
on ARM Cortex-M3/M4 CPUs.
*/
 */

#define _ASMLANGUAGE
@@ -51,23 +51,23 @@ GTEXT(__pendsv)
GDATA(_nanokernel)

/*******************************************************************************
*
* __pendsv - PendSV exception handler, handling context switches
*
* The PendSV exception is the only context in the system that can perform
* context switching. When an execution context finds out it has to switch
* contexts, it pends the PendSV exception.
*
* When PendSV is pended, the decision that a context switch must happen has
* already been taken. In other words, when __pendsv() runs, we *know* we have
* to swap *something*.
*
* The scheduling algorithm is simple: schedule the head of the runnable FIBER
* context list, which is represented by _nanokernel.fiber. If there are no
* runnable FIBER contexts, then schedule the TASK context represented by
* _nanokernel.task. The _nanokernel.task field will never be NULL.
*/
/**
 *
 * __pendsv - PendSV exception handler, handling context switches
 *
 * The PendSV exception is the only context in the system that can perform
 * context switching. When an execution context finds out it has to switch
 * contexts, it pends the PendSV exception.
 *
 * When PendSV is pended, the decision that a context switch must happen has
 * already been taken. In other words, when __pendsv() runs, we *know* we have
 * to swap *something*.
 *
 * The scheduling algorithm is simple: schedule the head of the runnable FIBER
 * context list, which is represented by _nanokernel.fiber. If there are no
 * runnable FIBER contexts, then schedule the TASK context represented by
 * _nanokernel.task. The _nanokernel.task field will never be NULL.
 */

SECTION_FUNC(TEXT, __pendsv)
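
The scheduling rule above fits in a few lines of C. The structure below is a
simplified stand-in for the kernel's own tNANO/tCCS types, kept only to make
the decision explicit.

	#include <stddef.h>

	struct ccs; /* opaque context control structure */

	struct nano_sketch {
		struct ccs *fiber; /* head of runnable fiber list, or NULL */
		struct ccs *task;  /* background task, never NULL */
	};

	static struct ccs *pick_next_context(const struct nano_sketch *n)
	{
		/* the head fiber wins if one is runnable, else the task runs */
		return (n->fiber != NULL) ? n->fiber : n->task;
	}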
@@ -146,15 +146,15 @@ SECTION_FUNC(TEXT, __pendsv)
	/* exc return */
	bx lr

/*******************************************************************************
*
* __svc - service call handler
*
* The service call (svc) is only used in _Swap() to enter handler mode so we
* can go through the PendSV exception to perform a context switch.
*
* RETURNS: N/A
*/
/**
 *
 * __svc - service call handler
 *
 * The service call (svc) is only used in _Swap() to enter handler mode so we
 * can go through the PendSV exception to perform a context switch.
 *
 * RETURNS: N/A
 */

SECTION_FUNC(TEXT, __svc)
@@ -178,38 +178,38 @@ SECTION_FUNC(TEXT, __svc)
	/* handler mode exit, to PendSV */
	bx lr

/*******************************************************************************
*
* _Swap - initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
* a cooperative context switch. Prior to invoking _Swap(), the caller
* disables interrupts via irq_lock() and the return 'key' is passed as a
* parameter to _Swap(). The 'key' actually represents the BASEPRI register
* prior to disabling interrupts via the BASEPRI mechanism.
*
* _Swap() itself does not do much.
*
* It simply stores the intlock key (the BASEPRI value) parameter into
* current->basepri, and then triggers a service call exception (svc) to set up
* the PendSV exception, which does the heavy lifting of context switching.
/**
 *
 * _Swap - initiate a cooperative context switch
 *
 * The _Swap() routine is invoked by various nanokernel services to effect
 * a cooperative context switch. Prior to invoking _Swap(), the caller
 * disables interrupts via irq_lock() and the return 'key' is passed as a
 * parameter to _Swap(). The 'key' actually represents the BASEPRI register
 * prior to disabling interrupts via the BASEPRI mechanism.
 *
 * _Swap() itself does not do much.
 *
 * It simply stores the intlock key (the BASEPRI value) parameter into
 * current->basepri, and then triggers a service call exception (svc) to set up
 * the PendSV exception, which does the heavy lifting of context switching.
* This is the only place we have to save BASEPRI since the other paths to
* __pendsv all come from handling an interrupt, which means we know the
* interrupts were not locked: in that case the BASEPRI value is 0.
*
* Given that _Swap() is called to effect a cooperative context switch,
* only the caller-saved integer registers need to be saved in the tCCS of the
* outgoing context. This is all performed by the hardware, which stores it in
* its exception stack frame, created when handling the svc exception.
*
* RETURNS: may contain a return value set up by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int basepri);
*
*/
 * This is the only place we have to save BASEPRI since the other paths to
 * __pendsv all come from handling an interrupt, which means we know the
 * interrupts were not locked: in that case the BASEPRI value is 0.
 *
 * Given that _Swap() is called to effect a cooperative context switch,
 * only the caller-saved integer registers need to be saved in the tCCS of the
 * outgoing context. This is all performed by the hardware, which stores it in
 * its exception stack frame, created when handling the svc exception.
 *
 * RETURNS: may contain a return value set up by a call to fiberRtnValueSet()
 *
 * C function prototype:
 *
 * unsigned int _Swap (unsigned int basepri);
 *
 */

SECTION_FUNC(TEXT, _Swap)
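
The calling convention described above, as a caller-side sketch. The
irq_lock() prototype is assumed from the text; the wait-object step is a
placeholder.

	extern unsigned int irq_lock(void); /* assumed prototype */
	extern unsigned int _Swap(unsigned int basepri);

	static unsigned int block_current_context(void)
	{
		unsigned int key = irq_lock(); /* key == pre-lock BASEPRI */

		/* ... queue the current context on a wait object ... */

		return _Swap(key); /* returns once rescheduled, possibly with
				    * a value set via fiberRtnValueSet() */
	}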
@@ -38,7 +38,7 @@ point returns or when it aborts itself, the CPU is in thread mode and must
call the equivalent of task_abort(<self>), but when in handler mode, the
CPU must queue a packet to K_swapper(), then exit handler mode to queue the
PendSV exception and cause the immediate context switch to K_swapper.
*/
 */

#ifdef CONFIG_MICROKERNEL
@@ -52,20 +52,20 @@ PendSV exception and cause the immediate context switch to K_swapper.
static struct k_args cmd_packet;

/*******************************************************************************
*
* _TaskAbort - abort the current task
*
* Possible reasons for a task aborting:
*
* - the task explicitly aborts itself by calling this routine
* - the task implicitly aborts by returning from its entry point
* - the task encounters a fatal exception
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
 *
 * _TaskAbort - abort the current task
 *
 * Possible reasons for a task aborting:
 *
 * - the task explicitly aborts itself by calling this routine
 * - the task implicitly aborts by returning from its entry point
 * - the task encounters a fatal exception
 *
 * RETURNS: N/A
 *
 * \NOMANUAL
 */

void _TaskAbort(void)
{
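
A minimal sketch of the first two abort paths, mirroring the fiber example
earlier; setup_failed() is a placeholder condition.

	extern int setup_failed(void); /* illustrative */
	extern void _TaskAbort(void);

	void demo_task(void)
	{
		if (setup_failed()) {
			_TaskAbort(); /* explicit abort */
		}

		/* ... task work ... */
	} /* returning from the entry point also ends in the abort path */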