doxygen: change comment style to match javadoc

The change replaces the long row of asterisks at the beginning of
each comment block with "/**" and adds a space before the asterisk
at the beginning of each continuation line.
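
For illustration, a condensed before/after sketch of the style this
commit applies (the atomic_clear block is representative; the full
blocks appear in the diff below):

Old style:

/*******************************************************************************
*
* atomic_clear - atomically clear a memory location
*
* RETURNS: Contents of <target> before the atomic operation
*/

New javadoc style:

/**
 *
 * atomic_clear - atomically clear a memory location
 *
 * RETURNS: Contents of <target> before the atomic operation
 */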

Change-Id: I7656bde3bf4d9a31e38941e43b580520432dabc1
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
commit ea0d0b220c
Anas Nashif, 2015-07-01 17:22:39 -04:00
305 changed files with 11249 additions and 11249 deletions

View file

@@ -35,7 +35,7 @@ DESCRIPTION
 This library provides routines to perform a number of atomic operations
 on a memory location: add, subtract, increment, decrement, bitwise OR,
 bitwise NOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap.
-*/
+ */
 #define _ASMLANGUAGE
@@ -60,49 +60,49 @@ GTEXT(atomic_cas)
 .section .TEXT._Atomic, "ax"
 .balign 2
-/*******************************************************************************
-*
-* atomic_clear - atomically clear a memory location
-*
-* This routine atomically clears the contents of <target> and returns the old
-* value that was in <target>.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_clear
-* (
-* atomic_t *target /@ memory location to clear @/
-* )
-*/
+/**
+ *
+ * atomic_clear - atomically clear a memory location
+ *
+ * This routine atomically clears the contents of <target> and returns the old
+ * value that was in <target>.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_clear
+ * (
+ * atomic_t *target /@ memory location to clear @/
+ * )
+ */
 SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear)
 mov_s r1, 0
 /* fall through into atomic_set */
-/*******************************************************************************
-*
-* atomic_set - atomically set a memory location
-*
-* This routine atomically sets the contents of <target> to <value> and returns
-* the old value that was in <target>.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_set
-* (
-* atomic_t *target, /@ memory location to set @/
-* atomic_val_t value /@ set with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_set - atomically set a memory location
+ *
+ * This routine atomically sets the contents of <target> to <value> and returns
+ * the old value that was in <target>.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_set
+ * (
+ * atomic_t *target, /@ memory location to set @/
+ * atomic_val_t value /@ set with this value @/
+ * )
+ *
+ */
 SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
@@ -111,72 +111,72 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
 j_s.d [blink]
 mov_s r0, r1 /* return old value */
-/******************************************************************************
-*
-* atomic_get - Get the value of a shared memory atomically
-*
-* This routine atomically retrieves the value in *target
-*
-* atomic_val_t atomic_get
-* (
-* atomic_t *target /@ address of atom to be retrieved @/
-* )
-*
-* RETURN: value read from address target.
-*
-*/
+/**
+ *
+ * atomic_get - Get the value of a shared memory atomically
+ *
+ * This routine atomically retrieves the value in *target
+ *
+ * atomic_val_t atomic_get
+ * (
+ * atomic_t *target /@ address of atom to be retrieved @/
+ * )
+ *
+ * RETURN: value read from address target.
+ *
+ */
 SECTION_FUNC(TEXT, atomic_get)
 ld_s r0, [r0, 0]
 j_s [blink]
-/*******************************************************************************
-*
-* atomic_inc - atomically increment a memory location
-*
-* This routine atomically increments the value in <target>. The operation is
-* done using unsigned integer arithmetic. Various CPU architectures may impose
-* restrictions with regards to the alignment and cache attributes of the
-* atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_inc
-* (
-* atomic_t *target, /@ memory location to increment @/
-* )
-*
-*/
+/**
+ *
+ * atomic_inc - atomically increment a memory location
+ *
+ * This routine atomically increments the value in <target>. The operation is
+ * done using unsigned integer arithmetic. Various CPU architectures may impose
+ * restrictions with regards to the alignment and cache attributes of the
+ * atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_inc
+ * (
+ * atomic_t *target, /@ memory location to increment @/
+ * )
+ *
+ */
 SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc)
 mov_s r1, 1
 /* fall through into atomic_add */
-/*******************************************************************************
-*
-* atomic_add - atomically add a value to a memory location
-*
-* This routine atomically adds the contents of <target> and <value>, placing
-* the result in <target>. The operation is done using signed integer arithmetic.
-* Various CPU architectures may impose restrictions with regards to the
-* alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_add
-* (
-* atomic_t *target, /@ memory location to add to @/
-* atomic_val_t value /@ value to add @/
-* )
-*/
+/**
+ *
+ * atomic_add - atomically add a value to a memory location
+ *
+ * This routine atomically adds the contents of <target> and <value>, placing
+ * the result in <target>. The operation is done using signed integer arithmetic.
+ * Various CPU architectures may impose restrictions with regards to the
+ * alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_add
+ * (
+ * atomic_t *target, /@ memory location to add to @/
+ * atomic_val_t value /@ value to add @/
+ * )
+ */
 SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
@@ -191,54 +191,54 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
 j_s.d [blink]
 mov_s r0, r2 /* return old value */
-/*******************************************************************************
-*
-* atomic_dec - atomically decrement a memory location
-*
-* This routine atomically decrements the value in <target>. The operation is
-* done using unsigned integer arithmetic. Various CPU architectures may impose
-* restrictions with regards to the alignment and cache attributes of the
-* atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_dec
-* (
-* atomic_t *target, /@ memory location to decrement @/
-* )
-*
-*/
+/**
+ *
+ * atomic_dec - atomically decrement a memory location
+ *
+ * This routine atomically decrements the value in <target>. The operation is
+ * done using unsigned integer arithmetic. Various CPU architectures may impose
+ * restrictions with regards to the alignment and cache attributes of the
+ * atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_dec
+ * (
+ * atomic_t *target, /@ memory location to decrement @/
+ * )
+ *
+ */
 SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_dec)
 mov_s r1, 1
 /* fall through into atomic_sub */
-/*******************************************************************************
-*
-* atomic_sub - atomically subtract a value from a memory location
-*
-* This routine atomically subtracts <value> from the contents of <target>,
-* placing the result in <target>. The operation is done using signed integer
-* arithmetic. Various CPU architectures may impose restrictions with regards to
-* the alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_sub
-* (
-* atomic_t *target, /@ memory location to subtract from @/
-* atomic_val_t value /@ value to subtract @/
-* )
-*
-*/
+/**
+ *
+ * atomic_sub - atomically subtract a value from a memory location
+ *
+ * This routine atomically subtracts <value> from the contents of <target>,
+ * placing the result in <target>. The operation is done using signed integer
+ * arithmetic. Various CPU architectures may impose restrictions with regards to
+ * the alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_sub
+ * (
+ * atomic_t *target, /@ memory location to subtract from @/
+ * atomic_val_t value /@ value to subtract @/
+ * )
+ *
+ */
 SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_sub)
@@ -253,28 +253,28 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_sub)
 j_s.d [blink]
 mov_s r0, r2 /* return old value */
-/******************************************************************************
-*
-* atomic_nand - atomically perform a bitwise NAND on a memory location
-*
-* This routine atomically performs a bitwise NAND operation of the contents of
-* <target> and <value>, placing the result in <target>.
-* Various CPU architectures may impose restrictions with regards to the
-* alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_nand
-* (
-* atomic_t *target, /@ memory location to NAND @/
-* atomic_val_t value /@ NAND with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_nand - atomically perform a bitwise NAND on a memory location
+ *
+ * This routine atomically performs a bitwise NAND operation of the contents of
+ * <target> and <value>, placing the result in <target>.
+ * Various CPU architectures may impose restrictions with regards to the
+ * alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_nand
+ * (
+ * atomic_t *target, /@ memory location to NAND @/
+ * atomic_val_t value /@ NAND with this value @/
+ * )
+ *
+ */
 SECTION_FUNC(TEXT, atomic_nand)
@@ -290,28 +290,28 @@ SECTION_FUNC(TEXT, atomic_nand)
 j_s.d [blink]
 mov_s r0, r2 /* return old value */
-/******************************************************************************
-*
-* atomic_and - atomically perform a bitwise AND on a memory location
-*
-* This routine atomically performs a bitwise AND operation of the contents of
-* <target> and <value>, placing the result in <target>.
-* Various CPU architectures may impose restrictions with regards to the
-* alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_and
-* (
-* atomic_t *target, /@ memory location to AND @/
-* atomic_val_t value /@ AND with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_and - atomically perform a bitwise AND on a memory location
+ *
+ * This routine atomically performs a bitwise AND operation of the contents of
+ * <target> and <value>, placing the result in <target>.
+ * Various CPU architectures may impose restrictions with regards to the
+ * alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_and
+ * (
+ * atomic_t *target, /@ memory location to AND @/
+ * atomic_val_t value /@ AND with this value @/
+ * )
+ *
+ */
 SECTION_FUNC(TEXT, atomic_and)
@@ -326,28 +326,28 @@ SECTION_FUNC(TEXT, atomic_and)
 j_s.d [blink]
 mov_s r0, r2 /* return old value */
-/*******************************************************************************
-*
-* atomic_or - atomically perform a bitwise OR on memory location
-*
-* This routine atomically performs a bitwise OR operation of the contents of
-* <target> and <value>, placing the result in <target>.
-* Various CPU architectures may impose restrictions with regards to the
-* alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_or
-* (
-* atomic_t *target, /@ memory location to OR @/
-* atomic_val_t value /@ OR with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_or - atomically perform a bitwise OR on memory location
+ *
+ * This routine atomically performs a bitwise OR operation of the contents of
+ * <target> and <value>, placing the result in <target>.
+ * Various CPU architectures may impose restrictions with regards to the
+ * alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_or
+ * (
+ * atomic_t *target, /@ memory location to OR @/
+ * atomic_val_t value /@ OR with this value @/
+ * )
+ *
+ */
 SECTION_FUNC(TEXT, atomic_or)
@@ -362,28 +362,28 @@ SECTION_FUNC(TEXT, atomic_or)
 j_s.d [blink]
 mov_s r0, r2 /* return old value */
-/*******************************************************************************
-*
-* atomic_xor - atomically perform a bitwise XOR on a memory location
-*
-* This routine atomically performs a bitwise XOR operation of the contents of
-* <target> and <value>, placing the result in <target>.
-* Various CPU architectures may impose restrictions with regards to the
-* alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: Contents of <target> before the atomic operation
-*
-* ERRNO: N/A
-*
-* atomic_val_t atomic_xor
-* (
-* atomic_t *target, /@ memory location to XOR @/
-* atomic_val_t value /@ XOR with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_xor - atomically perform a bitwise XOR on a memory location
+ *
+ * This routine atomically performs a bitwise XOR operation of the contents of
+ * <target> and <value>, placing the result in <target>.
+ * Various CPU architectures may impose restrictions with regards to the
+ * alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: Contents of <target> before the atomic operation
+ *
+ * ERRNO: N/A
+ *
+ * atomic_val_t atomic_xor
+ * (
+ * atomic_t *target, /@ memory location to XOR @/
+ * atomic_val_t value /@ XOR with this value @/
+ * )
+ *
+ */
 SECTION_FUNC(TEXT, atomic_xor)
@@ -398,29 +398,29 @@ SECTION_FUNC(TEXT, atomic_xor)
 j_s.d [blink]
 mov_s r0, r2 /* return old value */
-/*******************************************************************************
-*
-* atomic_cas - atomically compare-and-swap the contents of a memory location
-*
-* This routine performs an atomic compare-and-swap. testing that the contents of
-* <target> contains <oldValue>, and if it does, setting the value of <target>
-* to <newValue>. Various CPU architectures may impose restrictions with regards
-* to the alignment and cache attributes of the atomic_t type.
-*
-* This routine can be used from both task and interrupt level.
-*
-* RETURNS: 1 if the swap is actually executed, 0 otherwise.
-*
-* ERRNO: N/A
-*
-* int atomic_cas
-* (
-* atomic_t *target, /@ memory location to compare-and-swap @/
-* atomic_val_t oldValue, /@ compare to this value @/
-* atomic_val_t newValue, /@ swap with this value @/
-* )
-*
-*/
+/**
+ *
+ * atomic_cas - atomically compare-and-swap the contents of a memory location
+ *
+ * This routine performs an atomic compare-and-swap. testing that the contents of
+ * <target> contains <oldValue>, and if it does, setting the value of <target>
+ * to <newValue>. Various CPU architectures may impose restrictions with regards
+ * to the alignment and cache attributes of the atomic_t type.
+ *
+ * This routine can be used from both task and interrupt level.
+ *
+ * RETURNS: 1 if the swap is actually executed, 0 otherwise.
+ *
+ * ERRNO: N/A
+ *
+ * int atomic_cas
+ * (
+ * atomic_t *target, /@ memory location to compare-and-swap @/
+ * atomic_val_t oldValue, /@ compare to this value @/
+ * atomic_val_t newValue, /@ swap with this value @/
+ * )
+ *
+ */
 SECTION_FUNC(TEXT, atomic_cas)

View file

@@ -33,7 +33,7 @@
 /*
 DESCRIPTION
 CPU power management routines.
-*/
+ */
 #define _ASMLANGUAGE

View file

@@ -35,7 +35,7 @@ DESCRIPTION
 This module implements the code for handling entry to and exit from Fast IRQs.
 See isr_wrapper.S for details.
-*/
+ */
 #define _ASMLANGUAGE
@@ -53,25 +53,25 @@ GDATA(_firq_stack)
 SECTION_VAR(NOINIT, _firq_stack)
 .space CONFIG_FIRQ_STACK_SIZE
-/*******************************************************************************
-*
-* _firq_enter - work to be done before handing control to a FIRQ ISR
-*
-* The processor switches to a second register bank so registers from the
-* current bank do not have to be preserved yet. The only issue is the LP_START/
-* LP_COUNT/LP_END registers, which are not banked.
-*
-* If all FIRQ ISRs are programmed such that there are no use of the LP
-* registers (ie. no LPcc instruction), then the kernel can be configured to
-* remove the use of _firq_enter().
-*
-* When entering a FIRQ, interrupts might as well be locked: the processor is
-* running at its highest priority, and cannot be preempted by anything.
-*
-* Assumption by _isr_demux: r3 is untouched by _firq_enter.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _firq_enter - work to be done before handing control to a FIRQ ISR
+ *
+ * The processor switches to a second register bank so registers from the
+ * current bank do not have to be preserved yet. The only issue is the LP_START/
+ * LP_COUNT/LP_END registers, which are not banked.
+ *
+ * If all FIRQ ISRs are programmed such that there are no use of the LP
+ * registers (ie. no LPcc instruction), then the kernel can be configured to
+ * remove the use of _firq_enter().
+ *
+ * When entering a FIRQ, interrupts might as well be locked: the processor is
+ * running at its highest priority, and cannot be preempted by anything.
+ *
+ * Assumption by _isr_demux: r3 is untouched by _firq_enter.
+ *
+ * RETURNS: N/A
+ */
 SECTION_FUNC(TEXT, _firq_enter)
@@ -97,12 +97,12 @@ SECTION_FUNC(TEXT, _firq_enter)
 j @_isr_demux
-/*******************************************************************************
-*
-* _firq_exit - work to be done exiting a FIRQ
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _firq_exit - work to be done exiting a FIRQ
+ *
+ * RETURNS: N/A
+ */
 SECTION_FUNC(TEXT, _firq_exit)
@@ -256,12 +256,12 @@ _firq_no_reschedule:
 /* LP registers are already restored, just switch back to bank 0 */
 rtie
-/*******************************************************************************
-*
-* _firq_stack_setup - install the FIRQ stack in register bank 1
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _firq_stack_setup - install the FIRQ stack in register bank 1
+ *
+ * RETURNS: N/A
+ */
 SECTION_FUNC(TEXT, _firq_stack_setup)

View file

@@ -34,7 +34,7 @@
 DESCRIPTION
 This module implements the routines necessary for handling fatal faults on
 ARCv2 CPUs.
-*/
+ */
 #include <nano_private.h>
 #include <offsets.h>
@@ -52,23 +52,23 @@ const NANO_ESF _default_esf = {
 0xdeaddead, /* placeholder */
 };
-/*******************************************************************************
-*
-* _NanoFatalErrorHandler - nanokernel fatal error handler
-*
-* This routine is called when fatal error conditions are detected by software
-* and is responsible only for reporting the error. Once reported, it then
-* invokes the user provided routine _SysFatalErrorHandler() which is
-* responsible for implementing the error handling policy.
-*
-* The caller is expected to always provide a usable ESF. In the event that the
-* fatal error does not have a hardware generated ESF, the caller should either
-* create its own or use a pointer to the global default ESF <_default_esf>.
-*
-* RETURNS: This function does not return.
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _NanoFatalErrorHandler - nanokernel fatal error handler
+ *
+ * This routine is called when fatal error conditions are detected by software
+ * and is responsible only for reporting the error. Once reported, it then
+ * invokes the user provided routine _SysFatalErrorHandler() which is
+ * responsible for implementing the error handling policy.
+ *
+ * The caller is expected to always provide a usable ESF. In the event that the
+ * fatal error does not have a hardware generated ESF, the caller should either
+ * create its own or use a pointer to the global default ESF <_default_esf>.
+ *
+ * RETURNS: This function does not return.
+ *
+ * \NOMANUAL
+ */
 FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
 const NANO_ESF *pEsf)

View file

@@ -34,7 +34,7 @@
 DESCRIPTION
 This library implements nanoFfsMsb() and nanoFfsLsb() which returns the
 most and least significant bit set respectively.
-*/
+ */
 #define _ASMLANGUAGE
@@ -46,17 +46,17 @@ most and least significant bit set respectively.
 GTEXT(nanoFfsMsb)
 GTEXT(nanoFfsLsb)
-/*******************************************************************************
-*
-* nanoFfsMsb - find first set bit (searching from the most significant bit)
-*
-* This routine finds the first bit set in the argument passed it and
-* returns the index of that bit. Bits are numbered starting
-* at 1 from the least significant bit. A return value of zero indicates that
-* the value passed is zero.
-*
-* RETURNS: most significant bit set
-*/
+/**
+ *
+ * nanoFfsMsb - find first set bit (searching from the most significant bit)
+ *
+ * This routine finds the first bit set in the argument passed it and
+ * returns the index of that bit. Bits are numbered starting
+ * at 1 from the least significant bit. A return value of zero indicates that
+ * the value passed is zero.
+ *
+ * RETURNS: most significant bit set
+ */
 SECTION_FUNC(TEXT, nanoFfsMsb)
@@ -69,17 +69,17 @@ SECTION_FUNC(TEXT, nanoFfsMsb)
 j_s.d [blink]
 add.nz r0, r0, 1
-/*******************************************************************************
-*
-* nanoFfsLsb - find first set bit (searching from the least significant bit)
-*
-* This routine finds the first bit set in the argument passed it and
-* returns the index of that bit. Bits are numbered starting
-* at 1 from the least significant bit. A return value of zero indicates that
-* the value passed is zero.
-*
-* RETURNS: least significant bit set
-*/
+/**
+ *
+ * nanoFfsLsb - find first set bit (searching from the least significant bit)
+ *
+ * This routine finds the first bit set in the argument passed it and
+ * returns the index of that bit. Bits are numbered starting
+ * at 1 from the least significant bit. A return value of zero indicates that
+ * the value passed is zero.
+ *
+ * RETURNS: least significant bit set
+ */
 SECTION_FUNC(TEXT, nanoFfsLsb)

View file

@@ -40,55 +40,55 @@
 #include <toolchain.h>
 #include <arch/cpu.h>
-/*******************************************************************************
-*
-* irq_lock - disable all interrupts on the local CPU
-*
-* This routine disables interrupts. It can be called from either interrupt,
-* task or fiber level. This routine returns an architecture-dependent
-* lock-out key representing the "interrupt disable state" prior to the call;
-* this key can be passed to irq_unlock() to re-enable interrupts.
-*
-* The lock-out key should only be used as the argument to the
-* irq_unlock() API. It should never be used to manually re-enable
-* interrupts or to inspect or manipulate the contents of the source register.
-*
-* WARNINGS
-* Invoking a kernel routine with interrupts locked may result in
-* interrupts being re-enabled for an unspecified period of time. If the
-* called routine blocks, interrupts will be re-enabled while another
-* context executes, or while the system is idle.
-*
-* The "interrupt disable state" is an attribute of a context. Thus, if a
-* fiber or task disables interrupts and subsequently invokes a kernel
-* routine that causes the calling context to block, the interrupt
-* disable state will be restored when the context is later rescheduled
-* for execution.
-*
-* RETURNS: An architecture-dependent lock-out key representing the
-* "interrupt disable state" prior to the call.
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * irq_lock - disable all interrupts on the local CPU
+ *
+ * This routine disables interrupts. It can be called from either interrupt,
+ * task or fiber level. This routine returns an architecture-dependent
+ * lock-out key representing the "interrupt disable state" prior to the call;
+ * this key can be passed to irq_unlock() to re-enable interrupts.
+ *
+ * The lock-out key should only be used as the argument to the
+ * irq_unlock() API. It should never be used to manually re-enable
+ * interrupts or to inspect or manipulate the contents of the source register.
+ *
+ * WARNINGS
+ * Invoking a kernel routine with interrupts locked may result in
+ * interrupts being re-enabled for an unspecified period of time. If the
+ * called routine blocks, interrupts will be re-enabled while another
+ * context executes, or while the system is idle.
+ *
+ * The "interrupt disable state" is an attribute of a context. Thus, if a
+ * fiber or task disables interrupts and subsequently invokes a kernel
+ * routine that causes the calling context to block, the interrupt
+ * disable state will be restored when the context is later rescheduled
+ * for execution.
+ *
+ * RETURNS: An architecture-dependent lock-out key representing the
+ * "interrupt disable state" prior to the call.
+ *
+ * \NOMANUAL
+ */
 SECTION_FUNC(TEXT, irq_lock)
 j_s.d [blink]
 clri r0
-/*******************************************************************************
-*
-* irq_unlock - enable all interrupts on the local CPU
-*
-* This routine re-enables interrupts on the local CPU. The <key> parameter
-* is an architecture-dependent lock-out key that is returned by a previous
-* invocation of irq_lock().
-*
-* This routine can be called from either interrupt, task or fiber level.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * irq_unlock - enable all interrupts on the local CPU
+ *
+ * This routine re-enables interrupts on the local CPU. The <key> parameter
+ * is an architecture-dependent lock-out key that is returned by a previous
+ * invocation of irq_lock().
+ *
+ * This routine can be called from either interrupt, task or fiber level.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */
 SECTION_FUNC(TEXT, irq_unlock)
 j_s.d [blink]

View file

@@ -35,7 +35,7 @@ DESCRIPTION
 Wrapper installed in vector table for handling dynamic interrupts that accept
 a parameter.
-*/
+ */
 #define _ASMLANGUAGE
@@ -207,7 +207,7 @@ From RIRQ:
 Both types of IRQs already have an IRQ stack frame: simply return from
 interrupt.
-*/
+ */
 SECTION_FUNC(TEXT, _isr_enter)
 lr r0, [_ARC_V2_AUX_IRQ_ACT]

View file

@@ -46,7 +46,7 @@ Typically, only those members that are accessed by assembly language routines
 are defined; however, it doesn't hurt to define all fields for the sake of
 completeness.
-*/
+ */
 #include <gen_offset.h>
 #include <nano_private.h>

View file

@@ -36,7 +36,7 @@ This module implements the code for handling entry to and exit from regular
 IRQs.
 See isr_wrapper.S for details.
-*/
+ */
 #define _ASMLANGUAGE
@@ -49,18 +49,18 @@ See isr_wrapper.S for details.
 GTEXT(_rirq_enter)
 GTEXT(_rirq_exit)
-/*******************************************************************************
-*
-* _rirq_enter - work to be done before handing control to an IRQ ISR
-*
-* The processor pushes automatically all registers that need to be saved.
-* However, since the processor always runs at kernel privilege there is no
-* automatic switch to the IRQ stack: this must be done in software.
-*
-* Assumption by _isr_demux: r3 is untouched by _rirq_enter.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _rirq_enter - work to be done before handing control to an IRQ ISR
+ *
+ * The processor pushes automatically all registers that need to be saved.
+ * However, since the processor always runs at kernel privilege there is no
+ * automatic switch to the IRQ stack: this must be done in software.
+ *
+ * Assumption by _isr_demux: r3 is untouched by _rirq_enter.
+ *
+ * RETURNS: N/A
+ */
 SECTION_FUNC(TEXT, _rirq_enter)
@@ -75,12 +75,12 @@ SECTION_FUNC(TEXT, _rirq_enter)
 j _isr_demux
-/*******************************************************************************
-*
-* _rirq_exit - work to be done exiting an IRQ
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _rirq_exit - work to be done exiting an IRQ
+ *
+ * RETURNS: N/A
+ */
 SECTION_FUNC(TEXT, _rirq_exit)

View file

@@ -36,7 +36,7 @@ This module implements the routines necessary for thread context switching
 on ARCv2 CPUs.
 See isr_wrapper.S for details.
-*/
+ */
 #define _ASMLANGUAGE
@@ -51,37 +51,37 @@ GTEXT(_Swap)
 GDATA(_nanokernel)
-/*******************************************************************************
-*
-* _Swap - initiate a cooperative context switch
-*
-* The _Swap() routine is invoked by various nanokernel services to effect
-* a cooperative context context switch. Prior to invoking _Swap(), the caller
-* disables interrupts via nanoCpuIntLock() and the return 'key' is passed as a
-* parameter to _Swap(). The key is in fact the value stored in the register
-* operand of a CLRI instruction.
-*
-* It stores the intlock key parameter into current->intlock_key.
-* Given that _Swap() is called to effect a cooperative context context switch,
-* the caller-saved integer registers are saved on the stack by the function
-* call preamble to _Swap(). This creates a custom stack frame that will be
-* popped when returning from _Swap(), but is not suitable for handling a return
-* from an exception. Thus, the fact that the thread is pending because of a
-* cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in
-* the relinquish_cause of the context's tCCS. The _IrqExit()/_FirqExit() code
-* will take care of doing the right thing to restore the thread status.
-*
-* When _Swap() is invoked, we know the decision to perform a context switch or
-* not has already been taken and a context switch must happen.
-*
-* RETURNS: may contain a return value setup by a call to fiberRtnValueSet()
-*
-* C function prototype:
-*
-* unsigned int _Swap (unsigned int key);
-*
-*/
+/**
+ *
+ * _Swap - initiate a cooperative context switch
+ *
+ * The _Swap() routine is invoked by various nanokernel services to effect
+ * a cooperative context context switch. Prior to invoking _Swap(), the caller
+ * disables interrupts via nanoCpuIntLock() and the return 'key' is passed as a
+ * parameter to _Swap(). The key is in fact the value stored in the register
+ * operand of a CLRI instruction.
+ *
+ * It stores the intlock key parameter into current->intlock_key.
+ * Given that _Swap() is called to effect a cooperative context context switch,
+ * the caller-saved integer registers are saved on the stack by the function
+ * call preamble to _Swap(). This creates a custom stack frame that will be
+ * popped when returning from _Swap(), but is not suitable for handling a return
+ * from an exception. Thus, the fact that the thread is pending because of a
+ * cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in
+ * the relinquish_cause of the context's tCCS. The _IrqExit()/_FirqExit() code
+ * will take care of doing the right thing to restore the thread status.
+ *
+ * When _Swap() is invoked, we know the decision to perform a context switch or
+ * not has already been taken and a context switch must happen.
+ *
+ * RETURNS: may contain a return value setup by a call to fiberRtnValueSet()
+ *
+ * C function prototype:
+ *
+ * unsigned int _Swap (unsigned int key);
+ *
+ */
 SECTION_FUNC(TEXT, _Swap)

View file

@@ -33,7 +33,7 @@
 /*
 DESCRIPTION
 This module provides the _SysFatalErrorHandler() routine for ARCv2 BSPs.
-*/
+ */
 #include <nanokernel.h>
 #include <toolchain.h>
@@ -61,25 +61,25 @@ static inline void nonEssentialTaskAbort(void)
 } while ((0))
 #endif
-/*******************************************************************************
-*
-* _SysFatalErrorHandler - fatal error handler
-*
-* This routine implements the corrective action to be taken when the system
-* detects a fatal error.
-*
-* This sample implementation attempts to abort the current context and allow
-* the system to continue executing, which may permit the system to continue
-* functioning with degraded capabilities.
-*
-* System designers may wish to enhance or substitute this sample
-* implementation to take other actions, such as logging error (or debug)
-* information to a persistent repository and/or rebooting the system.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _SysFatalErrorHandler - fatal error handler
+ *
+ * This routine implements the corrective action to be taken when the system
+ * detects a fatal error.
+ *
+ * This sample implementation attempts to abort the current context and allow
+ * the system to continue executing, which may permit the system to continue
+ * functioning with degraded capabilities.
+ *
+ * System designers may wish to enhance or substitute this sample
+ * implementation to take other actions, such as logging error (or debug)
+ * information to a persistent repository and/or rebooting the system.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */
 void _SysFatalErrorHandler(
 unsigned int reason, /* fatal error reason */

View file

@@ -39,7 +39,7 @@ This file is also included by assembly language files which must #define
 _ASMLANGUAGE before including this header file. Note that nanokernel assembly
 source files obtains structure offset values via "absolute symbols" in the
 offsets.o module.
-*/
+ */
 #ifndef _NANO_PRIVATE_H
 #define _NANO_PRIVATE_H
@@ -238,32 +238,32 @@ static ALWAYS_INLINE void nanoArchInit(void)
 _irq_setup();
 }
-/*******************************************************************************
-*
-* fiberRtnValueSet - set the return value for the specified fiber (inline)
-*
-* The register used to store the return value from a function call invocation
-* to <value>. It is assumed that the specified <fiber> is pending, and thus
-* the fiber's context is stored in its tCCS structure.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * fiberRtnValueSet - set the return value for the specified fiber (inline)
+ *
+ * The register used to store the return value from a function call invocation
+ * to <value>. It is assumed that the specified <fiber> is pending, and thus
+ * the fiber's context is stored in its tCCS structure.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */
 static ALWAYS_INLINE void fiberRtnValueSet(tCCS *fiber, unsigned int value)
 {
 fiber->return_value = value;
 }
-/*******************************************************************************
-*
-* _IS_IN_ISR - indicates if kernel is handling interrupt
-*
-* RETURNS: 1 if interrupt handler is executed, 0 otherwise
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _IS_IN_ISR - indicates if kernel is handling interrupt
+ *
+ * RETURNS: 1 if interrupt handler is executed, 0 otherwise
+ *
+ * \NOMANUAL
+ */
 static ALWAYS_INLINE int _IS_IN_ISR(void)
 {

View file

@@ -38,20 +38,20 @@ call _Cstart().
 Stack is available in this module, but not the global data/bss until their
 initialization is performed.
-*/
+ */
 #include <stdint.h>
 #include <toolchain.h>
 #include <linker-defs.h>
-/*******************************************************************************
-*
-* bssZero - clear BSS
-*
-* This routine clears the BSS region, so all bytes are 0.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * bssZero - clear BSS
+ *
+ * This routine clears the BSS region, so all bytes are 0.
+ *
+ * RETURNS: N/A
+ */
 static void bssZero(void)
 {
@@ -63,14 +63,14 @@ static void bssZero(void)
 }
 }
-/*******************************************************************************
-*
-* dataCopy - copy the data section from ROM to RAM
-*
-* This routine copies the data section from ROM to RAM.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * dataCopy - copy the data section from ROM to RAM
+ *
+ * This routine copies the data section from ROM to RAM.
+ *
+ * RETURNS: N/A
+ */
 #ifdef CONFIG_XIP
 static void dataCopy(void)
@@ -90,14 +90,14 @@ static void dataCopy(void)
 #endif
 extern FUNC_NORETURN void _Cstart(void);
-/*******************************************************************************
-*
-* _PrepC - prepare to and run C code
-*
-* This routine prepares for the execution of and runs C code.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _PrepC - prepare to and run C code
+ *
+ * This routine prepares for the execution of and runs C code.
+ *
+ * RETURNS: N/A
+ */
 void _PrepC(void)
 {

View file

@@ -33,7 +33,7 @@
 /*
 DESCRIPTION
 Reset handler that prepares the system for running C code.
-*/
+ */
 #define _ASMLANGUAGE
@@ -46,19 +46,19 @@ Reset handler that prepares the system for running C code.
 GTEXT(__reset)
-/*******************************************************************************
-*
-* __reset - reset vector
-*
-* Ran when the system comes out of reset. The processor is at supervisor level.
-*
-* Locking interrupts prevents anything from interrupting the CPU.
-*
-* When these steps are completed, jump to _PrepC(), which will finish setting
-* up the system for running C code.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * __reset - reset vector
+ *
+ * Ran when the system comes out of reset. The processor is at supervisor level.
+ *
+ * Locking interrupts prevents anything from interrupting the CPU.
+ *
+ * When these steps are completed, jump to _PrepC(), which will finish setting
+ * up the system for running C code.
+ *
+ * RETURNS: N/A
+ */
 SECTION_FUNC(TEXT,__reset)

View file

@@ -45,7 +45,7 @@ to work around an issue with the assembler where:
 statements would end up with the two half-words of the functions' addresses
 swapped.
-*/
+ */
 #include <stdint.h>
 #include <toolchain.h>

View file

@@ -40,7 +40,7 @@ System exception handler names all have the same format:
 __<exception name with underscores>
 Refer to the ARCv2 manual for an explanation of the exceptions.
-*/
+ */
 #ifndef _VECTOR_TABLE__H_
 #define _VECTOR_TABLE__H_

View file

@@ -35,7 +35,7 @@ DESCRIPTION
 Provides a boot time handler that simply hangs in a sleep loop, and a run time
 handler that resets the CPU. Also provides a mechanism for hooking a custom
 run time handler.
-*/
+ */
 #include <nanokernel.h>
 #include <arch/cpu.h>
@@ -52,15 +52,15 @@ extern void _SysNmiOnReset(void);
 typedef void (*_NmiHandler_t)(void);
 static _NmiHandler_t handler = _SysNmiOnReset;
-/*******************************************************************************
-*
-* _DefaultHandler - default NMI handler installed when kernel is up
-*
-* The default handler outputs a error message and reboots the target. It is
-* installed by calling _NmiInit();
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _DefaultHandler - default NMI handler installed when kernel is up
+ *
+ * The default handler outputs a error message and reboots the target. It is
+ * installed by calling _NmiInit();
+ *
+ * RETURNS: N/A
+ */
 static void _DefaultHandler(void)
 {
@@ -68,32 +68,32 @@ static void _DefaultHandler(void)
 _ScbSystemReset();
 }
-/*******************************************************************************
-*
-* _NmiInit - install default runtime NMI handler
-*
-* Meant to be called by BSP code if they want to install a simple NMI handler
-* that reboots the target. It should be installed after the console is
-* initialized.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _NmiInit - install default runtime NMI handler
+ *
+ * Meant to be called by BSP code if they want to install a simple NMI handler
+ * that reboots the target. It should be installed after the console is
+ * initialized.
+ *
+ * RETURNS: N/A
+ */
 void _NmiInit(void)
 {
 handler = _DefaultHandler;
 }
-/*******************************************************************************
-*
-* _NmiHandlerSet - install a custom runtime NMI handler
-*
-* Meant to be called by BSP code if they want to install a custom NMI handler
-* that reboots. It should be installed after the console is initialized if it is
-* meant to output to the console.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _NmiHandlerSet - install a custom runtime NMI handler
+ *
+ * Meant to be called by BSP code if they want to install a custom NMI handler
+ * that reboots. It should be installed after the console is initialized if it is
+ * meant to output to the console.
+ *
+ * RETURNS: N/A
+ */
 void _NmiHandlerSet(void (*pHandler)(void))
 {
@@ -101,14 +101,14 @@ void _NmiHandlerSet(void (*pHandler)(void))
 }
 #endif /* CONFIG_RUNTIME_NMI */
-/*******************************************************************************
-*
-* __nmi - handler installed in the vector table
-*
-* Simply call what is installed in 'static void(*handler)(void)'.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * __nmi - handler installed in the vector table
+ *
+ * Simply call what is installed in 'static void(*handler)(void)'.
+ *
+ * RETURNS: N/A
+ */
 void __nmi(void)
 {

View file

@@ -38,20 +38,20 @@ call _Cstart().
 Stack is available in this module, but not the global data/bss until their
 initialization is performed.
-*/
+ */
 #include <stdint.h>
 #include <toolchain.h>
 #include <linker-defs.h>
-/*******************************************************************************
-*
-* bssZero - clear BSS
-*
-* This routine clears the BSS region, so all bytes are 0.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * bssZero - clear BSS
+ *
+ * This routine clears the BSS region, so all bytes are 0.
+ *
+ * RETURNS: N/A
+ */
 static void bssZero(void)
 {
@@ -63,14 +63,14 @@ static void bssZero(void)
 }
 }
-/*******************************************************************************
-*
-* dataCopy - copy the data section from ROM to RAM
-*
-* This routine copies the data section from ROM to RAM.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * dataCopy - copy the data section from ROM to RAM
+ *
+ * This routine copies the data section from ROM to RAM.
+ *
+ * RETURNS: N/A
+ */
 #ifdef CONFIG_XIP
 static void dataCopy(void)
@@ -90,14 +90,14 @@ static void dataCopy(void)
 #endif
 extern FUNC_NORETURN void _Cstart(void);
-/*******************************************************************************
-*
-* _PrepC - prepare to and run C code
-*
-* This routine prepares for the execution of and runs C code.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _PrepC - prepare to and run C code
+ *
+ * This routine prepares for the execution of and runs C code.
+ *
+ * RETURNS: N/A
+ */
 void _PrepC(void)
 {

View file

@@ -33,7 +33,7 @@
 /*
 DESCRIPTION
 Reset handler that prepares the system for running C code.
-*/
+ */
 #define _ASMLANGUAGE
@@ -47,28 +47,28 @@ _ASM_FILE_PROLOGUE
 GTEXT(__reset)
-/*******************************************************************************
-*
-* __reset - reset vector
-*
-* Ran when the system comes out of reset. The processor is in thread mode with
-* privileged level. At this point, the main stack pointer (MSP) is already
-* pointing to a valid area in SRAM.
-*
-* Locking interrupts prevents anything but NMIs and hard faults from
-* interrupting the CPU. A default NMI handler is already in place in the
-* vector table, and the boot code should not generate hard fault, or we're in
-* deep trouble.
-*
-* We want to use the process stack pointer (PSP) instead of the MSP, since the
-* MSP is to be set up to point to the one-and-only interrupt stack during later
-* boot. That would not be possible if in use for running C code.
-*
-* When these steps are completed, jump to _PrepC(), which will finish setting
-* up the system for running C code.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * __reset - reset vector
+ *
+ * Ran when the system comes out of reset. The processor is in thread mode with
+ * privileged level. At this point, the main stack pointer (MSP) is already
+ * pointing to a valid area in SRAM.
+ *
+ * Locking interrupts prevents anything but NMIs and hard faults from
+ * interrupting the CPU. A default NMI handler is already in place in the
+ * vector table, and the boot code should not generate hard fault, or we're in
+ * deep trouble.
+ *
+ * We want to use the process stack pointer (PSP) instead of the MSP, since the
+ * MSP is to be set up to point to the one-and-only interrupt stack during later
+ * boot. That would not be possible if in use for running C code.
+ *
+ * When these steps are completed, jump to _PrepC(), which will finish setting
+ * up the system for running C code.
+ *
+ * RETURNS: N/A
+ */
 SECTION_FUNC(TEXT,__reset)

View file

@@ -36,7 +36,7 @@ DESCRIPTION
 Most of the SCB interface consists of simple bit-flipping methods, and is
 implemented as inline functions in scb.h. This module thus contains only data
 definitions and more complex routines, if needed.
-*/
+ */
 #include <nanokernel.h>
 #include <arch/cpu.h>
@@ -44,14 +44,14 @@ definitions and more complex routines, if needed.
 #define SCB_AIRCR_VECTKEY_EN_W 0x05FA
-/*******************************************************************************
-*
-* _ScbSystemReset - reset the system
-*
-* This routine resets the processor.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _ScbSystemReset - reset the system
+ *
+ * This routine resets the processor.
+ *
+ * RETURNS: N/A
+ */
 void _ScbSystemReset(void)
 {
@@ -63,19 +63,19 @@ void _ScbSystemReset(void)
 __scs.scb.aircr.val = reg.val;
 }
-/*******************************************************************************
-*
-* _ScbNumPriGroupSet - set the number of priority groups based on the number
-* of exception priorities desired
-*
-* Exception priorities can be divided in priority groups, inside which there is
-* no preemption. The priorities inside a group are only used to decide which
-* exception will run when more than one is ready to be handled.
-*
-* The number of priorities has to be a power of two, from 1 to 128.
-*
-* RETURNS: N/A
-*/
+/**
+ *
+ * _ScbNumPriGroupSet - set the number of priority groups based on the number
+ * of exception priorities desired
+ *
+ * Exception priorities can be divided in priority groups, inside which there is
+ * no preemption. The priorities inside a group are only used to decide which
+ * exception will run when more than one is ready to be handled.
+ *
+ * The number of priorities has to be a power of two, from 1 to 128.
+ *
+ * RETURNS: N/A
+ */
 void _ScbNumPriGroupSet(unsigned int n /* number of priorities */
 )

View file

@@ -35,7 +35,7 @@ DESCRIPTION
 Most of the SCS interface consists of simple bit-flipping methods, and is
 implemented as inline functions in scs.h. This module thus contains only data
 definitions and more complex routines, if needed.
-*/
+ */
 #include <nanokernel.h>
 #include <arch/cpu.h>

View file

@@ -33,7 +33,7 @@
 /*
 DESCRIPTION
 Software ISR table for ARM
-*/
+ */
 #define _ASMLANGUAGE

View file

@@ -38,7 +38,7 @@ point, ie. the first instruction executed.
 The table is populated with all the system exception handlers. The NMI vector
 must be populated with a valid handler since it can happen at any time. The
 rest should not be triggered until the kernel is ready to handle them.
-*/
+ */
 #define _ASMLANGUAGE

View file

@@ -40,7 +40,7 @@ System exception handler names all have the same format:
 __<exception name with underscores>
 No other symbol has the same format, so they are easy to spot.
-*/
+ */
 #ifndef _VECTOR_TABLE__H_
 #define _VECTOR_TABLE__H_

View file

@@ -33,7 +33,7 @@
 /*
 DESCRIPTION
 This module provides the _SysFatalErrorHandler() routine for Cortex-M BSPs.
-*/
+ */
 #include <nanokernel.h>
 #include <toolchain.h>
@@ -61,25 +61,25 @@ static inline void nonEssentialTaskAbort(void)
 } while ((0))
 #endif
-/*******************************************************************************
-*
-* _SysFatalErrorHandler - fatal error handler
-*
-* This routine implements the corrective action to be taken when the system
-* detects a fatal error.
-*
-* This sample implementation attempts to abort the current context and allow
-* the system to continue executing, which may permit the system to continue
-* functioning with degraded capabilities.
-*
-* System designers may wish to enhance or substitute this sample
-* implementation to take other actions, such as logging error (or debug)
-* information to a persistent repository and/or rebooting the system.
-*
-* RETURNS: N/A
-*
-* \NOMANUAL
-*/
+/**
+ *
+ * _SysFatalErrorHandler - fatal error handler
+ *
+ * This routine implements the corrective action to be taken when the system
+ * detects a fatal error.
+ *
+ * This sample implementation attempts to abort the current context and allow
+ * the system to continue executing, which may permit the system to continue
+ * functioning with degraded capabilities.
+ *
+ * System designers may wish to enhance or substitute this sample
+ * implementation to take other actions, such as logging error (or debug)
+ * information to a persistent repository and/or rebooting the system.
+ *
+ * RETURNS: N/A
+ *
+ * \NOMANUAL
+ */
 void _SysFatalErrorHandler(
 unsigned int reason, /* fatal error reason */

View file

@ -35,7 +35,7 @@ DESCRIPTION
This library provides routines to perform a number of atomic operations This library provides routines to perform a number of atomic operations
on a memory location: add, subtract, increment, decrement, bitwise OR, on a memory location: add, subtract, increment, decrement, bitwise OR,
bitwise NOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap. bitwise NOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -59,49 +59,49 @@ GTEXT(atomic_inc)
GTEXT(atomic_sub) GTEXT(atomic_sub)
GTEXT(atomic_cas) GTEXT(atomic_cas)
/******************************************************************************* /**
* *
* atomic_clear - atomically clear a memory location * atomic_clear - atomically clear a memory location
* *
* This routine atomically clears the contents of <target> and returns the old * This routine atomically clears the contents of <target> and returns the old
* value that was in <target>. * value that was in <target>.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: Contents of <target> before the atomic operation * RETURNS: Contents of <target> before the atomic operation
* *
* ERRNO: N/A * ERRNO: N/A
* *
* atomic_val_t atomic_clear * atomic_val_t atomic_clear
* ( * (
* atomic_t *target /@ memory location to clear @/ * atomic_t *target /@ memory location to clear @/
* ) * )
*/ */
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear) SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear)
MOV r1, #0 MOV r1, #0
/* fall through into atomic_set */ /* fall through into atomic_set */
/******************************************************************************* /**
* *
* atomic_set - atomically set a memory location * atomic_set - atomically set a memory location
* *
* This routine atomically sets the contents of <target> to <value> and returns * This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>. * the old value that was in <target>.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: Contents of <target> before the atomic operation * RETURNS: Contents of <target> before the atomic operation
* *
* ERRNO: N/A * ERRNO: N/A
* *
* atomic_val_t atomic_set * atomic_val_t atomic_set
* ( * (
* atomic_t *target, /@ memory location to set @/ * atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/ * atomic_val_t value /@ set with this value @/
* ) * )
* *
*/ */
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set) SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
@ -114,72 +114,72 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
MOV r0, r2 /* return old value */ MOV r0, r2 /* return old value */
MOV pc, lr MOV pc, lr
/****************************************************************************** /**
* *
* atomic_get - Get the value of a shared memory atomically * atomic_get - Get the value of a shared memory atomically
* *
* This routine atomically retrieves the value in *target * This routine atomically retrieves the value in *target
* *
* long atomic_get * long atomic_get
* ( * (
* atomic_t * target /@ address of atom to be retrieved @/ * atomic_t * target /@ address of atom to be retrieved @/
* ) * )
* *
* RETURNS: value read from address target. * RETURNS: value read from address target.
* *
*/ */
SECTION_FUNC(TEXT, atomic_get) SECTION_FUNC(TEXT, atomic_get)
LDR r0, [r0] LDR r0, [r0]
MOV pc, lr MOV pc, lr
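
Taken together, atomic_clear(), atomic_set() and atomic_get() support a simple take-and-reset pattern because the first two return the value observed before the operation. A minimal usage sketch in C; the flag variable and both helper functions are illustrative, and the declarations are assumed to come in via nanokernel.h as in the C files of this tree:

#include <nanokernel.h>

static atomic_t pending_events;

void producer(void)
{
    /* publish two event bits; the old value is returned but unused here */
    (void)atomic_set(&pending_events, 0x3);
}

void consumer(void)
{
    /* snapshot without modifying */
    atomic_val_t now = atomic_get(&pending_events);

    /* take ownership of all pending bits and reset them in one step */
    atomic_val_t taken = atomic_clear(&pending_events);

    /* process 'taken'; 'now' may already be stale by this point */
    (void)now;
    (void)taken;
}
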
/******************************************************************************* /**
* *
* atomic_inc - atomically increment a memory location * atomic_inc - atomically increment a memory location
* *
* This routine atomically increments the value in <target>. The operation is * This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose * done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the * restrictions with regards to the alignment and cache attributes of the
* atomic_t type. * atomic_t type.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: Contents of <target> before the atomic operation * RETURNS: Contents of <target> before the atomic operation
* *
* ERRNO: N/A * ERRNO: N/A
* *
* atomic_val_t atomic_inc * atomic_val_t atomic_inc
* ( * (
* atomic_t *target, /@ memory location to increment @/ * atomic_t *target, /@ memory location to increment @/
* ) * )
* *
*/ */
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc) SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc)
MOV r1, #1 MOV r1, #1
/* fall through into atomic_add */ /* fall through into atomic_add */
/******************************************************************************* /**
* *
* atomic_add - atomically add a value to a memory location * atomic_add - atomically add a value to a memory location
* *
* This routine atomically adds the contents of <target> and <value>, placing * This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic. * the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the * Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type. * alignment and cache attributes of the atomic_t type.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: Contents of <target> before the atomic operation * RETURNS: Contents of <target> before the atomic operation
* *
* ERRNO: N/A * ERRNO: N/A
* *
* atomic_val_t atomic_add * atomic_val_t atomic_add
* ( * (
* atomic_t *target, /@ memory location to add to @/ * atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/ * atomic_val_t value /@ value to add @/
* ) * )
*/ */
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add) SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
@ -193,54 +193,54 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
MOV r0, r2 /* return old value */ MOV r0, r2 /* return old value */
MOV pc, lr MOV pc, lr
/******************************************************************************* /**
* *
* atomic_dec - atomically decrement a memory location * atomic_dec - atomically decrement a memory location
* *
* This routine atomically decrements the value in <target>. The operation is * This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose * done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the * restrictions with regards to the alignment and cache attributes of the
* atomic_t type. * atomic_t type.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: Contents of <target> before the atomic operation * RETURNS: Contents of <target> before the atomic operation
* *
* ERRNO: N/A * ERRNO: N/A
* *
* atomic_val_t atomic_dec * atomic_val_t atomic_dec
* ( * (
* atomic_t *target, /@ memory location to decrement @/ * atomic_t *target, /@ memory location to decrement @/
* ) * )
* *
*/ */
SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_dec) SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_dec)
MOV r1, #1 MOV r1, #1
/* fall through into atomic_sub */ /* fall through into atomic_sub */
/******************************************************************************* /**
* *
* atomic_sub - atomically subtract a value from a memory location * atomic_sub - atomically subtract a value from a memory location
* *
* This routine atomically subtracts <value> from the contents of <target>, * This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer * placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to * arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type. * the alignment and cache attributes of the atomic_t type.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: Contents of <target> before the atomic operation * RETURNS: Contents of <target> before the atomic operation
* *
* ERRNO: N/A * ERRNO: N/A
* *
* atomic_val_t atomic_sub * atomic_val_t atomic_sub
* ( * (
* atomic_t *target, /@ memory location to subtract from @/ * atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/ * atomic_val_t value /@ value to subtract @/
* ) * )
* *
*/ */
SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub) SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub)
@ -253,28 +253,28 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub)
MOV r0, r2 /* return old value */ MOV r0, r2 /* return old value */
MOV pc, lr MOV pc, lr
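
The increment/add and decrement/subtract pairs above all return the value observed before the operation, which is what makes the classic reference-counting idiom work. A sketch under that contract; the object and all names are illustrative:

#include <nanokernel.h>

static atomic_t ref_count;

void object_ref(void)
{
    (void)atomic_inc(&ref_count);          /* old value returned, unused */
}

void object_unref(void)
{
    /* atomic_dec() returns the value *before* decrementing, so the
     * last holder observes 1 here and performs the cleanup */
    if (atomic_dec(&ref_count) == 1) {
        /* release the object's resources */
    }
}

void object_ref_many(int n)
{
    (void)atomic_add(&ref_count, n);       /* batched variant */
}
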
/****************************************************************************** /**
* *
* atomic_nand - atomically perform a bitwise NAND on a memory location * atomic_nand - atomically perform a bitwise NAND on a memory location
* *
* This routine atomically performs a bitwise NAND operation of the contents of * This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>. * <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the * Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type. * alignment and cache attributes of the atomic_t type.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: Contents of <target> before the atomic operation * RETURNS: Contents of <target> before the atomic operation
* *
* ERRNO: N/A * ERRNO: N/A
* *
* atomic_val_t atomic_nand * atomic_val_t atomic_nand
* ( * (
* atomic_t *target, /@ memory location to NAND @/ * atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/ * atomic_val_t value /@ NAND with this value @/
* ) * )
* *
*/ */
SECTION_FUNC(TEXT, atomic_nand) SECTION_FUNC(TEXT, atomic_nand)
@ -288,28 +288,28 @@ SECTION_FUNC(TEXT, atomic_nand)
MOV r0, r2 /* return old value */ MOV r0, r2 /* return old value */
MOV pc, lr MOV pc, lr
/****************************************************************************** /**
* *
* atomic_and - atomically perform a bitwise AND on a memory location * atomic_and - atomically perform a bitwise AND on a memory location
* *
* This routine atomically performs a bitwise AND operation of the contents of * This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>. * <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the * Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type. * alignment and cache attributes of the atomic_t type.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: Contents of <target> before the atomic operation * RETURNS: Contents of <target> before the atomic operation
* *
* ERRNO: N/A * ERRNO: N/A
* *
* atomic_val_t atomic_and * atomic_val_t atomic_and
* ( * (
* atomic_t *target, /@ memory location to AND @/ * atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/ * atomic_val_t value /@ AND with this value @/
* ) * )
* *
*/ */
SECTION_FUNC(TEXT, atomic_and) SECTION_FUNC(TEXT, atomic_and)
@ -322,28 +322,28 @@ SECTION_FUNC(TEXT, atomic_and)
MOV r0, r2 /* return old value */ MOV r0, r2 /* return old value */
MOV pc, lr MOV pc, lr
/******************************************************************************* /**
* *
* atomic_or - atomically perform a bitwise OR on memory location * atomic_or - atomically perform a bitwise OR on memory location
* *
* This routine atomically performs a bitwise OR operation of the contents of * This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>. * <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the * Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type. * alignment and cache attributes of the atomic_t type.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: Contents of <target> before the atomic operation * RETURNS: Contents of <target> before the atomic operation
* *
* ERRNO: N/A * ERRNO: N/A
* *
* atomic_val_t atomic_or * atomic_val_t atomic_or
* ( * (
* atomic_t *target, /@ memory location to OR @/ * atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/ * atomic_val_t value /@ OR with this value @/
* ) * )
* *
*/ */
SECTION_FUNC(TEXT, atomic_or) SECTION_FUNC(TEXT, atomic_or)
@ -356,28 +356,28 @@ SECTION_FUNC(TEXT, atomic_or)
MOV r0, r2 /* return old value */ MOV r0, r2 /* return old value */
MOV pc, lr MOV pc, lr
/******************************************************************************* /**
* *
* atomic_xor - atomically perform a bitwise XOR on a memory location * atomic_xor - atomically perform a bitwise XOR on a memory location
* *
* This routine atomically performs a bitwise XOR operation of the contents of * This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>. * <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the * Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type. * alignment and cache attributes of the atomic_t type.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: Contents of <target> before the atomic operation * RETURNS: Contents of <target> before the atomic operation
* *
* ERRNO: N/A * ERRNO: N/A
* *
* atomic_val_t atomic_xor * atomic_val_t atomic_xor
* ( * (
* atomic_t *target, /@ memory location to XOR @/ * atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/ * atomic_val_t value /@ XOR with this value @/
* ) * )
* *
*/ */
SECTION_FUNC(TEXT, atomic_xor) SECTION_FUNC(TEXT, atomic_xor)
@ -390,29 +390,29 @@ SECTION_FUNC(TEXT, atomic_xor)
MOV r0, r2 /* return old value */ MOV r0, r2 /* return old value */
MOV pc, lr MOV pc, lr
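
The four bitwise routines follow the same fetch-modify-store shape and return the pre-operation value, so an atomic_t can serve as an event/flag word. A sketch; the bit definitions and function names are illustrative:

#include <nanokernel.h>

#define EVT_RX 0x1
#define EVT_TX 0x2

static atomic_t event_bits;

void signal_rx(void)
{
    /* set EVT_RX; the returned old value tells us if it was already set */
    if (atomic_or(&event_bits, EVT_RX) & EVT_RX) {
        /* already pending: no need to wake anyone again */
    }
}

void ack_tx(void)
{
    (void)atomic_and(&event_bits, ~EVT_TX);  /* clear just the TX bit */
}

void toggle_rx(void)
{
    (void)atomic_xor(&event_bits, EVT_RX);   /* flip the RX bit */
}
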
/******************************************************************************* /**
* *
* atomic_cas - atomically compare-and-swap the contents of a memory location * atomic_cas - atomically compare-and-swap the contents of a memory location
* *
* This routine performs an atomic compare-and-swap, testing that the contents of * This routine performs an atomic compare-and-swap, testing that the contents of
* <target> contain <oldValue>, and if they do, setting the value of <target> * <target> contain <oldValue>, and if they do, setting the value of <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards * to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type. * to the alignment and cache attributes of the atomic_t type.
* *
* This routine can be used from both task and interrupt level. * This routine can be used from both task and interrupt level.
* *
* RETURNS: 1 if the swap is actually executed, 0 otherwise. * RETURNS: 1 if the swap is actually executed, 0 otherwise.
* *
* ERRNO: N/A * ERRNO: N/A
* *
* int atomic_cas * int atomic_cas
* ( * (
* atomic_t *target, /@ memory location to compare-and-swap @/ * atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/ * atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/ * atomic_val_t newValue, /@ swap with this value @/
* ) * )
* *
*/ */
SECTION_FUNC(TEXT, atomic_cas) SECTION_FUNC(TEXT, atomic_cas)
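
Because atomic_cas() reports success with 1 and failure with 0, it composes with atomic_get() into the usual lock-free read-modify-write loop. A sketch of a saturating counter built on that contract; all names are illustrative:

#include <nanokernel.h>

static atomic_t level;

/* raise 'level' by one, but never beyond 'max'; returns 1 on success */
int level_raise(atomic_val_t max)
{
    atomic_val_t old;

    do {
        old = atomic_get(&level);
        if (old >= max) {
            return 0;               /* saturated: nothing to do */
        }
        /* retry if another context changed 'level' in the meantime */
    } while (atomic_cas(&level, old, old + 1) == 0);

    return 1;
}
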
@ -46,7 +46,7 @@ unlocked. This achieves two purposes:
2. Zero Interrupt Latency (ZLI) is achievable via this by allowing certain 2. Zero Interrupt Latency (ZLI) is achievable via this by allowing certain
interrupts to set their priority to 1, thus being allowed in when interrupts interrupts to set their priority to 1, thus being allowed in when interrupts
are locked for regular interrupts. are locked for regular interrupts.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -59,18 +59,18 @@ _ASM_FILE_PROLOGUE
GTEXT(irq_lock) GTEXT(irq_lock)
GTEXT(irq_unlock) GTEXT(irq_unlock)
/******************************************************************************* /**
* *
* irq_lock - lock interrupts * irq_lock - lock interrupts
* *
* Prevent exceptions of priority lower than the two highest priorities from * Prevent exceptions of priority lower than the two highest priorities from
* interrupting the CPU. * interrupting the CPU.
* *
* This function can be called recursively: it will return a key to return the * This function can be called recursively: it will return a key to return the
* state of interrupt locking to the previous level. * state of interrupt locking to the previous level.
* *
* RETURNS: a key to return to the previous interrupt locking level * RETURNS: a key to return to the previous interrupt locking level
*/ */
SECTION_FUNC(TEXT,irq_lock) SECTION_FUNC(TEXT,irq_lock)
movs.n r1, #_EXC_IRQ_DEFAULT_PRIO movs.n r1, #_EXC_IRQ_DEFAULT_PRIO
@ -78,15 +78,15 @@ SECTION_FUNC(TEXT,irq_lock)
msr BASEPRI, r1 msr BASEPRI, r1
bx lr bx lr
/******************************************************************************* /**
* *
* irq_unlock - unlock interrupts * irq_unlock - unlock interrupts
* *
* Return the state of interrupt locking to a previous level, passed in via the * Return the state of interrupt locking to a previous level, passed in via the
* <key> parameter, obtained from a previous call to irq_lock(). * <key> parameter, obtained from a previous call to irq_lock().
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_FUNC(TEXT,irq_unlock) SECTION_FUNC(TEXT,irq_unlock)
msr BASEPRI, r0 msr BASEPRI, r0
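
In C callers the pair is used bracket-style; because the key records the previous BASEPRI state, the same code works whether or not the caller already held the lock. A sketch; the counter is illustrative:

#include <nanokernel.h>
#include <arch/cpu.h>

static unsigned int shared_counter;

void counter_bump(void)
{
    unsigned int key = irq_lock();   /* may be called with lock already held */

    shared_counter++;                /* protected against interrupts */

    irq_unlock(key);                 /* restores the previous lock level */
}
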
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
Core nanokernel fiber related primitives for the ARM Cortex-M processor Core nanokernel fiber related primitives for the ARM Cortex-M processor
architecture. architecture.
*/ */
#include <nanokernel.h> #include <nanokernel.h>
#include <arch/cpu.h> #include <arch/cpu.h>
@ -53,14 +53,14 @@ tNANO _nanokernel = {0};
#endif #endif
#if defined(CONFIG_CONTEXT_MONITOR) #if defined(CONFIG_CONTEXT_MONITOR)
/******************************************************************************* /**
* *
* _context_monitor_init - initialize context monitoring support * _context_monitor_init - initialize context monitoring support
* *
* Currently only inserts the new context in the list of active contexts. * Currently only inserts the new context in the list of active contexts.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */ static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */
) )
@ -81,26 +81,26 @@ static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */
} }
#endif /* CONFIG_CONTEXT_MONITOR */ #endif /* CONFIG_CONTEXT_MONITOR */
/******************************************************************************* /**
* *
* _NewContext - initialize a new context (thread) from its stack space * _NewContext - initialize a new context (thread) from its stack space
* *
* The control structure (CCS) is put at the lower address of the stack. An * The control structure (CCS) is put at the lower address of the stack. An
* initial context, to be "restored" by __pendsv(), is put at the other end of * initial context, to be "restored" by __pendsv(), is put at the other end of
* the stack, and thus reusable by the stack when not needed anymore. * the stack, and thus reusable by the stack when not needed anymore.
* *
* The initial context is an exception stack frame (ESF) since exiting the * The initial context is an exception stack frame (ESF) since exiting the
* PendSV exception will want to pop an ESF. Interestingly, even though the lsb of * PendSV exception will want to pop an ESF. Interestingly, even though the lsb of
* an instruction address to jump to must always be set since the CPU always * an instruction address to jump to must always be set since the CPU always
* runs in thumb mode, the ESF expects the real address of the instruction, * runs in thumb mode, the ESF expects the real address of the instruction,
* with the lsb *not* set (instructions are always aligned on 16 bit halfwords). * with the lsb *not* set (instructions are always aligned on 16 bit halfwords).
* Since the compiler automatically sets the lsb of function addresses, we have * Since the compiler automatically sets the lsb of function addresses, we have
* to unset it manually before storing it in the 'pc' field of the ESF. * to unset it manually before storing it in the 'pc' field of the ESF.
* *
* <options> is currently unused. * <options> is currently unused.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _NewContext( void _NewContext(
char *pStackMem, /* aligned stack memory */ char *pStackMem, /* aligned stack memory */
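
In C terms, the thumb-bit fixup described above is just a mask on bit 0 of the entry point before it is written into the ESF's 'pc' slot. An illustrative helper, not the kernel's actual code:

#include <stdint.h>

/* illustrative: the compiler sets the lsb of function addresses for
 * thumb mode, but the ESF wants the real, even address */
static inline uint32_t esf_pc_from_entry(void (*entry)(void))
{
    return (uint32_t)entry & ~(uint32_t)1;
}
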

/* /*
DESCRIPTION DESCRIPTION
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -56,19 +56,19 @@ GTEXT(nano_cpu_atomic_idle)
#define _SCR_INIT_BITS _SCB_SCR_SEVONPEND #define _SCR_INIT_BITS _SCB_SCR_SEVONPEND
/******************************************************************************* /**
* *
* _CpuIdleInit - initialization of CPU idle * _CpuIdleInit - initialization of CPU idle
* *
* Only called by nanoArchInit(). Sets SEVONPEND bit once for the system's * Only called by nanoArchInit(). Sets SEVONPEND bit once for the system's
* duration. * duration.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* C function prototype: * C function prototype:
* *
* void _CpuIdleInit (void); * void _CpuIdleInit (void);
*/ */
SECTION_FUNC(TEXT, _CpuIdleInit) SECTION_FUNC(TEXT, _CpuIdleInit)
ldr r1, =_SCB_SCR ldr r1, =_SCB_SCR
@ -78,36 +78,36 @@ SECTION_FUNC(TEXT, _CpuIdleInit)
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT #ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
/******************************************************************************* /**
* *
* _NanoIdleValGet - get the kernel idle setting * _NanoIdleValGet - get the kernel idle setting
* *
* Returns the nanokernel idle setting, in ticks. Only called by __systick(). * Returns the nanokernel idle setting, in ticks. Only called by __systick().
* *
* RETURNS: the requested number of ticks for the kernel to be idle * RETURNS: the requested number of ticks for the kernel to be idle
* *
* C function prototype: * C function prototype:
* *
* int32_t _NanoIdleValGet (void); * int32_t _NanoIdleValGet (void);
*/ */
SECTION_FUNC(TEXT, _NanoIdleValGet) SECTION_FUNC(TEXT, _NanoIdleValGet)
ldr r0, =_nanokernel ldr r0, =_nanokernel
ldr r0, [r0, #__tNANO_idle_OFFSET] ldr r0, [r0, #__tNANO_idle_OFFSET]
bx lr bx lr
/******************************************************************************* /**
* *
* _NanoIdleValClear - clear the kernel idle setting * _NanoIdleValClear - clear the kernel idle setting
* *
* Sets the nanokernel idle setting to 0. Only called by __systick(). * Sets the nanokernel idle setting to 0. Only called by __systick().
* *
* RETURNS: N/A * RETURNS: N/A
* *
* C function prototype: * C function prototype:
* *
* void _NanoIdleValClear (void); * void _NanoIdleValClear (void);
*/ */
SECTION_FUNC(TEXT, _NanoIdleValClear) SECTION_FUNC(TEXT, _NanoIdleValClear)
ldr r0, =_nanokernel ldr r0, =_nanokernel
@ -117,21 +117,21 @@ SECTION_FUNC(TEXT, _NanoIdleValClear)
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */ #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
/******************************************************************************* /**
* *
* nano_cpu_idle - power save idle routine for ARM Cortex-M * nano_cpu_idle - power save idle routine for ARM Cortex-M
* *
* This function will be called by the nanokernel idle loop or possibly within * This function will be called by the nanokernel idle loop or possibly within
* an implementation of _sys_power_save_idle in the microkernel when the * an implementation of _sys_power_save_idle in the microkernel when the
* '_sys_power_save_flag' variable is non-zero. The ARM 'wfi' instruction * '_sys_power_save_flag' variable is non-zero. The ARM 'wfi' instruction
* will be issued, causing a low-power consumption sleep mode. * will be issued, causing a low-power consumption sleep mode.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* C function prototype: * C function prototype:
* *
* void nano_cpu_idle (void); * void nano_cpu_idle (void);
*/ */
SECTION_FUNC(TEXT, nano_cpu_idle) SECTION_FUNC(TEXT, nano_cpu_idle)
/* clear BASEPRI so wfi is awakened by incoming interrupts */ /* clear BASEPRI so wfi is awakened by incoming interrupts */
@ -142,31 +142,31 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
bx lr bx lr
/******************************************************************************* /**
* *
* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode * nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
* *
* This function is utilized by the nanokernel object "wait" APIs for task * This function is utilized by the nanokernel object "wait" APIs for task
* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(), * contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
* and nano_task_fifo_get_wait(). * and nano_task_fifo_get_wait().
* *
* INTERNAL * INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows: * The requirements for nano_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be * 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are * atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments * enabled before the processor enters a low-power mode. See the comments
* in nano_task_lifo_get_wait(), for example, of the race condition that occurs * in nano_task_lifo_get_wait(), for example, of the race condition that occurs
* if this requirement is not met. * if this requirement is not met.
* *
* 2) After waking up from the low-power mode, the interrupt lockout state * 2) After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'imask' input parameter. * must be restored as indicated in the 'imask' input parameter.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* C function prototype: * C function prototype:
* *
* void nano_cpu_atomic_idle (unsigned int imask); * void nano_cpu_atomic_idle (unsigned int imask);
*/ */
SECTION_FUNC(TEXT, nano_cpu_atomic_idle) SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
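
The requirements above translate into a task-level wait loop that tests its condition with interrupts locked and hands the saved key to nano_cpu_atomic_idle(), which re-enables interrupts and sleeps in one indivisible step. A sketch; the flag and function name are illustrative:

#include <nanokernel.h>
#include <arch/cpu.h>

static volatile int data_ready;

void wait_for_data(void)
{
    unsigned int imask = irq_lock();

    while (!data_ready) {
        /* atomically re-enable interrupts and sleep; on wakeup the
         * lockout state recorded in 'imask' is restored */
        nano_cpu_atomic_idle(imask);
        imask = irq_lock();          /* re-lock before re-testing */
    }

    irq_unlock(imask);
}
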
@ -36,7 +36,7 @@ DESCRIPTION
Provides functions for performing kernel handling when exiting exceptions or Provides functions for performing kernel handling when exiting exceptions or
interrupts that are installed directly in the vector table (i.e. that are not interrupts that are installed directly in the vector table (i.e. that are not
wrapped around by _isr_wrapper()). wrapped around by _isr_wrapper()).
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -61,46 +61,46 @@ GDATA(_nanokernel)
#endif #endif
#define _EXIT_EXC_IF_FIBER_NOT_READY _EXIT_EXC_IF_FIBER_PREEMPTED #define _EXIT_EXC_IF_FIBER_NOT_READY _EXIT_EXC_IF_FIBER_PREEMPTED
/******************************************************************************* /**
* *
* _IntExit - kernel housekeeping when exiting interrupt handler installed * _IntExit - kernel housekeeping when exiting interrupt handler installed
* directly in vector table * directly in vector table
* *
* The kernel allows installing interrupt handlers (ISRs) directly into the vector * The kernel allows installing interrupt handlers (ISRs) directly into the vector
* table to get the lowest interrupt latency possible. This allows the ISR to be * table to get the lowest interrupt latency possible. This allows the ISR to be
* invoked directly without going through a software interrupt table. However, * invoked directly without going through a software interrupt table. However,
* upon exiting the ISR, some kernel work must still be performed, namely * upon exiting the ISR, some kernel work must still be performed, namely
* possible context switching. While ISRs connected in the software interrupt * possible context switching. While ISRs connected in the software interrupt
* table do this automatically via a wrapper, ISRs connected directly in the * table do this automatically via a wrapper, ISRs connected directly in the
* vector table must invoke _IntExit() as the *very last* action before * vector table must invoke _IntExit() as the *very last* action before
* returning. * returning.
* *
* e.g. * e.g.
* *
* void myISR(void) * void myISR(void)
* { * {
* printk("in %s\n", __FUNCTION__); * printk("in %s\n", __FUNCTION__);
* doStuff(); * doStuff();
* _IntExit(); * _IntExit();
* } * }
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit) SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
/* _IntExit falls through to _ExcExit (they are aliases of each other) */ /* _IntExit falls through to _ExcExit (they are aliases of each other) */
/******************************************************************************* /**
* *
* _ExcExit - kernel housekeeping when exiting exception handler installed * _ExcExit - kernel housekeeping when exiting exception handler installed
* directly in vector table * directly in vector table
* *
* See _IntExit(). * See _IntExit().
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit) SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
This module provides the _NanoFatalErrorHandler() routine for ARM Cortex-M. This module provides the _NanoFatalErrorHandler() routine for ARM Cortex-M.
*/ */
#include <toolchain.h> #include <toolchain.h>
#include <sections.h> #include <sections.h>
@ -62,23 +62,23 @@ const NANO_ESF _default_esf = {0xdeaddead, /* a1 */
0xdeaddead, /* xpsr */ 0xdeaddead, /* xpsr */
}; };
/******************************************************************************* /**
* *
* _NanoFatalErrorHandler - nanokernel fatal error handler * _NanoFatalErrorHandler - nanokernel fatal error handler
* *
* This routine is called when fatal error conditions are detected by software * This routine is called when fatal error conditions are detected by software
* and is responsible only for reporting the error. Once reported, it then * and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is * invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy. * responsible for implementing the error handling policy.
* *
* The caller is expected to always provide a usable ESF. In the event that the * The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either * fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>. * create its own or use a pointer to the global default ESF <_default_esf>.
* *
* RETURNS: This function does not return. * RETURNS: This function does not return.
* *
* \NOMANUAL * \NOMANUAL
*/ */
FUNC_NORETURN void _NanoFatalErrorHandler( FUNC_NORETURN void _NanoFatalErrorHandler(
unsigned int reason, /* reason that handler was called */ unsigned int reason, /* reason that handler was called */
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
Common fault handler for ARM Cortex-M processors. Common fault handler for ARM Cortex-M processors.
*/ */
#include <toolchain.h> #include <toolchain.h>
#include <sections.h> #include <sections.h>
@ -59,24 +59,24 @@ Common fault handler for ARM Cortex-M processors.
#endif #endif
#if (CONFIG_FAULT_DUMP == 1) #if (CONFIG_FAULT_DUMP == 1)
/******************************************************************************* /**
* *
* _FaultDump - dump information regarding fault (FAULT_DUMP == 1) * _FaultDump - dump information regarding fault (FAULT_DUMP == 1)
* *
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1 * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
* (short form). * (short form).
* *
* e.g. (precise bus error escalated to hard fault): * e.g. (precise bus error escalated to hard fault):
* *
* Fault! EXC #3, Thread: 0x200000dc, instr: 0x000011d3 * Fault! EXC #3, Thread: 0x200000dc, instr: 0x000011d3
* HARD FAULT: Escalation (see below)! * HARD FAULT: Escalation (see below)!
* MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000 * MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
* BFAR: 0xff001234 * BFAR: 0xff001234
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
void _FaultDump(const NANO_ESF *esf, int fault) void _FaultDump(const NANO_ESF *esf, int fault)
{ {
@ -118,16 +118,16 @@ void _FaultDump(const NANO_ESF *esf, int fault)
#endif #endif
#if (CONFIG_FAULT_DUMP == 2) #if (CONFIG_FAULT_DUMP == 2)
/******************************************************************************* /**
* *
* _FaultContextShow - dump context information * _FaultContextShow - dump context information
* *
* See _FaultDump() for example. * See _FaultDump() for example.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static void _FaultContextShow(const NANO_ESF *esf) static void _FaultContextShow(const NANO_ESF *esf)
{ {
@ -137,16 +137,16 @@ static void _FaultContextShow(const NANO_ESF *esf)
esf->pc); esf->pc);
} }
/******************************************************************************* /**
* *
* _MpuFault - dump MPU fault information * _MpuFault - dump MPU fault information
* *
* See _FaultDump() for example. * See _FaultDump() for example.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static void _MpuFault(const NANO_ESF *esf, static void _MpuFault(const NANO_ESF *esf,
int fromHardFault) int fromHardFault)
@ -172,16 +172,16 @@ static void _MpuFault(const NANO_ESF *esf,
} }
} }
/******************************************************************************* /**
* *
* _BusFault - dump bus fault information * _BusFault - dump bus fault information
* *
* See _FaultDump() for example. * See _FaultDump() for example.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static void _BusFault(const NANO_ESF *esf, static void _BusFault(const NANO_ESF *esf,
int fromHardFault) int fromHardFault)
@ -213,16 +213,16 @@ static void _BusFault(const NANO_ESF *esf,
} }
} }
/******************************************************************************* /**
* *
* _UsageFault - dump usage fault information * _UsageFault - dump usage fault information
* *
* See _FaultDump() for example. * See _FaultDump() for example.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static void _UsageFault(const NANO_ESF *esf) static void _UsageFault(const NANO_ESF *esf)
{ {
@ -253,16 +253,16 @@ static void _UsageFault(const NANO_ESF *esf)
_ScbUsageFaultAllFaultsReset(); _ScbUsageFaultAllFaultsReset();
} }
/******************************************************************************* /**
* *
* _HardFault - dump hard fault information * _HardFault - dump hard fault information
* *
* See _FaultDump() for example. * See _FaultDump() for example.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static void _HardFault(const NANO_ESF *esf) static void _HardFault(const NANO_ESF *esf)
{ {
@ -281,32 +281,32 @@ static void _HardFault(const NANO_ESF *esf)
} }
} }
/******************************************************************************* /**
* *
* _DebugMonitor - dump debug monitor exception information * _DebugMonitor - dump debug monitor exception information
* *
* See _FaultDump() for example. * See _FaultDump() for example.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static void _DebugMonitor(const NANO_ESF *esf) static void _DebugMonitor(const NANO_ESF *esf)
{ {
PR_EXC("***** Debug monitor exception (not implemented) *****\n"); PR_EXC("***** Debug monitor exception (not implemented) *****\n");
} }
/******************************************************************************* /**
* *
* _ReservedException - dump reserved exception information * _ReservedException - dump reserved exception information
* *
* See _FaultDump() for example. * See _FaultDump() for example.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static void _ReservedException(const NANO_ESF *esf, static void _ReservedException(const NANO_ESF *esf,
int fault) int fault)
@ -316,27 +316,27 @@ static void _ReservedException(const NANO_ESF *esf,
fault - 16); fault - 16);
} }
/******************************************************************************* /**
* *
* _FaultDump - dump information regarding fault (FAULT_DUMP == 2) * _FaultDump - dump information regarding fault (FAULT_DUMP == 2)
* *
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2 * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
* (long form). * (long form).
* *
* e.g. (precise bus error escalated to hard fault): * e.g. (precise bus error escalated to hard fault):
* *
* Executing context ID (thread): 0x200000dc * Executing context ID (thread): 0x200000dc
* Faulting instruction address: 0x000011d3 * Faulting instruction address: 0x000011d3
* ***** HARD FAULT ***** * ***** HARD FAULT *****
* Fault escalation (see below) * Fault escalation (see below)
* ***** BUS FAULT ***** * ***** BUS FAULT *****
* Precise data bus error * Precise data bus error
* Address: 0xff001234 * Address: 0xff001234
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static void _FaultDump(const NANO_ESF *esf, int fault) static void _FaultDump(const NANO_ESF *esf, int fault)
{ {
@ -363,23 +363,23 @@ static void _FaultDump(const NANO_ESF *esf, int fault)
} }
#endif /* FAULT_DUMP == 2 */ #endif /* FAULT_DUMP == 2 */
/******************************************************************************* /**
* *
* _Fault - fault handler * _Fault - fault handler
* *
* This routine is called when fatal error conditions are detected by hardware * This routine is called when fatal error conditions are detected by hardware
* and is responsible only for reporting the error. Once reported, it then * and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is * invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy. * responsible for implementing the error handling policy.
* *
* Since the ESF can be either on the MSP or PSP depending on whether an exception * Since the ESF can be either on the MSP or PSP depending on whether an exception
* or interrupt was already being handled, it is passed a pointer to both and has * or interrupt was already being handled, it is passed a pointer to both and has
* to find out on which the ESF is present. * to find out on which the ESF is present.
* *
* RETURNS: This function does not return. * RETURNS: This function does not return.
* *
* \NOMANUAL * \NOMANUAL
*/ */
void _Fault( void _Fault(
const NANO_ESF *msp, /* pointer to potential ESF on MSP */ const NANO_ESF *msp, /* pointer to potential ESF on MSP */
@ -394,16 +394,16 @@ void _Fault(
_SysFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf); _SysFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
} }
/******************************************************************************* /**
* *
* _FaultInit - initialization of fault handling * _FaultInit - initialization of fault handling
* *
* Turns on the desired hardware faults. * Turns on the desired hardware faults.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
void _FaultInit(void) void _FaultInit(void)
{ {
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
Fault handlers for ARM Cortex-M processors. Fault handlers for ARM Cortex-M processors.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -52,28 +52,28 @@ GTEXT(__usage_fault)
GTEXT(__debug_monitor) GTEXT(__debug_monitor)
GTEXT(__reserved) GTEXT(__reserved)
/******************************************************************************* /**
* *
* __fault - fault handler installed in the fault and reserved vectors * __fault - fault handler installed in the fault and reserved vectors
* *
* Entry point for the hard fault, MPU fault, bus fault, usage fault, debug * Entry point for the hard fault, MPU fault, bus fault, usage fault, debug
* monitor and reserved exceptions. * monitor and reserved exceptions.
* *
* Save the values of the MSP and PSP in r0 and r1 respectively, so they become the first * Save the values of the MSP and PSP in r0 and r1 respectively, so they become the first
* and second parameters to the _Fault() C function that will handle the rest. * and second parameters to the _Fault() C function that will handle the rest.
* This has to be done because at this point we do not know if the fault * This has to be done because at this point we do not know if the fault
* happened while handling an exception or not, and thus the ESF could be on * happened while handling an exception or not, and thus the ESF could be on
* either stack. _Fault() will find out where the ESF resides. * either stack. _Fault() will find out where the ESF resides.
* *
* Provides these symbols: * Provides these symbols:
* *
* __hard_fault * __hard_fault
* __mpu_fault * __mpu_fault
* __bus_fault * __bus_fault
* __usage_fault * __usage_fault
* __debug_monitor * __debug_monitor
* __reserved * __reserved
*/ */
SECTION_SUBSEC_FUNC(TEXT,__fault,__hard_fault) SECTION_SUBSEC_FUNC(TEXT,__fault,__hard_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__mpu_fault) SECTION_SUBSEC_FUNC(TEXT,__fault,__mpu_fault)
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This library implements find_last_set() and find_first_set() which returns the This library implements find_last_set() and find_first_set() which returns the
most and least significant bit set respectively. most and least significant bit set respectively.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -48,17 +48,17 @@ _ASM_FILE_PROLOGUE
GTEXT(find_last_set) GTEXT(find_last_set)
GTEXT(find_first_set) GTEXT(find_first_set)
/******************************************************************************* /**
* *
* find_last_set - find first set bit (searching from the most significant bit) * find_last_set - find first set bit (searching from the most significant bit)
* *
* This routine finds the first bit set in the argument passed it and * This routine finds the first bit set in the argument passed it and
* returns the index of that bit. Bits are numbered starting * returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that * at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero. * the value passed is zero.
* *
* RETURNS: most significant bit set * RETURNS: most significant bit set
*/ */
SECTION_FUNC(TEXT, find_last_set) SECTION_FUNC(TEXT, find_last_set)
@ -69,17 +69,17 @@ SECTION_FUNC(TEXT, find_last_set)
mov pc, lr mov pc, lr
/******************************************************************************* /**
* *
* find_first_set - find first set bit (searching from the least significant bit) * find_first_set - find first set bit (searching from the least significant bit)
* *
* This routine finds the first bit set in the argument passed it and * This routine finds the first bit set in the argument passed it and
* returns the index of that bit. Bits are numbered starting * returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that * at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero. * the value passed is zero.
* *
* RETURNS: least significant bit set * RETURNS: least significant bit set
*/ */
SECTION_FUNC(TEXT, find_first_set) SECTION_FUNC(TEXT, find_first_set)
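
Since both routines number bits from 1 starting at the least significant bit and return 0 for a zero argument, a few concrete values, worked out by hand:

#include <nanokernel.h>

void ffs_examples(void)
{
    int msb  = find_last_set(0x00000110);   /* 9: bit 9 (0x100) is highest */
    int lsb  = find_first_set(0x00000110);  /* 5: bit 5 (0x010) is lowest  */
    int none = find_first_set(0);           /* 0: no bit set               */

    (void)msb; (void)lsb; (void)none;
}
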
@ -38,7 +38,7 @@ point returns or when it aborts itself, the CPU is in thread mode and must
call _Swap() (which triggers a service call), but when in handler mode, the call _Swap() (which triggers a service call), but when in handler mode, the
CPU must exit handler mode to cause the context switch, and thus must queue CPU must exit handler mode to cause the context switch, and thus must queue
the PendSV exception. the PendSV exception.
*/ */
#ifdef CONFIG_MICROKERNEL #ifdef CONFIG_MICROKERNEL
#include <microkernel.h> #include <microkernel.h>
@ -51,18 +51,18 @@ the PendSV exception.
#include <nanokernel.h> #include <nanokernel.h>
#include <arch/cpu.h> #include <arch/cpu.h>
/******************************************************************************* /**
* *
* fiber_abort - abort the currently executing fiber * fiber_abort - abort the currently executing fiber
* *
* Possible reasons for a fiber aborting: * Possible reasons for a fiber aborting:
* *
* - the fiber explicitly aborts itself by calling this routine * - the fiber explicitly aborts itself by calling this routine
* - the fiber implicitly aborts by returning from its entry point * - the fiber implicitly aborts by returning from its entry point
* - the fiber encounters a fatal exception * - the fiber encounters a fatal exception
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void fiber_abort(void) void fiber_abort(void)
{ {
@ -39,7 +39,7 @@ that we are running in an exception.
Upon exception exit, it must be recorded that the task is not in an exception Upon exception exit, it must be recorded that the task is not in an exception
anymore. anymore.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -51,27 +51,27 @@ anymore.
_ASM_FILE_PROLOGUE _ASM_FILE_PROLOGUE
/******************************************************************************* /**
* *
* _GdbStubExcEntry - exception entry extra work when GDB_INFO is enabled * _GdbStubExcEntry - exception entry extra work when GDB_INFO is enabled
* *
* During normal system operation, the callee-saved registers are saved lazily * During normal system operation, the callee-saved registers are saved lazily
* only when a context switch is required. To allow looking at the current * only when a context switch is required. To allow looking at the current
* thread's registers while debugging an exception/interrupt, they must be saved * thread's registers while debugging an exception/interrupt, they must be saved
* upon entry since the handler could be using them: thus, looking at the CPU * upon entry since the handler could be using them: thus, looking at the CPU
* registers would show the current system state and not the current *thread*'s * registers would show the current system state and not the current *thread*'s
* state. * state.
* *
* Also, record the fact that the thread is currently interrupted so that VQEMU * Also, record the fact that the thread is currently interrupted so that VQEMU
* looks into the CCS and not the CPU registers to obtain the current thread's * looks into the CCS and not the CPU registers to obtain the current thread's
* register values. * register values.
* *
* NOTE: * NOTE:
* - must be called with interrupts locked * - must be called with interrupts locked
* - cannot use r0 without saving it first * - cannot use r0 without saving it first
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_FUNC(TEXT, _GdbStubExcEntry) SECTION_FUNC(TEXT, _GdbStubExcEntry)
@ -95,20 +95,20 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
bx lr bx lr
/******************************************************************************* /**
* *
* _GdbStubExcExit - exception exit extra clean up when GDB_INFO is enabled * _GdbStubExcExit - exception exit extra clean up when GDB_INFO is enabled
* *
* Record the fact that the thread is not interrupted anymore so that VQEMU * Record the fact that the thread is not interrupted anymore so that VQEMU
* looks at the CPU registers and not into the CCS to obtain the current * looks at the CPU registers and not into the CCS to obtain the current
* thread's register values. Only do this if this is not a nested exception. * thread's register values. Only do this if this is not a nested exception.
* *
* NOTE: * NOTE:
* - must be called with interrupts locked * - must be called with interrupts locked
* - cannot use r0 without saving it first * - cannot use r0 without saving it first
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_FUNC(TEXT, _GdbStubExcExit) SECTION_FUNC(TEXT, _GdbStubExcExit)
@ -129,24 +129,24 @@ SECTION_FUNC(TEXT, _GdbStubExcExit)
bx lr bx lr
/******************************************************************************* /**
* *
* _irq_vector_table_entry_with_gdb_stub - stub for ISRs installed directly in * _irq_vector_table_entry_with_gdb_stub - stub for ISRs installed directly in
* vector table * vector table
* *
* The kernel on Cortex-M3/4 can be configured so that ISRs * The kernel on Cortex-M3/4 can be configured so that ISRs
* are installed directly in the vector table for maximum efficiency. * are installed directly in the vector table for maximum efficiency.
* *
* When OS-awareness is enabled, a stub must be inserted to invoke * When OS-awareness is enabled, a stub must be inserted to invoke
* _GdbStubExcEntry() before the user ISR runs, to save the current task's * _GdbStubExcEntry() before the user ISR runs, to save the current task's
* registers. This stub thus gets inserted in the vector table instead of the * registers. This stub thus gets inserted in the vector table instead of the
* user's ISR. The user's IRQ vector table gets pushed after the vector table * user's ISR. The user's IRQ vector table gets pushed after the vector table
* automatically by the linker script: this is all transparent to the user. * automatically by the linker script: this is all transparent to the user.
* This stub must also act as a demuxer that finds the running exception and * This stub must also act as a demuxer that finds the running exception and
* invoke the user's real ISR. * invoke the user's real ISR.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_FUNC(TEXT, _irq_vector_table_entry_with_gdb_stub) SECTION_FUNC(TEXT, _irq_vector_table_entry_with_gdb_stub)
@ -35,7 +35,7 @@ DESCRIPTION
When GDB is enabled, the static IRQ vector table needs to install the When GDB is enabled, the static IRQ vector table needs to install the
_irq_vector_table_entry_with_gdb_stub stub to do some work before calling the _irq_vector_table_entry_with_gdb_stub stub to do some work before calling the
user-installed ISRs. user-installed ISRs.
*/ */
#include <toolchain.h> #include <toolchain.h>
#include <sections.h> #include <sections.h>
@ -38,23 +38,23 @@ point returns or when it aborts itself, the CPU is in thread mode and must
call _Swap() (which triggers a service call), but when in handler mode, the call _Swap() (which triggers a service call), but when in handler mode, the
CPU must exit handler mode to cause the context switch, and thus must queue CPU must exit handler mode to cause the context switch, and thus must queue
the PendSV exception. the PendSV exception.
*/ */
#include <toolchain.h> #include <toolchain.h>
#include <sections.h> #include <sections.h>
#include <nanokernel.h> #include <nanokernel.h>
#include <arch/cpu.h> #include <arch/cpu.h>
/******************************************************************************* /**
* *
* _IntLibInit - initialize interrupts * _IntLibInit - initialize interrupts
* *
* Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO and * Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO and
* not 0, which they have it set to when coming out of reset. This ensures that * not 0, which they have it set to when coming out of reset. This ensures that
* interrupt locking via BASEPRI works as expected. * interrupt locking via BASEPRI works as expected.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _IntLibInit(void) void _IntLibInit(void)
{ {
@ -35,7 +35,7 @@ DESCRIPTION
Interrupt management: enabling/disabling and dynamic ISR connecting/replacing. Interrupt management: enabling/disabling and dynamic ISR connecting/replacing.
SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime. SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime.
*/ */
#include <nanokernel.h> #include <nanokernel.h>
#include <arch/cpu.h> #include <arch/cpu.h>
@ -46,18 +46,18 @@ SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime.
extern void __reserved(void); extern void __reserved(void);
/******************************************************************************* /**
* *
* irq_handler_set - replace an interrupt handler with another * irq_handler_set - replace an interrupt handler with another
* *
* An interrupt's ISR can be replaced at runtime. Care must be taken that the * An interrupt's ISR can be replaced at runtime. Care must be taken that the
* interrupt is disabled before doing this. * interrupt is disabled before doing this.
* *
* This routine will hang if <old> is not found in the table and ASSERT_ON is * This routine will hang if <old> is not found in the table and ASSERT_ON is
* enabled. * enabled.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void irq_handler_set(unsigned int irq, void irq_handler_set(unsigned int irq,
void (*old)(void *arg), void (*old)(void *arg),
@ -76,16 +76,16 @@ void irq_handler_set(unsigned int irq,
irq_unlock_inline(key); irq_unlock_inline(key);
} }
/******************************************************************************* /**
* *
* irq_enable - enable an interrupt line * irq_enable - enable an interrupt line
* *
* Clear possible pending interrupts on the line, and enable the interrupt * Clear possible pending interrupts on the line, and enable the interrupt
* line. After this call, the CPU will receive interrupts for the specified * line. After this call, the CPU will receive interrupts for the specified
* <irq>. * <irq>.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void irq_enable(unsigned int irq) void irq_enable(unsigned int irq)
{ {
@ -94,35 +94,35 @@ void irq_enable(unsigned int irq)
_NvicIrqEnable(irq); _NvicIrqEnable(irq);
} }
/******************************************************************************* /**
* *
* irq_disable - disable an interrupt line * irq_disable - disable an interrupt line
* *
* Disable an interrupt line. After this call, the CPU will stop receiving * Disable an interrupt line. After this call, the CPU will stop receiving
* interrupts for the specified <irq>. * interrupts for the specified <irq>.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void irq_disable(unsigned int irq) void irq_disable(unsigned int irq)
{ {
_NvicIrqDisable(irq); _NvicIrqDisable(irq);
} }
/******************************************************************************* /**
* *
* irq_priority_set - set an interrupt's priority * irq_priority_set - set an interrupt's priority
* *
* Valid values are from 1 to 255. Interrupts of priority 1 are not masked when * Valid values are from 1 to 255. Interrupts of priority 1 are not masked when
* interrupts are locked system-wide, so care must be taken when using them. ISRs * interrupts are locked system-wide, so care must be taken when using them. ISRs
* installed with priority 1 cannot make kernel calls. * installed with priority 1 cannot make kernel calls.
* *
* Priority 0 is reserved for kernel usage and cannot be used. * Priority 0 is reserved for kernel usage and cannot be used.
* *
* The priority is verified if ASSERT_ON is enabled. * The priority is verified if ASSERT_ON is enabled.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void irq_priority_set(unsigned int irq, void irq_priority_set(unsigned int irq,
unsigned int prio) unsigned int prio)
@ -131,17 +131,17 @@ void irq_priority_set(unsigned int irq,
_NvicIrqPrioSet(irq, _EXC_PRIO(prio)); _NvicIrqPrioSet(irq, _EXC_PRIO(prio));
} }
/******************************************************************************* /**
* *
* _irq_spurious - spurious interrupt handler * _irq_spurious - spurious interrupt handler
* *
* Installed in all dynamic interrupt slots at boot time. Throws an error if * Installed in all dynamic interrupt slots at boot time. Throws an error if
* called. * called.
* *
* See __reserved(). * See __reserved().
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _irq_spurious(void *unused) void _irq_spurious(void *unused)
{ {
@ -149,18 +149,18 @@ void _irq_spurious(void *unused)
__reserved(); __reserved();
} }
/******************************************************************************* /**
* *
* irq_connect - connect an ISR to an interrupt line * irq_connect - connect an ISR to an interrupt line
* *
* <isr> is connected to interrupt line <irq> (exception #<irq>+16). No prior * <isr> is connected to interrupt line <irq> (exception #<irq>+16). No prior
 * ISR can have been connected on the <irq> interrupt line since the system booted. * ISR can have been connected on the <irq> interrupt line since the system booted.
* *
* This routine will hang if another ISR was connected for interrupt line <irq> * This routine will hang if another ISR was connected for interrupt line <irq>
* and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently. * and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently.
* *
* RETURNS: the interrupt line number * RETURNS: the interrupt line number
*/ */
int irq_connect(unsigned int irq, int irq_connect(unsigned int irq,
unsigned int prio, unsigned int prio,
@ -172,16 +172,16 @@ int irq_connect(unsigned int irq,
return irq; return irq;
} }
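A typical boot-time hookup, assuming the trailing parameters of irq_connect() are the ISR and its argument, as suggested by irq_handler_set() at the top of this file (names and numbers hypothetical):

    extern int irq_connect(unsigned int irq, unsigned int prio,
                           void (*isr)(void *arg), void *arg);
    extern void irq_enable(unsigned int irq);

    static void my_dev_isr(void *arg)
    {
        /* acknowledge the (hypothetical) device here */
    }

    void my_dev_init(void)
    {
        irq_connect(5, 2, my_dev_isr, 0);  /* must be the first ISR on line 5 */
        irq_enable(5);
    }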
/******************************************************************************* /**
* *
* irq_disconnect - disconnect an ISR from an interrupt line * irq_disconnect - disconnect an ISR from an interrupt line
* *
* Interrupt line <irq> (exception #<irq>+16) is disconnected from its ISR and * Interrupt line <irq> (exception #<irq>+16) is disconnected from its ISR and
* the latter is replaced by _irq_spurious(). irq_disable() should have * the latter is replaced by _irq_spurious(). irq_disable() should have
* been called before invoking this routine. * been called before invoking this routine.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void irq_disconnect(unsigned int irq) void irq_disconnect(unsigned int irq)
{ {
View file
@ -35,7 +35,7 @@ DESCRIPTION
Wrapper installed in vector table for handling dynamic interrupts that accept Wrapper installed in vector table for handling dynamic interrupts that accept
a parameter. a parameter.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -53,19 +53,19 @@ GDATA(_sw_isr_table)
GTEXT(_isr_wrapper) GTEXT(_isr_wrapper)
GTEXT(_IntExit) GTEXT(_IntExit)
/******************************************************************************* /**
* *
* _isr_wrapper - wrapper around ISRs when inserted in software ISR table * _isr_wrapper - wrapper around ISRs when inserted in software ISR table
* *
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table using * When inserted in the vector table, _isr_wrapper() demuxes the ISR table using
* the running interrupt number as the index, and invokes the registered ISR * the running interrupt number as the index, and invokes the registered ISR
 * with its corresponding argument. When returning from the ISR, it determines * with its corresponding argument. When returning from the ISR, it determines
* if a context switch needs to happen (see documentation for __pendsv()) and * if a context switch needs to happen (see documentation for __pendsv()) and
* pends the PendSV exception if so: the latter will perform the context switch * pends the PendSV exception if so: the latter will perform the context switch
* itself. * itself.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_FUNC(TEXT, _isr_wrapper) SECTION_FUNC(TEXT, _isr_wrapper)
_GDB_STUB_EXC_ENTRY _GDB_STUB_EXC_ENTRY
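In C terms, the demux step described above behaves like the following sketch; the entry layout is an assumption for illustration, not the actual _sw_isr_table definition:

    struct isr_entry {                   /* assumed layout of one table slot */
        void *arg;
        void (*isr)(void *arg);
    };
    extern struct isr_entry _sw_isr_table[];

    static void isr_demux(unsigned int running_irq)
    {
        struct isr_entry *e = &_sw_isr_table[running_irq];
        e->isr(e->arg);                  /* invoke registered ISR with its arg */
        /* on exit, pend PendSV if a context switch is required */
    }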
View file
@ -46,7 +46,7 @@ Typically, only those members that are accessed by assembly language routines
are defined; however, it doesn't hurt to define all fields for the sake of are defined; however, it doesn't hurt to define all fields for the sake of
completeness. completeness.
*/ */
#include <gen_offset.h> #include <gen_offset.h>
#include <nano_private.h> #include <nano_private.h>
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This module implements the routines necessary for thread context switching This module implements the routines necessary for thread context switching
on ARM Cortex-M3/M4 CPUs. on ARM Cortex-M3/M4 CPUs.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -51,23 +51,23 @@ GTEXT(__pendsv)
GDATA(_nanokernel) GDATA(_nanokernel)
/******************************************************************************* /**
* *
* __pendsv - PendSV exception handler, handling context switches * __pendsv - PendSV exception handler, handling context switches
* *
* The PendSV exception is the only context in the system that can perform * The PendSV exception is the only context in the system that can perform
* context switching. When an execution context finds out it has to switch * context switching. When an execution context finds out it has to switch
* contexts, it pends the PendSV exception. * contexts, it pends the PendSV exception.
* *
* When PendSV is pended, the decision that a context switch must happen has * When PendSV is pended, the decision that a context switch must happen has
* already been taken. In other words, when __pendsv() runs, we *know* we have * already been taken. In other words, when __pendsv() runs, we *know* we have
* to swap *something*. * to swap *something*.
* *
* The scheduling algorithm is simple: schedule the head of the runnable FIBER * The scheduling algorithm is simple: schedule the head of the runnable FIBER
* context list, which is represented by _nanokernel.fiber. If there are no * context list, which is represented by _nanokernel.fiber. If there are no
* runnable FIBER contexts, then schedule the TASK context represented by * runnable FIBER contexts, then schedule the TASK context represented by
* _nanokernel.task. The _nanokernel.task field will never be NULL. * _nanokernel.task. The _nanokernel.task field will never be NULL.
*/ */
SECTION_FUNC(TEXT, __pendsv) SECTION_FUNC(TEXT, __pendsv)
@ -146,15 +146,15 @@ SECTION_FUNC(TEXT, __pendsv)
/* exc return */ /* exc return */
bx lr bx lr
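Reduced to C, the scheduling rule stated above is a one-liner; tNANO and tCCS details beyond the fields named in the comment are assumptions:

    extern tNANO _nanokernel;            /* has .fiber and .task pointers */

    static tCCS *pendsv_pick_next(void)
    {
        /* head of runnable fiber list if any, else the task (never NULL) */
        return _nanokernel.fiber ? _nanokernel.fiber : _nanokernel.task;
    }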
/******************************************************************************* /**
* *
* __svc - service call handler * __svc - service call handler
* *
* The service call (svc) is only used in _Swap() to enter handler mode so we * The service call (svc) is only used in _Swap() to enter handler mode so we
* can go through the PendSV exception to perform a context switch. * can go through the PendSV exception to perform a context switch.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_FUNC(TEXT, __svc) SECTION_FUNC(TEXT, __svc)
@ -178,38 +178,38 @@ SECTION_FUNC(TEXT, __svc)
/* handler mode exit, to PendSV */ /* handler mode exit, to PendSV */
bx lr bx lr
/******************************************************************************* /**
* *
* _Swap - initiate a cooperative context switch * _Swap - initiate a cooperative context switch
* *
* The _Swap() routine is invoked by various nanokernel services to effect * The _Swap() routine is invoked by various nanokernel services to effect
 * a cooperative context switch. Prior to invoking _Swap(), the caller * a cooperative context switch. Prior to invoking _Swap(), the caller
* disables interrupts via irq_lock() and the return 'key' is passed as a * disables interrupts via irq_lock() and the return 'key' is passed as a
* parameter to _Swap(). The 'key' actually represents the BASEPRI register * parameter to _Swap(). The 'key' actually represents the BASEPRI register
* prior to disabling interrupts via the BASEPRI mechanism. * prior to disabling interrupts via the BASEPRI mechanism.
* *
* _Swap() itself does not do much. * _Swap() itself does not do much.
* *
* It simply stores the intlock key (the BASEPRI value) parameter into * It simply stores the intlock key (the BASEPRI value) parameter into
* current->basepri, and then triggers a service call exception (svc) to setup * current->basepri, and then triggers a service call exception (svc) to setup
* the PendSV exception, which does the heavy lifting of context switching. * the PendSV exception, which does the heavy lifting of context switching.
* This is the only place we have to save BASEPRI since the other paths to * This is the only place we have to save BASEPRI since the other paths to
* __pendsv all come from handling an interrupt, which means we know the * __pendsv all come from handling an interrupt, which means we know the
* interrupts were not locked: in that case the BASEPRI value is 0. * interrupts were not locked: in that case the BASEPRI value is 0.
* *
 * Given that _Swap() is called to effect a cooperative context switch, * Given that _Swap() is called to effect a cooperative context switch,
* only the caller-saved integer registers need to be saved in the tCCS of the * only the caller-saved integer registers need to be saved in the tCCS of the
* outgoing context. This is all performed by the hardware, which stores it in * outgoing context. This is all performed by the hardware, which stores it in
* its exception stack frame, created when handling the svc exception. * its exception stack frame, created when handling the svc exception.
* *
* RETURNS: may contain a return value setup by a call to fiberRtnValueSet() * RETURNS: may contain a return value setup by a call to fiberRtnValueSet()
* *
* C function prototype: * C function prototype:
* *
* unsigned int _Swap (unsigned int basepri); * unsigned int _Swap (unsigned int basepri);
* *
*/ */
SECTION_FUNC(TEXT, _Swap) SECTION_FUNC(TEXT, _Swap)
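As the comment describes, every caller pairs _Swap() with an interrupt lock; a rough call-site sketch (irq_lock()'s exact signature is assumed):

    extern unsigned int irq_lock(void);
    extern unsigned int _Swap(unsigned int basepri);

    static unsigned int block_current_context(void)
    {
        unsigned int key = irq_lock();   /* key is the pre-lock BASEPRI value */
        /* ... enqueue the current context on some wait object ... */
        return _Swap(key);               /* may return a fiberRtnValueSet() value */
    }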
View file
@ -38,7 +38,7 @@ point returns or when it aborts itself, the CPU is in thread mode and must
call the equivalent of task_abort(<self>), but when in handler mode, the call the equivalent of task_abort(<self>), but when in handler mode, the
CPU must queue a packet to K_swapper(), then exit handler mode to queue the CPU must queue a packet to K_swapper(), then exit handler mode to queue the
PendSV exception and cause the immediate context switch to K_swapper. PendSV exception and cause the immediate context switch to K_swapper.
*/ */
#ifdef CONFIG_MICROKERNEL #ifdef CONFIG_MICROKERNEL
@ -52,20 +52,20 @@ PendSV exception and cause the immediate context switch to K_swapper.
static struct k_args cmd_packet; static struct k_args cmd_packet;
/******************************************************************************* /**
* *
* _TaskAbort - abort the current task * _TaskAbort - abort the current task
* *
* Possible reasons for a task aborting: * Possible reasons for a task aborting:
* *
* - the task explicitly aborts itself by calling this routine * - the task explicitly aborts itself by calling this routine
* - the task implicitly aborts by returning from its entry point * - the task implicitly aborts by returning from its entry point
* - the task encounters a fatal exception * - the task encounters a fatal exception
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
void _TaskAbort(void) void _TaskAbort(void)
{ {
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This header file is used to specify and describe board-level aspects for the This header file is used to specify and describe board-level aspects for the
'fsl_frdm_k64f' BSP. 'fsl_frdm_k64f' BSP.
*/ */
#ifndef _BOARD__H_ #ifndef _BOARD__H_
#define _BOARD__H_ #define _BOARD__H_
View file
@ -40,7 +40,7 @@ a) When software-managed ISRs (SW_ISR_TABLE) is enabled, and in that case it
b) When the BSP is written so that device ISRs are installed directly in the b) When the BSP is written so that device ISRs are installed directly in the
vector table, they are enumerated here. vector table, they are enumerated here.
*/ */
#include <toolchain.h> #include <toolchain.h>
#include <sections.h> #include <sections.h>
View file
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
This is the linker script for both standard images and XIP images. This is the linker script for both standard images and XIP images.
*/ */
/* Flash base address and size */ /* Flash base address and size */
#define FLASH_START 0x00000000 #define FLASH_START 0x00000000
View file
@ -37,7 +37,7 @@ is to hard hang, sleeping.
This might be preferable to rebooting when debugging, or because This might be preferable to rebooting when debugging, or because
rebooting might trigger the exact same problem over and over. rebooting might trigger the exact same problem over and over.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This module provides routines to initialize and support board-level hardware This module provides routines to initialize and support board-level hardware
for the fsl_frdm_k64f BSP. for the fsl_frdm_k64f BSP.
*/ */
#include <nanokernel.h> #include <nanokernel.h>
#include <board.h> #include <board.h>
@ -94,21 +94,21 @@ uint8_t __security_frdm_k64f_section __security_frdm_k64f[] = {
/* Reserved for FlexNVM feature (unsupported by this MCU) */ /* Reserved for FlexNVM feature (unsupported by this MCU) */
0xFF, 0xFF}; 0xFF, 0xFF};
/******************************************************************************* /**
* *
* clkInit - initialize the system clock * clkInit - initialize the system clock
* *
* This routine will configure the multipurpose clock generator (MCG) to * This routine will configure the multipurpose clock generator (MCG) to
* set up the system clock. * set up the system clock.
* The MCG has nine possible modes, including Stop mode. This routine assumes * The MCG has nine possible modes, including Stop mode. This routine assumes
* that the current MCG mode is FLL Engaged Internal (FEI), as from reset. * that the current MCG mode is FLL Engaged Internal (FEI), as from reset.
* It transitions through the FLL Bypassed External (FBE) and * It transitions through the FLL Bypassed External (FBE) and
* PLL Bypassed External (PBE) modes to get to the desired * PLL Bypassed External (PBE) modes to get to the desired
* PLL Engaged External (PEE) mode and generate the maximum 120 MHz system clock. * PLL Engaged External (PEE) mode and generate the maximum 120 MHz system clock.
* *
* RETURNS: N/A * RETURNS: N/A
* *
*/ */
static void clkInit(void) static void clkInit(void)
{ {
@ -247,15 +247,15 @@ static void clkInit(void)
#if defined(DO_CONSOLE_INIT) #if defined(DO_CONSOLE_INIT)
/******************************************************************************* /**
* *
* consoleInit - initialize target-only console * consoleInit - initialize target-only console
* *
* Only used for debugging. * Only used for debugging.
* *
* RETURNS: N/A * RETURNS: N/A
* *
*/ */
#include <console/uart_console.h> #include <console/uart_console.h>
@ -298,16 +298,16 @@ static void consoleInit(void)
} while ((0)) } while ((0))
#endif /* DO_CONSOLE_INIT */ #endif /* DO_CONSOLE_INIT */
/******************************************************************************* /**
* *
* _InitHardware - perform basic hardware initialization * _InitHardware - perform basic hardware initialization
* *
* Initialize the interrupt controller device drivers and the * Initialize the interrupt controller device drivers and the
* Kinetis UART device driver. * Kinetis UART device driver.
* Also initialize the timer device driver, if required. * Also initialize the timer device driver, if required.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _InitHardware(void) void _InitHardware(void)
{ {
View file
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
This module initializes the watchdog for the fsl_frdm_k64f BSP. This module initializes the watchdog for the fsl_frdm_k64f BSP.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -53,7 +53,7 @@ GTEXT(_WdogInit)
#define WDOG_UNLOCK_1_CMD 0xC520 #define WDOG_UNLOCK_1_CMD 0xC520
#define WDOG_UNLOCK_2_CMD 0xD928 #define WDOG_UNLOCK_2_CMD 0xD928
/******************************************************************************* /**
* *
* _WdogInit - Watchdog timer disable routine * _WdogInit - Watchdog timer disable routine
* *
View file
@ -40,16 +40,16 @@
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
/******************************************************************************* /**
* *
* _IpsrGet - obtain value of IPSR register * _IpsrGet - obtain value of IPSR register
* *
* Obtain and return current value of IPSR register. * Obtain and return current value of IPSR register.
* *
* RETURNS: the contents of the IPSR register * RETURNS: the contents of the IPSR register
* *
* \NOMANUAL * \NOMANUAL
*/ */
static ALWAYS_INLINE uint32_t _IpsrGet(void) static ALWAYS_INLINE uint32_t _IpsrGet(void)
{ {
@ -59,16 +59,16 @@ static ALWAYS_INLINE uint32_t _IpsrGet(void)
return vector; return vector;
} }
/******************************************************************************* /**
* *
* _MspSet - set the value of the Main Stack Pointer register * _MspSet - set the value of the Main Stack Pointer register
* *
* Store the value of <msp> in MSP register. * Store the value of <msp> in MSP register.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static ALWAYS_INLINE void _MspSet(uint32_t msp /* value to store in MSP */ static ALWAYS_INLINE void _MspSet(uint32_t msp /* value to store in MSP */
) )
View file
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
Exception/interrupt context helpers. Exception/interrupt context helpers.
*/ */
#ifndef _ARM_CORTEXM_ISR__H_ #ifndef _ARM_CORTEXM_ISR__H_
#define _ARM_CORTEXM_ISR__H_ #define _ARM_CORTEXM_ISR__H_
@ -47,19 +47,19 @@ Exception/interrupt context helpers.
#else #else
/******************************************************************************* /**
* *
* _IsInIsr - find out if running in an ISR context * _IsInIsr - find out if running in an ISR context
* *
* The current executing vector is found in the IPSR register. We consider the * The current executing vector is found in the IPSR register. We consider the
* IRQs (exception 16 and up), and the PendSV and SYSTICK exceptions, to be * IRQs (exception 16 and up), and the PendSV and SYSTICK exceptions, to be
 * interrupts. Taking a fault within an exception is also considered to be in * interrupts. Taking a fault within an exception is also considered to be in
 * interrupt context. * interrupt context.
* *
* RETURNS: 1 if in ISR, 0 if not. * RETURNS: 1 if in ISR, 0 if not.
* *
* \NOMANUAL * \NOMANUAL
*/ */
static ALWAYS_INLINE int _IsInIsr(void) static ALWAYS_INLINE int _IsInIsr(void)
{ {
uint32_t vector = _IpsrGet(); uint32_t vector = _IpsrGet();
@ -68,18 +68,18 @@ static ALWAYS_INLINE int _IsInIsr(void)
return (vector > 13) || (vector && _ScbIsNestedExc()); return (vector > 13) || (vector && _ScbIsNestedExc());
} }
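A sketch of the kind of guard this predicate enables (the surrounding function is hypothetical):

    static void my_service(void)
    {
        if (_IsInIsr()) {
            /* interrupt context: defer the work, never block here */
        } else {
            /* task/fiber context: blocking operations are permissible */
        }
    }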
/******************************************************************************* /**
* _ExcSetup - setup system exceptions * _ExcSetup - setup system exceptions
* *
* Set exception priorities to conform with the BASEPRI locking mechanism. * Set exception priorities to conform with the BASEPRI locking mechanism.
* Set PendSV priority to lowest possible. * Set PendSV priority to lowest possible.
* *
* Enable fault exceptions. * Enable fault exceptions.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static ALWAYS_INLINE void _ExcSetup(void) static ALWAYS_INLINE void _ExcSetup(void)
{ {
View file
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
Stack helper functions. Stack helper functions.
*/ */
#ifndef _ARM_CORTEXM_STACK__H_ #ifndef _ARM_CORTEXM_STACK__H_
#define _ARM_CORTEXM_STACK__H_ #define _ARM_CORTEXM_STACK__H_
@ -68,17 +68,17 @@ Stack helper functions.
extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE]; extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE];
/******************************************************************************* /**
* *
* _InterruptStackSetup - setup interrupt stack * _InterruptStackSetup - setup interrupt stack
* *
* On Cortex-M, the interrupt stack is registered in the MSP (main stack * On Cortex-M, the interrupt stack is registered in the MSP (main stack
* pointer) register, and switched to automatically when taking an exception. * pointer) register, and switched to automatically when taking an exception.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static ALWAYS_INLINE void _InterruptStackSetup(void) static ALWAYS_INLINE void _InterruptStackSetup(void)
{ {
View file
@ -39,7 +39,7 @@ This file is also included by assembly language files which must #define
_ASMLANGUAGE before including this header file. Note that nanokernel assembly _ASMLANGUAGE before including this header file. Note that nanokernel assembly
source files obtain structure offset values via "absolute symbols" in the source files obtain structure offset values via "absolute symbols" in the
offsets.o module. offsets.o module.
*/ */
#ifndef _NANO_PRIVATE_H #ifndef _NANO_PRIVATE_H
#define _NANO_PRIVATE_H #define _NANO_PRIVATE_H
@ -184,18 +184,18 @@ static ALWAYS_INLINE void nanoArchInit(void)
_CpuIdleInit(); _CpuIdleInit();
} }
/******************************************************************************* /**
* *
* fiberRtnValueSet - set the return value for the specified fiber (inline) * fiberRtnValueSet - set the return value for the specified fiber (inline)
* *
 * The register used to store the return value from a function call invocation * The register used to store the return value from a function call invocation
 * is set to <value>. It is assumed that the specified <fiber> is pending, and thus * is set to <value>. It is assumed that the specified <fiber> is pending, and thus
* the fiber's context is stored in its tCCS structure. * the fiber's context is stored in its tCCS structure.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static ALWAYS_INLINE void fiberRtnValueSet( static ALWAYS_INLINE void fiberRtnValueSet(
tCCS *fiber, /* pointer to fiber */ tCCS *fiber, /* pointer to fiber */
View file
@ -35,7 +35,7 @@ DESCRIPTION
ARM-specific parts of start_task(). ARM-specific parts of start_task().
Currently empty, only here for abstraction. Currently empty, only here for abstraction.
*/ */
#ifndef _START_TASK_ARCH__H_ #ifndef _START_TASK_ARCH__H_
#define _START_TASK_ARCH__H_ #define _START_TASK_ARCH__H_
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This header file is used to specify and describe board-level aspects for This header file is used to specify and describe board-level aspects for
the 'ti_lm3s6965' BSP. the 'ti_lm3s6965' BSP.
*/ */
#ifndef _BOARD__H_ #ifndef _BOARD__H_
#define _BOARD__H_ #define _BOARD__H_
View file
@ -40,7 +40,7 @@ a) When software-managed ISRs (SW_ISR_TABLE) is enabled, and in that case it
b) When the BSP is written so that device ISRs are installed directly in the b) When the BSP is written so that device ISRs are installed directly in the
vector table, they are enumerated here. vector table, they are enumerated here.
*/ */
#include <toolchain.h> #include <toolchain.h>
#include <sections.h> #include <sections.h>
View file
@ -37,7 +37,7 @@ is to hard hang, sleeping.
This might be preferable to rebooting when debugging, or because This might be preferable to rebooting when debugging, or because
rebooting might trigger the exact same problem over and over. rebooting might trigger the exact same problem over and over.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
View file
@ -37,7 +37,7 @@ Library for controlling target-specific devices present in the 0x400fe000
peripherals memory region. peripherals memory region.
Currently, only enabling the main OSC with its default value is implemented. Currently, only enabling the main OSC with its default value is implemented.
*/ */
#include <stdint.h> #include <stdint.h>
#include <toolchain.h> #include <toolchain.h>
@ -49,12 +49,12 @@ Currently, only enabling the main OSC with default value is implemented.
volatile struct __scp __scp_section __scp; volatile struct __scp __scp_section __scp;
/******************************************************************************* /**
* *
* _ScpMainOscEnable - enable main oscillator with default frequency of 6MHz * _ScpMainOscEnable - enable main oscillator with default frequency of 6MHz
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _ScpMainOscEnable(void) void _ScpMainOscEnable(void)
{ {
union __rcc reg; union __rcc reg;
View file
@ -46,7 +46,7 @@ These modules are not defined:
The registers and bit field names are taken from the 'Stellaris LM3S6965 The registers and bit field names are taken from the 'Stellaris LM3S6965
Microcontroller DATA SHEET (DS-LM3S6965-12746.2515) revision H' document, Microcontroller DATA SHEET (DS-LM3S6965-12746.2515) revision H' document,
section 5.4/5.5, pp. 184-200. section 5.4/5.5, pp. 184-200.
*/ */
#ifndef _SCP_H_ #ifndef _SCP_H_
#define _SCP_H_ #define _SCP_H_
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This module provides routines to initialize and support board-level hardware This module provides routines to initialize and support board-level hardware
for the ti_lm3s6965 BSP. for the ti_lm3s6965 BSP.
*/ */
#include <nanokernel.h> #include <nanokernel.h>
#include <board.h> #include <board.h>
@ -59,13 +59,13 @@ extern void _NmiInit(void);
#if defined(DO_CONSOLE_INIT) #if defined(DO_CONSOLE_INIT)
/******************************************************************************* /**
* *
* uart_generic_info_init - initialize generic information for one UART * uart_generic_info_init - initialize generic information for one UART
* *
* RETURNS: N/A * RETURNS: N/A
* *
*/ */
inline void uart_generic_info_init(struct uart_init_info *pInfo) inline void uart_generic_info_init(struct uart_init_info *pInfo)
{ {
@ -79,15 +79,15 @@ inline void uart_generic_info_init(struct uart_init_info *pInfo)
#if defined(DO_CONSOLE_INIT) #if defined(DO_CONSOLE_INIT)
/******************************************************************************* /**
* *
* consoleInit - initialize target-only console * consoleInit - initialize target-only console
* *
* Only used for debugging. * Only used for debugging.
* *
* RETURNS: N/A * RETURNS: N/A
* *
*/ */
#include <console/uart_console.h> #include <console/uart_console.h>
@ -132,16 +132,16 @@ static void bluetooth_init(void)
} while ((0)) } while ((0))
#endif /* CONFIG_BLUETOOTH */ #endif /* CONFIG_BLUETOOTH */
/******************************************************************************* /**
* *
* _InitHardware - perform basic hardware initialization * _InitHardware - perform basic hardware initialization
* *
* Initialize the interrupt controller device drivers and the * Initialize the interrupt controller device drivers and the
* integrated 16550-compatible UART device driver. * integrated 16550-compatible UART device driver.
* Also initialize the timer device driver, if required. * Also initialize the timer device driver, if required.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _InitHardware(void) void _InitHardware(void)
{ {
View file
@ -32,7 +32,7 @@
/* /*
DESCRIPTION DESCRIPTION
This module contains functions for manipulating caches. This module contains functions for manipulating caches.
*/ */
#include <nanokernel.h> #include <nanokernel.h>
#include <arch/cpu.h> #include <arch/cpu.h>
@ -44,19 +44,19 @@ This module contains functions for manipulation caches.
#error Cannot use this implementation with a cache line size of 0 #error Cannot use this implementation with a cache line size of 0
#endif #endif
/******************************************************************************* /**
* *
* _SysCacheFlush - flush a page to main memory * _SysCacheFlush - flush a page to main memory
* *
* No alignment is required for either <virt> or <size>, but since * No alignment is required for either <virt> or <size>, but since
* _SysCacheFlush() iterates on the cache lines, a cache line alignment for both * _SysCacheFlush() iterates on the cache lines, a cache line alignment for both
* is optimal. * is optimal.
* *
* The cache line size is specified via the CONFIG_CACHE_LINE_SIZE kconfig * The cache line size is specified via the CONFIG_CACHE_LINE_SIZE kconfig
* option. * option.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _SysCacheFlush(VIRT_ADDR virt, size_t size) void _SysCacheFlush(VIRT_ADDR virt, size_t size)
{ {
View file
@ -32,7 +32,7 @@
/* /*
DESCRIPTION DESCRIPTION
This module contains functions for manipulating caches. This module contains functions for manipulating caches.
*/ */
#ifndef CONFIG_CLFLUSH_INSTRUCTION_SUPPORTED #ifndef CONFIG_CLFLUSH_INSTRUCTION_SUPPORTED
@ -42,20 +42,20 @@ This module contains functions for manipulating caches.
/* externs (internal APIs) */ /* externs (internal APIs) */
GTEXT(_SysCacheFlush) GTEXT(_SysCacheFlush)
/******************************************************************************* /**
* *
* _SysCacheFlush - flush a page to main memory * _SysCacheFlush - flush a page to main memory
* *
* This implementation flushes the whole cache. * This implementation flushes the whole cache.
* *
* C signature: * C signature:
* *
* void _SysCacheFlush (VIRT_ADDR virt, size_t size) * void _SysCacheFlush (VIRT_ADDR virt, size_t size)
* *
* Both parameters are ignored in this implementation. * Both parameters are ignored in this implementation.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_FUNC(TEXT, _SysCacheFlush) SECTION_FUNC(TEXT, _SysCacheFlush)
wbinvd wbinvd
View file
@ -41,7 +41,7 @@ service routines, and to operations performed by peer processors.
INTERNAL INTERNAL
These operators are currently unavailable to user space applications, These operators are currently unavailable to user space applications,
as there is no requirement for this capability. as there is no requirement for this capability.
*/ */
/* includes */ /* includes */
@ -66,33 +66,33 @@ as there is no requirement for this capability.
GTEXT(atomic_and) GTEXT(atomic_and)
GTEXT(atomic_nand) GTEXT(atomic_nand)
/******************************************************************************* /**
* *
* atomic_cas - atomic compare-and-set primitive * atomic_cas - atomic compare-and-set primitive
* *
* This routine provides the compare-and-set operator. If the original value at * This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the * <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1. * function returns 1.
* *
* If the original value at <target> does not equal <oldValue>, then the store * If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0. * is not done and the function returns 0.
* *
* The reading of the original value at <target>, the comparison, * The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with * and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>. * respect to both interrupts and accesses of other processors to <target>.
* *
* RETURNS: Returns 1 if <newValue> is written, 0 otherwise. * RETURNS: Returns 1 if <newValue> is written, 0 otherwise.
* *
* int atomic_cas * int atomic_cas
* ( * (
* atomic_t * target, /@ address to be tested @/ * atomic_t * target, /@ address to be tested @/
* atomic_val_t oldValue, /@ value to compare against @/ * atomic_val_t oldValue, /@ value to compare against @/
 * atomic_val_t newValue /@ value to set @/ * atomic_val_t newValue /@ value to set @/
* ) * )
* *
* INTERNAL * INTERNAL
 * The 'cmpxchg' instruction is NOT supported on processors prior to the 80486 * The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/ */
SECTION_FUNC(TEXT, atomic_cas) SECTION_FUNC(TEXT, atomic_cas)
@ -113,25 +113,25 @@ BRANCH_LABEL(atomic_cas1)
ret ret
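The canonical use of this primitive is a retry loop; a minimal sketch built only on the documented semantics (the saturating counter itself is illustrative):

    extern int atomic_cas(atomic_t *target, atomic_val_t oldValue,
                          atomic_val_t newValue);

    void saturating_inc(atomic_t *counter, atomic_val_t max)
    {
        atomic_val_t old;
        do {
            old = *counter;              /* snapshot the current value */
            if (old >= max)
                return;                  /* already at the ceiling */
        } while (!atomic_cas(counter, old, old + 1));  /* retry if raced */
    }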
/******************************************************************************* /**
* *
* atomic_add - atomic add primitive * atomic_add - atomic add primitive
* *
* This routine provides the atomic addition operator. The <value> is * This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>, * atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned. * and the old value from <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
* *
* atomic_val_t atomic_add * atomic_val_t atomic_add
* ( * (
* atomic_t * target, /@ memory location to add to @/ * atomic_t * target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/ * atomic_val_t value /@ value to add @/
* ) * )
* *
* INTERNAL * INTERNAL
 * The 'xadd' instruction is NOT supported on processors prior to the 80486 * The 'xadd' instruction is NOT supported on processors prior to the 80486
*/ */
SECTION_FUNC(TEXT, atomic_add) SECTION_FUNC(TEXT, atomic_add)
@ -145,25 +145,25 @@ SECTION_FUNC(TEXT, atomic_add)
ret ret
/******************************************************************************* /**
* *
* atomic_sub - atomic subtraction primitive * atomic_sub - atomic subtraction primitive
* *
* This routine provides the atomic subtraction operator. The <value> is * This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at * atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned. * <target>, and the old value from <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
* *
* atomic_val_t atomic_sub * atomic_val_t atomic_sub
* ( * (
* atomic_t * target, /@ memory location to subtract from @/ * atomic_t * target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/ * atomic_val_t value /@ value to subtract @/
* ) * )
* *
* INTERNAL * INTERNAL
 * The 'xadd' instruction is NOT supported on processors prior to the 80486 * The 'xadd' instruction is NOT supported on processors prior to the 80486
*/ */
SECTION_FUNC(TEXT, atomic_sub) SECTION_FUNC(TEXT, atomic_sub)
@ -178,23 +178,23 @@ SECTION_FUNC(TEXT, atomic_sub)
ret ret
/******************************************************************************* /**
* *
* atomic_inc - atomic increment primitive * atomic_inc - atomic increment primitive
* *
* This routine provides the atomic increment operator. The value at <target> * This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned. * is atomically incremented by 1, and the old value from <target> is returned.
* *
* RETURNS: The value from <target> before the increment * RETURNS: The value from <target> before the increment
* *
* atomic_val_t atomic_inc * atomic_val_t atomic_inc
* ( * (
* atomic_t *target /@ memory location to increment @/ * atomic_t *target /@ memory location to increment @/
* ) * )
* *
* INTERNAL * INTERNAL
 * The 'xadd' instruction is NOT supported on processors prior to the 80486 * The 'xadd' instruction is NOT supported on processors prior to the 80486
*/ */
SECTION_FUNC(TEXT, atomic_inc) SECTION_FUNC(TEXT, atomic_inc)
@ -210,23 +210,23 @@ SECTION_FUNC(TEXT, atomic_inc)
ret ret
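Typical use of the increment primitive: an event counter shared between an ISR and task level (a sketch; names are hypothetical):

    extern atomic_val_t atomic_inc(atomic_t *target);

    static atomic_t event_count;         /* bumped in the ISR, read elsewhere */

    static void event_isr(void *arg)
    {
        atomic_inc(&event_count);        /* returns the pre-increment value */
    }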
/******************************************************************************* /**
* *
* atomic_dec - atomic decrement primitive * atomic_dec - atomic decrement primitive
* *
* This routine provides the atomic decrement operator. The value at <target> * This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned. * is atomically decremented by 1, and the old value from <target> is returned.
* *
* RETURNS: The value from <target> prior to the decrement * RETURNS: The value from <target> prior to the decrement
* *
* atomic_val_t atomic_dec * atomic_val_t atomic_dec
* ( * (
* atomic_t *target /@ memory location to decrement @/ * atomic_t *target /@ memory location to decrement @/
* ) * )
* *
* INTERNAL * INTERNAL
 * The 'xadd' instruction is NOT supported on processors prior to the 80486 * The 'xadd' instruction is NOT supported on processors prior to the 80486
*/ */
SECTION_FUNC(TEXT, atomic_dec) SECTION_FUNC(TEXT, atomic_dec)
@ -240,22 +240,22 @@ SECTION_FUNC(TEXT, atomic_dec)
ret ret
/******************************************************************************* /**
* *
* atomic_get - atomic get primitive * atomic_get - atomic get primitive
* *
* This routine provides the atomic get primitive to atomically read * This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target> * a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary. * is expected to be aligned to a 4-byte boundary.
* *
* RETURNS: The value read from <target> * RETURNS: The value read from <target>
* *
* atomic_t atomic_get * atomic_t atomic_get
* ( * (
* atomic_t *target /@ memory location to read from @/ * atomic_t *target /@ memory location to read from @/
* ) * )
* *
*/ */
SECTION_FUNC(TEXT, atomic_get) SECTION_FUNC(TEXT, atomic_get)
@ -264,25 +264,25 @@ SECTION_FUNC(TEXT, atomic_get)
ret ret
/******************************************************************************* /**
* *
* atomic_set - atomic get-and-set primitive * atomic_set - atomic get-and-set primitive
* *
* This routine provides the atomic set operator. The <value> is atomically * This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned. * written at <target> and the previous value at <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
* *
* atomic_val_t atomic_set * atomic_val_t atomic_set
* ( * (
* atomic_t *target, /@ memory location to write to @/ * atomic_t *target, /@ memory location to write to @/
* atomic_val_t value /@ value to set @/ * atomic_val_t value /@ value to set @/
* ) * )
* *
* INTERNAL * INTERNAL
* The XCHG instruction is executed on the specified address to * The XCHG instruction is executed on the specified address to
* swap in value. The value swapped out is returned by this function. * swap in value. The value swapped out is returned by this function.
*/ */
SECTION_FUNC(TEXT, atomic_set) SECTION_FUNC(TEXT, atomic_set)
@ -304,22 +304,22 @@ SECTION_FUNC(TEXT, atomic_set)
ret ret
/******************************************************************************* /**
* *
* atomic_clear - atomic clear primitive * atomic_clear - atomic clear primitive
* *
* This routine provides the atomic clear operator. The value of 0 is atomically * This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence, * written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).) * atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
* *
* atomic_val_t atomic_clear * atomic_val_t atomic_clear
* ( * (
* atomic_t *target /@ memory location to write to @/ * atomic_t *target /@ memory location to write to @/
* ) * )
* *
*/ */
SECTION_FUNC(TEXT, atomic_clear) SECTION_FUNC(TEXT, atomic_clear)
@ -341,25 +341,25 @@ SECTION_FUNC(TEXT, atomic_clear)
ret ret
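The equivalence noted in the comment, spelled out as a sketch:

    extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
    extern atomic_val_t atomic_clear(atomic_t *target);

    static atomic_t flag;

    void demo_clear(void)
    {
        atomic_val_t a = atomic_clear(&flag);   /* identical effect ... */
        atomic_val_t b = atomic_set(&flag, 0);  /* ... to this call */
        (void)a; (void)b;                       /* both return the old value */
    }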
/******************************************************************************* /**
* *
* atomic_or - atomic bitwise inclusive OR primitive * atomic_or - atomic bitwise inclusive OR primitive
* *
* This routine provides the atomic bitwise inclusive OR operator. The <value> * This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result * is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned. * at <target>, and the previous value at <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
* *
* atomic_val_t atomic_or * atomic_val_t atomic_or
* ( * (
* atomic_t *target, /@ memory location to be modified @/ * atomic_t *target, /@ memory location to be modified @/
* atomic_val_t value /@ value to OR @/ * atomic_val_t value /@ value to OR @/
* ) * )
* *
* INTERNAL * INTERNAL
 * The 'cmpxchg' instruction is NOT supported on processors prior to the 80486 * The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/ */
SECTION_FUNC(TEXT, atomic_or) SECTION_FUNC(TEXT, atomic_or)
@ -379,25 +379,25 @@ BRANCH_LABEL(atomic_or_retry)
ret ret
/******************************************************************************* /**
* *
* atomic_xor - atomic bitwise exclusive OR (XOR) primitive * atomic_xor - atomic bitwise exclusive OR (XOR) primitive
* *
* This routine provides the atomic bitwise exclusive OR operator. The <value> * This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result * is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned. * at <target>, and the previous value at <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
* *
* atomic_val_t atomic_xor * atomic_val_t atomic_xor
* ( * (
* atomic_t *target, /@ memory location to be modified @/ * atomic_t *target, /@ memory location to be modified @/
* atomic_t value /@ value to XOR @/ * atomic_t value /@ value to XOR @/
* ) * )
* *
* INTERNAL * INTERNAL
 * The 'cmpxchg' instruction is NOT supported on processors prior to the 80486 * The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/ */
SECTION_FUNC(TEXT, atomic_xor) SECTION_FUNC(TEXT, atomic_xor)
@ -417,25 +417,25 @@ BRANCH_LABEL(atomic_xor_retry)
ret ret
/******************************************************************************* /**
* *
* atomic_and - atomic bitwise AND primitive * atomic_and - atomic bitwise AND primitive
* *
* This routine provides the atomic bitwise AND operator. The <value> is * This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result * atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned. * at <target>, and the previous value at <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
* *
* atomic_val_t atomic_and * atomic_val_t atomic_and
* ( * (
* atomic_t *target, /@ memory location to be modified @/ * atomic_t *target, /@ memory location to be modified @/
* atomic_val_t value /@ value to AND @/ * atomic_val_t value /@ value to AND @/
* ) * )
* *
* INTERNAL * INTERNAL
 * The 'cmpxchg' instruction is NOT supported on processors prior to the 80486 * The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/ */
SECTION_FUNC(TEXT, atomic_and) SECTION_FUNC(TEXT, atomic_and)
@ -455,25 +455,25 @@ BRANCH_LABEL(atomic_and_retry)
ret ret
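The bitwise family above is typically used on flag words; a sketch with hypothetical flag values:

    #define FLAG_READY 0x01
    #define FLAG_ERROR 0x02

    extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
    extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);

    static atomic_t flags;

    void set_ready(void)   { atomic_or(&flags, FLAG_READY); }
    void clear_error(void) { atomic_and(&flags, ~FLAG_ERROR); }  /* mask off */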
/******************************************************************************* /**
* *
* atomic_nand - atomic bitwise NAND primitive * atomic_nand - atomic bitwise NAND primitive
* *
* This routine provides the atomic bitwise NAND operator. The <value> is * This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result * atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned. * at <target>, and the previous value at <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
* *
* atomic_val_t atomic_nand * atomic_val_t atomic_nand
* ( * (
* atomic_t * target, /@ memory location to be modified @/ * atomic_t * target, /@ memory location to be modified @/
* atomic_val_t value /@ value to NAND @/ * atomic_val_t value /@ value to NAND @/
* ) * )
* *
* INTERNAL * INTERNAL
 * The 'cmpxchg' instruction is NOT supported on processors prior to the 80486 * The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/ */
SECTION_FUNC(TEXT, atomic_nand) SECTION_FUNC(TEXT, atomic_nand)
View file
@ -43,30 +43,30 @@ operators that do utilize the LOCK prefix instruction.
INTERNAL INTERNAL
These operators are currently unavailable to user space applications These operators are currently unavailable to user space applications
as there is no requirement for this capability. as there is no requirement for this capability.
*/ */
#if defined(CONFIG_LOCK_INSTRUCTION_UNSUPPORTED) #if defined(CONFIG_LOCK_INSTRUCTION_UNSUPPORTED)
#include <nanokernel.h> #include <nanokernel.h>
#include <arch/cpu.h> #include <arch/cpu.h>
/******************************************************************************* /**
* *
* atomic_cas - atomic compare-and-set primitive * atomic_cas - atomic compare-and-set primitive
* *
* This routine provides the compare-and-set operator. If the original value at * This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the * <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1. * function returns 1.
* *
* If the original value at <target> does not equal <oldValue>, then the store * If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0. * is not done and the function returns 0.
* *
* The reading of the original value at <target>, the comparison, * The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with * and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>. * respect to both interrupts and accesses of other processors to <target>.
* *
* RETURNS: Returns 1 if <newValue> is written, 0 otherwise. * RETURNS: Returns 1 if <newValue> is written, 0 otherwise.
*/ */
int atomic_cas( int atomic_cas(
atomic_t *target, /* address to be tested */ atomic_t *target, /* address to be tested */
@ -88,16 +88,16 @@ int atomic_cas(
return 1; return 1;
} }
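Every routine in this LOCK-less fallback follows the same shape visible in atomic_cas() above: a short interrupt-locked read-modify-write. Schematically (a sketch; the inline lock helpers appear earlier in this change, and their signatures are assumed here):

    extern int irq_lock_inline(void);        /* assumed signature */
    extern void irq_unlock_inline(int key);  /* assumed signature */

    /* generic shape of one operator when the LOCK prefix is unsupported */
    atomic_val_t atomic_op_sketch(atomic_t *target, atomic_val_t value)
    {
        int key = irq_lock_inline();     /* mask interrupts */
        atomic_val_t ovalue = *target;   /* read the old value */
        *target = ovalue + value;        /* '+' stands in for the operator */
        irq_unlock_inline(key);          /* restore previous lock state */
        return ovalue;
    }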
/******************************************************************************* /**
* *
* atomic_add - atomic addition primitive * atomic_add - atomic addition primitive
* *
* This routine provides the atomic addition operator. The <value> is * This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>, * atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned. * and the old value from <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
*/ */
atomic_val_t atomic_add( atomic_val_t atomic_add(
atomic_t *target, /* memory location to add to */ atomic_t *target, /* memory location to add to */
@ -114,16 +114,16 @@ atomic_val_t atomic_add(
return ovalue; return ovalue;
} }
/******************************************************************************* /**
* *
* atomic_sub - atomic subtraction primitive * atomic_sub - atomic subtraction primitive
* *
* This routine provides the atomic subtraction operator. The <value> is * This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at * atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned. * <target>, and the old value from <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
*/ */
atomic_val_t atomic_sub( atomic_val_t atomic_sub(
atomic_t *target, /* memory location to subtract from */ atomic_t *target, /* memory location to subtract from */
@ -140,15 +140,15 @@ atomic_val_t atomic_sub(
return ovalue; return ovalue;
} }
/******************************************************************************* /**
* *
* atomic_inc - atomic increment primitive * atomic_inc - atomic increment primitive
* *
* This routine provides the atomic increment operator. The value at <target> * This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned. * is atomically incremented by 1, and the old value from <target> is returned.
* *
* RETURNS: The value from <target> before the increment * RETURNS: The value from <target> before the increment
*/ */
atomic_val_t atomic_inc( atomic_val_t atomic_inc(
atomic_t *target /* memory location to increment */ atomic_t *target /* memory location to increment */
@ -164,15 +164,15 @@ atomic_val_t atomic_inc(
return ovalue; return ovalue;
} }
/******************************************************************************* /**
* *
* atomic_dec - atomic decrement primitive * atomic_dec - atomic decrement primitive
* *
* This routine provides the atomic decrement operator. The value at <target> * This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned. * is atomically decremented by 1, and the old value from <target> is returned.
* *
* RETURNS: The value from <target> prior to the decrement * RETURNS: The value from <target> prior to the decrement
*/ */
atomic_val_t atomic_dec( atomic_val_t atomic_dec(
atomic_t *target /* memory location to decrement */ atomic_t *target /* memory location to decrement */
@ -188,16 +188,16 @@ atomic_val_t atomic_dec(
return ovalue; return ovalue;
} }
/******************************************************************************* /**
* *
* atomic_get - atomic get primitive * atomic_get - atomic get primitive
* *
* This routine provides the atomic get primitive to atomically read * This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target> * a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary. * is expected to be aligned to a 4-byte boundary.
* *
* RETURNS: The value read from <target> * RETURNS: The value read from <target>
*/ */
atomic_val_t atomic_get(const atomic_t *target /* memory location to read from */ atomic_val_t atomic_get(const atomic_t *target /* memory location to read from */
) )
@ -205,15 +205,15 @@ atomic_val_t atomic_get(const atomic_t *target /* memory location to read from *
return *target; return *target;
} }
/******************************************************************************* /**
* *
* atomic_set - atomic get-and-set primitive * atomic_set - atomic get-and-set primitive
* *
* This routine provides the atomic set operator. The <value> is atomically * This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned. * written at <target> and the previous value at <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
*/ */
atomic_val_t atomic_set( atomic_val_t atomic_set(
atomic_t *target, /* memory location to write to */ atomic_t *target, /* memory location to write to */
@ -230,16 +230,16 @@ atomic_val_t atomic_set(
return ovalue; return ovalue;
} }
/******************************************************************************* /**
* *
* atomic_clear - atomic clear primitive * atomic_clear - atomic clear primitive
* *
* This routine provides the atomic clear operator. The value of 0 is atomically * This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence, * written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).) * atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
*/ */
atomic_val_t atomic_clear( atomic_val_t atomic_clear(
atomic_t *target /* memory location to write to */ atomic_t *target /* memory location to write to */
@ -255,16 +255,16 @@ atomic_val_t atomic_clear(
return ovalue; return ovalue;
} }
/******************************************************************************* /**
* *
* atomic_or - atomic bitwise inclusive OR primitive * atomic_or - atomic bitwise inclusive OR primitive
* *
* This routine provides the atomic bitwise inclusive OR operator. The <value> * This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result * is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned. * at <target>, and the previous value at <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
*/ */
atomic_val_t atomic_or( atomic_val_t atomic_or(
atomic_t *target, /* memory location to be modified */ atomic_t *target, /* memory location to be modified */
@ -281,16 +281,16 @@ atomic_val_t atomic_or(
return ovalue; return ovalue;
} }
/******************************************************************************* /**
* *
* atomic_xor - atomic bitwise exclusive OR (XOR) primitive * atomic_xor - atomic bitwise exclusive OR (XOR) primitive
* *
* This routine provides the atomic bitwise exclusive OR operator. The <value> * This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result * is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned. * at <target>, and the previous value at <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
*/ */
atomic_val_t atomic_xor( atomic_val_t atomic_xor(
atomic_t *target, /* memory location to be modified */ atomic_t *target, /* memory location to be modified */
@ -307,16 +307,16 @@ atomic_val_t atomic_xor(
return ovalue; return ovalue;
} }
/******************************************************************************* /**
* *
* atomic_and - atomic bitwise AND primitive * atomic_and - atomic bitwise AND primitive
* *
* This routine provides the atomic bitwise AND operator. The <value> is * This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result * atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned. * at <target>, and the previous value at <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
*/ */
atomic_val_t atomic_and( atomic_val_t atomic_and(
atomic_t *target, /* memory location to be modified */ atomic_t *target, /* memory location to be modified */
@ -333,16 +333,16 @@ atomic_val_t atomic_and(
return ovalue; return ovalue;
} }
/******************************************************************************* /**
* *
* atomic_nand - atomic bitwise NAND primitive * atomic_nand - atomic bitwise NAND primitive
* *
* This routine provides the atomic bitwise NAND operator. The <value> is * This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result * atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned. * at <target>, and the previous value at <target> is returned.
* *
* RETURNS: The previous value from <target> * RETURNS: The previous value from <target>
*/ */
atomic_val_t atomic_nand( atomic_val_t atomic_nand(
atomic_t *target, /* memory location to be modified */ atomic_t *target, /* memory location to be modified */
View file
@@ -34,7 +34,7 @@
DESCRIPTION
This module provides core nanokernel fiber related primitives for the IA-32
processor architecture.
*/
#ifdef CONFIG_MICROKERNEL
#include <microkernel.h>
@@ -57,20 +57,20 @@ tNANO _nanokernel = {0};
void _ContextEntryWrapper(_ContextEntry, _ContextArg, _ContextArg, _ContextArg);
#endif /* CONFIG_GDB_INFO */
-/*******************************************************************************
+/**
*
* _NewContextInternal - initialize a new execution context
*
* This function is utilized to initialize all execution contexts (both fiber
* and task). The 'priority' parameter will be set to -1 for the creation of a
* task context.
*
* This function is called by _NewContext() to initialize task contexts.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _NewContextInternal(
	char *pStackMem, /* pointer to context stack memory */
@@ -206,64 +206,64 @@ static void _NewContextInternal(
}
#ifdef CONFIG_GDB_INFO
-/*******************************************************************************
+/**
*
* _ContextEntryWrapper - adjust stack before invoking _context_entry
*
* This function adjusts the initial stack frame created by _NewContext()
* such that the GDB stack frame unwinders recognize it as the outermost frame
* in the context's stack. The function then jumps to _context_entry().
*
* GDB normally stops unwinding a stack when it detects that it has
* reached a function called main(). Kernel tasks, however, do not have
* a main() function, and there does not appear to be a simple way of stopping
* the unwinding of the stack.
*
* Given the initial context created by _NewContext(), GDB expects to find a
* return address on the stack immediately above the context entry routine
* _context_entry, in the location occupied by the initial EFLAGS.
* GDB attempts to examine the memory at this return address, which typically
* results in an invalid access to page 0 of memory.
*
* This function overwrites the initial EFLAGS with zero. When GDB subsequently
* attempts to examine memory at address zero, the PeekPoke driver detects
* an invalid access to address zero and returns an error, which causes the
* GDB stack unwinder to stop somewhat gracefully.
*
*  __________________
* |      param3      | <------ Top of the stack
* |__________________|
* |      param2      |         Stack Grows Down
* |__________________|                 |
* |      param1      |                 V
* |__________________|
* |      pEntry      |
* |__________________|
* |  initial EFLAGS  | <---- ESP when invoked by _Swap()
* |__________________|       (Zeroed by this routine)
* |     entryRtn     | <----- Context Entry Routine invoked by _Swap()
* |__________________|        (This routine if GDB_INFO)
* |      <edi>       |  \
* |__________________|  |
* |      <esi>       |  |
* |__________________|  |
* |      <ebx>       |  |---- Initial registers restored by _Swap()
* |__________________|  |
* |      <ebp>       |  |
* |__________________|  |
* |      <eax>       |  /
* |__________________|
*
*
* The initial EFLAGS cannot be overwritten until after _Swap() has swapped in
* the new context for the first time. This routine is called by _Swap() the
* first time that the new context is swapped in, and it jumps to
* _context_entry after it has done its work.
*
* RETURNS: this routine does NOT return.
*
* \NOMANUAL
*/
__asm__("\t.globl _context_entry\n"
	"\t.section .text\n"
@@ -273,20 +273,20 @@ __asm__("\t.globl _context_entry\n"
	"\tjmp _context_entry\n");
#endif /* CONFIG_GDB_INFO */
-/*******************************************************************************
+/**
*
* _NewContext - create a new kernel execution context
*
* This function is utilized to create execution contexts for both fiber
* contexts and kernel task contexts.
*
* The "context control block" (CCS) is carved from the "end" of the specified
* context stack memory.
*
* RETURNS: opaque pointer to initialized CCS structure
*
* \NOMANUAL
*/
void _NewContext(
	char *pStackMem, /* pointer to aligned stack memory */
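The "carved from the end" remark can be made concrete. A sketch, assuming a stackSize parameter alongside pStackMem and using a stand-in for the tCCS type; the arithmetic is illustrative, and alignment handling in the real routine may differ:

    #include <stdint.h>

    /* Stand-in for the real tCCS from nano_private.h (fields elided). */
    typedef struct { uint32_t flags; } tCCS;

    /* Illustrative: the CCS sits at the highest addresses of the stack
     * region; the context's stack then grows down beneath it.
     */
    static tCCS *carve_ccs(char *pStackMem, unsigned int stackSize)
    {
        return (tCCS *)(pStackMem + stackSize - sizeof(tCCS));
    }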


@@ -47,7 +47,7 @@ supports the execution of the 'hlt' instruction from a guest (results in a
VM exit), and more importantly, the Hypervisor will respect the
single instruction delay slot after the 'sti' instruction as required
by nano_cpu_atomic_idle().
*/
#define _ASMLANGUAGE
@@ -64,21 +64,21 @@ by nano_cpu_atomic_idle().
#ifndef CONFIG_NO_ISRS
-/*******************************************************************************
+/**
*
* nano_cpu_idle - power save idle routine for IA-32
*
* This function will be called by the nanokernel idle loop or possibly within
* an implementation of _sys_power_save_idle in the microkernel when the
* '_sys_power_save_flag' variable is non-zero. The IA-32 'hlt' instruction
* will be issued, causing a low-power consumption sleep mode.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_idle (void);
*/
SECTION_FUNC(TEXT, nano_cpu_idle)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
@@ -94,31 +94,31 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
	ret /* return after processing ISR */
-/*******************************************************************************
+/**
*
* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
*
* This function is utilized by the nanokernel object "wait" APIs for task
* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
* and nano_task_fifo_get_wait().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
* in nano_task_lifo_get_wait(), for example, for the race condition that occurs
* if this requirement is not met.
*
* 2) After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'imask' input parameter.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_atomic_idle (unsigned int imask);
*/
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
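The atomicity requirement hinges on the IA-32 guarantee that 'sti' holds interrupts off for one further instruction, so an 'sti; hlt' pair cannot be separated by an interrupt. A minimal sketch of the pattern in GNU inline assembly; this is illustrative, and the real routine, written in assembly, also restores the caller's lockout state from <imask> after waking:

    /* Illustrative: enable interrupts and halt atomically. The one-
     * instruction interrupt shadow after 'sti' guarantees no interrupt
     * lands between the two instructions, so a wakeup event that arrives
     * "early" still takes effect after 'hlt' rather than being missed.
     */
    static inline void atomic_idle_sketch(void)
    {
        __asm__ volatile ("sti\n\t"
                          "hlt" ::: "memory");
    }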


@@ -74,7 +74,7 @@ an error code is present on the stack or not.
NOTE: Be sure to update the arch specific definition of the _EXC_STUB_SIZE
macro to reflect the size of the full exception stub (as shown above).
The _EXC_STUB_SIZE macro is defined in arch/x86/include/nano_private.h.
*/
#include <nanokernel.h>
@@ -86,37 +86,37 @@ void _NanoCpuExcConnectAtDpl(unsigned int vector,
	NANO_EXC_STUB pExcStubMem,
	unsigned int dpl);
-/*******************************************************************************
+/**
*
* nanoCpuExcConnect - connect a C routine to an exception
*
* This routine connects an exception handler coded in C to the specified
* interrupt vector. An exception is defined as a synchronous interrupt, i.e.
* an interrupt asserted as a direct result of program execution as opposed
* to a hardware device asserting an interrupt.
*
* When the exception specified by <vector> is asserted, the current context
* is saved on the current stack, i.e. a switch to some other stack is not
* performed, followed by executing <routine> which has the following signature:
*
* void (*routine) (NANO_ESF *pEsf)
*
* The <pExcStubMem> argument points to memory that the system can use to
* synthesize the exception stub that calls <routine>. The memory need not be
* initialized, but must be persistent (i.e. it cannot be on the caller's stack).
* Declaring a global or static variable of type NANO_EXC_STUB will provide a
* suitable area of the proper size.
*
* The handler is connected via an interrupt-gate descriptor having a
* descriptor privilege level (DPL) equal to zero.
*
* RETURNS: N/A
*
* INTERNAL
* The function prototype for nanoCpuExcConnect() only exists in nano_private.h;
* in other words, it's still considered private since the definitions for
* the NANO_ESF structures have not been completed.
*/
void nanoCpuExcConnect(unsigned int vector, /* interrupt vector: 0 to 255 on
					       IA-32 */
@@ -126,37 +126,37 @@ void nanoCpuExcConnect(unsigned int vector, /* interrupt vector: 0 to 255 on
	_NanoCpuExcConnectAtDpl(vector, routine, pExcStubMem, 0);
}
-/*******************************************************************************
+/**
*
* _NanoCpuExcConnectAtDpl - connect a C routine to an exception
*
* This routine connects an exception handler coded in C to the specified
* interrupt vector. An exception is defined as a synchronous interrupt, i.e.
* an interrupt asserted as a direct result of program execution as opposed
* to a hardware device asserting an interrupt.
*
* When the exception specified by <vector> is asserted, the current context
* is saved on the current stack, i.e. a switch to some other stack is not
* performed, followed by executing <routine> which has the following signature:
*
* void (*routine) (NANO_ESF *pEsf)
*
* The <pExcStubMem> argument points to memory that the system can use to
* synthesize the exception stub that calls <routine>. The memory need not be
* initialized, but must be persistent (i.e. it cannot be on the caller's stack).
* Declaring a global or static variable of type NANO_EXC_STUB will provide a
* suitable area of the proper size.
*
* The handler is connected via an interrupt-gate descriptor having the supplied
* descriptor privilege level (DPL).
*
* RETURNS: N/A
*
* INTERNAL
* The function prototype for _NanoCpuExcConnectAtDpl() only exists in nano_private.h;
* in other words, it's still considered private since the definitions for
* the NANO_ESF structures have not been completed.
*/
void _NanoCpuExcConnectAtDpl(
	unsigned int vector, /* interrupt vector: 0 to 255 on IA-32 */
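As the comment notes, a static NANO_EXC_STUB provides the required persistent stub memory. A usage sketch with stand-in types, assuming NANO_EXC_STUB is an array type that decays to a pointer when passed; the vector number, stub size, and handler body are all illustrative:

    #include <stdint.h>

    /* Stand-ins for the kernel types (the real ones live in nano_private.h). */
    typedef unsigned char NANO_EXC_STUB[64];      /* size is illustrative */
    typedef struct { uint32_t vector; } NANO_ESF; /* fields elided */

    extern void nanoCpuExcConnect(unsigned int vector,
                                  void (*routine)(NANO_ESF *pEsf),
                                  NANO_EXC_STUB pExcStubMem);

    /* Persistent stub memory, as required (not on anyone's stack). */
    static NANO_EXC_STUB divZeroStub;

    static void divZeroHandler(NANO_ESF *pEsf) /* documented handler signature */
    {
        (void)pEsf; /* examine the ESF, report, recover or abort ... */
    }

    static void connect_div_zero(void)
    {
        nanoCpuExcConnect(0, divZeroHandler, divZeroStub); /* vector 0: divide error */
    }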


@@ -36,7 +36,7 @@ This module implements assembly routines to manage exceptions (synchronous
interrupts) on the Intel IA-32 architecture. More specifically,
exceptions are implemented in this module. The stubs are invoked when entering
and exiting a C exception handler.
*/
#define _ASMLANGUAGE
@@ -57,31 +57,31 @@ and exiting a C exception handler.
-/*******************************************************************************
+/**
*
* _ExcEnt - inform the kernel of an exception
*
* This function is called from the exception stub created by nanoCpuExcConnect()
* to inform the kernel of an exception. This routine currently does
* _not_ increment a context/interrupt specific exception count. Also,
* execution of the exception handler occurs on the current stack, i.e.
* _ExcEnt() does not switch to another stack. The volatile integer
* registers are saved on the stack, and control is returned to the
* exception stub.
*
* WARNINGS
*
* Host-based tools and the target-based GDB agent depend on the stack frame
* created by this routine to determine the locations of volatile registers.
* These tools must be updated to reflect any changes to the stack frame.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _ExcEnt (void);
*
*/
SECTION_FUNC(TEXT, _ExcEnt)
@@ -215,22 +215,22 @@ BRANCH_LABEL(allDone)
	jmp *%eax /* "return" back to stub */
-/*******************************************************************************
+/**
*
* _ExcExit - inform the kernel of an exception exit
*
* This function is called from the exception stub created by nanoCpuExcConnect()
* to inform the kernel that the processing of an exception has
* completed. This routine restores the volatile integer registers and
* then returns control to the interrupted context or ISR.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _ExcExit (void);
*
*/
SECTION_FUNC(TEXT, _ExcExit)
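The two routines bracket the C handler: the synthesized stub calls _ExcEnt, then the connected routine, then _ExcExit. A C-level paraphrase of that control flow; this is illustrative only, since the real stub is machine code emitted by nanoCpuExcConnect() and _ExcEnt() returns to it via a computed jump rather than an ordinary return:

    /* Stand-in type; the real NANO_ESF is defined in the kernel headers. */
    typedef struct nano_esf NANO_ESF;

    extern void _ExcEnt(void);
    extern void _ExcExit(void);

    /* Illustrative paraphrase of a synthesized exception stub. */
    static void exc_stub_shape(NANO_ESF *pEsf, void (*routine)(NANO_ESF *))
    {
        _ExcEnt();     /* save volatile integer registers; stay on this stack */
        routine(pEsf); /* the connected C exception handler */
        _ExcExit();    /* restore registers; resume the interrupted context */
    }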


@@ -33,7 +33,7 @@
/*
DESCRIPTION
This module provides the _NanoFatalErrorHandler() routine.
*/
#include <toolchain.h>
#include <sections.h>
@@ -65,21 +65,21 @@ const NANO_ESF _default_esf = {
	0xdeaddead /* SS */
};
-/*******************************************************************************
+/**
*
* _NanoFatalErrorHandler - nanokernel fatal error handler
*
* This routine is called when a fatal error condition is detected by either
* hardware or software.
*
* The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _NanoFatalErrorHandler(
	unsigned int reason, /* reason that handler was called */
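A sketch of the documented fallback when no hardware-generated ESF exists. The second-parameter shape is assumed from the surrounding text (only the reason parameter is visible above), and the reason code is a placeholder for one of the kernel's fatal-error constants:

    /* Stand-ins: the real declarations live in the kernel's private headers. */
    typedef struct { unsigned int eflags; /* fields elided */ } NANO_ESF;

    extern const NANO_ESF _default_esf;
    extern void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf);

    /* Illustrative: report a software-detected fatal error using the
     * global default ESF, exactly as the comment prescribes.
     */
    static void report_fatal(unsigned int reason) /* reason code: placeholder */
    {
        _NanoFatalErrorHandler(reason, &_default_esf);
    }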


@@ -38,7 +38,7 @@ architecture.
INTERNAL
Inline versions of these APIs, find_last_set_inline() and find_first_set_inline(),
are defined in arch.h.
*/
#define _ASMLANGUAGE
@@ -51,24 +51,24 @@ are defined in arch.h.
GTEXT(find_last_set)
GTEXT(find_first_set)
-/*******************************************************************************
+/**
*
* find_first_set - find first set bit searching from the LSB
*
* This routine finds the first bit set in the passed argument and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit to 32 for the most significant bit.
* A return value of zero indicates that the value passed is zero.
*
* RETURNS: bit position from 1 to 32, or 0 if the argument is zero.
*
* INTERNAL
* For Intel64 (x86_64) architectures, the 'cmovz' can be removed by
* leveraging the fact that 'bsfl' doesn't modify the destination operand
* when the source operand is zero. The "bitpos" variable can be preloaded
* into the destination register, and given the unconditional ++bitpos that
* is performed after the 'cmovz', the correct results are yielded.
*/
SECTION_FUNC(TEXT, find_first_set)
@@ -94,24 +94,24 @@ BRANCH_LABEL(ffsLsb_argNotZero) /* this label serves find_first_set() & find_last_set() */
#endif /* !CONFIG_CMOV_UNSUPPORTED */
-/*******************************************************************************
+/**
*
* find_last_set - find first set bit searching from the MSB
*
* This routine finds the first bit set in the passed argument and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit to 32 for the most significant bit.
* A return value of zero indicates that the value passed is zero.
*
* RETURNS: bit position from 1 to 32, or 0 if the argument is zero.
*
* INTERNAL
* For Intel64 (x86_64) architectures, the 'cmovz' can be removed by
* leveraging the fact that 'bsrl' doesn't modify the destination operand
* when the source operand is zero. The "bitpos" variable can be preloaded
* into the destination register, and given the unconditional ++bitpos that
* is performed after the 'cmovz', the correct results are yielded.
*/
SECTION_FUNC(TEXT, find_last_set)
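The documented numbering runs 1 (LSB) through 32 (MSB), with 0 meaning no bits are set. A portable C reference for those semantics; this is illustrative only, since the real routines use single 'bsfl'/'bsrl' instructions:

    #include <stdint.h>

    /* Reference semantics: scan up from the LSB; 0 in yields 0 out. */
    static unsigned int find_first_set_ref(uint32_t op)
    {
        unsigned int pos = 1;

        if (op == 0)
            return 0;
        while ((op & 1) == 0) {
            op >>= 1;
            pos++;
        }
        return pos;
    }

    /* Reference semantics: scan down from the MSB; 0 in yields 0 out. */
    static unsigned int find_last_set_ref(uint32_t op)
    {
        unsigned int pos = 32;

        if (op == 0)
            return 0;
        while ((op & 0x80000000u) == 0) {
            op <<= 1;
            pos--;
        }
        return pos;
    }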


@@ -93,7 +93,7 @@ FP operations. All other tasks and fibers have CR0[TS] = 1 so that an attempt
to perform an FP operation will cause an exception, allowing the system to
enable FP resource sharing on its behalf.
*/
#ifdef CONFIG_MICROKERNEL
#include <microkernel.h>
@@ -112,74 +112,74 @@ enable FP resource sharing on its behalf.
extern uint32_t _sse_mxcsr_default_value; /* SSE control/status register default value */
#endif /* CONFIG_SSE */
-/*******************************************************************************
+/**
*
* _FpCtxSave - save non-integer context information
*
* This routine saves the system's "live" non-integer context into the
* specified CCS. If the specified task or fiber supports SSE then
* x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved.
*
* RETURNS: N/A
*/
static void _FpCtxSave(tCCS *ccs)
{
	_do_fp_ctx_save(ccs->flags & USE_SSE, &ccs->preempFloatReg);
}
-/*******************************************************************************
+/**
*
* _FpCtxInit - initialize non-integer context information
*
* This routine initializes the system's "live" non-integer context.
*
* RETURNS: N/A
*/
static inline void _FpCtxInit(tCCS *ccs)
{
	_do_fp_ctx_init(ccs->flags & USE_SSE);
}
-/*******************************************************************************
+/**
*
* _FpEnable - enable preservation of non-integer context information
*
* This routine allows the specified task/fiber (which may be the active
* task/fiber) to safely share the system's floating point registers with
* other tasks/fibers. The <options> parameter indicates which floating point
* register sets will be used by the specified task/fiber:
*
* a) USE_FP indicates x87 FPU and MMX registers only
* b) USE_SSE indicates x87 FPU and MMX and SSEx registers
*
* Invoking this routine creates a floating point context for the task/fiber
* that corresponds to an FPU that has been reset. The system will thereafter
* protect the task/fiber's FP context so that it is not altered during
* a pre-emptive context switch.
*
* WARNING
* This routine should only be used to enable floating point support for a
* task/fiber that does not currently have such support enabled already.
*
* RETURNS: N/A
*
* INTERNAL
* Since the transition from "non-FP supporting" to "FP supporting" must be done
* atomically to avoid confusing the floating point logic used by _Swap(),
* this routine locks interrupts to ensure that a context switch does not occur.
* The locking isn't really needed when the routine is called by a fiber
* (since context switching can't occur), but it is harmless and allows a single
* routine to be called by both tasks and fibers (thus saving code space).
*
* If necessary, the interrupt latency impact of calling this routine from a
* fiber could be lessened by re-designing things so that only task-type callers
* locked interrupts (i.e. move the locking to task_float_enable()). However,
* all calls to fiber_float_enable() would need to be reviewed to ensure they
* are only used from a fiber, rather than from "generic" code used by both
* tasks and fibers.
*/
void _FpEnable(tCCS *ccs,
	unsigned int options /* USE_FP or USE_SSE */
@@ -287,63 +287,63 @@ void _FpEnable(tCCS *ccs,
	irq_unlock_inline(imask);
}
-/*******************************************************************************
+/**
*
* fiber_float_enable - enable preservation of non-integer context information
*
* This routine allows a fiber to permit a task/fiber (including itself) to
* safely share the system's floating point registers with other tasks/fibers.
*
* See the description of _FpEnable() for further details.
*
* RETURNS: N/A
*/
FUNC_ALIAS(_FpEnable, fiber_float_enable, void);
-/*******************************************************************************
+/**
*
* task_float_enable - enable preservation of non-integer context information
*
* This routine allows a task to permit a task/fiber (including itself) to
* safely share the system's floating point registers with other tasks/fibers.
*
* See the description of _FpEnable() for further details.
*
* RETURNS: N/A
*/
FUNC_ALIAS(_FpEnable, task_float_enable, void);
-/*******************************************************************************
+/**
*
* _FpDisable - disable preservation of non-integer context information
*
* This routine prevents the specified task/fiber (which may be the active
* task/fiber) from safely sharing any of the system's floating point registers
* with other tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* RETURNS: N/A
*
* INTERNAL
* Since the transition from "FP supporting" to "non-FP supporting" must be done
* atomically to avoid confusing the floating point logic used by _Swap(),
* this routine locks interrupts to ensure that a context switch does not occur.
* The locking isn't really needed when the routine is called by a fiber
* (since context switching can't occur), but it is harmless and allows a single
* routine to be called by both tasks and fibers (thus saving code space).
*
* If necessary, the interrupt latency impact of calling this routine from a
* fiber could be lessened by re-designing things so that only task-type callers
* locked interrupts (i.e. move the locking to task_float_disable()). However,
* all calls to fiber_float_disable() would need to be reviewed to ensure they
* are only used from a fiber, rather than from "generic" code used by both
* tasks and fibers.
*/
void _FpDisable(tCCS *ccs)
{
@@ -376,58 +376,58 @@ void _FpDisable(tCCS *ccs)
	irq_unlock_inline(imask);
}
-/*******************************************************************************
+/**
*
* fiber_float_disable - disable preservation of non-integer context
* information
*
* This routine allows a fiber to disallow a task/fiber (including itself) from
* safely sharing any of the system's floating point registers with other
* tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* RETURNS: N/A
*/
FUNC_ALIAS(_FpDisable, fiber_float_disable, void);
-/*******************************************************************************
+/**
*
* task_float_disable - disable preservation of non-integer context information
*
* This routine allows a task to disallow a task/fiber (including itself) from
* safely sharing any of the system's floating point registers with other
* tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* RETURNS: N/A
*/
FUNC_ALIAS(_FpDisable, task_float_disable, void);
#ifdef CONFIG_AUTOMATIC_FP_ENABLING
-/*******************************************************************************
+/**
*
* _FpNotAvailableExcHandler - handler for "device not available" exception
*
* This routine is registered to handle the "device not available" exception
* (vector = 7) when the AUTOMATIC_FP_ENABLING configuration option has
* been selected.
*
* The processor will generate this exception if any x87 FPU, MMX, or SSEx
* instruction is executed while CR0[TS]=1. The handler then enables the
* current task or fiber with the USE_FP option (or the USE_SSE option if the
* SSE configuration option has been enabled).
*
* RETURNS: N/A
*/
void _FpNotAvailableExcHandler(NANO_ESF * pEsf /* not used */
	)
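A usage sketch for the public aliases. The two-argument shape (context pointer plus option bits) is assumed from the _FpEnable() signature that both aliases resolve to, and the option values are stand-ins:

    /* Stand-ins for kernel types and option bits (values illustrative). */
    typedef struct ccs tCCS;            /* opaque context control block */
    #define USE_FP  0x10                /* x87 FPU and MMX registers only */
    #define USE_SSE 0x20                /* x87 FPU, MMX and SSEx registers */

    extern void fiber_float_enable(tCCS *ccs, unsigned int options);

    /* Illustrative: let a context share the FP registers safely. */
    static void enable_fp_sharing(tCCS *target)
    {
        fiber_float_enable(target, USE_FP); /* or USE_SSE on SSE-enabled builds */
    }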


@@ -34,7 +34,7 @@
DESCRIPTION
This module contains routines for updating the global descriptor table (GDT)
for the IA-32 architecture.
*/
#include <linker-defs.h>
#include <toolchain.h>


@@ -49,7 +49,7 @@ The _IntBoiExit() routine is provided in a separate module so that it gets
included in the final image only if an interrupt controller driver utilizing
_IntBoiExit() is present.
*/
#define _ASMLANGUAGE
#include <arch/x86/asm.h>
@@ -64,18 +64,18 @@ _IntBoiExit() is present.
GTEXT(_IntExit)
-/*******************************************************************************
+/**
*
* _IntBoiExit - exit interrupt handler stub without invoking ISR
*
* This routine exits an interrupt handler stub without invoking the associated
* ISR handler (or the EOI handler, if present). It should only be jumped to
* by an interrupt controller driver's BOI routine, and only if the BOI routine
* is passed a single parameter by the interrupt stub.
*
* \INTERNAL
* A BOI routine that has no parameters can jump directly to _IntExit().
*/
SECTION_FUNC(TEXT, _IntBoiExit)
	addl $4, %esp /* pop off the $BoiParameter */


@@ -85,7 +85,7 @@ NOTE: Be sure to update the arch specific definition of the _INT_STUB_SIZE macro
to reflect the maximum potential size of the interrupt stub (as shown above).
The _INT_STUB_SIZE macro is defined in include/nanokernel/x86/arch.h.
*/
#ifndef CONFIG_NO_ISRS
@@ -159,7 +159,7 @@ static NANO_INT_STUB dynamic_stubs[ALL_DYNAMIC_STUBS] = {
	[0 ... (ALL_DYNAMIC_STUBS - 1)] = { _STUB_AVAIL, }
};
-/*******************************************************************************
+/**
* _int_stub_alloc - allocate dynamic interrupt stub
*
* RETURNS: index of the first available element of the STUB array or -1
@@ -179,28 +179,28 @@ static int _int_stub_alloc(void)
}
#endif /* ALL_DYNAMIC_STUBS > 0 */
-/*******************************************************************************
+/**
*
* _IntVecSet - connect a routine to an interrupt vector
*
* This routine "connects" the specified <routine> to the specified interrupt
* <vector>. On the IA-32 architecture, an interrupt vector is a value from
* 0 to 255. This routine merely fills in the appropriate interrupt
* descriptor table (IDT) with an interrupt-gate descriptor such that <routine>
* is invoked when interrupt <vector> is asserted. The <dpl> argument specifies
* the privilege level for the interrupt-gate descriptor; (hardware) interrupts
* and exceptions should specify a level of 0, whereas handlers for user-mode
* software generated interrupts should specify 3.
*
* RETURNS: N/A
*
* INTERNAL
* Unlike nanoCpuExcConnect() and irq_connect(), the _IntVecSet() routine
* is a very basic API that simply updates the appropriate entry in the Interrupt
* Descriptor Table (IDT) such that the specified routine is invoked when the
* specified interrupt vector is asserted.
*
*/
void _IntVecSet(
	unsigned int vector, /* interrupt vector: 0 to 255 on IA-32 */
@@ -233,53 +233,53 @@ void _IntVecSet(
* generates an error
*/
#if ALL_DYNAMIC_STUBS > 0
-/*******************************************************************************
+/**
*
* irq_connect - connect a C routine to a hardware interrupt
*
* This routine connects an interrupt service routine (ISR) coded in C to
* the specified hardware <irq>. An interrupt vector will be allocated to
* satisfy the specified <priority>. If the interrupt service routine is being
* connected to a software generated interrupt, then <irq> must be set to
* NANO_SOFT_IRQ.
*
* The specified <irq> represents a virtualized IRQ, i.e. it does not
* necessarily represent a specific IRQ line on a given interrupt controller
* device. The BSP presents a virtualized set of IRQs from 0 to N, where N
* is the total number of IRQs supported by all the interrupt controller devices
* on the board. See the BSP's documentation for the mapping of virtualized
* IRQ to physical IRQ.
*
* When the device asserts an interrupt on the specified <irq>, a switch to
* the interrupt stack is performed (if not already executing on the interrupt
* stack), followed by saving the integer (i.e. non-floating point) context of
* the currently executing task, fiber, or ISR. The ISR specified by <routine>
* will then be invoked with the single <parameter>. When the ISR returns, a
* context switch may occur.
*
* The routine searches for the first available element in the dynamic_stubs
* array and uses it for the stub.
*
* RETURNS: the allocated interrupt vector
*
* WARNINGS
* Some boards utilize interrupt controllers where the interrupt vector
* cannot be programmed on an IRQ basis; as a result, the vector assigned
* to the <irq> during interrupt controller initialization will be returned.
* In these cases, the requested <priority> is not honoured since the interrupt
* prioritization is fixed by the interrupt controller (e.g. IRQ0 will always
* be the highest priority interrupt regardless of what interrupt vector
* was assigned to IRQ0).
*
* This routine does not perform range checking on the requested <priority>
* and thus, depending on the underlying interrupt controller, may result
* in the assignment of an interrupt vector located in the reserved range of
* the processor.
*
* INTERNAL
* For debug kernels, this routine shall return -1 when there are no
* vectors remaining in the specified <priority> level.
*/
int irq_connect(
	unsigned int irq, /* virtualized IRQ to connect to */
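A call sketch, assuming the parameter order suggested by the comment's <irq>, <priority>, <routine>, <parameter> references; the IRQ and priority numbers are placeholders:

    #include <stddef.h>

    extern int irq_connect(unsigned int irq, unsigned int priority,
                           void (*routine)(void *parameter), void *parameter);

    /* Minimal ISR: acknowledge the device and do as little as possible. */
    static void my_isr(void *arg)
    {
        (void)arg;
    }

    /* Illustrative: connect my_isr() to virtualized IRQ 5 at priority 3.
     * On debug kernels the result is -1 if the priority level has no
     * vectors left; otherwise it is the allocated vector.
     */
    static int wire_up_device(void)
    {
        return irq_connect(5, 3, my_isr, NULL);
    }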
@@ -478,36 +478,36 @@ int irq_connect(
}
#endif /* ALL_DYNAMIC_STUBS > 0 */
-/*******************************************************************************
+/**
*
* _IntVecAlloc - allocate a free interrupt vector given <priority>
*
* This routine scans the interrupt_vectors_allocated[] array for a free vector that
* satisfies the specified <priority>. It is a utility function for use only
* by a BSP's _SysIntVecAlloc() routine.
*
* This routine assumes that the relationship between interrupt priority and
* interrupt vector is:
*
* priority = vector / 16;
*
* Since vectors 0 to 31 are reserved by the IA-32 architecture, the priorities
* of user defined interrupts range from 2 to 15. Each interrupt priority level
* contains 16 vectors, and the prioritization of interrupts within a priority
* level is determined by the vector number; the higher the vector number, the
* higher the priority within that priority level.
*
* It is also assumed that the interrupt controllers are capable of managing
* interrupt requests on a per-vector level as opposed to a per-priority level.
* For example, on the local APIC of Pentium4 and later processors, the in-service
* register (ISR) and the interrupt request register (IRR) are 256 bits wide.
*
* RETURNS: allocated interrupt vector
*
* INTERNAL
* For debug kernels, this routine shall return -1 when there are no
* vectors remaining in the specified <priority> level.
*/
int _IntVecAlloc(unsigned int priority)
{
@@ -601,18 +601,18 @@ int _IntVecAlloc(unsigned int priority)
	return vector;
}
-/*******************************************************************************
+/**
*
* _IntVecMarkAllocated - mark interrupt vector as allocated
*
* This routine is used to "reserve" an interrupt vector that is allocated
* or assigned by any means other than _IntVecAlloc(). This marks the vector
* as allocated so that any future invocations of _IntVecAlloc() will not
* return that vector.
*
* RETURNS: N/A
*
*/
void _IntVecMarkAllocated(unsigned int vector)
{
@@ -625,15 +625,15 @@ void _IntVecMarkAllocated(unsigned int vector)
	irq_unlock(imask);
}
-/*******************************************************************************
+/**
*
* _IntVecMarkFree - mark interrupt vector as free
*
* This routine is used to "free" an interrupt vector that was allocated
* or assigned using _IntVecAlloc() or _IntVecMarkAllocated(). This marks the
* vector as available so that any future allocations can return that vector.
*
*/
void _IntVecMarkFree(unsigned int vector)
{
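The priority = vector / 16 mapping is worth a worked example: priority 3 owns vectors 48 through 63, and within that level vector 63 is the strongest. A sketch of an allocator over that mapping; this is illustrative, and the real routine tracks allocations in interrupt_vectors_allocated[] under an interrupt lock rather than in the stand-in array shown here:

    /* Illustrative: pick the highest free vector inside a priority level,
     * following the documented mapping priority = vector / 16.
     */
    #define VECTORS_PER_PRIORITY 16

    static unsigned char vector_in_use[256]; /* stand-in for the real bitmap */

    static int vec_alloc_sketch(unsigned int priority)
    {
        int vector;

        if (priority < 2 || priority > 15)
            return -1;               /* vectors 0-31 are reserved by IA-32 */

        /* higher vector number == higher priority within the level */
        for (vector = (int)(priority * VECTORS_PER_PRIORITY) + 15;
             vector >= (int)(priority * VECTORS_PER_PRIORITY); vector--) {
            if (!vector_in_use[vector]) {
                vector_in_use[vector] = 1;
                return vector;
            }
        }
        return -1;                   /* priority level exhausted */
    }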


@ -31,15 +31,15 @@
*/ */
/* /*
* DESCRIPTION * DESCRIPTION
* This module contains the irq_handler_set() API. This routine is closely * This module contains the irq_handler_set() API. This routine is closely
* associated with irq_connect(), and any changes to the layout of the * associated with irq_connect(), and any changes to the layout of the
* constructed interrupt stub must be reflected in both places. * constructed interrupt stub must be reflected in both places.
* *
* INTERNAL * INTERNAL
* This routine is defined here, rather than in intconnect.c, so that it can be * This routine is defined here, rather than in intconnect.c, so that it can be
* omitted from a system image if it isn't required. * omitted from a system image if it isn't required.
*/ */
#include <nano_private.h> #include <nano_private.h>
@ -57,28 +57,28 @@ extern unsigned char _idt_base_address[];
#define FIRST_OPT_OPCODE_OFF 5 #define FIRST_OPT_OPCODE_OFF 5
/******************************************************************************* /**
* *
* irq_handler_set - set the handler in an already connected stub * irq_handler_set - set the handler in an already connected stub
* *
* This routine is used to modify an already fully constructed interrupt stub * This routine is used to modify an already fully constructed interrupt stub
* to specify a new <routine> and/or <parameter>. * to specify a new <routine> and/or <parameter>.
* *
* WARNINGS: * WARNINGS:
* *
* A fully constructed interrupt stub is generated via irq_connect(), i.e. * A fully constructed interrupt stub is generated via irq_connect(), i.e.
* the irq_handler_set() function must only be called after invoking * the irq_handler_set() function must only be called after invoking
* irq_connect(). * irq_connect().
* *
* The caller must ensure that the associated interrupt does not occur while * The caller must ensure that the associated interrupt does not occur while
* this routine is executing, otherwise race conditions may arise that could * this routine is executing, otherwise race conditions may arise that could
* cause the interrupt stub to invoke the handler using an incorrect routine * cause the interrupt stub to invoke the handler using an incorrect routine
* and/or parameter. If possible, silence the source of the associated interrupt * and/or parameter. If possible, silence the source of the associated interrupt
* only, rather than locking out all interrupts. * only, rather than locking out all interrupts.
* *
* RETURNS: N/A * RETURNS: N/A
* *
*/ */
void irq_handler_set(unsigned int vector, void irq_handler_set(unsigned int vector,
void (*oldRoutine)(void *parameter), void (*oldRoutine)(void *parameter),
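    The race warning above suggests the following update pattern; the device
    mask/unmask helpers are hypothetical, and the tail of the parameter list is
    assumed from the truncated prototype:

        device_mask_irq();          /* hypothetical: silence the source only */
        irq_handler_set(vector, oldRoutine, newRoutine, newParameter);
        device_unmask_irq();        /* hypothetical */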
View file
@ -36,7 +36,7 @@ This module implements assembly routines to manage interrupts on
the Intel IA-32 architecture. More specifically, the interrupt (asynchronous the Intel IA-32 architecture. More specifically, the interrupt (asynchronous
exception) stubs are implemented in this module. The stubs are invoked when exception) stubs are implemented in this module. The stubs are invoked when
entering and exiting a C interrupt handler. entering and exiting a C interrupt handler.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -74,41 +74,41 @@ entering and exiting a C interrupt handler.
GTEXT(_int_latency_start) GTEXT(_int_latency_start)
GTEXT(_int_latency_stop) GTEXT(_int_latency_stop)
#endif #endif
/******************************************************************************* /**
* *
* _IntEnt - inform the kernel of an interrupt * _IntEnt - inform the kernel of an interrupt
* *
* This function is called from the interrupt stub created by irq_connect() * This function is called from the interrupt stub created by irq_connect()
* to inform the kernel of an interrupt. This routine increments * to inform the kernel of an interrupt. This routine increments
* _nanokernel.nested (to support interrupt nesting), switches to the * _nanokernel.nested (to support interrupt nesting), switches to the
* base of the interrupt stack, if not already on the interrupt stack, and then * base of the interrupt stack, if not already on the interrupt stack, and then
* saves the volatile integer registers onto the stack. Finally, control is * saves the volatile integer registers onto the stack. Finally, control is
* returned back to the interrupt stub code (which will then invoke the * returned back to the interrupt stub code (which will then invoke the
* "application" interrupt service routine). * "application" interrupt service routine).
* *
* Only the volatile integer registers are saved since ISRs are assumed not to * Only the volatile integer registers are saved since ISRs are assumed not to
* utilize floating point (or SSE) instructions. If an ISR requires the usage * utilize floating point (or SSE) instructions. If an ISR requires the usage
* of floating point (or SSE) instructions, it must first invoke nanoCpuFpSave() * of floating point (or SSE) instructions, it must first invoke nanoCpuFpSave()
* (or nanoCpuSseSave()) at the beginning of the ISR. A subsequent * (or nanoCpuSseSave()) at the beginning of the ISR. A subsequent
* nanoCpuFpRestore() (or nanoCpuSseRestore()) is needed just prior to returning * nanoCpuFpRestore() (or nanoCpuSseRestore()) is needed just prior to returning
* from the ISR. Note that the nanoCpuFpSave(), nanoCpuSseSave(), * from the ISR. Note that the nanoCpuFpSave(), nanoCpuSseSave(),
* nanoCpuFpRestore(), and nanoCpuSseRestore() APIs have not been * nanoCpuFpRestore(), and nanoCpuSseRestore() APIs have not been
* implemented yet. * implemented yet.
* *
* WARNINGS * WARNINGS
* *
* Host-based tools and the target-based GDB agent depend on the stack frame * Host-based tools and the target-based GDB agent depend on the stack frame
* created by this routine to determine the locations of volatile registers. * created by this routine to determine the locations of volatile registers.
* These tools must be updated to reflect any changes to the stack frame. * These tools must be updated to reflect any changes to the stack frame.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* C function prototype: * C function prototype:
* *
* void _IntEnt (void); * void _IntEnt (void);
* *
* NOMANUAL * NOMANUAL
*/ */
SECTION_FUNC(TEXT, _IntEnt) SECTION_FUNC(TEXT, _IntEnt)
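    The stub itself is hand-written assembly; purely as an illustration, the
    bookkeeping it performs corresponds to roughly this C, where the helper names
    are placeholders:

        /* C-level sketch of _IntEnt's bookkeeping (not the real implementation) */
        if (_nanokernel.nested++ == 0) {
            switch_to_interrupt_stack();    /* hypothetical helper */
        }
        save_volatile_integer_registers();  /* EAX, ECX, EDX only */
        /* control then returns to the stub, which calls the application ISR */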
@ -240,29 +240,29 @@ BRANCH_LABEL(_HandleIdle)
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */ #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
/******************************************************************************* /**
* *
* _IntExit - inform the kernel of an interrupt exit * _IntExit - inform the kernel of an interrupt exit
* *
* This function is called from the interrupt stub created by irq_connect() * This function is called from the interrupt stub created by irq_connect()
* to inform the kernel that the processing of an interrupt has * to inform the kernel that the processing of an interrupt has
* completed. This routine decrements _nanokernel.nested (to support interrupt * completed. This routine decrements _nanokernel.nested (to support interrupt
* nesting), restores the volatile integer registers, and then switches * nesting), restores the volatile integer registers, and then switches
* back to the interrupted context's stack, if this isn't a nested interrupt. * back to the interrupted context's stack, if this isn't a nested interrupt.
* *
* Finally, control is returned back to the interrupted fiber context or ISR. * Finally, control is returned back to the interrupted fiber context or ISR.
* A context switch _may_ occur if the interrupted context was a task context, * A context switch _may_ occur if the interrupted context was a task context,
* in which case one or more other fiber and task contexts will execute before * in which case one or more other fiber and task contexts will execute before
* this routine resumes and control gets returned to the interrupted task. * this routine resumes and control gets returned to the interrupted task.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* C function prototype: * C function prototype:
* *
* void _IntExit (void); * void _IntExit (void);
* *
* NOMANUAL * NOMANUAL
*/ */
SECTION_FUNC(TEXT, _IntExit) SECTION_FUNC(TEXT, _IntExit)
@ -388,38 +388,38 @@ BRANCH_LABEL(nestedInterrupt)
iret iret
/******************************************************************************* /**
* *
* _SpuriousIntHandler - * _SpuriousIntHandler -
* _SpuriousIntNoErrCodeHandler - spurious interrupt handler stubs * _SpuriousIntNoErrCodeHandler - spurious interrupt handler stubs
* *
* Interrupt-gate descriptors are statically created for all slots in the IDT * Interrupt-gate descriptors are statically created for all slots in the IDT
* that point to _SpuriousIntHandler() or _SpuriousIntNoErrCodeHandler(). The * that point to _SpuriousIntHandler() or _SpuriousIntNoErrCodeHandler(). The
* former stub is connected to exception vectors where the processor pushes an * former stub is connected to exception vectors where the processor pushes an
* error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP * error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP
* records. * records.
* *
* A spurious interrupt is considered a fatal condition, thus this routine * A spurious interrupt is considered a fatal condition, thus this routine
 * merely sets up the 'reason' and 'pEsf' parameters to the BSP-provided  * merely sets up the 'reason' and 'pEsf' parameters to the BSP-provided
* routine: _SysFatalHwErrorHandler(). In other words, there is no provision * routine: _SysFatalHwErrorHandler(). In other words, there is no provision
* to return to the interrupted context and thus the volatile registers * to return to the interrupted context and thus the volatile registers
* are not saved. * are not saved.
* *
* RETURNS: Never returns * RETURNS: Never returns
* *
* C function prototype: * C function prototype:
* *
* void _SpuriousIntHandler (void); * void _SpuriousIntHandler (void);
* *
* INTERNAL * INTERNAL
* The _IntVecSet() routine creates an interrupt-gate descriptor for all * The _IntVecSet() routine creates an interrupt-gate descriptor for all
* connections. The processor will automatically clear the IF bit * connections. The processor will automatically clear the IF bit
* in the EFLAGS register upon execution of the handler, * in the EFLAGS register upon execution of the handler,
* thus _SpuriousIntNoErrCodeHandler()/_SpuriousIntHandler() shall be * thus _SpuriousIntNoErrCodeHandler()/_SpuriousIntHandler() shall be
* invoked with interrupts disabled. * invoked with interrupts disabled.
* *
* NOMANUAL * NOMANUAL
*/ */
SECTION_FUNC(TEXT, _SpuriousIntNoErrCodeHandler) SECTION_FUNC(TEXT, _SpuriousIntNoErrCodeHandler)
@ -462,34 +462,34 @@ BRANCH_LABEL(callFatalHandler)
jmp callFatalHandler jmp callFatalHandler
/******************************************************************************* /**
* *
* irq_lock - disable interrupts on the local CPU * irq_lock - disable interrupts on the local CPU
* *
* This routine disables interrupts. It can be called from either interrupt * This routine disables interrupts. It can be called from either interrupt
* or context level. This routine returns an architecture-dependent * or context level. This routine returns an architecture-dependent
* lock-out key representing the "interrupt disable state" prior to the call; * lock-out key representing the "interrupt disable state" prior to the call;
* this key can be passed to fiber_enable_ints() to re-enable interrupts. * this key can be passed to fiber_enable_ints() to re-enable interrupts.
* *
* The lock-out key should only be used as the argument to the * The lock-out key should only be used as the argument to the
* fiber_enable_ints() API. It should never be used to manually re-enable * fiber_enable_ints() API. It should never be used to manually re-enable
* interrupts or to inspect or manipulate the contents of the source register. * interrupts or to inspect or manipulate the contents of the source register.
* *
* WARNINGS * WARNINGS
* Invoking a kernel routine with interrupts locked may result in * Invoking a kernel routine with interrupts locked may result in
* interrupts being re-enabled for an unspecified period of time. If the * interrupts being re-enabled for an unspecified period of time. If the
* called routine blocks, interrupts will be re-enabled while another * called routine blocks, interrupts will be re-enabled while another
* context executes, or while the system is idle. * context executes, or while the system is idle.
* *
* The "interrupt disable state" is an attribute of a context, i.e. it's part * The "interrupt disable state" is an attribute of a context, i.e. it's part
 * of the context. Thus, if a context disables interrupts and  * of the context. Thus, if a context disables interrupts and
* subsequently invokes a kernel routine that causes the calling context * subsequently invokes a kernel routine that causes the calling context
* to block, the interrupt disable state will be restored when the context is * to block, the interrupt disable state will be restored when the context is
* later rescheduled for execution. * later rescheduled for execution.
* *
* RETURNS: An architecture-dependent lock-out key representing the * RETURNS: An architecture-dependent lock-out key representing the
* "interrupt disable state" prior to the call. * "interrupt disable state" prior to the call.
*/ */
SECTION_FUNC(TEXT, irq_lock) SECTION_FUNC(TEXT, irq_lock)
pushfl pushfl
@ -501,16 +501,16 @@ SECTION_FUNC(TEXT, irq_lock)
ret ret
/******************************************************************************* /**
* *
* irq_unlock - enable interrupts on the local CPU * irq_unlock - enable interrupts on the local CPU
* *
* This routine re-enables interrupts on the local CPU. The <key> parameter * This routine re-enables interrupts on the local CPU. The <key> parameter
* is an architecture-dependent lock-out key that is returned by a previous * is an architecture-dependent lock-out key that is returned by a previous
* invocation of irq_lock(). * invocation of irq_lock().
* *
 * This routine can be called from either a context or an ISR.  * This routine can be called from either a context or an ISR.
*/ */
SECTION_FUNC(TEXT, irq_unlock) SECTION_FUNC(TEXT, irq_unlock)
testl $0x200, SP_ARG1(%esp) testl $0x200, SP_ARG1(%esp)
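    For illustration, the critical-section pattern these two routines support,
    treating the key as opaque per the warning above (the shared variable is
    hypothetical):

        unsigned int key = irq_lock();  /* key == prior interrupt state */
        shared_counter++;               /* touch data shared with ISRs; keep short */
        irq_unlock(key);                /* restore the saved state */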
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This module provides the implementation of the _MsrWrite() and _MsrRead() This module provides the implementation of the _MsrWrite() and _MsrRead()
utilities. utilities.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -45,28 +45,28 @@ utilities.
GTEXT(_MsrWrite) GTEXT(_MsrWrite)
GTEXT(_MsrRead) GTEXT(_MsrRead)
/******************************************************************************* /**
* *
* _MsrWrite - write to a model specific register (MSR) * _MsrWrite - write to a model specific register (MSR)
* *
* This function is used to write to an MSR. * This function is used to write to an MSR.
* *
* C function prototype: * C function prototype:
* *
* void _MsrWrite (unsigned int msr, uint64_t msrData); * void _MsrWrite (unsigned int msr, uint64_t msrData);
* *
* The definitions of the so-called "Architectural MSRs" are contained * The definitions of the so-called "Architectural MSRs" are contained
* in nano_private.h and have the format: IA32_XXX_MSR * in nano_private.h and have the format: IA32_XXX_MSR
* *
* INTERNAL * INTERNAL
* 1) The 'wrmsr' instruction was introduced in the Pentium processor; executing * 1) The 'wrmsr' instruction was introduced in the Pentium processor; executing
* this instruction on an earlier IA-32 processor will result in an invalid * this instruction on an earlier IA-32 processor will result in an invalid
* opcode exception. * opcode exception.
* 2) The 'wrmsr' uses the ECX, EDX, and EAX registers which matches the set of * 2) The 'wrmsr' uses the ECX, EDX, and EAX registers which matches the set of
* volatile registers! * volatile registers!
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_FUNC(TEXT, _MsrWrite) SECTION_FUNC(TEXT, _MsrWrite)
movl SP_ARG1(%esp), %ecx /* load ECX with <msr> */ movl SP_ARG1(%esp), %ecx /* load ECX with <msr> */
@ -76,28 +76,28 @@ SECTION_FUNC(TEXT, _MsrWrite)
ret ret
/******************************************************************************* /**
* *
* _MsrRead - read from a model specific register (MSR) * _MsrRead - read from a model specific register (MSR)
* *
* This function is used to read from an MSR. * This function is used to read from an MSR.
* *
* C function prototype: * C function prototype:
* *
* uint64_t _MsrRead (unsigned int msr); * uint64_t _MsrRead (unsigned int msr);
* *
* The definitions of the so-called "Architectural MSRs" are contained * The definitions of the so-called "Architectural MSRs" are contained
* in nano_private.h and have the format: IA32_XXX_MSR * in nano_private.h and have the format: IA32_XXX_MSR
* *
* INTERNAL * INTERNAL
* 1) The 'rdmsr' instruction was introduced in the Pentium processor; executing * 1) The 'rdmsr' instruction was introduced in the Pentium processor; executing
* this instruction on an earlier IA-32 processor will result in an invalid * this instruction on an earlier IA-32 processor will result in an invalid
* opcode exception. * opcode exception.
* 2) The 'rdmsr' uses the ECX, EDX, and EAX registers which matches the set of * 2) The 'rdmsr' uses the ECX, EDX, and EAX registers which matches the set of
* volatile registers! * volatile registers!
* *
 * RETURNS: the 64-bit value read from the specified MSR  * RETURNS: the 64-bit value read from the specified MSR
*/ */
SECTION_FUNC(TEXT, _MsrRead) SECTION_FUNC(TEXT, _MsrRead)
movl SP_ARG1(%esp), %ecx /* load ECX with <msr> */ movl SP_ARG1(%esp), %ecx /* load ECX with <msr> */
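    A functionally equivalent C sketch of the two accessors, using GCC inline
    assembly with the same ECX/EDX/EAX register usage noted above (illustration
    only; the kernel uses the assembly routines):

        #include <stdint.h>

        static inline uint64_t msr_read_sketch(unsigned int msr)
        {
            uint32_t lo, hi;

            __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
            return ((uint64_t)hi << 32) | lo;
        }

        static inline void msr_write_sketch(unsigned int msr, uint64_t data)
        {
            __asm__ volatile("wrmsr" : : "c"(msr), "a"((uint32_t)data),
                     "d"((uint32_t)(data >> 32)));
        }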
View file
@ -45,7 +45,7 @@ Typically, only those members that are accessed by assembly language routines
are defined; however, it doesn't hurt to define all fields for the sake of are defined; however, it doesn't hurt to define all fields for the sake of
completeness. completeness.
*/ */
#include <gen_offset.h> /* located in kernel/arch/common/include */ #include <gen_offset.h> /* located in kernel/arch/common/include */
View file
@ -39,7 +39,7 @@ a representation of the save stack frame generated by _Swap() in order
to generate offsets (in the form of absolute symbols) for consumption by to generate offsets (in the form of absolute symbols) for consumption by
host tools. Please update swapstk.h if changing the structure of the host tools. Please update swapstk.h if changing the structure of the
save frame on the stack. save frame on the stack.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -54,58 +54,58 @@ save frame on the stack.
/* externs */ /* externs */
/******************************************************************************* /**
* *
* _Swap - initiate a cooperative context switch * _Swap - initiate a cooperative context switch
* *
* The _Swap() routine is invoked by various nanokernel services to effect * The _Swap() routine is invoked by various nanokernel services to effect
 * a cooperative context switch. Prior to invoking _Swap(), the  * a cooperative context switch. Prior to invoking _Swap(), the
* caller disables interrupts (via irq_lock) and the return 'key' * caller disables interrupts (via irq_lock) and the return 'key'
* is passed as a parameter to _Swap(). The 'key' actually represents * is passed as a parameter to _Swap(). The 'key' actually represents
* the EFLAGS register prior to disabling interrupts via a 'cli' instruction. * the EFLAGS register prior to disabling interrupts via a 'cli' instruction.
* *
 * Given that _Swap() is called to effect a cooperative context switch,  * Given that _Swap() is called to effect a cooperative context switch,
* only the non-volatile integer registers need to be saved in the tCCS of the * only the non-volatile integer registers need to be saved in the tCCS of the
* outgoing context. The restoration of the integer registers of the incoming * outgoing context. The restoration of the integer registers of the incoming
* context depends on whether that context was preemptively context switched * context depends on whether that context was preemptively context switched
* out. The INT_ACTIVE and EXC_ACTIVE bits in the tCCS->flags field will signify * out. The INT_ACTIVE and EXC_ACTIVE bits in the tCCS->flags field will signify
* that the context was preemptively context switched out, and thus both the * that the context was preemptively context switched out, and thus both the
* volatile and non-volatile integer registers need to be restored. * volatile and non-volatile integer registers need to be restored.
* *
 * The volatile registers need to be scrubbed to ensure they contain no  * The volatile registers need to be scrubbed to ensure they contain no
* sensitive information that could compromise system security. This is to * sensitive information that could compromise system security. This is to
* make sure that information will not be leaked from one application to * make sure that information will not be leaked from one application to
* another via these volatile registers. * another via these volatile registers.
* *
* Here, the integer registers (EAX, ECX, EDX) have been scrubbed. Any changes * Here, the integer registers (EAX, ECX, EDX) have been scrubbed. Any changes
* to this routine that alter the values of these registers MUST be reviewed * to this routine that alter the values of these registers MUST be reviewed
* for potential security impacts. * for potential security impacts.
* *
* Floating point registers are handled using a lazy save/restore * Floating point registers are handled using a lazy save/restore
* mechanism since it's expected relatively few contexts will be created * mechanism since it's expected relatively few contexts will be created
* with the USE_FP or USE_SSE option bits. The nanokernel data structure * with the USE_FP or USE_SSE option bits. The nanokernel data structure
* maintains a 'current_fp' field to keep track of the context that "owns" * maintains a 'current_fp' field to keep track of the context that "owns"
* the floating point registers. Floating point registers consist of * the floating point registers. Floating point registers consist of
* ST0->ST7 (x87 FPU and MMX registers) and XMM0 -> XMM7. * ST0->ST7 (x87 FPU and MMX registers) and XMM0 -> XMM7.
* *
* All floating point registers are considered 'volatile' thus they will * All floating point registers are considered 'volatile' thus they will
 * only be saved/restored when a preemptive context switch occurs.  * only be saved/restored when a preemptive context switch occurs.
* *
* Floating point registers are currently NOT scrubbed, and are subject to * Floating point registers are currently NOT scrubbed, and are subject to
* potential security leaks. * potential security leaks.
* *
* The scheduling algorithm is simple: schedule the head of the runnable * The scheduling algorithm is simple: schedule the head of the runnable
* FIBER context list, which is represented by _nanokernel.fiber. If there are * FIBER context list, which is represented by _nanokernel.fiber. If there are
* no runnable FIBER contexts, then schedule the TASK context represented * no runnable FIBER contexts, then schedule the TASK context represented
* by _nanokernel.task. The _nanokernel.task field will never be NULL. * by _nanokernel.task. The _nanokernel.task field will never be NULL.
* *
 * RETURNS: may contain a return value set up by a call to fiberRtnValueSet()  * RETURNS: may contain a return value set up by a call to fiberRtnValueSet()
* *
* C function prototype: * C function prototype:
* *
* unsigned int _Swap (unsigned int eflags); * unsigned int _Swap (unsigned int eflags);
* *
*/ */
SECTION_FUNC(TEXT, _Swap) SECTION_FUNC(TEXT, _Swap)
movl $_nanokernel, %eax movl $_nanokernel, %eax
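    A sketch of the caller protocol described above, as a blocking nanokernel
    service might follow it; the wait-queue helper is hypothetical:

        unsigned int key = irq_lock();  /* key is the saved EFLAGS image */

        pend_current_context(&wait_q);  /* hypothetical bookkeeping */
        return _Swap(key);              /* may return a fiberRtnValueSet() value */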
View file
@ -36,7 +36,7 @@ This module contains utilities to perform unaligned reads/writes from/to a
32-bit quantity. Some memory subsystems do not support the IA-32 byte 32-bit quantity. Some memory subsystems do not support the IA-32 byte
enable lines, and thus accessing an unaligned 32-bit quantity is enable lines, and thus accessing an unaligned 32-bit quantity is
performed byte-by-byte. performed byte-by-byte.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -49,31 +49,31 @@ performed byte-by-byte.
GTEXT(_Unaligned32Write) GTEXT(_Unaligned32Write)
GTEXT(_Unaligned32Read) GTEXT(_Unaligned32Read)
/******************************************************************************* /**
* *
* _Unaligned32Write - perform an unaligned 32-bit write operation * _Unaligned32Write - perform an unaligned 32-bit write operation
* *
* This function is used during the interrupt and exception stub code * This function is used during the interrupt and exception stub code
* synthesis step when writing out the 32-bit relative jmp/branch * synthesis step when writing out the 32-bit relative jmp/branch
* offsets. * offsets.
* *
* Generally, the 32-bit offsets are located at an odd memory address. For * Generally, the 32-bit offsets are located at an odd memory address. For
* target hardware that don't fully (or properly) decode the byte enable * target hardware that don't fully (or properly) decode the byte enable
* lines from the IA-32 processor, this function shall be utilized to write out * lines from the IA-32 processor, this function shall be utilized to write out
* the data byte-by-byte. * the data byte-by-byte.
* *
* The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED * The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED
* shall be defined when the kernel is built for target hardware that cannot * shall be defined when the kernel is built for target hardware that cannot
* support unaligned double word (32-bit) write operations. * support unaligned double word (32-bit) write operations.
* *
* C function prototype: * C function prototype:
* *
* void _Unaligned32Write * void _Unaligned32Write
* ( * (
* unsigned int * ptr, * unsigned int * ptr,
* unsigned int val * unsigned int val
* ); * );
*/ */
SECTION_FUNC(TEXT, _Unaligned32Write) SECTION_FUNC(TEXT, _Unaligned32Write)
movl 0x4(%esp), %edx /* fetch ptr argument */ movl 0x4(%esp), %edx /* fetch ptr argument */
@ -86,30 +86,30 @@ SECTION_FUNC(TEXT, _Unaligned32Write)
ret ret
/******************************************************************************* /**
* *
* _Unaligned32Read - perform an unaligned 32-bit read operation * _Unaligned32Read - perform an unaligned 32-bit read operation
* *
* This function is used during the interrupt and exception stub code * This function is used during the interrupt and exception stub code
* synthesis step when reading the 32-bit relative jmp/branch * synthesis step when reading the 32-bit relative jmp/branch
* offsets. * offsets.
* *
* Generally, the 32-bit offsets are located at an odd memory address. For * Generally, the 32-bit offsets are located at an odd memory address. For
* target hardware that don't fully (or properly) decode the byte enable * target hardware that don't fully (or properly) decode the byte enable
* lines from the IA-32 processor, this function shall be utilized to read * lines from the IA-32 processor, this function shall be utilized to read
* the data byte-by-byte. * the data byte-by-byte.
* *
* The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED * The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED
* shall be defined when the kernel is built for target hardware that cannot * shall be defined when the kernel is built for target hardware that cannot
* support unaligned double word (32-bit) write operations. * support unaligned double word (32-bit) write operations.
* *
* C function prototype: * C function prototype:
* *
* unsigned int _Unaligned32Read * unsigned int _Unaligned32Read
* ( * (
* unsigned int * ptr * unsigned int * ptr
* ); * );
*/ */
SECTION_FUNC(TEXT, _Unaligned32Read) SECTION_FUNC(TEXT, _Unaligned32Read)
movl 0x4(%esp), %edx /* fetch ptr argument */ movl 0x4(%esp), %edx /* fetch ptr argument */
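    For illustration, byte-by-byte C equivalents of the two routines; each access
    is a single byte, so no unaligned 32-bit bus cycle is generated (little-endian
    layout, as on IA-32):

        static void unaligned32_write_sketch(unsigned char *dst, unsigned int val)
        {
            dst[0] = val & 0xff;
            dst[1] = (val >> 8) & 0xff;
            dst[2] = (val >> 16) & 0xff;
            dst[3] = (val >> 24) & 0xff;
        }

        static unsigned int unaligned32_read_sketch(const unsigned char *src)
        {
            return src[0] | (src[1] << 8) | (src[2] << 16) |
                   ((unsigned int)src[3] << 24);
        }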
View file
@ -40,7 +40,7 @@ booting scenarios (e.g. via GRUB or any other multiboot compliant bootloader)
now assume that the system is already in 32-bit protected mode and address line now assume that the system is already in 32-bit protected mode and address line
A20 is enabled. However, the code associated with CONFIG_PROT_MODE_SWITCH has A20 is enabled. However, the code associated with CONFIG_PROT_MODE_SWITCH has
been left in place should future booting scenarios arise which require its use. been left in place should future booting scenarios arise which require its use.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This module contains the static interrupt stubs for the various drivers employed This module contains the static interrupt stubs for the various drivers employed
by x86 BSPs. by x86 BSPs.
*/ */
#define _ASMLANGUAGE #define _ASMLANGUAGE
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This header file is used to specify and describe board-level aspects for This header file is used to specify and describe board-level aspects for
the 'generic_pc' BSP. the 'generic_pc' BSP.
*/ */
#ifndef __INCboardh #ifndef __INCboardh
#define __INCboardh #define __INCboardh
View file
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
This is the linker script for both standard images and XIP images. This is the linker script for both standard images and XIP images.
*/ */
#include <autoconf.h> #include <autoconf.h>
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This module provides routines to initialize and support board-level hardware This module provides routines to initialize and support board-level hardware
for the generic_pc BSP. for the generic_pc BSP.
*/ */
#include <nanokernel.h> #include <nanokernel.h>
#include "board.h" #include "board.h"
@ -86,13 +86,13 @@ static inline void ioapicInit(void)
#ifdef DO_CONSOLE_INIT #ifdef DO_CONSOLE_INIT
/******************************************************************************* /**
* *
 * uart_generic_info_init - set up the initialization information for one UART  * uart_generic_info_init - set up the initialization information for one UART
* *
* RETURNS: N/A * RETURNS: N/A
* *
*/ */
void uart_generic_info_init(struct uart_init_info *p_info) void uart_generic_info_init(struct uart_init_info *p_info)
{ {
@ -106,15 +106,15 @@ void uart_generic_info_init(struct uart_init_info *p_info)
#if defined(DO_CONSOLE_INIT) #if defined(DO_CONSOLE_INIT)
/******************************************************************************* /**
* *
* consoleInit - initialize target-only console * consoleInit - initialize target-only console
* *
* Only used for debugging. * Only used for debugging.
* *
* RETURNS: N/A * RETURNS: N/A
* *
*/ */
#include <console/uart_console.h> #include <console/uart_console.h>
@ -149,16 +149,16 @@ static void bluetooth_init(void)
} while ((0)) } while ((0))
#endif /* CONFIG_BLUETOOTH */ #endif /* CONFIG_BLUETOOTH */
/******************************************************************************* /**
* *
* _InitHardware - perform basic hardware initialization * _InitHardware - perform basic hardware initialization
* *
* Initialize the Intel 8259A interrupt controller device driver and the * Initialize the Intel 8259A interrupt controller device driver and the
* Intel 8250 UART device driver. * Intel 8250 UART device driver.
* Also initialize the timer device driver, if required. * Also initialize the timer device driver, if required.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _InitHardware(void) void _InitHardware(void)
{ {
View file
@ -63,33 +63,33 @@ extern "C" {
call h; \ call h; \
jmp _ExcExit; jmp _ExcExit;
/******************************************************************************* /**
* *
* NANO_CPU_EXC_CONNECT - to generate and register an exception stub * NANO_CPU_EXC_CONNECT - to generate and register an exception stub
* *
* Generates an exception stub for the handler, <h>. It is registered * Generates an exception stub for the handler, <h>. It is registered
* on the vector given by <v> with the privilege level <d>; <d> should always * on the vector given by <v> with the privilege level <d>; <d> should always
* be 0. * be 0.
* *
* Use this version of the macro if the processor pushes an error code for the * Use this version of the macro if the processor pushes an error code for the
* given exception. * given exception.
*/ */
#define NANO_CPU_EXC_CONNECT(h, v, d) \ #define NANO_CPU_EXC_CONNECT(h, v, d) \
NANO_CPU_INT_REGISTER_ASM(h, v, d) GTEXT(MK_STUB_NAME(h)); \ NANO_CPU_INT_REGISTER_ASM(h, v, d) GTEXT(MK_STUB_NAME(h)); \
SECTION_FUNC(TEXT, MK_STUB_NAME(h)) NANO_CPU_EXC_CONNECT_CODE(h) SECTION_FUNC(TEXT, MK_STUB_NAME(h)) NANO_CPU_EXC_CONNECT_CODE(h)
/******************************************************************************* /**
* *
* NANO_CPU_EXC_CONNECT_NO_ERR - to generate and register an exception stub * NANO_CPU_EXC_CONNECT_NO_ERR - to generate and register an exception stub
* *
* Generates an exception stub for the handler, <h>. It is registered * Generates an exception stub for the handler, <h>. It is registered
* on the vector given by <v> with the privilege level <d>; <d> should always * on the vector given by <v> with the privilege level <d>; <d> should always
* be 0. * be 0.
* *
* Use this version of the macro if the processor doesn't push an error code for * Use this version of the macro if the processor doesn't push an error code for
* the given exception. The created stub pushes a dummy value of 0 to keep the * the given exception. The created stub pushes a dummy value of 0 to keep the
* exception stack frame the same. * exception stack frame the same.
*/ */
#define NANO_CPU_EXC_CONNECT_NO_ERR(h, v, d) \ #define NANO_CPU_EXC_CONNECT_NO_ERR(h, v, d) \
NANO_CPU_INT_REGISTER_ASM(h, v, d) GTEXT(MK_STUB_NAME(h)); \ NANO_CPU_INT_REGISTER_ASM(h, v, d) GTEXT(MK_STUB_NAME(h)); \
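    Hypothetical registrations showing when each variant applies; vectors 14 (#PF,
    which pushes an error code) and 0 (#DE, which does not) follow the IA-32
    exception assignments, and the handler names are placeholders:

        NANO_CPU_EXC_CONNECT(pageFaultHandler, 14, 0)
        NANO_CPU_EXC_CONNECT_NO_ERR(divideErrorHandler, 0, 0)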
View file
@ -45,14 +45,14 @@ NANO_CPU_EXC_CONNECT_NO_ERR(handler,vector,0)
#else /* !_ASMLANGUAGE */ #else /* !_ASMLANGUAGE */
/******************************************************************************* /**
* *
* EflagsGet - return the current value of the EFLAGS register * EflagsGet - return the current value of the EFLAGS register
* *
* RETURNS: the EFLAGS register. * RETURNS: the EFLAGS register.
* *
* \NOMANUAL * \NOMANUAL
*/ */
static inline unsigned int EflagsGet(void) static inline unsigned int EflagsGet(void)
{ {
@ -70,15 +70,15 @@ static inline unsigned int EflagsGet(void)
#ifdef CONFIG_FP_SHARING #ifdef CONFIG_FP_SHARING
/******************************************************************************* /**
* *
* _FpAccessDisable - disallow use of floating point capabilities * _FpAccessDisable - disallow use of floating point capabilities
* *
* This routine sets CR0[TS] to 1, which disallows the use of FP instructions * This routine sets CR0[TS] to 1, which disallows the use of FP instructions
* by the currently executing context. * by the currently executing context.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
static inline void _FpAccessDisable(void) static inline void _FpAccessDisable(void)
{ {
@ -94,17 +94,17 @@ static inline void _FpAccessDisable(void)
} }
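    As an illustration of the CR0[TS] manipulation described above (ring-0 only;
    a sketch, not the actual body of _FpAccessDisable()):

        unsigned int cr0;

        __asm__ volatile("movl %%cr0, %0" : "=r"(cr0));
        cr0 |= 0x8;                                  /* CR0.TS is bit 3 */
        __asm__ volatile("movl %0, %%cr0" : : "r"(cr0));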
/******************************************************************************* /**
* *
* _do_fp_ctx_save - save non-integer context information * _do_fp_ctx_save - save non-integer context information
* *
* This routine saves the system's "live" non-integer context into the * This routine saves the system's "live" non-integer context into the
* specified area. If the specified task or fiber supports SSE then * specified area. If the specified task or fiber supports SSE then
* x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved. * x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved.
 * This function is invoked by _FpCtxSave(tCCS *ccs).  * This function is invoked by _FpCtxSave(tCCS *ccs).
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
static inline void _do_fp_ctx_save(int flags, void *preemp_float_reg) static inline void _do_fp_ctx_save(int flags, void *preemp_float_reg)
{ {
@ -126,15 +126,15 @@ static inline void _do_fp_ctx_save(int flags, void *preemp_float_reg)
} }
} }
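    The save-format choice described above reduces to a sketch like the following,
    assuming USE_SSE is the option bit tested via <flags> and that the save area
    is 16-byte aligned, as fxsave requires:

        if (flags & USE_SSE) {
            __asm__ volatile("fxsave (%0)" : : "r"(preemp_float_reg) : "memory");
        } else {
            __asm__ volatile("fnsave (%0)" : : "r"(preemp_float_reg) : "memory");
        }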
/******************************************************************************* /**
* *
* _do_fp_ctx_init - initialize non-integer context information * _do_fp_ctx_init - initialize non-integer context information
* *
* This routine initializes the system's "live" non-integer context. * This routine initializes the system's "live" non-integer context.
 * This function is invoked by _FpCtxInit(tCCS *ccs).  * This function is invoked by _FpCtxInit(tCCS *ccs).
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
static inline void _do_fp_ctx_init(int flags) static inline void _do_fp_ctx_init(int flags)
{ {
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This file provides definitions for the Global Descriptor Table (GDT) for the This file provides definitions for the Global Descriptor Table (GDT) for the
IA-32 architecture. IA-32 architecture.
*/ */
#ifndef _GDT_H #ifndef _GDT_H
#define _GDT_H #define _GDT_H
View file
@ -42,7 +42,7 @@ This file is also included by assembly language files which must #define
_ASMLANGUAGE before including this header file. Note that nanokernel assembly _ASMLANGUAGE before including this header file. Note that nanokernel assembly
source files obtain structure offset values via "absolute symbols" in the source files obtain structure offset values via "absolute symbols" in the
offsets.o module. offsets.o module.
*/ */
#ifndef _NANO_PRIVATE_H #ifndef _NANO_PRIVATE_H
#define _NANO_PRIVATE_H #define _NANO_PRIVATE_H
@ -760,18 +760,18 @@ extern tNANO _nanokernel;
/* inline function definitions */ /* inline function definitions */
/******************************************************************************* /**
* *
* nanoArchInit - performs architecture-specific initialization * nanoArchInit - performs architecture-specific initialization
* *
* This routine performs architecture-specific initialization of the nanokernel. * This routine performs architecture-specific initialization of the nanokernel.
* Trivial stuff is done inline; more complex initialization is done via * Trivial stuff is done inline; more complex initialization is done via
* function calls. * function calls.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static inline void nanoArchInit(void) static inline void nanoArchInit(void)
{ {
@ -809,18 +809,18 @@ static inline void nanoArchInit(void)
} }
/******************************************************************************* /**
* *
* fiberRtnValueSet - set the return value for the specified fiber (inline) * fiberRtnValueSet - set the return value for the specified fiber (inline)
* *
* The register used to store the return value from a function call invocation is * The register used to store the return value from a function call invocation is
* set to <value>. It is assumed that the specified <fiber> is pending, and * set to <value>. It is assumed that the specified <fiber> is pending, and
 * thus the fiber's context is stored in its tCCS structure.  * thus the fiber's context is stored in its tCCS structure.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* \NOMANUAL * \NOMANUAL
*/ */
static inline void fiberRtnValueSet( static inline void fiberRtnValueSet(
tCCS *fiber, /* pointer to fiber */ tCCS *fiber, /* pointer to fiber */
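    The body (cut off in this hunk) amounts to storing <value> in the saved EAX
    slot of the fiber's register context, roughly as below; the field name is an
    assumption:

        fiber->coopReg.eax = value;   /* surfaces as _Swap()'s return value */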
View file
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
Intel-specific parts of start_task(). Only FP functionality currently. Intel-specific parts of start_task(). Only FP functionality currently.
*/ */
#ifndef _START_TASK_ARCH__H_ #ifndef _START_TASK_ARCH__H_
#define _START_TASK_ARCH__H_ #define _START_TASK_ARCH__H_
View file
@ -39,7 +39,7 @@ NOTE: _Swap() does not use this file as it uses the push instruction to save a
context. Changes to the file will not automatically be picked up by _Swap(). context. Changes to the file will not automatically be picked up by _Swap().
Conversely, changes to _Swap() should be mirrored here if the stack frame is Conversely, changes to _Swap() should be mirrored here if the stack frame is
modified. modified.
*/ */
#ifndef _SWAPSTK_H #ifndef _SWAPSTK_H
#define _SWAPSTK_H #define _SWAPSTK_H
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This header file is used to specify and describe board-level aspects for This header file is used to specify and describe board-level aspects for
the 'Quark' BSP. the 'Quark' BSP.
*/ */
#ifndef __INCboardh #ifndef __INCboardh
#define __INCboardh #define __INCboardh
@ -152,31 +152,31 @@ the 'Quark' BSP.
sys_out8(data, (unsigned int)address) sys_out8(data, (unsigned int)address)
#define PLB_BYTE_REG_READ(address) sys_in8((unsigned int)address) #define PLB_BYTE_REG_READ(address) sys_in8((unsigned int)address)
/******************************************************************************* /**
* *
* outByte - output byte to memory location * outByte - output byte to memory location
* *
* RETURNS: N/A * RETURNS: N/A
* *
* NOMANUAL * NOMANUAL
*/ */
static inline void outByte(uint8_t data, uint32_t addr) static inline void outByte(uint8_t data, uint32_t addr)
{ {
*(volatile uint8_t *)addr = data; *(volatile uint8_t *)addr = data;
} }
/******************************************************************************* /**
* *
* inByte - obtain byte value from memory location * inByte - obtain byte value from memory location
* *
 * This function issues the 'mov' instruction to read a byte from the specified  * This function issues the 'mov' instruction to read a byte from the specified
* memory address. * memory address.
* *
* RETURNS: the byte read from the specified memory address * RETURNS: the byte read from the specified memory address
* *
* NOMANUAL * NOMANUAL
*/ */
static inline uint8_t inByte(uint32_t addr) static inline uint8_t inByte(uint32_t addr)
{ {
@ -194,31 +194,31 @@ static inline uint8_t inByte(uint32_t addr)
sys_out16(data, (unsigned int)address) sys_out16(data, (unsigned int)address)
#define PLB_WORD_REG_READ(address) sys_in16((unsigned int)address) #define PLB_WORD_REG_READ(address) sys_in16((unsigned int)address)
/******************************************************************************* /**
* *
* outWord - output word to memory location * outWord - output word to memory location
* *
* RETURNS: N/A * RETURNS: N/A
* *
* NOMANUAL * NOMANUAL
*/ */
static inline void outWord(uint16_t data, uint32_t addr) static inline void outWord(uint16_t data, uint32_t addr)
{ {
*(volatile uint16_t *)addr = data; *(volatile uint16_t *)addr = data;
} }
/******************************************************************************* /**
* *
* inWord - obtain word value from memory location * inWord - obtain word value from memory location
* *
 * This function issues the 'mov' instruction to read a word from the specified  * This function issues the 'mov' instruction to read a word from the specified
* memory address. * memory address.
* *
* RETURNS: the word read from the specified memory address * RETURNS: the word read from the specified memory address
* *
* NOMANUAL * NOMANUAL
*/ */
static inline uint16_t inWord(uint32_t addr) static inline uint16_t inWord(uint32_t addr)
{ {
@ -236,31 +236,31 @@ static inline uint16_t inWord(uint32_t addr)
sys_out32(data, (unsigned int)address) sys_out32(data, (unsigned int)address)
#define PLB_LONG_REG_READ(address) sys_in32((unsigned int)address) #define PLB_LONG_REG_READ(address) sys_in32((unsigned int)address)
/******************************************************************************* /**
* *
* outLong - output long word to memory location * outLong - output long word to memory location
* *
* RETURNS: N/A * RETURNS: N/A
* *
* NOMANUAL * NOMANUAL
*/ */
static inline void outLong(uint32_t data, uint32_t addr) static inline void outLong(uint32_t data, uint32_t addr)
{ {
*(volatile uint32_t *)addr = data; *(volatile uint32_t *)addr = data;
} }
/******************************************************************************* /**
* *
* inLong - obtain long word value from memory location * inLong - obtain long word value from memory location
* *
 * This function issues the 'mov' instruction to read a long word from the specified  * This function issues the 'mov' instruction to read a long word from the specified
* memory address. * memory address.
* *
* RETURNS: the long word read from the specified memory address * RETURNS: the long word read from the specified memory address
* *
* NOMANUAL * NOMANUAL
*/ */
static inline uint32_t inLong(uint32_t addr) static inline uint32_t inLong(uint32_t addr)
{ {
@ -268,19 +268,19 @@ static inline uint32_t inLong(uint32_t addr)
} }
#endif /* !_ASMLANGUAGE */ #endif /* !_ASMLANGUAGE */
/******************************************************************************* /**
* *
* pci_pin2irq - convert PCI interrupt PIN to IRQ * pci_pin2irq - convert PCI interrupt PIN to IRQ
* *
* The routine uses "standard design consideration" and implies that * The routine uses "standard design consideration" and implies that
* INTA (pin 1) -> IRQ 16 * INTA (pin 1) -> IRQ 16
* INTB (pin 2) -> IRQ 17 * INTB (pin 2) -> IRQ 17
* INTC (pin 3) -> IRQ 18 * INTC (pin 3) -> IRQ 18
* INTD (pin 4) -> IRQ 19 * INTD (pin 4) -> IRQ 19
* *
 * RETURNS: IRQ number, or -1 if the pin is out of range  * RETURNS: IRQ number, or -1 if the pin is out of range
* *
*/ */
static inline int pci_pin2irq(int pin) static inline int pci_pin2irq(int pin)
{ {
@ -289,13 +289,13 @@ static inline int pci_pin2irq(int pin)
return N_PIC_IRQS + pin - 1; return N_PIC_IRQS + pin - 1;
} }
/******************************************************************************* /**
* *
* pci_irq2pin - convert IRQ to PCI interrupt pin * pci_irq2pin - convert IRQ to PCI interrupt pin
* *
 * RETURNS: pin number, or -1 if the IRQ is out of range  * RETURNS: pin number, or -1 if the IRQ is out of range
* *
*/ */
static inline int pci_irq2pin(int irq) static inline int pci_irq2pin(int irq)
{ {
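    Worked through with the mapping above (and assuming N_PIC_IRQS == 16),
    pin 1 (INTA) yields 16 + 1 - 1 = IRQ 16. The inverse, whose body is cut off
    here, is presumably the mirror-image arithmetic; a sketch:

        static inline int pci_irq2pin_sketch(int irq)
        {
            if (irq < N_PIC_IRQS || irq > N_PIC_IRQS + 3)
                return -1;                  /* outside the INTA..INTD window */
            return irq - N_PIC_IRQS + 1;    /* IRQ 16 -> pin 1 (INTA) */
        }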
View file
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
This is the linker script for both standard images and XIP images. This is the linker script for both standard images and XIP images.
*/ */
#include <autoconf.h> #include <autoconf.h>
View file
@ -37,7 +37,7 @@ for the Quark BSP.
Implementation Remarks: Implementation Remarks:
Handlers for the secondary serial port have not been added. Handlers for the secondary serial port have not been added.
*/ */
#include <nanokernel.h> #include <nanokernel.h>
#include <misc/printk.h> #include <misc/printk.h>
@ -55,13 +55,13 @@ Handlers for the secondary serial port have not been added.
#if defined(DO_CONSOLE_INIT) #if defined(DO_CONSOLE_INIT)
/******************************************************************************* /**
* *
 * uart_generic_info_init - set up the initialization information for one UART  * uart_generic_info_init - set up the initialization information for one UART
* *
* RETURNS: N/A * RETURNS: N/A
* *
*/ */
void uart_generic_info_init(struct uart_init_info *p_info) void uart_generic_info_init(struct uart_init_info *p_info)
{ {
@ -74,15 +74,15 @@ void uart_generic_info_init(struct uart_init_info *p_info)
#if defined(DO_CONSOLE_INIT) #if defined(DO_CONSOLE_INIT)
/******************************************************************************* /**
* *
* consoleInit - initialize target-only console * consoleInit - initialize target-only console
* *
* Only used for debugging. * Only used for debugging.
* *
* RETURNS: N/A * RETURNS: N/A
* *
*/ */
#include <console/uart_console.h> #include <console/uart_console.h>
@ -101,16 +101,16 @@ static void consoleInit(void)
} while ((0)) } while ((0))
#endif /* DO_CONSOLE_INIT */ #endif /* DO_CONSOLE_INIT */
/******************************************************************************* /**
* *
* _InitHardware - perform basic hardware initialization * _InitHardware - perform basic hardware initialization
* *
* Initialize the Intel LOAPIC and IOAPIC device driver and the * Initialize the Intel LOAPIC and IOAPIC device driver and the
* Intel 8250 UART device driver. * Intel 8250 UART device driver.
* Also initialize the timer device driver, if required. * Also initialize the timer device driver, if required.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _InitHardware(void) void _InitHardware(void)
{ {
View file
@ -34,7 +34,7 @@
DESCRIPTION DESCRIPTION
This module provides the _SysFatalErrorHandler() routine which is common to This module provides the _SysFatalErrorHandler() routine which is common to
supported BSPs. supported BSPs.
*/ */
#include <nanokernel.h> #include <nanokernel.h>
#include <toolchain.h> #include <toolchain.h>
@ -47,25 +47,25 @@ supported BSPs.
#define PRINTK(...) #define PRINTK(...)
#endif /* CONFIG_PRINTK */ #endif /* CONFIG_PRINTK */
/******************************************************************************* /**
* *
* _SysFatalErrorHandler - fatal error handler * _SysFatalErrorHandler - fatal error handler
* *
* This routine implements the corrective action to be taken when the system * This routine implements the corrective action to be taken when the system
* detects a fatal error. * detects a fatal error.
* *
* This sample implementation attempts to abort the current context and allow * This sample implementation attempts to abort the current context and allow
* the system to continue executing, which may permit the system to continue * the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities. * functioning with degraded capabilities.
* *
* System designers may wish to enhance or substitute this sample * System designers may wish to enhance or substitute this sample
* implementation to take other actions, such as logging error (or debug) * implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system. * information to a persistent repository and/or rebooting the system.
* *
* RETURNS: This function does not return. * RETURNS: This function does not return.
* *
* \NOMANUAL * \NOMANUAL
*/ */
FUNC_NORETURN void _SysFatalErrorHandler( FUNC_NORETURN void _SysFatalErrorHandler(
unsigned int reason, /* fatal error reason */ unsigned int reason, /* fatal error reason */
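    A sketch of the kind of substitute implementation suggested above; the logging
    and reset helpers are hypothetical, and the type of the second parameter is
    assumed since the prototype is cut off here:

        FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
                                                 const NANO_ESF *pEsf)
        {
            persistent_log_write(reason, pEsf);  /* hypothetical logger */
            system_reset();                      /* hypothetical; never returns */
        }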
View file
@ -33,7 +33,7 @@
/* /*
DESCRIPTION DESCRIPTION
Intel-specific parts of start_task(). Only FP functionality currently. Intel-specific parts of start_task(). Only FP functionality currently.
*/ */
#ifdef CONFIG_MICROKERNEL #ifdef CONFIG_MICROKERNEL
@ -51,12 +51,12 @@ Intel-specific parts of start_task(). Only FP functionality currently.
#define SSE_GROUP 0x10 #define SSE_GROUP 0x10
/******************************************************************************* /**
* *
 * _StartTaskArch - Intel-specific parts of task initialization  * _StartTaskArch - Intel-specific parts of task initialization
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _StartTaskArch( void _StartTaskArch(
struct k_proc *X, /* ptr to task control block */ struct k_proc *X, /* ptr to task control block */
View file
@ -35,7 +35,7 @@
Serial console driver. Serial console driver.
Hooks into the printk and fputc (for printf) modules. Poll-driven. Hooks into the printk and fputc (for printf) modules. Poll-driven.
*/ */
#include <nanokernel.h> #include <nanokernel.h>
#include <arch/cpu.h> #include <arch/cpu.h>
@ -56,7 +56,7 @@
#endif #endif
#if 0 /* NOTUSED */ #if 0 /* NOTUSED */
/****************************************************************************** /**
* *
* consoleIn - get a character from UART * consoleIn - get a character from UART
* *
@ -74,7 +74,7 @@ static int consoleIn(void)
#endif #endif
#if defined(CONFIG_PRINTK) || defined(CONFIG_STDOUT_CONSOLE) #if defined(CONFIG_PRINTK) || defined(CONFIG_STDOUT_CONSOLE)
/****************************************************************************** /**
* *
* consoleOut - output one character to UART * consoleOut - output one character to UART
* *
@ -209,7 +209,7 @@ void uart_register_input(struct nano_fifo *avail, struct nano_fifo *lines)
} while ((0)) } while ((0))
#endif #endif
/****************************************************************************** /**
* *
* uart_console_init - initialize one UART as the console/debug port * uart_console_init - initialize one UART as the console/debug port
* *
View file
@ -61,7 +61,7 @@ command is issued, the 8259A will automatically reset the highest IS bit of
those that are set, since in the fully nested mode the highest IS level is those that are set, since in the fully nested mode the highest IS level is
the last level acknowledged and serviced. the last level acknowledged and serviced.
*/ */
/* /*
* A board support package's board.h header must provide definitions for the * A board support package's board.h header must provide definitions for the
@ -116,15 +116,15 @@ FUNC_ALIAS(_i8259_irq_enable, irq_enable, void);
FUNC_ALIAS(_i8259_irq_disable, irq_disable, void); FUNC_ALIAS(_i8259_irq_disable, irq_disable, void);
#endif /* CONFIG_SHUTOFF_PIC */ #endif /* CONFIG_SHUTOFF_PIC */
/******************************************************************************* /**
* *
* _i8259_init - initialize the Intel 8259A PIC device driver * _i8259_init - initialize the Intel 8259A PIC device driver
* *
* This routine initializes the Intel 8259A PIC device driver and the device * This routine initializes the Intel 8259A PIC device driver and the device
* itself. * itself.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _i8259_init(void) void _i8259_init(void)
{ {
@ -185,16 +185,16 @@ void _i8259_init(void)
} }
#ifndef CONFIG_SHUTOFF_PIC #ifndef CONFIG_SHUTOFF_PIC
/******************************************************************************* /**
* *
 * _i8259_eoi_master - send EOI (end of interrupt) signal to the master PIC.  * _i8259_eoi_master - send EOI (end of interrupt) signal to the master PIC.
* *
* This routine is called at the end of the interrupt handler. * This routine is called at the end of the interrupt handler.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* ERRNO * ERRNO
*/ */
void _i8259_eoi_master(unsigned int irq /* IRQ number to void _i8259_eoi_master(unsigned int irq /* IRQ number to
send EOI: unused */ send EOI: unused */
@ -207,17 +207,17 @@ void _i8259_eoi_master(unsigned int irq /* IRQ number to
PLB_BYTE_REG_WRITE(I8259_EOI, PIC_IACK(PIC_MASTER_BASE_ADRS)); PLB_BYTE_REG_WRITE(I8259_EOI, PIC_IACK(PIC_MASTER_BASE_ADRS));
} }
/******************************************************************************* /**
* *
 * _i8259_eoi_slave - send EOI (end of interrupt) signal to the slave PIC.  * _i8259_eoi_slave - send EOI (end of interrupt) signal to the slave PIC.
* *
* This routine is called at the end of the interrupt handler in the Normal * This routine is called at the end of the interrupt handler in the Normal
* Fully Nested Mode. * Fully Nested Mode.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* ERRNO * ERRNO
*/ */
void _i8259_eoi_slave(unsigned int irq /* IRQ number to void _i8259_eoi_slave(unsigned int irq /* IRQ number to
send EOI: unused */ send EOI: unused */
@ -239,22 +239,22 @@ void _i8259_eoi_slave(unsigned int irq /* IRQ number to
__asm__ volatile("popfl;\n\t"); __asm__ volatile("popfl;\n\t");
} }
/******************************************************************************* /**
* *
* __I8259IntEnable - enable/disable a specified PIC interrupt input line * __I8259IntEnable - enable/disable a specified PIC interrupt input line
* *
* This routine enables or disables a specified PIC interrupt input line. To * This routine enables or disables a specified PIC interrupt input line. To
* enable an interrupt input line, the parameter <enable> must be non-zero. * enable an interrupt input line, the parameter <enable> must be non-zero.
* *
* The nanokernel exports the irq_enable() and irq_disable() * The nanokernel exports the irq_enable() and irq_disable()
* APIs (mapped to _i8259_irq_enable() and _i8259_irq_disable(), respectively). * APIs (mapped to _i8259_irq_enable() and _i8259_irq_disable(), respectively).
* This function is called by _i8259_irq_enable() and _i8259_irq_disable() to * This function is called by _i8259_irq_enable() and _i8259_irq_disable() to
* perform the actual enabling/disabling of an IRQ to minimize footprint. * perform the actual enabling/disabling of an IRQ to minimize footprint.
* *
* RETURNS: N/A * RETURNS: N/A
* *
 * SEE ALSO: _i8259_irq_disable(), _i8259_irq_enable()  * SEE ALSO: _i8259_irq_disable(), _i8259_irq_enable()
*/ */
static void __I8259IntEnable( static void __I8259IntEnable(
unsigned int irq, /* IRQ number to enable */ unsigned int irq, /* IRQ number to enable */
@ -290,16 +290,16 @@ static void __I8259IntEnable(
} }
/******************************************************************************* /**
* *
* _i8259_irq_disable - disable a specified PIC interrupt input line * _i8259_irq_disable - disable a specified PIC interrupt input line
* *
* This routine disables a specified PIC interrupt input line. * This routine disables a specified PIC interrupt input line.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* SEE ALSO: _i8259_irq_enable() * SEE ALSO: _i8259_irq_enable()
*/ */
void _i8259_irq_disable(unsigned int irq /* IRQ number to disable */ void _i8259_irq_disable(unsigned int irq /* IRQ number to disable */
) )
@ -307,16 +307,16 @@ void _i8259_irq_disable(unsigned int irq /* IRQ number to disable */
return __I8259IntEnable(irq, 0); return __I8259IntEnable(irq, 0);
} }
/******************************************************************************* /**
* *
* _i8259_irq_enable - enable a specified PIC interrupt input line * _i8259_irq_enable - enable a specified PIC interrupt input line
* *
* This routine enables a specified PIC interrupt input line. * This routine enables a specified PIC interrupt input line.
* *
* RETURNS: N/A * RETURNS: N/A
* *
* SEE ALSO: _i8259_irq_disable() * SEE ALSO: _i8259_irq_disable()
*/ */
void _i8259_irq_enable(unsigned int irq /* IRQ number to enable */ void _i8259_irq_enable(unsigned int irq /* IRQ number to enable */
) )
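    The core of the enable/disable logic described above is interrupt-mask-register
    manipulation; a sketch for an IRQ on the master PIC, where PIC_IMASK() is
    assumed by analogy with the PIC_IACK() macro used earlier in this driver:

        uint8_t imr = PLB_BYTE_REG_READ(PIC_IMASK(PIC_MASTER_BASE_ADRS));

        if (enable)
            imr &= ~(1 << irq);     /* a 0 bit in the IMR unmasks the line */
        else
            imr |= (1 << irq);      /* a 1 bit masks it */

        PLB_BYTE_REG_WRITE(imr, PIC_IMASK(PIC_MASTER_BASE_ADRS));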
View file
@ -42,7 +42,7 @@ The distinction between a spurious interrupt and a real one is detected by
looking at the in-service register (ISR). The bit (bit 7) will be 1, indicating looking at the in-service register (ISR). The bit (bit 7) will be 1, indicating
a real IRQ has been asserted. a real IRQ has been asserted.
*/ */
/* includes */ /* includes */
#define _ASMLANGUAGE #define _ASMLANGUAGE
@ -59,20 +59,20 @@ a real IRQ has been inserted.
GDATA(_i8259_spurious_interrupt_count) GDATA(_i8259_spurious_interrupt_count)
/******************************************************************************* /**
* *
 * _i8259_boi_master - detect whether an interrupt is spurious  * _i8259_boi_master - detect whether an interrupt is spurious
* *
 * This routine is called before the user's interrupt handler to detect a * This routine is called before the user's interrupt handler to detect a
* spurious interrupt on the master PIC. If a spurious interrupt condition is * spurious interrupt on the master PIC. If a spurious interrupt condition is
* detected, a global variable is incremented and the execution of the interrupt * detected, a global variable is incremented and the execution of the interrupt
* stub is "short circuited", i.e. a return to the interrupted context * stub is "short circuited", i.e. a return to the interrupted context
* occurs. * occurs.
* *
* void _i8259_boi_master (void) * void _i8259_boi_master (void)
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_FUNC(TEXT, _i8259_boi_master) SECTION_FUNC(TEXT, _i8259_boi_master)
/* disable interrupts */ /* disable interrupts */
@@ -93,20 +93,20 @@ SECTION_FUNC(TEXT, _i8259_boi_master)
ret ret
/******************************************************************************* /**
* *
 * _i8259_boi_slave - detect whether an interrupt is spurious * _i8259_boi_slave - detect whether an interrupt is spurious
* *
 * This routine is called before the user's interrupt handler to detect a * This routine is called before the user's interrupt handler to detect a
* spurious interrupt on the slave PIC. If a spurious interrupt condition is * spurious interrupt on the slave PIC. If a spurious interrupt condition is
* detected, a global variable is incremented and the execution of the interrupt * detected, a global variable is incremented and the execution of the interrupt
* stub is "short circuited", i.e. a return to the interrupted context * stub is "short circuited", i.e. a return to the interrupted context
* occurs. * occurs.
* *
* void _i8259_boi_slave (void) * void _i8259_boi_slave (void)
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
SECTION_FUNC(TEXT, _i8259_boi_slave) SECTION_FUNC(TEXT, _i8259_boi_slave)
/* disable interrupts */ /* disable interrupts */
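In C, the spurious-interrupt test both stubs perform looks roughly as follows (the actual routines are assembly). The sketch assumes standard 8259A OCW3 programming and the conventional command ports, and reuses the hypothetical inb()/outb() helpers from the earlier sketch:

#include <stdint.h>

extern unsigned int _i8259_spurious_interrupt_count;

static int pic_irq_is_spurious(uint16_t cmd_port) /* 0x20 master, 0xA0 slave */
{
	outb(0x0B, cmd_port);              /* OCW3: next read returns the ISR */
	if ((inb(cmd_port) & 0x80) == 0) { /* bit 7 clear: nothing in service */
		_i8259_spurious_interrupt_count++;
		return 1; /* spurious: short-circuit back to the interrupted context */
	}
	return 0; /* real interrupt: proceed to the user's handler */
}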


@@ -73,7 +73,7 @@ This implementation doesn't support multiple IO APICs.
INCLUDE FILES: ioapic.h loapic.h INCLUDE FILES: ioapic.h loapic.h
SEE ALSO: loApicIntr.c SEE ALSO: loApicIntr.c
*/ */
#include <nanokernel.h> #include <nanokernel.h>
#include <arch/cpu.h> #include <arch/cpu.h>
@@ -209,14 +209,14 @@ static void _IoApicRedUpdateLo(unsigned int irq, uint32_t value,
* IRQ virtualization imposed by the BSP. * IRQ virtualization imposed by the BSP.
*/ */
/******************************************************************************* /**
* *
* _ioapic_init - initialize the IO APIC or xAPIC * _ioapic_init - initialize the IO APIC or xAPIC
* *
* This routine initializes the IO APIC or xAPIC. * This routine initializes the IO APIC or xAPIC.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _ioapic_init(void) void _ioapic_init(void)
{ {
@@ -261,14 +261,14 @@ void _ioapic_init(void)
} }
} }
/******************************************************************************* /**
* *
* _ioapic_eoi - send EOI (End Of Interrupt) signal to IO APIC * _ioapic_eoi - send EOI (End Of Interrupt) signal to IO APIC
* *
* This routine sends an EOI signal to the IO APIC's interrupting source. * This routine sends an EOI signal to the IO APIC's interrupting source.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _ioapic_eoi(unsigned int irq /* INT number to send EOI */ void _ioapic_eoi(unsigned int irq /* INT number to send EOI */
) )
@@ -277,16 +277,16 @@ void _ioapic_eoi(unsigned int irq /* INT number to send EOI */
*(volatile unsigned int *)(LOAPIC_BASE_ADRS + LOAPIC_EOI) = 0; *(volatile unsigned int *)(LOAPIC_BASE_ADRS + LOAPIC_EOI) = 0;
} }
/******************************************************************************* /**
* *
* _ioapic_eoi_get - get EOI (End Of Interrupt) information * _ioapic_eoi_get - get EOI (End Of Interrupt) information
* *
* This routine returns EOI signalling information for a specific IRQ. * This routine returns EOI signalling information for a specific IRQ.
* *
* RETURNS: address of routine to be called to signal EOI; * RETURNS: address of routine to be called to signal EOI;
 * as a side effect, also passes back an indication of whether the routine * as a side effect, also passes back an indication of whether the routine
 * requires an interrupt vector argument and what that argument should be * requires an interrupt vector argument and what that argument should be
*/ */
void *_ioapic_eoi_get(unsigned int irq, /* INTIN number of interest */ void *_ioapic_eoi_get(unsigned int irq, /* INTIN number of interest */
char *argRequired, /* ptr to "argument required" result char *argRequired, /* ptr to "argument required" result
@@ -317,14 +317,14 @@ void *_ioapic_eoi_get(unsigned int irq, /* INTIN number of interest */
return _ioapic_eoi; return _ioapic_eoi;
} }
/******************************************************************************* /**
* *
* _ioapic_irq_enable - enable a specified APIC interrupt input line * _ioapic_irq_enable - enable a specified APIC interrupt input line
* *
* This routine enables a specified APIC interrupt input line. * This routine enables a specified APIC interrupt input line.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _ioapic_irq_enable(unsigned int irq /* INTIN number to enable */ void _ioapic_irq_enable(unsigned int irq /* INTIN number to enable */
) )
@@ -332,14 +332,14 @@ void _ioapic_irq_enable(unsigned int irq /* INTIN number to enable */
_IoApicRedUpdateLo(irq, 0, IOAPIC_INT_MASK); _IoApicRedUpdateLo(irq, 0, IOAPIC_INT_MASK);
} }
/******************************************************************************* /**
* *
* _ioapic_irq_disable - disable a specified APIC interrupt input line * _ioapic_irq_disable - disable a specified APIC interrupt input line
* *
* This routine disables a specified APIC interrupt input line. * This routine disables a specified APIC interrupt input line.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _ioapic_irq_disable(unsigned int irq /* INTIN number to disable */ void _ioapic_irq_disable(unsigned int irq /* INTIN number to disable */
) )
@@ -347,14 +347,14 @@ void _ioapic_irq_disable(unsigned int irq /* INTIN number to disable */
_IoApicRedUpdateLo(irq, IOAPIC_INT_MASK, IOAPIC_INT_MASK); _IoApicRedUpdateLo(irq, IOAPIC_INT_MASK, IOAPIC_INT_MASK);
} }
/******************************************************************************* /**
* *
 * _ioapic_irq_set - program the interrupt redirection table * _ioapic_irq_set - program the interrupt redirection table
* *
 * This routine sets up the redirection table entry for the specified IRQ. * This routine sets up the redirection table entry for the specified IRQ.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _ioapic_irq_set(unsigned int irq, /* virtualized IRQ */ void _ioapic_irq_set(unsigned int irq, /* virtualized IRQ */
unsigned int vector, /* vector number */ unsigned int vector, /* vector number */
uint32_t flags /* interrupt flags */ uint32_t flags /* interrupt flags */
@@ -368,15 +368,15 @@ void _ioapic_irq_set(unsigned int irq, /* virtualized IRQ */
ioApicRedSetLo(irq, rteValue); ioApicRedSetLo(irq, rteValue);
} }
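How rteValue is composed is implied rather than shown in the truncated hunk above. A sketch of the usual split, assuming a hypothetical IOAPIC_VEC_MASK of 0xff (vector in RTE bits 0-7, trigger/polarity/delivery flags above it):

#include <stdint.h>

#define IOAPIC_VEC_MASK 0xff /* assumed: vector field occupies RTE bits 0-7 */

static void ioApicRedSetLo(unsigned int irq, uint32_t lower32); /* this file's helper */

static void ioapic_irq_set_sketch(unsigned int irq, unsigned int vector,
				  uint32_t flags)
{
	/* keep the caller's flag bits, substitute the vector field */
	uint32_t rteValue = (flags & ~(uint32_t)IOAPIC_VEC_MASK) |
			    (vector & IOAPIC_VEC_MASK);

	ioApicRedSetLo(irq, rteValue);
}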
/******************************************************************************* /**
* *
 * _ioapic_int_vec_set - program the interrupt vector for the specified IRQ * _ioapic_int_vec_set - program the interrupt vector for the specified IRQ
* *
 * This routine writes the interrupt vector into the Interrupt Redirection * This routine writes the interrupt vector into the Interrupt Redirection
 * Table entry for the specified IRQ number. * Table entry for the specified IRQ number.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
void _ioapic_int_vec_set(unsigned int irq, /* INT number */ void _ioapic_int_vec_set(unsigned int irq, /* INT number */
unsigned int vector /* vector number */ unsigned int vector /* vector number */
) )
@@ -386,14 +386,14 @@ void _ioapic_int_vec_set(unsigned int irq, /* INT number */
#ifndef XIOAPIC_DIRECT_ADDRESSING #ifndef XIOAPIC_DIRECT_ADDRESSING
/******************************************************************************* /**
* *
 * __IoApicGet - read a 32-bit IO APIC register * __IoApicGet - read a 32-bit IO APIC register
* *
* This routine reads the specified IO APIC register using indirect addressing. * This routine reads the specified IO APIC register using indirect addressing.
* *
* RETURNS: register value * RETURNS: register value
*/ */
static uint32_t __IoApicGet( static uint32_t __IoApicGet(
int32_t offset /* register offset (8 bits) */ int32_t offset /* register offset (8 bits) */
@@ -414,14 +414,14 @@ static uint32_t __IoApicGet(
return value; return value;
} }
/******************************************************************************* /**
* *
 * __IoApicSet - write a 32-bit IO APIC register * __IoApicSet - write a 32-bit IO APIC register
* *
* This routine writes the specified IO APIC register using indirect addressing. * This routine writes the specified IO APIC register using indirect addressing.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
static void __IoApicSet( static void __IoApicSet(
int32_t offset, /* register offset (8 bits) */ int32_t offset, /* register offset (8 bits) */
@@ -442,14 +442,14 @@ static void __IoApicSet(
#endif #endif
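Both accessors use the IO APIC's two-register indirect scheme: the register offset is first written to the index register (IOREGSEL, at the APIC base), and the data then moves through the window register (IOWIN, at base + 0x10). A sketch with assumed macro names; since the select/access pair is not atomic, a real driver would lock interrupts around it, which this sketch omits:

#include <stdint.h>

#define IOAPIC_IOREGSEL 0x00 /* assumed offset of the index register */
#define IOAPIC_IOWIN    0x10 /* assumed offset of the data window */

static uint32_t ioapic_read(uintptr_t base, uint8_t offset)
{
	*(volatile uint32_t *)(base + IOAPIC_IOREGSEL) = offset; /* select register */
	return *(volatile uint32_t *)(base + IOAPIC_IOWIN);      /* read through window */
}

static void ioapic_write(uintptr_t base, uint8_t offset, uint32_t value)
{
	*(volatile uint32_t *)(base + IOAPIC_IOREGSEL) = offset; /* select register */
	*(volatile uint32_t *)(base + IOAPIC_IOWIN) = value;     /* write through window */
}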
/******************************************************************************* /**
* *
* ioApicRedGetLo - get low 32 bits of Redirection Table entry * ioApicRedGetLo - get low 32 bits of Redirection Table entry
* *
* This routine reads the low-order 32 bits of a Redirection Table entry. * This routine reads the low-order 32 bits of a Redirection Table entry.
* *
* RETURNS: 32 low-order bits * RETURNS: 32 low-order bits
*/ */
static uint32_t ioApicRedGetLo(unsigned int irq /* INTIN number */ static uint32_t ioApicRedGetLo(unsigned int irq /* INTIN number */
) )
@@ -468,14 +468,14 @@ static uint32_t ioApicRedGetLo(unsigned int irq /* INTIN number */
#endif #endif
} }
/******************************************************************************* /**
* *
* ioApicRedSetLo - set low 32 bits of Redirection Table entry * ioApicRedSetLo - set low 32 bits of Redirection Table entry
* *
* This routine writes the low-order 32 bits of a Redirection Table entry. * This routine writes the low-order 32 bits of a Redirection Table entry.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
static void ioApicRedSetLo(unsigned int irq, /* INTIN number */ static void ioApicRedSetLo(unsigned int irq, /* INTIN number */
uint32_t lower32 /* value to be written */ uint32_t lower32 /* value to be written */
@@ -495,14 +495,14 @@ static void ioApicRedSetLo(unsigned int irq, /* INTIN number */
#endif #endif
} }
/******************************************************************************* /**
* *
* ioApicRedSetHi - set high 32 bits of Redirection Table entry * ioApicRedSetHi - set high 32 bits of Redirection Table entry
* *
* This routine writes the high-order 32 bits of a Redirection Table entry. * This routine writes the high-order 32 bits of a Redirection Table entry.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
static void ioApicRedSetHi(unsigned int irq, /* INTIN number */ static void ioApicRedSetHi(unsigned int irq, /* INTIN number */
uint32_t upper32 /* value to be written */ uint32_t upper32 /* value to be written */
@@ -522,15 +522,15 @@ static void ioApicRedSetHi(unsigned int irq, /* INTIN number */
#endif #endif
} }
/******************************************************************************* /**
* *
* _IoApicRedUpdateLo - modify low 32 bits of Redirection Table entry * _IoApicRedUpdateLo - modify low 32 bits of Redirection Table entry
* *
* This routine modifies selected portions of the low-order 32 bits of a * This routine modifies selected portions of the low-order 32 bits of a
 * Redirection Table entry, as indicated by the associated bit mask. * Redirection Table entry, as indicated by the associated bit mask.
* *
* RETURNS: N/A * RETURNS: N/A
*/ */
static void _IoApicRedUpdateLo( static void _IoApicRedUpdateLo(
unsigned int irq, /* INTIN number */ unsigned int irq, /* INTIN number */
@@ -548,15 +548,15 @@ static void _IoApicRedUpdateLo(
* macro if the I/O APIC supports the MSI redirect capability. * macro if the I/O APIC supports the MSI redirect capability.
*/ */
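The lo/hi accessors and the masked update above share one indexing rule: each 64-bit redirection table entry occupies two consecutive 32-bit registers starting at index 0x10, so IRQ n maps to index 0x10 + 2n (low half) and 0x10 + 2n + 1 (high half). A sketch building on the ioapic_read()/ioapic_write() helpers assumed earlier:

#define IOAPIC_REDTBL 0x10 /* assumed index of the first RTE register */

static uint32_t red_get_lo(uintptr_t base, unsigned int irq)
{
	return ioapic_read(base, IOAPIC_REDTBL + 2 * irq);
}

static void red_set_hi(uintptr_t base, unsigned int irq, uint32_t upper32)
{
	ioapic_write(base, IOAPIC_REDTBL + 2 * irq + 1, upper32);
}

/* masked read-modify-write, as _IoApicRedUpdateLo() describes */
static void red_update_lo(uintptr_t base, unsigned int irq,
			  uint32_t value, uint32_t mask)
{
	uint32_t lo = red_get_lo(base, irq);

	ioapic_write(base, IOAPIC_REDTBL + 2 * irq, (lo & ~mask) | (value & mask));
}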
/******************************************************************************* /**
* *
* _IoApicRteConfigSet - write to the RTE config register for specified IRQ * _IoApicRteConfigSet - write to the RTE config register for specified IRQ
* *
* This routine writes the specified 32-bit <value> into the RTE configuration * This routine writes the specified 32-bit <value> into the RTE configuration
 * register for the specified <irq> (0 to (IOAPIC_NUM_RTES - 1)). * register for the specified <irq> (0 to (IOAPIC_NUM_RTES - 1)).
* *
* RETURNS: void * RETURNS: void
*/ */
static void _IoApicRteConfigSet(unsigned int irq, /* INTIN number */ static void _IoApicRteConfigSet(unsigned int irq, /* INTIN number */
uint32_t value /* value to be written */ uint32_t value /* value to be written */
@@ -576,15 +576,15 @@ static void _IoApicRteConfigSet(unsigned int irq, /* INTIN number */
*((volatile uint32_t *)(IOAPIC_BASE_ADRS + offset)) = value; *((volatile uint32_t *)(IOAPIC_BASE_ADRS + offset)) = value;
} }
/******************************************************************************* /**
* *
* _IoApicRedirRegSet - write to the specified MSI redirection register * _IoApicRedirRegSet - write to the specified MSI redirection register
* *
* This routine writes the 32-bit <value> into the redirection register * This routine writes the 32-bit <value> into the redirection register
* specified by <reg>. * specified by <reg>.
* *
* RETURNS: void * RETURNS: void
*/ */
static void _IoApicRedirRegSet(unsigned int reg, uint32_t value) static void _IoApicRedirRegSet(unsigned int reg, uint32_t value)
{ {
