doxygen: change comment style to match javadoc

The change replaces the run of asterisks at the
beginning of comments with ** and adds a space
before the asterisks at the beginning of lines.
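
For illustration, here is a hypothetical comment shown in both styles
(this example is not taken from the diff below):

Before:

/*******************************************************************************
*
* foo - do something
*
* RETURNS: N/A
*/

After:

/**
 *
 * foo - do something
 *
 * RETURNS: N/A
 */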

Change-Id: I7656bde3bf4d9a31e38941e43b580520432dabc1
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Author: Anas Nashif
Date:   2015-07-01 17:22:39 -04:00
Commit: ea0d0b220c
305 changed files with 11249 additions and 11249 deletions


@@ -35,7 +35,7 @@ DESCRIPTION
This library provides routines to perform a number of atomic operations
on a memory location: add, subtract, increment, decrement, bitwise OR,
bitwise NOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap.
*/
*/
#define _ASMLANGUAGE
@@ -60,49 +60,49 @@ GTEXT(atomic_cas)
.section .TEXT._Atomic, "ax"
.balign 2
/*******************************************************************************
*
* atomic_clear - atomically clear a memory location
*
* This routine atomically clears the contents of <target> and returns the old
* value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to clear @/
* )
*/
/**
*
* atomic_clear - atomically clear a memory location
*
* This routine atomically clears the contents of <target> and returns the old
* value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to clear @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear)
mov_s r1, 0
/* fall through into atomic_set */
/*******************************************************************************
*
* atomic_set - atomically set a memory location
*
* This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/
* )
*
*/
/**
*
* atomic_set - atomically set a memory location
*
* This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
@@ -111,72 +111,72 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
j_s.d [blink]
mov_s r0, r1 /* return old value */
/******************************************************************************
*
* atomic_get - Get the value of a shared memory atomically
*
* This routine atomically retrieves the value in *target
*
* atomic_val_t atomic_get
* (
* atomic_t *target /@ address of atom to be retrieved @/
* )
*
* RETURN: value read from address target.
*
*/
/**
*
* atomic_get - Get the value of a shared memory atomically
*
* This routine atomically retrieves the value in *target
*
* atomic_val_t atomic_get
* (
* atomic_t *target /@ address of atom to be retrieved @/
* )
*
* RETURN: value read from address target.
*
*/
SECTION_FUNC(TEXT, atomic_get)
ld_s r0, [r0, 0]
j_s [blink]
/*******************************************************************************
*
* atomic_inc - atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_inc
* (
* atomic_t *target, /@ memory location to increment @/
* )
*
*/
/**
*
* atomic_inc - atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_inc
* (
* atomic_t *target, /@ memory location to increment @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc)
mov_s r1, 1
/* fall through into atomic_add */
/*******************************************************************************
*
* atomic_add - atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_add
* (
* atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*/
/**
*
* atomic_add - atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_add
* (
* atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
@@ -191,54 +191,54 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
j_s.d [blink]
mov_s r0, r2 /* return old value */
/*******************************************************************************
*
* atomic_dec - atomically decrement a memory location
*
* This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_dec
* (
* atomic_t *target, /@ memory location to decrement @/
* )
*
*/
/**
*
* atomic_dec - atomically decrement a memory location
*
* This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_dec
* (
* atomic_t *target, /@ memory location to decrement @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_dec)
mov_s r1, 1
/* fall through into atomic_sub */
/*******************************************************************************
*
* atomic_sub - atomically subtract a value from a memory location
*
* This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_sub
* (
* atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
*/
/**
*
* atomic_sub - atomically subtract a value from a memory location
*
* This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_sub
* (
* atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_sub)
@@ -253,28 +253,28 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_sub)
j_s.d [blink]
mov_s r0, r2 /* return old value */
/******************************************************************************
*
* atomic_nand - atomically perform a bitwise NAND on a memory location
*
* This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_nand
* (
* atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/
* )
*
*/
/**
*
* atomic_nand - atomically perform a bitwise NAND on a memory location
*
* This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_nand
* (
* atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_nand)
@@ -290,28 +290,28 @@ SECTION_FUNC(TEXT, atomic_nand)
j_s.d [blink]
mov_s r0, r2 /* return old value */
/******************************************************************************
*
* atomic_and - atomically perform a bitwise AND on a memory location
*
* This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/
* )
*
*/
/**
*
* atomic_and - atomically perform a bitwise AND on a memory location
*
* This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_and)
@@ -326,28 +326,28 @@ SECTION_FUNC(TEXT, atomic_and)
j_s.d [blink]
mov_s r0, r2 /* return old value */
/*******************************************************************************
*
* atomic_or - atomically perform a bitwise OR on memory location
*
* This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/
* )
*
*/
/**
*
* atomic_or - atomically perform a bitwise OR on memory location
*
* This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_or)
@@ -362,28 +362,28 @@ SECTION_FUNC(TEXT, atomic_or)
j_s.d [blink]
mov_s r0, r2 /* return old value */
/*******************************************************************************
*
* atomic_xor - atomically perform a bitwise XOR on a memory location
*
* This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/
* )
*
*/
/**
*
* atomic_xor - atomically perform a bitwise XOR on a memory location
*
* This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_xor)
@@ -398,29 +398,29 @@ SECTION_FUNC(TEXT, atomic_xor)
j_s.d [blink]
mov_s r0, r2 /* return old value */
/*******************************************************************************
*
* atomic_cas - atomically compare-and-swap the contents of a memory location
*
* This routine performs an atomic compare-and-swap. testing that the contents of
* <target> contains <oldValue>, and if it does, setting the value of <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: 1 if the swap is actually executed, 0 otherwise.
*
* ERRNO: N/A
*
* int atomic_cas
* (
* atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/
* )
*
*/
/**
*
* atomic_cas - atomically compare-and-swap the contents of a memory location
*
* This routine performs an atomic compare-and-swap. testing that the contents of
* <target> contains <oldValue>, and if it does, setting the value of <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: 1 if the swap is actually executed, 0 otherwise.
*
* ERRNO: N/A
*
* int atomic_cas
* (
* atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_cas)
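
A usage sketch in C, based on the atomic_cas()/atomic_set() prototypes
documented above (the spinlock itself is a hypothetical example, and the
header name is an assumption):

#include <nanokernel.h>  /* assumed location of the atomic prototypes */

static atomic_t lock;  /* 0 = free, 1 = held */

void spin_lock(void)
{
	/* atomic_cas() returns 1 only when <target> held the old value
	 * and the swap was performed */
	while (atomic_cas(&lock, 0, 1) == 0) {
		/* busy-wait until the lock is observed free */
	}
}

void spin_unlock(void)
{
	atomic_set(&lock, 0);  /* returns the old value, ignored here */
}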


@@ -33,7 +33,7 @@
/*
DESCRIPTION
CPU power management routines.
*/
*/
#define _ASMLANGUAGE


@@ -35,7 +35,7 @@ DESCRIPTION
This module implements the code for handling entry to and exit from Fast IRQs.
See isr_wrapper.S for details.
*/
*/
#define _ASMLANGUAGE
@@ -53,25 +53,25 @@ GDATA(_firq_stack)
SECTION_VAR(NOINIT, _firq_stack)
.space CONFIG_FIRQ_STACK_SIZE
/*******************************************************************************
*
* _firq_enter - work to be done before handing control to a FIRQ ISR
*
* The processor switches to a second register bank so registers from the
* current bank do not have to be preserved yet. The only issue is the LP_START/
* LP_COUNT/LP_END registers, which are not banked.
*
* If all FIRQ ISRs are programmed such that there are no use of the LP
* registers (ie. no LPcc instruction), then the kernel can be configured to
* remove the use of _firq_enter().
*
* When entering a FIRQ, interrupts might as well be locked: the processor is
* running at its highest priority, and cannot be preempted by anything.
*
* Assumption by _isr_demux: r3 is untouched by _firq_enter.
*
* RETURNS: N/A
*/
/**
*
* _firq_enter - work to be done before handing control to a FIRQ ISR
*
* The processor switches to a second register bank so registers from the
* current bank do not have to be preserved yet. The only issue is the LP_START/
* LP_COUNT/LP_END registers, which are not banked.
*
* If all FIRQ ISRs are programmed such that there are no use of the LP
* registers (ie. no LPcc instruction), then the kernel can be configured to
* remove the use of _firq_enter().
*
* When entering a FIRQ, interrupts might as well be locked: the processor is
* running at its highest priority, and cannot be preempted by anything.
*
* Assumption by _isr_demux: r3 is untouched by _firq_enter.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _firq_enter)
@@ -97,12 +97,12 @@ SECTION_FUNC(TEXT, _firq_enter)
j @_isr_demux
/*******************************************************************************
*
* _firq_exit - work to be done exiting a FIRQ
*
* RETURNS: N/A
*/
/**
*
* _firq_exit - work to be done exiting a FIRQ
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _firq_exit)
@@ -256,12 +256,12 @@ _firq_no_reschedule:
/* LP registers are already restored, just switch back to bank 0 */
rtie
/*******************************************************************************
*
* _firq_stack_setup - install the FIRQ stack in register bank 1
*
* RETURNS: N/A
*/
/**
*
* _firq_stack_setup - install the FIRQ stack in register bank 1
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _firq_stack_setup)


@@ -34,7 +34,7 @@
DESCRIPTION
This module implements the routines necessary for handling fatal faults on
ARCv2 CPUs.
*/
*/
#include <nano_private.h>
#include <offsets.h>
@@ -52,23 +52,23 @@ const NANO_ESF _default_esf = {
0xdeaddead, /* placeholder */
};
/*******************************************************************************
*
* _NanoFatalErrorHandler - nanokernel fatal error handler
*
* This routine is called when fatal error conditions are detected by software
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
/**
*
* _NanoFatalErrorHandler - nanokernel fatal error handler
*
* This routine is called when fatal error conditions are detected by software
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf)


@@ -34,7 +34,7 @@
DESCRIPTION
This library implements nanoFfsMsb() and nanoFfsLsb() which returns the
most and least significant bit set respectively.
*/
*/
#define _ASMLANGUAGE
@@ -46,17 +46,17 @@ most and least significant bit set respectively.
GTEXT(nanoFfsMsb)
GTEXT(nanoFfsLsb)
/*******************************************************************************
*
* nanoFfsMsb - find first set bit (searching from the most significant bit)
*
* This routine finds the first bit set in the argument passed it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: most significant bit set
*/
/**
*
* nanoFfsMsb - find first set bit (searching from the most significant bit)
*
* This routine finds the first bit set in the argument passed it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: most significant bit set
*/
SECTION_FUNC(TEXT, nanoFfsMsb)
@@ -69,17 +69,17 @@ SECTION_FUNC(TEXT, nanoFfsMsb)
j_s.d [blink]
add.nz r0, r0, 1
/*******************************************************************************
*
* nanoFfsLsb - find first set bit (searching from the least significant bit)
*
* This routine finds the first bit set in the argument passed it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: least significant bit set
*/
/**
*
* nanoFfsLsb - find first set bit (searching from the least significant bit)
*
* This routine finds the first bit set in the argument passed it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: least significant bit set
*/
SECTION_FUNC(TEXT, nanoFfsLsb)
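
A C model of the documented semantics (illustrative only, not the
assembly implementation):

/* Return the 1-based index of the most significant set bit of <v>,
 * counting from the least significant bit; return 0 if <v> is zero. */
int nanoFfsMsb_model(unsigned int v)
{
	int i;

	for (i = 32; i >= 1; i--) {
		if (v & (1u << (i - 1))) {
			return i;
		}
	}
	return 0;
}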


@@ -40,55 +40,55 @@
#include <toolchain.h>
#include <arch/cpu.h>
/*******************************************************************************
*
* irq_lock - disable all interrupts on the local CPU
*
* This routine disables interrupts. It can be called from either interrupt,
* task or fiber level. This routine returns an architecture-dependent
* lock-out key representing the "interrupt disable state" prior to the call;
* this key can be passed to irq_unlock() to re-enable interrupts.
*
* The lock-out key should only be used as the argument to the
* irq_unlock() API. It should never be used to manually re-enable
* interrupts or to inspect or manipulate the contents of the source register.
*
* WARNINGS
* Invoking a kernel routine with interrupts locked may result in
* interrupts being re-enabled for an unspecified period of time. If the
* called routine blocks, interrupts will be re-enabled while another
* context executes, or while the system is idle.
*
* The "interrupt disable state" is an attribute of a context. Thus, if a
* fiber or task disables interrupts and subsequently invokes a kernel
* routine that causes the calling context to block, the interrupt
* disable state will be restored when the context is later rescheduled
* for execution.
*
* RETURNS: An architecture-dependent lock-out key representing the
* "interrupt disable state" prior to the call.
*
* \NOMANUAL
*/
/**
*
* irq_lock - disable all interrupts on the local CPU
*
* This routine disables interrupts. It can be called from either interrupt,
* task or fiber level. This routine returns an architecture-dependent
* lock-out key representing the "interrupt disable state" prior to the call;
* this key can be passed to irq_unlock() to re-enable interrupts.
*
* The lock-out key should only be used as the argument to the
* irq_unlock() API. It should never be used to manually re-enable
* interrupts or to inspect or manipulate the contents of the source register.
*
* WARNINGS
* Invoking a kernel routine with interrupts locked may result in
* interrupts being re-enabled for an unspecified period of time. If the
* called routine blocks, interrupts will be re-enabled while another
* context executes, or while the system is idle.
*
* The "interrupt disable state" is an attribute of a context. Thus, if a
* fiber or task disables interrupts and subsequently invokes a kernel
* routine that causes the calling context to block, the interrupt
* disable state will be restored when the context is later rescheduled
* for execution.
*
* RETURNS: An architecture-dependent lock-out key representing the
* "interrupt disable state" prior to the call.
*
* \NOMANUAL
*/
SECTION_FUNC(TEXT, irq_lock)
j_s.d [blink]
clri r0
/*******************************************************************************
*
* irq_unlock - enable all interrupts on the local CPU
*
* This routine re-enables interrupts on the local CPU. The <key> parameter
* is an architecture-dependent lock-out key that is returned by a previous
* invocation of irq_lock().
*
* This routine can be called from either interrupt, task or fiber level.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* irq_unlock - enable all interrupts on the local CPU
*
* This routine re-enables interrupts on the local CPU. The <key> parameter
* is an architecture-dependent lock-out key that is returned by a previous
* invocation of irq_lock().
*
* This routine can be called from either interrupt, task or fiber level.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
SECTION_FUNC(TEXT, irq_unlock)
j_s.d [blink]
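
The intended calling pattern, per the descriptions above (C sketch; the
critical-section body is hypothetical and the prototypes are assumed to
be visible via the kernel headers):

void critical_example(void)
{
	unsigned int key;

	key = irq_lock();   /* architecture-dependent lock-out key */
	/* ... short critical section; must not block while locked ... */
	irq_unlock(key);    /* restore the previous interrupt disable state */
}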


@@ -35,7 +35,7 @@ DESCRIPTION
Wrapper installed in vector table for handling dynamic interrupts that accept
a parameter.
*/
*/
#define _ASMLANGUAGE
@@ -207,7 +207,7 @@ From RIRQ:
Both types of IRQs already have an IRQ stack frame: simply return from
interrupt.
*/
*/
SECTION_FUNC(TEXT, _isr_enter)
lr r0, [_ARC_V2_AUX_IRQ_ACT]


@@ -46,7 +46,7 @@ Typically, only those members that are accessed by assembly language routines
are defined; however, it doesn't hurt to define all fields for the sake of
completeness.
*/
*/
#include <gen_offset.h>
#include <nano_private.h>


@@ -36,7 +36,7 @@ This module implements the code for handling entry to and exit from regular
IRQs.
See isr_wrapper.S for details.
*/
*/
#define _ASMLANGUAGE
@@ -49,18 +49,18 @@ See isr_wrapper.S for details.
GTEXT(_rirq_enter)
GTEXT(_rirq_exit)
/*******************************************************************************
*
* _rirq_enter - work to be done before handing control to an IRQ ISR
*
* The processor pushes automatically all registers that need to be saved.
* However, since the processor always runs at kernel privilege there is no
* automatic switch to the IRQ stack: this must be done in software.
*
* Assumption by _isr_demux: r3 is untouched by _rirq_enter.
*
* RETURNS: N/A
*/
/**
*
* _rirq_enter - work to be done before handing control to an IRQ ISR
*
* The processor pushes automatically all registers that need to be saved.
* However, since the processor always runs at kernel privilege there is no
* automatic switch to the IRQ stack: this must be done in software.
*
* Assumption by _isr_demux: r3 is untouched by _rirq_enter.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _rirq_enter)
@@ -75,12 +75,12 @@ SECTION_FUNC(TEXT, _rirq_enter)
j _isr_demux
/*******************************************************************************
*
* _rirq_exit - work to be done exiting an IRQ
*
* RETURNS: N/A
*/
/**
*
* _rirq_exit - work to be done exiting an IRQ
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _rirq_exit)


@@ -36,7 +36,7 @@ This module implements the routines necessary for thread context switching
on ARCv2 CPUs.
See isr_wrapper.S for details.
*/
*/
#define _ASMLANGUAGE
@@ -51,37 +51,37 @@ GTEXT(_Swap)
GDATA(_nanokernel)
/*******************************************************************************
*
* _Swap - initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
* a cooperative context context switch. Prior to invoking _Swap(), the caller
* disables interrupts via nanoCpuIntLock() and the return 'key' is passed as a
* parameter to _Swap(). The key is in fact the value stored in the register
* operand of a CLRI instruction.
*
* It stores the intlock key parameter into current->intlock_key.
/**
*
* _Swap - initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
* a cooperative context context switch. Prior to invoking _Swap(), the caller
* disables interrupts via nanoCpuIntLock() and the return 'key' is passed as a
* parameter to _Swap(). The key is in fact the value stored in the register
* operand of a CLRI instruction.
*
* It stores the intlock key parameter into current->intlock_key.
* Given that _Swap() is called to effect a cooperative context context switch,
* the caller-saved integer registers are saved on the stack by the function
* call preamble to _Swap(). This creates a custom stack frame that will be
* popped when returning from _Swap(), but is not suitable for handling a return
* from an exception. Thus, the fact that the thread is pending because of a
* cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in
* the relinquish_cause of the context's tCCS. The _IrqExit()/_FirqExit() code
* will take care of doing the right thing to restore the thread status.
*
* When _Swap() is invoked, we know the decision to perform a context switch or
* not has already been taken and a context switch must happen.
*
* RETURNS: may contain a return value setup by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int key);
*
*/
* Given that _Swap() is called to effect a cooperative context context switch,
* the caller-saved integer registers are saved on the stack by the function
* call preamble to _Swap(). This creates a custom stack frame that will be
* popped when returning from _Swap(), but is not suitable for handling a return
* from an exception. Thus, the fact that the thread is pending because of a
* cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in
* the relinquish_cause of the context's tCCS. The _IrqExit()/_FirqExit() code
* will take care of doing the right thing to restore the thread status.
*
* When _Swap() is invoked, we know the decision to perform a context switch or
* not has already been taken and a context switch must happen.
*
* RETURNS: may contain a return value setup by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int key);
*
*/
SECTION_FUNC(TEXT, _Swap)
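
The calling convention from the comment above, as a C sketch (kernel
internal; irq_lock() stands in for the nanoCpuIntLock() named in the
comment, and the prototypes are assumed to be visible to the caller):

unsigned int swap_example(void)
{
	unsigned int key = irq_lock();  /* caller locks interrupts first */

	/* key is the CLRI register operand value; _Swap() may return a
	 * value set by a later call to fiberRtnValueSet() */
	return _Swap(key);
}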


@@ -33,7 +33,7 @@
/*
DESCRIPTION
This module provides the _SysFatalErrorHandler() routine for ARCv2 BSPs.
*/
*/
#include <nanokernel.h>
#include <toolchain.h>
@@ -61,25 +61,25 @@ static inline void nonEssentialTaskAbort(void)
} while ((0))
#endif
/*******************************************************************************
*
* _SysFatalErrorHandler - fatal error handler
*
* This routine implements the corrective action to be taken when the system
* detects a fatal error.
*
* This sample implementation attempts to abort the current context and allow
* the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities.
*
* System designers may wish to enhance or substitute this sample
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _SysFatalErrorHandler - fatal error handler
*
* This routine implements the corrective action to be taken when the system
* detects a fatal error.
*
* This sample implementation attempts to abort the current context and allow
* the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities.
*
* System designers may wish to enhance or substitute this sample
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _SysFatalErrorHandler(
unsigned int reason, /* fatal error reason */


@@ -39,7 +39,7 @@ This file is also included by assembly language files which must #define
_ASMLANGUAGE before including this header file. Note that nanokernel assembly
source files obtains structure offset values via "absolute symbols" in the
offsets.o module.
*/
*/
#ifndef _NANO_PRIVATE_H
#define _NANO_PRIVATE_H
@@ -238,32 +238,32 @@ static ALWAYS_INLINE void nanoArchInit(void)
_irq_setup();
}
/*******************************************************************************
*
* fiberRtnValueSet - set the return value for the specified fiber (inline)
*
* The register used to store the return value from a function call invocation
* to <value>. It is assumed that the specified <fiber> is pending, and thus
* the fiber's context is stored in its tCCS structure.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* fiberRtnValueSet - set the return value for the specified fiber (inline)
*
* The register used to store the return value from a function call invocation
* to <value>. It is assumed that the specified <fiber> is pending, and thus
* the fiber's context is stored in its tCCS structure.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void fiberRtnValueSet(tCCS *fiber, unsigned int value)
{
fiber->return_value = value;
}
/*******************************************************************************
*
* _IS_IN_ISR - indicates if kernel is handling interrupt
*
* RETURNS: 1 if interrupt handler is executed, 0 otherwise
*
* \NOMANUAL
*/
/**
*
* _IS_IN_ISR - indicates if kernel is handling interrupt
*
* RETURNS: 1 if interrupt handler is executed, 0 otherwise
*
* \NOMANUAL
*/
static ALWAYS_INLINE int _IS_IN_ISR(void)
{


@@ -38,20 +38,20 @@ call _Cstart().
Stack is available in this module, but not the global data/bss until their
initialization is performed.
*/
*/
#include <stdint.h>
#include <toolchain.h>
#include <linker-defs.h>
/*******************************************************************************
*
* bssZero - clear BSS
*
* This routine clears the BSS region, so all bytes are 0.
*
* RETURNS: N/A
*/
/**
*
* bssZero - clear BSS
*
* This routine clears the BSS region, so all bytes are 0.
*
* RETURNS: N/A
*/
static void bssZero(void)
{
@@ -63,14 +63,14 @@ static void bssZero(void)
}
}
/*******************************************************************************
*
* dataCopy - copy the data section from ROM to RAM
*
* This routine copies the data section from ROM to RAM.
*
* RETURNS: N/A
*/
/**
*
* dataCopy - copy the data section from ROM to RAM
*
* This routine copies the data section from ROM to RAM.
*
* RETURNS: N/A
*/
#ifdef CONFIG_XIP
static void dataCopy(void)
@@ -90,14 +90,14 @@ static void dataCopy(void)
#endif
extern FUNC_NORETURN void _Cstart(void);
/*******************************************************************************
*
* _PrepC - prepare to and run C code
*
* This routine prepares for the execution of and runs C code.
*
* RETURNS: N/A
*/
/**
*
* _PrepC - prepare to and run C code
*
* This routine prepares for the execution of and runs C code.
*
* RETURNS: N/A
*/
void _PrepC(void)
{
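
A minimal sketch of what a routine like bssZero() typically looks like
(the linker symbol names __bss_start and __bss_end are assumptions, not
taken from this diff):

extern char __bss_start[];  /* assumed linker-provided symbols */
extern char __bss_end[];

static void bssZero_sketch(void)
{
	char *p;

	/* the BSS region must read as all zero bytes before C code runs */
	for (p = __bss_start; p < __bss_end; p++) {
		*p = 0;
	}
}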


@@ -33,7 +33,7 @@
/*
DESCRIPTION
Reset handler that prepares the system for running C code.
*/
*/
#define _ASMLANGUAGE
@@ -46,19 +46,19 @@ Reset handler that prepares the system for running C code.
GTEXT(__reset)
/*******************************************************************************
*
* __reset - reset vector
*
* Ran when the system comes out of reset. The processor is at supervisor level.
*
* Locking interrupts prevents anything from interrupting the CPU.
*
* When these steps are completed, jump to _PrepC(), which will finish setting
* up the system for running C code.
*
* RETURNS: N/A
*/
/**
*
* __reset - reset vector
*
* Ran when the system comes out of reset. The processor is at supervisor level.
*
* Locking interrupts prevents anything from interrupting the CPU.
*
* When these steps are completed, jump to _PrepC(), which will finish setting
* up the system for running C code.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT,__reset)


@@ -45,7 +45,7 @@ to work around an issue with the assembler where:
statements would end up with the two half-words of the functions' addresses
swapped.
*/
*/
#include <stdint.h>
#include <toolchain.h>


@@ -40,7 +40,7 @@ System exception handler names all have the same format:
__<exception name with underscores>
Refer to the ARCv2 manual for an explanation of the exceptions.
*/
*/
#ifndef _VECTOR_TABLE__H_
#define _VECTOR_TABLE__H_


@@ -35,7 +35,7 @@ DESCRIPTION
Provides a boot time handler that simply hangs in a sleep loop, and a run time
handler that resets the CPU. Also provides a mechanism for hooking a custom
run time handler.
*/
*/
#include <nanokernel.h>
#include <arch/cpu.h>
@@ -52,15 +52,15 @@ extern void _SysNmiOnReset(void);
typedef void (*_NmiHandler_t)(void);
static _NmiHandler_t handler = _SysNmiOnReset;
/*******************************************************************************
*
* _DefaultHandler - default NMI handler installed when kernel is up
*
* The default handler outputs a error message and reboots the target. It is
* installed by calling _NmiInit();
*
* RETURNS: N/A
*/
/**
*
* _DefaultHandler - default NMI handler installed when kernel is up
*
* The default handler outputs a error message and reboots the target. It is
* installed by calling _NmiInit();
*
* RETURNS: N/A
*/
static void _DefaultHandler(void)
{
@@ -68,32 +68,32 @@ static void _DefaultHandler(void)
_ScbSystemReset();
}
/*******************************************************************************
*
* _NmiInit - install default runtime NMI handler
*
* Meant to be called by BSP code if they want to install a simple NMI handler
* that reboots the target. It should be installed after the console is
* initialized.
*
* RETURNS: N/A
*/
/**
*
* _NmiInit - install default runtime NMI handler
*
* Meant to be called by BSP code if they want to install a simple NMI handler
* that reboots the target. It should be installed after the console is
* initialized.
*
* RETURNS: N/A
*/
void _NmiInit(void)
{
handler = _DefaultHandler;
}
/*******************************************************************************
*
* _NmiHandlerSet - install a custom runtime NMI handler
*
* Meant to be called by BSP code if they want to install a custom NMI handler
* that reboots. It should be installed after the console is initialized if it is
* meant to output to the console.
*
* RETURNS: N/A
*/
/**
*
* _NmiHandlerSet - install a custom runtime NMI handler
*
* Meant to be called by BSP code if they want to install a custom NMI handler
* that reboots. It should be installed after the console is initialized if it is
* meant to output to the console.
*
* RETURNS: N/A
*/
void _NmiHandlerSet(void (*pHandler)(void))
{
@@ -101,14 +101,14 @@ void _NmiHandlerSet(void (*pHandler)(void))
}
#endif /* CONFIG_RUNTIME_NMI */
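
Hooking a custom handler via the API above (C sketch; myNmiHandler is a
hypothetical name):

extern void _NmiHandlerSet(void (*pHandler)(void));

static void myNmiHandler(void)
{
	/* application-specific NMI handling */
}

void install_nmi_handler(void)
{
	_NmiHandlerSet(myNmiHandler);  /* replaces the default reboot handler */
}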
/*******************************************************************************
*
* __nmi - handler installed in the vector table
*
* Simply call what is installed in 'static void(*handler)(void)'.
*
* RETURNS: N/A
*/
/**
*
* __nmi - handler installed in the vector table
*
* Simply call what is installed in 'static void(*handler)(void)'.
*
* RETURNS: N/A
*/
void __nmi(void)
{


@@ -38,20 +38,20 @@ call _Cstart().
Stack is available in this module, but not the global data/bss until their
initialization is performed.
*/
*/
#include <stdint.h>
#include <toolchain.h>
#include <linker-defs.h>
/*******************************************************************************
*
* bssZero - clear BSS
*
* This routine clears the BSS region, so all bytes are 0.
*
* RETURNS: N/A
*/
/**
*
* bssZero - clear BSS
*
* This routine clears the BSS region, so all bytes are 0.
*
* RETURNS: N/A
*/
static void bssZero(void)
{
@@ -63,14 +63,14 @@ static void bssZero(void)
}
}
/*******************************************************************************
*
* dataCopy - copy the data section from ROM to RAM
*
* This routine copies the data section from ROM to RAM.
*
* RETURNS: N/A
*/
/**
*
* dataCopy - copy the data section from ROM to RAM
*
* This routine copies the data section from ROM to RAM.
*
* RETURNS: N/A
*/
#ifdef CONFIG_XIP
static void dataCopy(void)
@@ -90,14 +90,14 @@ static void dataCopy(void)
#endif
extern FUNC_NORETURN void _Cstart(void);
/*******************************************************************************
*
* _PrepC - prepare to and run C code
*
* This routine prepares for the execution of and runs C code.
*
* RETURNS: N/A
*/
/**
*
* _PrepC - prepare to and run C code
*
* This routine prepares for the execution of and runs C code.
*
* RETURNS: N/A
*/
void _PrepC(void)
{


@@ -33,7 +33,7 @@
/*
DESCRIPTION
Reset handler that prepares the system for running C code.
*/
*/
#define _ASMLANGUAGE
@@ -47,28 +47,28 @@ _ASM_FILE_PROLOGUE
GTEXT(__reset)
/*******************************************************************************
*
* __reset - reset vector
*
* Ran when the system comes out of reset. The processor is in thread mode with
* privileged level. At this point, the main stack pointer (MSP) is already
* pointing to a valid area in SRAM.
*
* Locking interrupts prevents anything but NMIs and hard faults from
* interrupting the CPU. A default NMI handler is already in place in the
* vector table, and the boot code should not generate hard fault, or we're in
* deep trouble.
*
* We want to use the process stack pointer (PSP) instead of the MSP, since the
* MSP is to be set up to point to the one-and-only interrupt stack during later
* boot. That would not be possible if in use for running C code.
*
* When these steps are completed, jump to _PrepC(), which will finish setting
* up the system for running C code.
*
* RETURNS: N/A
*/
/**
*
* __reset - reset vector
*
* Ran when the system comes out of reset. The processor is in thread mode with
* privileged level. At this point, the main stack pointer (MSP) is already
* pointing to a valid area in SRAM.
*
* Locking interrupts prevents anything but NMIs and hard faults from
* interrupting the CPU. A default NMI handler is already in place in the
* vector table, and the boot code should not generate hard fault, or we're in
* deep trouble.
*
* We want to use the process stack pointer (PSP) instead of the MSP, since the
* MSP is to be set up to point to the one-and-only interrupt stack during later
* boot. That would not be possible if in use for running C code.
*
* When these steps are completed, jump to _PrepC(), which will finish setting
* up the system for running C code.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT,__reset)


@@ -36,7 +36,7 @@ DESCRIPTION
Most of the SCB interface consists of simple bit-flipping methods, and is
implemented as inline functions in scb.h. This module thus contains only data
definitions and more complex routines, if needed.
*/
*/
#include <nanokernel.h>
#include <arch/cpu.h>
@@ -44,14 +44,14 @@ definitions and more complex routines, if needed.
#define SCB_AIRCR_VECTKEY_EN_W 0x05FA
/*******************************************************************************
*
* _ScbSystemReset - reset the system
*
* This routine resets the processor.
*
* RETURNS: N/A
*/
/**
*
* _ScbSystemReset - reset the system
*
* This routine resets the processor.
*
* RETURNS: N/A
*/
void _ScbSystemReset(void)
{
@@ -63,19 +63,19 @@ void _ScbSystemReset(void)
__scs.scb.aircr.val = reg.val;
}
/*******************************************************************************
*
* _ScbNumPriGroupSet - set the number of priority groups based on the number
* of exception priorities desired
*
* Exception priorities can be divided in priority groups, inside which there is
* no preemption. The priorities inside a group are only used to decide which
* exception will run when more than one is ready to be handled.
*
* The number of priorities has to be a power of two, from 1 to 128.
*
* RETURNS: N/A
*/
/**
*
* _ScbNumPriGroupSet - set the number of priority groups based on the number
* of exception priorities desired
*
* Exception priorities can be divided in priority groups, inside which there is
* no preemption. The priorities inside a group are only used to decide which
* exception will run when more than one is ready to be handled.
*
* The number of priorities has to be a power of two, from 1 to 128.
*
* RETURNS: N/A
*/
void _ScbNumPriGroupSet(unsigned int n /* number of priorities */
)
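
A usage sketch per the constraint above (the argument must be a power of
two between 1 and 128):

_ScbNumPriGroupSet(8);  /* 8 priority groups; no preemption within a group */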


@@ -35,7 +35,7 @@ DESCRIPTION
Most of the SCS interface consists of simple bit-flipping methods, and is
implemented as inline functions in scs.h. This module thus contains only data
definitions and more complex routines, if needed.
*/
*/
#include <nanokernel.h>
#include <arch/cpu.h>


@@ -33,7 +33,7 @@
/*
DESCRIPTION
Software ISR table for ARM
*/
*/
#define _ASMLANGUAGE


@@ -38,7 +38,7 @@ point, ie. the first instruction executed.
The table is populated with all the system exception handlers. The NMI vector
must be populated with a valid handler since it can happen at any time. The
rest should not be triggered until the kernel is ready to handle them.
*/
*/
#define _ASMLANGUAGE


@@ -40,7 +40,7 @@ System exception handler names all have the same format:
__<exception name with underscores>
No other symbol has the same format, so they are easy to spot.
*/
*/
#ifndef _VECTOR_TABLE__H_
#define _VECTOR_TABLE__H_


@@ -33,7 +33,7 @@
/*
DESCRIPTION
This module provides the _SysFatalErrorHandler() routine for Cortex-M BSPs.
*/
*/
#include <nanokernel.h>
#include <toolchain.h>
@@ -61,25 +61,25 @@ static inline void nonEssentialTaskAbort(void)
} while ((0))
#endif
/*******************************************************************************
*
* _SysFatalErrorHandler - fatal error handler
*
* This routine implements the corrective action to be taken when the system
* detects a fatal error.
*
* This sample implementation attempts to abort the current context and allow
* the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities.
*
* System designers may wish to enhance or substitute this sample
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _SysFatalErrorHandler - fatal error handler
*
* This routine implements the corrective action to be taken when the system
* detects a fatal error.
*
* This sample implementation attempts to abort the current context and allow
* the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities.
*
* System designers may wish to enhance or substitute this sample
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _SysFatalErrorHandler(
unsigned int reason, /* fatal error reason */


@@ -35,7 +35,7 @@ DESCRIPTION
This library provides routines to perform a number of atomic operations
on a memory location: add, subtract, increment, decrement, bitwise OR,
bitwise NOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap.
*/
*/
#define _ASMLANGUAGE
@@ -59,49 +59,49 @@ GTEXT(atomic_inc)
GTEXT(atomic_sub)
GTEXT(atomic_cas)
/*******************************************************************************
*
* atomic_clear - atomically clear a memory location
*
* This routine atomically clears the contents of <target> and returns the old
* value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to clear @/
* )
*/
/**
*
* atomic_clear - atomically clear a memory location
*
* This routine atomically clears the contents of <target> and returns the old
* value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to clear @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear)
MOV r1, #0
/* fall through into atomic_set */
/*******************************************************************************
*
* atomic_set - atomically set a memory location
*
* This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/
* )
*
*/
/**
*
* atomic_set - atomically set a memory location
*
* This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
@@ -114,72 +114,72 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
MOV r0, r2 /* return old value */
MOV pc, lr
/******************************************************************************
*
* atomic_get - Get the value of a shared memory atomically
*
* This routine atomically retrieves the value in *target
*
* long atomic_get
* (
* atomic_t * target /@ address of atom to be retrieved @/
* )
*
* RETURN: value read from address target.
*
*/
/**
*
* atomic_get - Get the value of a shared memory atomically
*
* This routine atomically retrieves the value in *target
*
* long atomic_get
* (
* atomic_t * target /@ address of atom to be retrieved @/
* )
*
* RETURN: value read from address target.
*
*/
SECTION_FUNC(TEXT, atomic_get)
LDR r0, [r0]
MOV pc, lr
/*******************************************************************************
*
* atomic_inc - atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_inc
* (
* atomic_t *target, /@ memory location to increment @/
* )
*
*/
/**
*
* atomic_inc - atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_inc
* (
* atomic_t *target, /@ memory location to increment @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc)
MOV r1, #1
/* fall through into atomic_add */
/*******************************************************************************
*
* atomic_add - atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_add
* (
* atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*/
/**
*
* atomic_add - atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_add
* (
* atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
@@ -193,54 +193,54 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_dec - atomically decrement a memory location
*
* This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_dec
* (
* atomic_t *target, /@ memory location to decrement @/
* )
*
*/
/**
*
* atomic_dec - atomically decrement a memory location
*
* This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_dec
* (
* atomic_t *target, /@ memory location to decrement @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_dec)
MOV r1, #1
/* fall through into atomic_sub */
/*******************************************************************************
*
* atomic_sub - atomically subtract a value from a memory location
*
* This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_sub
* (
* atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
*/
/**
*
* atomic_sub - atomically subtract a value from a memory location
*
* This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_sub
* (
* atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub)
@@ -253,28 +253,28 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub)
MOV r0, r2 /* return old value */
MOV pc, lr
/******************************************************************************
*
* atomic_nand - atomically perform a bitwise NAND on a memory location
*
* This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_nand
* (
* atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/
* )
*
*/
/**
*
* atomic_nand - atomically perform a bitwise NAND on a memory location
*
* This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_nand
* (
* atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_nand)
@ -288,28 +288,28 @@ SECTION_FUNC(TEXT, atomic_nand)
MOV r0, r2 /* return old value */
MOV pc, lr
/******************************************************************************
*
* atomic_and - atomically perform a bitwise AND on a memory location
*
* This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/
* )
*
*/
/**
*
* atomic_and - atomically perform a bitwise AND on a memory location
*
* This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_and)
@ -322,28 +322,28 @@ SECTION_FUNC(TEXT, atomic_and)
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_or - atomically perform a bitwise OR on a memory location
*
* This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/
* )
*
*/
/**
*
* atomic_or - atomically perform a bitwise OR on a memory location
*
* This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_or)
@ -356,28 +356,28 @@ SECTION_FUNC(TEXT, atomic_or)
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_xor - atomically perform a bitwise XOR on a memory location
*
* This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/
* )
*
*/
/**
*
* atomic_xor - atomically perform a bitwise XOR on a memory location
*
* This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_xor)
@ -390,29 +390,29 @@ SECTION_FUNC(TEXT, atomic_xor)
MOV r0, r2 /* return old value */
MOV pc, lr
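For reference, the four bitwise routines above differ only in the update they
apply; a non-atomic C sketch of the semantics (the real implementations perform
the read-modify-write atomically):

    /* each routine atomically performs one of these updates and
     * returns the previous value 'old' */
    atomic_val_t old = *target;
    *target = ~(old & value);         /* atomic_nand: AND, then complement */
    /* or */ *target = old & value;   /* atomic_and */
    /* or */ *target = old | value;   /* atomic_or  */
    /* or */ *target = old ^ value;   /* atomic_xor */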
/*******************************************************************************
*
* atomic_cas - atomically compare-and-swap the contents of a memory location
*
* This routine performs an atomic compare-and-swap, testing that the contents of
* <target> contain <oldValue>, and if they do, setting the value of <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: 1 if the swap is actually executed, 0 otherwise.
*
* ERRNO: N/A
*
* int atomic_cas
* (
* atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/
* )
*
*/
/**
*
* atomic_cas - atomically compare-and-swap the contents of a memory location
*
* This routine performs an atomic compare-and-swap, testing that the contents of
* <target> contain <oldValue>, and if they do, setting the value of <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: 1 if the swap is actually executed, 0 otherwise.
*
* ERRNO: N/A
*
* int atomic_cas
* (
* atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_cas)
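The documented 1/0 return convention makes atomic_cas() a natural building
block for a lock; a hedged C sketch (names hypothetical):

    static atomic_t lock;                  /* 0 = free, 1 = held */

    void spin_lock(void)
    {
        /* atomic_cas() returns 1 only if the swap was performed */
        while (atomic_cas(&lock, 0, 1) == 0) {
            /* spin until the lock is observed free */
        }
    }

    void spin_unlock(void)
    {
        atomic_set(&lock, 0);
    }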

View file

@ -46,7 +46,7 @@ unlocked. This achieves two purposes:
2. Zero Interrupt Latency (ZLI) is achievable by allowing certain
interrupts to set their priority to 1, thus being allowed in when interrupts
are locked for regular interrupts.
*/
*/
#define _ASMLANGUAGE
@ -59,18 +59,18 @@ _ASM_FILE_PROLOGUE
GTEXT(irq_lock)
GTEXT(irq_unlock)
/*******************************************************************************
*
* irq_lock - lock interrupts
*
* Prevent exceptions of priority lower than the two highest priorities from
* interrupting the CPU.
*
* This function can be called recursively: it will return a key to return the
* state of interrupt locking to the previous level.
*
* RETURNS: a key to return to the previous interrupt locking level
*/
/**
*
* irq_lock - lock interrupts
*
* Prevent exceptions of priority lower than the two highest priorities from
* interrupting the CPU.
*
* This function can be called recursively: it will return a key to return the
* state of interrupt locking to the previous level.
*
* RETURNS: a key to return to the previous interrupt locking level
*/
SECTION_FUNC(TEXT,irq_lock)
movs.n r1, #_EXC_IRQ_DEFAULT_PRIO
@ -78,15 +78,15 @@ SECTION_FUNC(TEXT,irq_lock)
msr BASEPRI, r1
bx lr
/*******************************************************************************
*
* irq_unlock - unlock interrupts
*
* Return the state of interrupt locking to a previous level, passed in via the
* <key> parameter, obtained from a previous call to irq_lock().
*
* RETURNS: N/A
*/
/**
*
* irq_unlock - unlock interrupts
*
* Return the state of interrupt locking to a previous level, passed in via the
* <key> parameter, obtained from a previous call to irq_lock().
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT,irq_unlock)
msr BASEPRI, r0
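Because the returned key captures the previous locking state, these calls nest
correctly; a minimal C usage sketch:

    unsigned int key = irq_lock();     /* outer lock */
    /* ... code that may itself lock ... */
    unsigned int key2 = irq_lock();    /* inner lock: already locked, still safe */
    /* ... critical work ... */
    irq_unlock(key2);                  /* restore inner level (still locked) */
    irq_unlock(key);                   /* restore the pre-lock state */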

View file

@ -34,7 +34,7 @@
DESCRIPTION
Core nanokernel fiber related primitives for the ARM Cortex-M processor
architecture.
*/
*/
#include <nanokernel.h>
#include <arch/cpu.h>
@ -53,14 +53,14 @@ tNANO _nanokernel = {0};
#endif
#if defined(CONFIG_CONTEXT_MONITOR)
/*******************************************************************************
*
* _context_monitor_init - initialize context monitoring support
*
* Currently only inserts the new context in the list of active contexts.
*
* RETURNS: N/A
*/
/**
*
* _context_monitor_init - initialize context monitoring support
*
* Currently only inserts the new context in the list of active contexts.
*
* RETURNS: N/A
*/
static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */
)
@ -81,26 +81,26 @@ static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */
}
#endif /* CONFIG_CONTEXT_MONITOR */
/*******************************************************************************
*
* _NewContext - initialize a new context (thread) from its stack space
*
* The control structure (CCS) is put at the lower address of the stack. An
* initial context, to be "restored" by __pendsv(), is put at the other end of
* the stack, and thus reusable by the stack when not needed anymore.
*
* The initial context is an exception stack frame (ESF) since exiting the
* PendSV exception will want to pop an ESF. Interestingly, even though the lsb of
* an instruction address to jump to must always be set since the CPU always
* runs in thumb mode, the ESF expects the real address of the instruction,
* with the lsb *not* set (instructions are always aligned on 16 bit halfwords).
* Since the compiler automatically sets the lsb of function addresses, we have
* to unset it manually before storing it in the 'pc' field of the ESF.
*
* <options> is currently unused.
*
* RETURNS: N/A
*/
/**
*
* _NewContext - initialize a new context (thread) from its stack space
*
* The control structure (CCS) is put at the lower address of the stack. An
* initial context, to be "restored" by __pendsv(), is put at the other end of
* the stack, and thus reusable by the stack when not needed anymore.
*
* The initial context is an exception stack frame (ESF) since exiting the
* PendSV exception will want to pop an ESF. Interestingly, even though the lsb of
* an instruction address to jump to must always be set since the CPU always
* runs in thumb mode, the ESF expects the real address of the instruction,
* with the lsb *not* set (instructions are always aligned on 16 bit halfwords).
* Since the compiler automatically sets the lsb of function addresses, we have
* to unset it manually before storing it in the 'pc' field of the ESF.
*
* <options> is currently unused.
*
* RETURNS: N/A
*/
void _NewContext(
char *pStackMem, /* aligned stack memory */

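The lsb handling described above boils down to one masking step when the
initial ESF is built; a sketch, with the pointer and field names assumed:

    /* pEntry has bit 0 set by the compiler (Thumb marker); the ESF
     * wants the real, halfword-aligned instruction address */
    pInitCtx->pc = (uint32_t)pEntry & 0xfffffffeU;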
View file

@ -32,7 +32,7 @@
/*
DESCRIPTION
*/
*/
#define _ASMLANGUAGE
@ -56,19 +56,19 @@ GTEXT(nano_cpu_atomic_idle)
#define _SCR_INIT_BITS _SCB_SCR_SEVONPEND
/*******************************************************************************
*
* _CpuIdleInit - initialization of CPU idle
*
* Only called by nanoArchInit(). Sets SEVONPEND bit once for the system's
* duration.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _CpuIdleInit (void);
*/
/**
*
* _CpuIdleInit - initialization of CPU idle
*
* Only called by nanoArchInit(). Sets SEVONPEND bit once for the system's
* duration.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _CpuIdleInit (void);
*/
SECTION_FUNC(TEXT, _CpuIdleInit)
ldr r1, =_SCB_SCR
@ -78,36 +78,36 @@ SECTION_FUNC(TEXT, _CpuIdleInit)
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
/*******************************************************************************
*
* _NanoIdleValGet - get the kernel idle setting
*
* Returns the nanokernel idle setting, in ticks. Only called by __systick().
*
* RETURNS: the requested number of ticks for the kernel to be idle
*
* C function prototype:
*
* int32_t _NanoIdleValGet (void);
*/
/**
*
* _NanoIdleValGet - get the kernel idle setting
*
* Returns the nanokernel idle setting, in ticks. Only called by __systick().
*
* RETURNS: the requested number of ticks for the kernel to be idle
*
* C function prototype:
*
* int32_t _NanoIdleValGet (void);
*/
SECTION_FUNC(TEXT, _NanoIdleValGet)
ldr r0, =_nanokernel
ldr r0, [r0, #__tNANO_idle_OFFSET]
bx lr
/*******************************************************************************
*
* _NanoIdleValClear - clear the kernel idle setting
*
* Sets the nanokernel idle setting to 0. Only called by __systick().
*
* RETURNS: N/A
*
* C function prototype:
*
* void _NanoIdleValClear (void);
*/
/**
*
* _NanoIdleValClear - clear the kernel idle setting
*
* Sets the nanokernel idle setting to 0. Only called by __systick().
*
* RETURNS: N/A
*
* C function prototype:
*
* void _NanoIdleValClear (void);
*/
SECTION_FUNC(TEXT, _NanoIdleValClear)
ldr r0, =_nanokernel
@ -117,21 +117,21 @@ SECTION_FUNC(TEXT, _NanoIdleValClear)
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
/*******************************************************************************
*
* nano_cpu_idle - power save idle routine for ARM Cortex-M
*
* This function will be called by the nanokernel idle loop or possibly within
* an implementation of _sys_power_save_idle in the microkernel when the
* '_sys_power_save_flag' variable is non-zero. The ARM 'wfi' instruction
* will be issued, placing the processor in a low-power sleep state.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_idle (void);
*/
/**
*
* nano_cpu_idle - power save idle routine for ARM Cortex-M
*
* This function will be called by the nanokernel idle loop or possibly within
* an implementation of _sys_power_save_idle in the microkernel when the
* '_sys_power_save_flag' variable is non-zero. The ARM 'wfi' instruction
* will be issued, placing the processor in a low-power sleep state.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_idle (void);
*/
SECTION_FUNC(TEXT, nano_cpu_idle)
/* clear BASEPRI so wfi is awakened by incoming interrupts */
@ -142,31 +142,31 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
bx lr
/*******************************************************************************
*
* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
*
* This function is utilized by the nanokernel object "wait" APIs for task
* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
* and nano_task_fifo_get_wait().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
* in nano_task_lifo_get_wait(), for example, for the race condition that occurs
* if this requirement is not met.
*
* 2) After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'imask' input parameter.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_atomic_idle (unsigned int imask);
*/
/**
*
* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
*
* This function is utilized by the nanokernel object "wait" APIs for task
* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
* and nano_task_fifo_get_wait().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
* in nano_task_lifo_get_wait(), for example, for the race condition that occurs
* if this requirement is not met.
*
* 2) After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'imask' input parameter.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_atomic_idle (unsigned int imask);
*/
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
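The atomicity requirement is clearest from the caller's side; a hedged C sketch
of how a task-level "wait" API would use this routine (condition hypothetical):

    unsigned int imask = irq_lock();

    while (!data_available()) {
        /* re-enabling interrupts and sleeping happen as one step, so a
         * wakeup event cannot slip in between them and be lost */
        nano_cpu_atomic_idle(imask);
        imask = irq_lock();
    }
    /* consume the data, then... */
    irq_unlock(imask);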

View file

@ -36,7 +36,7 @@ DESCRIPTION
Provides functions for performing kernel handling when exiting exceptions or
interrupts that are installed directly in the vector table (i.e. that are not
wrapped around by _isr_wrapper()).
*/
*/
#define _ASMLANGUAGE
@ -61,46 +61,46 @@ GDATA(_nanokernel)
#endif
#define _EXIT_EXC_IF_FIBER_NOT_READY _EXIT_EXC_IF_FIBER_PREEMPTED
/*******************************************************************************
*
* _IntExit - kernel housekeeping when exiting interrupt handler installed
* directly in vector table
*
* The kernel allows installing interrupt handlers (ISRs) directly into the vector
* table to get the lowest interrupt latency possible. This allows the ISR to be
* invoked directly without going through a software interrupt table. However,
* upon exiting the ISR, some kernel work must still be performed, namely
* possible context switching. While ISRs connected in the software interrupt
* table do this automatically via a wrapper, ISRs connected directly in the
* vector table must invoke _IntExit() as the *very last* action before
* returning.
*
* e.g.
*
* void myISR(void)
* {
* printk("in %s\n", __FUNCTION__);
* doStuff();
* _IntExit();
* }
*
* RETURNS: N/A
*/
/**
*
* _IntExit - kernel housekeeping when exiting interrupt handler installed
* directly in vector table
*
* The kernel allows installing interrupt handlers (ISRs) directly into the vector
* table to get the lowest interrupt latency possible. This allows the ISR to be
* invoked directly without going through a software interrupt table. However,
* upon exiting the ISR, some kernel work must still be performed, namely
* possible context switching. While ISRs connected in the software interrupt
* table do this automatically via a wrapper, ISRs connected directly in the
* vector table must invoke _IntExit() as the *very last* action before
* returning.
*
* e.g.
*
* void myISR(void)
* {
* printk("in %s\n", __FUNCTION__);
* doStuff();
* _IntExit();
* }
*
* RETURNS: N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
/* _IntExit falls through to _ExcExit (they are aliases of each other) */
/*******************************************************************************
*
* _ExcExit - kernel housekeeping when exiting exception handler installed
* directly in vector table
*
* See _IntExit().
*
* RETURNS: N/A
*/
/**
*
* _ExcExit - kernel housekeeping when exiting exception handler installed
* directly in vector table
*
* See _IntExit().
*
* RETURNS: N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)

View file

@ -33,7 +33,7 @@
/*
DESCRIPTION
This module provides the _NanoFatalErrorHandler() routine for ARM Cortex-M.
*/
*/
#include <toolchain.h>
#include <sections.h>
@ -62,23 +62,23 @@ const NANO_ESF _default_esf = {0xdeaddead, /* a1 */
0xdeaddead, /* xpsr */
};
/*******************************************************************************
*
* _NanoFatalErrorHandler - nanokernel fatal error handler
*
* This routine is called when fatal error conditions are detected by software
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
/**
*
* _NanoFatalErrorHandler - nanokernel fatal error handler
*
* This routine is called when fatal error conditions are detected by software
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _NanoFatalErrorHandler(
unsigned int reason, /* reason that handler was called */

View file

@ -33,7 +33,7 @@
/*
DESCRIPTION
Common fault handler for ARM Cortex-M processors.
*/
*/
#include <toolchain.h>
#include <sections.h>
@ -59,24 +59,24 @@ Common fault handler for ARM Cortex-M processors.
#endif
#if (CONFIG_FAULT_DUMP == 1)
/*******************************************************************************
*
* _FaultDump - dump information regarding fault (FAULT_DUMP == 1)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
* (short form).
*
* e.g. (precise bus error escalated to hard fault):
*
* Fault! EXC #3, Thread: 0x200000dc, instr: 0x000011d3
* HARD FAULT: Escalation (see below)!
* MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
* BFAR: 0xff001234
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _FaultDump - dump information regarding fault (FAULT_DUMP == 1)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
* (short form).
*
* e.g. (precise bus error escalated to hard fault):
*
* Fault! EXC #3, Thread: 0x200000dc, instr: 0x000011d3
* HARD FAULT: Escalation (see below)!
* MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
* BFAR: 0xff001234
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _FaultDump(const NANO_ESF *esf, int fault)
{
@ -118,16 +118,16 @@ void _FaultDump(const NANO_ESF *esf, int fault)
#endif
#if (CONFIG_FAULT_DUMP == 2)
/*******************************************************************************
*
* _FaultContextShow - dump context information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _FaultContextShow - dump context information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _FaultContextShow(const NANO_ESF *esf)
{
@ -137,16 +137,16 @@ static void _FaultContextShow(const NANO_ESF *esf)
esf->pc);
}
/*******************************************************************************
*
* _MpuFault - dump MPU fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _MpuFault - dump MPU fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _MpuFault(const NANO_ESF *esf,
int fromHardFault)
@ -172,16 +172,16 @@ static void _MpuFault(const NANO_ESF *esf,
}
}
/*******************************************************************************
*
* _BusFault - dump bus fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _BusFault - dump bus fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _BusFault(const NANO_ESF *esf,
int fromHardFault)
@ -213,16 +213,16 @@ static void _BusFault(const NANO_ESF *esf,
}
}
/*******************************************************************************
*
* _UsageFault - dump usage fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _UsageFault - dump usage fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _UsageFault(const NANO_ESF *esf)
{
@ -253,16 +253,16 @@ static void _UsageFault(const NANO_ESF *esf)
_ScbUsageFaultAllFaultsReset();
}
/*******************************************************************************
*
* _HardFault - dump hard fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _HardFault - dump hard fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _HardFault(const NANO_ESF *esf)
{
@ -281,32 +281,32 @@ static void _HardFault(const NANO_ESF *esf)
}
}
/*******************************************************************************
*
* _DebugMonitor - dump debug monitor exception information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _DebugMonitor - dump debug monitor exception information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _DebugMonitor(const NANO_ESF *esf)
{
PR_EXC("***** Debug monitor exception (not implemented) *****\n");
}
/*******************************************************************************
*
* _ReservedException - dump reserved exception information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _ReservedException - dump reserved exception information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _ReservedException(const NANO_ESF *esf,
int fault)
@ -316,27 +316,27 @@ static void _ReservedException(const NANO_ESF *esf,
fault - 16);
}
/*******************************************************************************
*
* _FaultDump - dump information regarding fault (FAULT_DUMP == 2)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
* (long form).
*
* e.g. (precise bus error escalated to hard fault):
*
* Executing context ID (thread): 0x200000dc
* Faulting instruction address: 0x000011d3
* ***** HARD FAULT *****
* Fault escalation (see below)
* ***** BUS FAULT *****
* Precise data bus error
* Address: 0xff001234
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _FaultDump - dump information regarding fault (FAULT_DUMP == 2)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
* (long form).
*
* e.g. (precise bus error escalated to hard fault):
*
* Executing context ID (thread): 0x200000dc
* Faulting instruction address: 0x000011d3
* ***** HARD FAULT *****
* Fault escalation (see below)
* ***** BUS FAULT *****
* Precise data bus error
* Address: 0xff001234
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _FaultDump(const NANO_ESF *esf, int fault)
{
@ -363,23 +363,23 @@ static void _FaultDump(const NANO_ESF *esf, int fault)
}
#endif /* FAULT_DUMP == 2 */
/*******************************************************************************
*
* _Fault - fault handler
*
* This routine is called when fatal error conditions are detected by hardware
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* Since the ESF can be either on the MSP or PSP depending on whether an
* exception or interrupt was already being handled, it is passed a pointer to
* both and has to find out on which stack the ESF is present.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
/**
*
* _Fault - fault handler
*
* This routine is called when fatal error conditions are detected by hardware
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* Since the ESF can be either on the MSP or PSP depending on whether an
* exception or interrupt was already being handled, it is passed a pointer to
* both and has to find out on which stack the ESF is present.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
void _Fault(
const NANO_ESF *msp, /* pointer to potential ESF on MSP */
@ -394,16 +394,16 @@ void _Fault(
_SysFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
}
/*******************************************************************************
*
* _FaultInit - initialization of fault handling
*
* Turns on the desired hardware faults.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _FaultInit - initialization of fault handling
*
* Turns on the desired hardware faults.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _FaultInit(void)
{

View file

@ -33,7 +33,7 @@
/*
DESCRIPTION
Fault handlers for ARM Cortex-M processors.
*/
*/
#define _ASMLANGUAGE
@ -52,28 +52,28 @@ GTEXT(__usage_fault)
GTEXT(__debug_monitor)
GTEXT(__reserved)
/*******************************************************************************
*
* __fault - fault handler installed in the fault and reserved vectors
*
* Entry point for the hard fault, MPU fault, bus fault, usage fault, debug
* monitor and reserved exceptions.
*
* Save the values of the MSP and PSP in r0 and r1 respectively, as the first
* and second parameters to the _Fault() C function that will handle the rest.
* This has to be done because at this point we do not know if the fault
* happened while handling an exception or not, and thus the ESF could be on
* either stack. _Fault() will find out where the ESF resides.
*
* Provides these symbols:
*
* __hard_fault
* __mpu_fault
* __bus_fault
* __usage_fault
* __debug_monitor
* __reserved
*/
/**
*
* __fault - fault handler installed in the fault and reserved vectors
*
* Entry point for the hard fault, MPU fault, bus fault, usage fault, debug
* monitor and reserved exceptions.
*
* Save the values of the MSP and PSP in r0 and r1 respectively, as the first
* and second parameters to the _Fault() C function that will handle the rest.
* This has to be done because at this point we do not know if the fault
* happened while handling an exception or not, and thus the ESF could be on
* either stack. _Fault() will find out where the ESF resides.
*
* Provides these symbols:
*
* __hard_fault
* __mpu_fault
* __bus_fault
* __usage_fault
* __debug_monitor
* __reserved
*/
SECTION_SUBSEC_FUNC(TEXT,__fault,__hard_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__mpu_fault)

View file

@ -34,7 +34,7 @@
DESCRIPTION
This library implements find_last_set() and find_first_set() which returns the
most and least significant bit set respectively.
*/
*/
#define _ASMLANGUAGE
@ -48,17 +48,17 @@ _ASM_FILE_PROLOGUE
GTEXT(find_last_set)
GTEXT(find_first_set)
/*******************************************************************************
*
* find_last_set - find first set bit (searching from the most significant bit)
*
* This routine finds the first bit set in the argument passed to it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: most significant bit set
*/
/**
*
* find_last_set - find first set bit (searching from the most significant bit)
*
* This routine finds the first bit set in the argument passed to it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: most significant bit set
*/
SECTION_FUNC(TEXT, find_last_set)
@ -69,17 +69,17 @@ SECTION_FUNC(TEXT, find_last_set)
mov pc, lr
/*******************************************************************************
*
* find_first_set - find first set bit (searching from the least significant bit)
*
* This routine finds the first bit set in the argument passed to it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: least significant bit set
*/
/**
*
* find_first_set - find first set bit (searching from the least significant bit)
*
* This routine finds the first bit set in the argument passed to it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: least significant bit set
*/
SECTION_FUNC(TEXT, find_first_set)
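The 1-based bit numbering is the detail most likely to surprise; expected
results, as a C sketch:

    find_first_set(0x00000000);   /* == 0: no bit set            */
    find_first_set(0x00000001);   /* == 1: lsb is bit number 1   */
    find_first_set(0x90000010);   /* == 5: lowest set bit        */
    find_last_set(0x90000010);    /* == 32: msb of a 32-bit word */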

View file

@ -38,7 +38,7 @@ point returns or when it aborts itself, the CPU is in thread mode and must
call _Swap() (which triggers a service call), but when in handler mode, the
CPU must exit handler mode to cause the context switch, and thus must queue
the PendSV exception.
*/
*/
#ifdef CONFIG_MICROKERNEL
#include <microkernel.h>
@ -51,18 +51,18 @@ the PendSV exception.
#include <nanokernel.h>
#include <arch/cpu.h>
/*******************************************************************************
*
* fiber_abort - abort the currently executing fiber
*
* Possible reasons for a fiber aborting:
*
* - the fiber explicitly aborts itself by calling this routine
* - the fiber implicitly aborts by returning from its entry point
* - the fiber encounters a fatal exception
*
* RETURNS: N/A
*/
/**
*
* fiber_abort - abort the currently executing fiber
*
* Possible reasons for a fiber aborting:
*
* - the fiber explicitly aborts itself by calling this routine
* - the fiber implicitly aborts by returning from its entry point
* - the fiber encounters a fatal exception
*
* RETURNS: N/A
*/
void fiber_abort(void)
{

View file

@ -39,7 +39,7 @@ that we are running in an exception.
Upon exception exit, it must be recorded that the task is not in an exception
anymore.
*/
*/
#define _ASMLANGUAGE
@ -51,27 +51,27 @@ anymore.
_ASM_FILE_PROLOGUE
/*******************************************************************************
*
* _GdbStubExcEntry - exception entry extra work when GDB_INFO is enabled
*
* During normal system operation, the callee-saved registers are saved lazily
* only when a context switch is required. To allow looking at the current
* thread's registers while debugging an exception/interrupt, they must be saved
* upon entry since the handler could be using them: thus, looking at the CPU
* registers would show the current system state and not the current *thread*'s
* state.
*
* Also, record the fact that the thread is currently interrupted so that VQEMU
* looks into the CCS and not the CPU registers to obtain the current thread's
* register values.
*
* NOTE:
* - must be called with interrupts locked
* - cannot use r0 without saving it first
*
* RETURNS: N/A
*/
/**
*
* _GdbStubExcEntry - exception entry extra work when GDB_INFO is enabled
*
* During normal system operation, the callee-saved registers are saved lazily
* only when a context switch is required. To allow looking at the current
* thread's registers while debugging an exception/interrupt, they must be saved
* upon entry since the handler could be using them: thus, looking at the CPU
* registers would show the current system state and not the current *thread*'s
* state.
*
* Also, record the fact that the thread is currently interrupted so that VQEMU
* looks into the CCS and not the CPU registers to obtain the current thread's
* register values.
*
* NOTE:
* - must be called with interrupts locked
* - cannot use r0 without saving it first
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _GdbStubExcEntry)
@ -95,20 +95,20 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
bx lr
/*******************************************************************************
*
* _GdbStubExcExit - exception exit extra clean up when GDB_INFO is enabled
*
* Record the fact that the thread is not interrupted anymore so that VQEMU
* looks at the CPU registers and not into the CCS to obtain the current
* thread's register values. Only do this if this is not a nested exception.
*
* NOTE:
* - must be called with interrupts locked
* - cannot use r0 without saving it first
*
* RETURNS: N/A
*/
/**
*
* _GdbStubExcExit - exception exit extra clean up when GDB_INFO is enabled
*
* Record the fact that the thread is not interrupted anymore so that VQEMU
* looks at the CPU registers and not into the CCS to obtain the current
* thread's register values. Only do this if this is not a nested exception.
*
* NOTE:
* - must be called with interrupts locked
* - cannot use r0 without saving it first
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _GdbStubExcExit)
@ -129,24 +129,24 @@ SECTION_FUNC(TEXT, _GdbStubExcExit)
bx lr
/*******************************************************************************
*
* _irq_vector_table_entry_with_gdb_stub - stub for ISRs installed directly in
* vector table
*
* The kernel on Cortex-M3/4 can be configured so that ISRs
* are installed directly in the vector table for maximum efficiency.
*
* When OS-awareness is enabled, a stub must be inserted to invoke
* _GdbStubExcEntry() before the user ISR runs, to save the current task's
* registers. This stub thus gets inserted in the vector table instead of the
* user's ISR. The user's IRQ vector table gets pushed after the vector table
* automatically by the linker script: this is all transparent to the user.
* This stub must also act as a demuxer that finds the running exception and
* invokes the user's real ISR.
*
* RETURNS: N/A
*/
/**
*
* _irq_vector_table_entry_with_gdb_stub - stub for ISRs installed directly in
* vector table
*
* The kernel on Cortex-M3/4 can be configured so that ISRs
* are installed directly in the vector table for maximum efficiency.
*
* When OS-awareness is enabled, a stub must be inserted to invoke
* _GdbStubExcEntry() before the user ISR runs, to save the current task's
* registers. This stub thus gets inserted in the vector table instead of the
* user's ISR. The user's IRQ vector table gets pushed after the vector table
* automatically by the linker script: this is all transparent to the user.
* This stub must also act as a demuxer that finds the running exception and
* invokes the user's real ISR.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _irq_vector_table_entry_with_gdb_stub)

View file

@ -35,7 +35,7 @@ DESCRIPTION
When GDB is enabled, the static IRQ vector table needs to install the
_irq_vector_table_entry_with_gdb_stub stub to do some work before calling the
user-installed ISRs.
*/
*/
#include <toolchain.h>
#include <sections.h>

View file

@ -38,23 +38,23 @@ point returns or when it aborts itself, the CPU is in thread mode and must
call _Swap() (which triggers a service call), but when in handler mode, the
CPU must exit handler mode to cause the context switch, and thus must queue
the PendSV exception.
*/
*/
#include <toolchain.h>
#include <sections.h>
#include <nanokernel.h>
#include <arch/cpu.h>
/*******************************************************************************
*
* _IntLibInit - initialize interrupts
*
* Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO and
* not 0, which they have it set to when coming out of reset. This ensures that
* interrupt locking via BASEPRI works as expected.
*
* RETURNS: N/A
*/
/**
*
* _IntLibInit - initialize interrupts
*
* Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO and
* not 0, which is their value coming out of reset. This ensures that
* interrupt locking via BASEPRI works as expected.
*
* RETURNS: N/A
*/
void _IntLibInit(void)
{

View file

@ -35,7 +35,7 @@ DESCRIPTION
Interrupt management: enabling/disabling and dynamic ISR connecting/replacing.
SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime.
*/
*/
#include <nanokernel.h>
#include <arch/cpu.h>
@ -46,18 +46,18 @@ SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime.
extern void __reserved(void);
/*******************************************************************************
*
* irq_handler_set - replace an interrupt handler by another
*
* An interrupt's ISR can be replaced at runtime. Care must be taken that the
* interrupt is disabled before doing this.
*
* This routine will hang if <old> is not found in the table and ASSERT_ON is
* enabled.
*
* RETURNS: N/A
*/
/**
*
* irq_handler_set - replace an interrupt handler by another
*
* An interrupt's ISR can be replaced at runtime. Care must be taken that the
* interrupt is disabled before doing this.
*
* This routine will hang if <old> is not found in the table and ASSERT_ON is
* enabled.
*
* RETURNS: N/A
*/
void irq_handler_set(unsigned int irq,
void (*old)(void *arg),
@ -76,16 +76,16 @@ void irq_handler_set(unsigned int irq,
irq_unlock_inline(key);
}
/*******************************************************************************
*
* irq_enable - enable an interrupt line
*
* Clear possible pending interrupts on the line, and enable the interrupt
* line. After this call, the CPU will receive interrupts for the specified
* <irq>.
*
* RETURNS: N/A
*/
/**
*
* irq_enable - enable an interrupt line
*
* Clear possible pending interrupts on the line, and enable the interrupt
* line. After this call, the CPU will receive interrupts for the specified
* <irq>.
*
* RETURNS: N/A
*/
void irq_enable(unsigned int irq)
{
@ -94,35 +94,35 @@ void irq_enable(unsigned int irq)
_NvicIrqEnable(irq);
}
/*******************************************************************************
*
* irq_disable - disable an interrupt line
*
* Disable an interrupt line. After this call, the CPU will stop receiving
* interrupts for the specified <irq>.
*
* RETURNS: N/A
*/
/**
*
* irq_disable - disable an interrupt line
*
* Disable an interrupt line. After this call, the CPU will stop receiving
* interrupts for the specified <irq>.
*
* RETURNS: N/A
*/
void irq_disable(unsigned int irq)
{
_NvicIrqDisable(irq);
}
/*******************************************************************************
*
* irq_priority_set - set an interrupt's priority
*
* Valid values are from 1 to 255. Interrupts of priority 1 are not masked when
* interrupts are locked system-wide, so care must be taken when using them. ISRs
* installed with priority 1 cannot make kernel calls.
*
* Priority 0 is reserved for kernel usage and cannot be used.
*
* The priority is verified if ASSERT_ON is enabled.
*
* RETURNS: N/A
*/
/**
*
* irq_priority_set - set an interrupt's priority
*
* Valid values are from 1 to 255. Interrupts of priority 1 are not masked when
* interrupts are locked system-wide, so care must be taken when using them. ISRs
* installed with priority 1 cannot make kernel calls.
*
* Priority 0 is reserved for kernel usage and cannot be used.
*
* The priority is verified if ASSERT_ON is enabled.
*
* RETURNS: N/A
*/
void irq_priority_set(unsigned int irq,
unsigned int prio)
@ -131,17 +131,17 @@ void irq_priority_set(unsigned int irq,
_NvicIrqPrioSet(irq, _EXC_PRIO(prio));
}
/*******************************************************************************
*
* _irq_spurious - spurious interrupt handler
*
* Installed in all dynamic interrupt slots at boot time. Throws an error if
* called.
*
* See __reserved().
*
* RETURNS: N/A
*/
/**
*
* _irq_spurious - spurious interrupt handler
*
* Installed in all dynamic interrupt slots at boot time. Throws an error if
* called.
*
* See __reserved().
*
* RETURNS: N/A
*/
void _irq_spurious(void *unused)
{
@ -149,18 +149,18 @@ void _irq_spurious(void *unused)
__reserved();
}
/*******************************************************************************
*
* irq_connect - connect an ISR to an interrupt line
*
* <isr> is connected to interrupt line <irq> (exception #<irq>+16). No prior
* ISR can have been connected on <irq> interrupt line since the system booted.
*
* This routine will hang if another ISR was connected for interrupt line <irq>
* and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently.
*
* RETURNS: the interrupt line number
*/
/**
*
* irq_connect - connect an ISR to an interrupt line
*
* <isr> is connected to interrupt line <irq> (exception #<irq>+16). No prior
* ISR can have been connected on <irq> interrupt line since the system booted.
*
* This routine will hang if another ISR was connected for interrupt line <irq>
* and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently.
*
* RETURNS: the interrupt line number
*/
int irq_connect(unsigned int irq,
unsigned int prio,
@ -172,16 +172,16 @@ int irq_connect(unsigned int irq,
return irq;
}
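A typical boot-time sequence, per the contracts documented above (device name,
line number, and priority are hypothetical, and the ISR/argument parameter
order is assumed from the irq_handler_set() prototype):

    #define MY_UART_IRQ 5                   /* hypothetical IRQ line */

    static void my_uart_isr(void *arg)
    {
        /* service the device */
    }

    void my_uart_init(void)
    {
        irq_connect(MY_UART_IRQ, 3, my_uart_isr, NULL);  /* prio 3: maskable */
        irq_enable(MY_UART_IRQ);
    }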
/*******************************************************************************
*
* irq_disconnect - disconnect an ISR from an interrupt line
*
* Interrupt line <irq> (exception #<irq>+16) is disconnected from its ISR and
* the latter is replaced by _irq_spurious(). irq_disable() should have
* been called before invoking this routine.
*
* RETURNS: N/A
*/
/**
*
* irq_disconnect - disconnect an ISR from an interrupt line
*
* Interrupt line <irq> (exception #<irq>+16) is disconnected from its ISR and
* the latter is replaced by _irq_spurious(). irq_disable() should have
* been called before invoking this routine.
*
* RETURNS: N/A
*/
void irq_disconnect(unsigned int irq)
{

View file

@ -35,7 +35,7 @@ DESCRIPTION
Wrapper installed in vector table for handling dynamic interrupts that accept
a parameter.
*/
*/
#define _ASMLANGUAGE
@ -53,19 +53,19 @@ GDATA(_sw_isr_table)
GTEXT(_isr_wrapper)
GTEXT(_IntExit)
/*******************************************************************************
*
* _isr_wrapper - wrapper around ISRs when inserted in software ISR table
*
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table using
* the running interrupt number as the index, and invokes the registered ISR
* with its corresponding argument. When returning from the ISR, it determines
* if a context switch needs to happen (see documentation for __pendsv()) and
* pends the PendSV exception if so: the latter will perform the context switch
* itself.
*
* RETURNS: N/A
*/
/**
*
* _isr_wrapper - wrapper around ISRs when inserted in software ISR table
*
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table using
* the running interrupt number as the index, and invokes the registered ISR
* with its corresponding argument. When returning from the ISR, it determines
* if a context switch needs to happen (see documentation for __pendsv()) and
* pends the PendSV exception if so: the latter will perform the context switch
* itself.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _isr_wrapper)
_GDB_STUB_EXC_ENTRY
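The demux step amounts to one table lookup; a C sketch, with the entry layout
assumed (only the _sw_isr_table symbol itself appears in this file):

    struct _IsrTableEntry {
        void *arg;                  /* parameter registered with the ISR */
        void (*isr)(void *arg);     /* the registered handler */
    };

    extern struct _IsrTableEntry _sw_isr_table[];

    /* conceptually, what the assembly does for interrupt 'irq' */
    _sw_isr_table[irq].isr(_sw_isr_table[irq].arg);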

View file

@ -46,7 +46,7 @@ Typically, only those members that are accessed by assembly language routines
are defined; however, it doesn't hurt to define all fields for the sake of
completeness.
*/
*/
#include <gen_offset.h>
#include <nano_private.h>

View file

@ -34,7 +34,7 @@
DESCRIPTION
This module implements the routines necessary for thread context switching
on ARM Cortex-M3/M4 CPUs.
*/
*/
#define _ASMLANGUAGE
@ -51,23 +51,23 @@ GTEXT(__pendsv)
GDATA(_nanokernel)
/*******************************************************************************
*
* __pendsv - PendSV exception handler, handling context switches
*
* The PendSV exception is the only context in the system that can perform
* context switching. When an execution context finds out it has to switch
* contexts, it pends the PendSV exception.
*
* When PendSV is pended, the decision that a context switch must happen has
* already been taken. In other words, when __pendsv() runs, we *know* we have
* to swap *something*.
*
* The scheduling algorithm is simple: schedule the head of the runnable FIBER
* context list, which is represented by _nanokernel.fiber. If there are no
* runnable FIBER contexts, then schedule the TASK context represented by
* _nanokernel.task. The _nanokernel.task field will never be NULL.
*/
/**
*
* __pendsv - PendSV exception handler, handling context switches
*
* The PendSV exception is the only context in the system that can perform
* context switching. When an execution context finds out it has to switch
* contexts, it pends the PendSV exception.
*
* When PendSV is pended, the decision that a context switch must happen has
* already been taken. In other words, when __pendsv() runs, we *know* we have
* to swap *something*.
*
* The scheduling algorithm is simple: schedule the head of the runnable FIBER
* context list, which is represented by _nanokernel.fiber. If there are no
* runnable FIBER contexts, then schedule the TASK context represented by
* _nanokernel.task. The _nanokernel.task field will never be NULL.
*/
SECTION_FUNC(TEXT, __pendsv)
@ -146,15 +146,15 @@ SECTION_FUNC(TEXT, __pendsv)
/* exc return */
bx lr
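The scheduling rule reduces to a two-way pick; in C terms (type and field names
as used in the comment above):

    tCCS *next = _nanokernel.fiber;   /* head of the runnable fiber list */

    if (next == NULL) {
        next = _nanokernel.task;      /* the task field is never NULL */
    }
    /* ... restore 'next' and exception-return into it ... */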
/*******************************************************************************
*
* __svc - service call handler
*
* The service call (svc) is only used in _Swap() to enter handler mode so we
* can go through the PendSV exception to perform a context switch.
*
* RETURNS: N/A
*/
/**
*
* __svc - service call handler
*
* The service call (svc) is only used in _Swap() to enter handler mode so we
* can go through the PendSV exception to perform a context switch.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, __svc)
@ -178,38 +178,38 @@ SECTION_FUNC(TEXT, __svc)
/* handler mode exit, to PendSV */
bx lr
/*******************************************************************************
*
* _Swap - initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
* a cooperative context switch. Prior to invoking _Swap(), the caller
* disables interrupts via irq_lock() and the return 'key' is passed as a
* parameter to _Swap(). The 'key' actually represents the BASEPRI register
* prior to disabling interrupts via the BASEPRI mechanism.
*
* _Swap() itself does not do much.
*
* It simply stores the intlock key (the BASEPRI value) parameter into
* current->basepri, and then triggers a service call exception (svc) to set up
* the PendSV exception, which does the heavy lifting of context switching.
/**
*
* _Swap - initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
* a cooperative context switch. Prior to invoking _Swap(), the caller
* disables interrupts via irq_lock() and the return 'key' is passed as a
* parameter to _Swap(). The 'key' actually represents the BASEPRI register
* prior to disabling interrupts via the BASEPRI mechanism.
*
* _Swap() itself does not do much.
*
* It simply stores the intlock key (the BASEPRI value) parameter into
* current->basepri, and then triggers a service call exception (svc) to set up
* the PendSV exception, which does the heavy lifting of context switching.
* This is the only place we have to save BASEPRI since the other paths to
* __pendsv all come from handling an interrupt, which means we know the
* interrupts were not locked: in that case the BASEPRI value is 0.
*
* Given that _Swap() is called to effect a cooperative context switch,
* only the caller-saved integer registers need to be saved in the tCCS of the
* outgoing context. This is all performed by the hardware, which stores it in
* its exception stack frame, created when handling the svc exception.
*
* RETURNS: may contain a return value setup by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int basepri);
*
*/
* This is the only place we have to save BASEPRI since the other paths to
* __pendsv all come from handling an interrupt, which means we know the
* interrupts were not locked: in that case the BASEPRI value is 0.
*
* Given that _Swap() is called to effect a cooperative context switch,
* only the caller-saved integer registers need to be saved in the tCCS of the
* outgoing context. This is all performed by the hardware, which stores it in
* its exception stack frame, created when handling the svc exception.
*
* RETURNS: may contain a return value set up by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int basepri);
*
*/
SECTION_FUNC(TEXT, _Swap)

View file

@ -38,7 +38,7 @@ point returns or when it aborts itself, the CPU is in thread mode and must
call the equivalent of task_abort(<self>), but when in handler mode, the
CPU must queue a packet to K_swapper(), then exit handler mode to queue the
PendSV exception and cause the immediate context switch to K_swapper.
*/
*/
#ifdef CONFIG_MICROKERNEL
@ -52,20 +52,20 @@ PendSV exception and cause the immediate context switch to K_swapper.
static struct k_args cmd_packet;
/*******************************************************************************
*
* _TaskAbort - abort the current task
*
* Possible reasons for a task aborting:
*
* - the task explicitly aborts itself by calling this routine
* - the task implicitly aborts by returning from its entry point
* - the task encounters a fatal exception
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _TaskAbort - abort the current task
*
* Possible reasons for a task aborting:
*
* - the task explicitly aborts itself by calling this routine
* - the task implicitly aborts by returning from its entry point
* - the task encounters a fatal exception
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _TaskAbort(void)
{

View file

@ -34,7 +34,7 @@
DESCRIPTION
This header file is used to specify and describe board-level aspects for the
'fsl_frdm_k64f' BSP.
*/
*/
#ifndef _BOARD__H_
#define _BOARD__H_

View file

@ -40,7 +40,7 @@ a) When software-managed ISRs (SW_ISR_TABLE) is enabled, and in that case it
b) When the BSP is written so that device ISRs are installed directly in the
vector table, they are enumerated here.
*/
*/
#include <toolchain.h>
#include <sections.h>

View file

@ -33,7 +33,7 @@
/*
DESCRIPTION
This is the linker script for both standard images and XIP images.
*/
*/
/* Flash base address and size */
#define FLASH_START 0x00000000

View file

@ -37,7 +37,7 @@ is to hard hang, sleeping.
This might be preferable to rebooting to help debugging, or because
rebooting might trigger the exact same problem over and over.
*/
*/
#define _ASMLANGUAGE

View file

@ -34,7 +34,7 @@
DESCRIPTION
This module provides routines to initialize and support board-level hardware
for the fsl_frdm_k64f BSP.
*/
*/
#include <nanokernel.h>
#include <board.h>
@ -94,21 +94,21 @@ uint8_t __security_frdm_k64f_section __security_frdm_k64f[] = {
/* Reserved for FlexNVM feature (unsupported by this MCU) */
0xFF, 0xFF};
/*******************************************************************************
*
* clkInit - initialize the system clock
*
* This routine will configure the multipurpose clock generator (MCG) to
* set up the system clock.
* The MCG has nine possible modes, including Stop mode. This routine assumes
* that the current MCG mode is FLL Engaged Internal (FEI), as from reset.
* It transitions through the FLL Bypassed External (FBE) and
* PLL Bypassed External (PBE) modes to get to the desired
* PLL Engaged External (PEE) mode and generate the maximum 120 MHz system clock.
*
* RETURNS: N/A
*
*/
/**
*
* clkInit - initialize the system clock
*
* This routine will configure the multipurpose clock generator (MCG) to
* set up the system clock.
* The MCG has nine possible modes, including Stop mode. This routine assumes
* that the current MCG mode is FLL Engaged Internal (FEI), as from reset.
* It transitions through the FLL Bypassed External (FBE) and
* PLL Bypassed External (PBE) modes to get to the desired
* PLL Engaged External (PEE) mode and generate the maximum 120 MHz system clock.
*
* RETURNS: N/A
*
*/
static void clkInit(void)
{
@ -247,15 +247,15 @@ static void clkInit(void)
#if defined(DO_CONSOLE_INIT)
/*******************************************************************************
*
* consoleInit - initialize target-only console
*
* Only used for debugging.
*
* RETURNS: N/A
*
*/
/**
*
* consoleInit - initialize target-only console
*
* Only used for debugging.
*
* RETURNS: N/A
*
*/
#include <console/uart_console.h>
@ -298,16 +298,16 @@ static void consoleInit(void)
} while ((0))
#endif /* DO_CONSOLE_INIT */
/*******************************************************************************
*
* _InitHardware - perform basic hardware initialization
*
* Initialize the interrupt controller device drivers and the
* Kinetis UART device driver.
* Also initialize the timer device driver, if required.
*
* RETURNS: N/A
*/
/**
*
* _InitHardware - perform basic hardware initialization
*
* Initialize the interrupt controller device drivers and the
* Kinetis UART device driver.
* Also initialize the timer device driver, if required.
*
* RETURNS: N/A
*/
void _InitHardware(void)
{

View file

@ -33,7 +33,7 @@
/*
DESCRIPTION
This module initializes the watchdog for the fsl_frdm_k64f BSP.
*/
*/
#define _ASMLANGUAGE
@ -53,7 +53,7 @@ GTEXT(_WdogInit)
#define WDOG_UNLOCK_1_CMD 0xC520
#define WDOG_UNLOCK_2_CMD 0xD928
/*******************************************************************************
/**
*
* _WdogInit - Watchdog timer disable routine
*

View file

@@ -40,16 +40,16 @@
#ifndef _ASMLANGUAGE
/*******************************************************************************
*
* _IpsrGet - obtain value of IPSR register
*
* Obtain and return current value of IPSR register.
*
* RETURNS: the contents of the IPSR register
*
* \NOMANUAL
*/
/**
*
* _IpsrGet - obtain value of IPSR register
*
* Obtain and return current value of IPSR register.
*
* RETURNS: the contents of the IPSR register
*
* \NOMANUAL
*/
static ALWAYS_INLINE uint32_t _IpsrGet(void)
{
@@ -59,16 +59,16 @@ static ALWAYS_INLINE uint32_t _IpsrGet(void)
return vector;
}
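
The hunk elides the body; it presumably reads IPSR with an MRS instruction. A minimal C sketch under that assumption (the exact asm constraints in the tree may differ):

static ALWAYS_INLINE uint32_t _IpsrGet(void) /* sketch, not the tree's code */
{
	uint32_t vector;

	/* read the Interrupt Program Status Register */
	__asm__ volatile("mrs %0, IPSR" : "=r"(vector));
	return vector;
}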
/*******************************************************************************
*
* _MspSet - set the value of the Main Stack Pointer register
*
* Store the value of <msp> in MSP register.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _MspSet - set the value of the Main Stack Pointer register
*
* Store the value of <msp> in MSP register.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void _MspSet(uint32_t msp /* value to store in MSP */
)

View file

@@ -33,7 +33,7 @@
/*
DESCRIPTION
Exception/interrupt context helpers.
*/
*/
#ifndef _ARM_CORTEXM_ISR__H_
#define _ARM_CORTEXM_ISR__H_
@@ -47,19 +47,19 @@ Exception/interrupt context helpers.
#else
/*******************************************************************************
*
* _IsInIsr - find out if running in an ISR context
*
* The current executing vector is found in the IPSR register. We consider the
* IRQs (exception 16 and up), and the PendSV and SYSTICK exceptions, to be
* interrupts. Taking a fault within an exception is also considered to be in
* interrupt context.
*
* RETURNS: 1 if in ISR, 0 if not.
*
* \NOMANUAL
*/
/**
*
* _IsInIsr - find out if running in an ISR context
*
* The current executing vector is found in the IPSR register. We consider the
* IRQs (exception 16 and up), and the PendSV and SYSTICK exceptions, to be
* interrupts. Taking a fault within an exception is also considered to be in
* interrupt context.
*
* RETURNS: 1 if in ISR, 0 if not.
*
* \NOMANUAL
*/
static ALWAYS_INLINE int _IsInIsr(void)
{
uint32_t vector = _IpsrGet();
@@ -68,18 +68,18 @@ static ALWAYS_INLINE int _IsInIsr(void)
return (vector > 13) || (vector && _ScbIsNestedExc());
}
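
An illustrative caller of the predicate above; the API name is hypothetical:

/* refuse to block when invoked from interrupt context (sketch) */
static inline int my_blocking_api(void)
{
	if (_IsInIsr()) {
		/* IRQs (16+), PendSV and SYSTICK all count as ISR context */
		return -1;
	}
	/* ... safe to wait here ... */
	return 0;
}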
/*******************************************************************************
* _ExcSetup - setup system exceptions
*
* Set exception priorities to conform with the BASEPRI locking mechanism.
* Set PendSV priority to lowest possible.
*
* Enable fault exceptions.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
* _ExcSetup - setup system exceptions
*
* Set exception priorities to conform with the BASEPRI locking mechanism.
* Set PendSV priority to lowest possible.
*
* Enable fault exceptions.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void _ExcSetup(void)
{

View file

@@ -33,7 +33,7 @@
/*
DESCRIPTION
Stack helper functions.
*/
*/
#ifndef _ARM_CORTEXM_STACK__H_
#define _ARM_CORTEXM_STACK__H_
@@ -68,17 +68,17 @@ Stack helper functions.
extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE];
/*******************************************************************************
*
* _InterruptStackSetup - setup interrupt stack
*
* On Cortex-M, the interrupt stack is registered in the MSP (main stack
* pointer) register, and switched to automatically when taking an exception.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _InterruptStackSetup - setup interrupt stack
*
* On Cortex-M, the interrupt stack is registered in the MSP (main stack
* pointer) register, and switched to automatically when taking an exception.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void _InterruptStackSetup(void)
{
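	/* The body is elided by the hunk; given the description it
	 * presumably just programs MSP to the top of the reserved area.
	 * A one-line sketch under that assumption:
	 *
	 *     _MspSet((uint32_t)_interrupt_stack + CONFIG_ISR_STACK_SIZE);
	 */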

View file

@@ -39,7 +39,7 @@ This file is also included by assembly language files which must #define
_ASMLANGUAGE before including this header file. Note that nanokernel assembly
source files obtain structure offset values via "absolute symbols" in the
offsets.o module.
*/
*/
#ifndef _NANO_PRIVATE_H
#define _NANO_PRIVATE_H
@@ -184,18 +184,18 @@ static ALWAYS_INLINE void nanoArchInit(void)
_CpuIdleInit();
}
/*******************************************************************************
*
* fiberRtnValueSet - set the return value for the specified fiber (inline)
*
* The register used to store the return value from a function call invocation
* is set to <value>. It is assumed that the specified <fiber> is pending, and thus
* the fiber's context is stored in its tCCS structure.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* fiberRtnValueSet - set the return value for the specified fiber (inline)
*
* The register used to store the return value from a function call invocation
* is set to <value>. It is assumed that the specified <fiber> is pending, and thus
* the fiber's context is stored in its tCCS structure.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void fiberRtnValueSet(
tCCS *fiber, /* pointer to fiber */

View file

@@ -35,7 +35,7 @@ DESCRIPTION
ARM-specific parts of start_task().
Currently empty, only here for abstraction.
*/
*/
#ifndef _START_TASK_ARCH__H_
#define _START_TASK_ARCH__H_

View file

@@ -34,7 +34,7 @@
DESCRIPTION
This header file is used to specify and describe board-level aspects for
the 'ti_lm3s6965' BSP.
*/
*/
#ifndef _BOARD__H_
#define _BOARD__H_

View file

@@ -40,7 +40,7 @@ a) When software-managed ISRs (SW_ISR_TABLE) is enabled, and in that case it
b) When the BSP is written so that device ISRs are installed directly in the
vector table, they are enumerated here.
*/
*/
#include <toolchain.h>
#include <sections.h>

View file

@@ -37,7 +37,7 @@ is to hard hang, sleeping.
This might be preferable to rebooting, to help debugging, or because
rebooting might trigger the exact same problem over and over.
*/
*/
#define _ASMLANGUAGE

View file

@@ -37,7 +37,7 @@ Library for controlling target-specific devices present in the 0x400fe000
peripherals memory region.
Currently, only enabling the main OSC with default value is implemented.
*/
*/
#include <stdint.h>
#include <toolchain.h>
@@ -49,12 +49,12 @@ Currently, only enabling the main OSC with default value is implemented.
volatile struct __scp __scp_section __scp;
/*******************************************************************************
*
* _ScpMainOscEnable - enable main oscillator with a default frequency of 6 MHz
*
* RETURNS: N/A
*/
/**
*
* _ScpMainOscEnable - enable main oscillator with a default frequency of 6 MHz
*
* RETURNS: N/A
*/
void _ScpMainOscEnable(void)
{
union __rcc reg;

View file

@@ -46,7 +46,7 @@ These modules are not defined:
The registers and bit field names are taken from the 'Stellaris LM3S6965
Microcontroller DATA SHEET (DS-LM3S6965-12746.2515) revision H' document,
section 5.4/5.5, pp. 184-200.
*/
*/
#ifndef _SCP_H_
#define _SCP_H_

View file

@@ -34,7 +34,7 @@
DESCRIPTION
This module provides routines to initialize and support board-level hardware
for the ti_lm3s6965 BSP.
*/
*/
#include <nanokernel.h>
#include <board.h>
@@ -59,13 +59,13 @@ extern void _NmiInit(void);
#if defined(DO_CONSOLE_INIT)
/*******************************************************************************
*
* uart_generic_info_init - initialize generic information for one UART
*
* RETURNS: N/A
*
*/
/**
*
* uart_generic_info_init - initialize generic information for one UART
*
* RETURNS: N/A
*
*/
inline void uart_generic_info_init(struct uart_init_info *pInfo)
{
@@ -79,15 +79,15 @@ inline void uart_generic_info_init(struct uart_init_info *pInfo)
#if defined(DO_CONSOLE_INIT)
/*******************************************************************************
*
* consoleInit - initialize target-only console
*
* Only used for debugging.
*
* RETURNS: N/A
*
*/
/**
*
* consoleInit - initialize target-only console
*
* Only used for debugging.
*
* RETURNS: N/A
*
*/
#include <console/uart_console.h>
@@ -132,16 +132,16 @@ static void bluetooth_init(void)
} while ((0))
#endif /* CONFIG_BLUETOOTH */
/*******************************************************************************
*
* _InitHardware - perform basic hardware initialization
*
* Initialize the interrupt controller device drivers and the
* integrated 16550-compatible UART device driver.
* Also initialize the timer device driver, if required.
*
* RETURNS: N/A
*/
/**
*
* _InitHardware - perform basic hardware initialization
*
* Initialize the interrupt controller device drivers and the
* integrated 16550-compatible UART device driver.
* Also initialize the timer device driver, if required.
*
* RETURNS: N/A
*/
void _InitHardware(void)
{

View file

@@ -32,7 +32,7 @@
/*
DESCRIPTION
This module contains functions for manipulating caches.
*/
*/
#include <nanokernel.h>
#include <arch/cpu.h>
@@ -44,19 +44,19 @@ This module contains functions for manipulating caches.
#error Cannot use this implementation with a cache line size of 0
#endif
/*******************************************************************************
*
* _SysCacheFlush - flush a page to main memory
*
* No alignment is required for either <virt> or <size>, but since
* _SysCacheFlush() iterates on the cache lines, a cache line alignment for both
* is optimal.
*
* The cache line size is specified via the CONFIG_CACHE_LINE_SIZE kconfig
* option.
*
* RETURNS: N/A
*/
/**
*
* _SysCacheFlush - flush a page to main memory
*
* No alignment is required for either <virt> or <size>, but since
* _SysCacheFlush() iterates on the cache lines, a cache line alignment for both
* is optimal.
*
* The cache line size is specified via the CONFIG_CACHE_LINE_SIZE kconfig
* option.
*
* RETURNS: N/A
*/
void _SysCacheFlush(VIRT_ADDR virt, size_t size)
{
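	/* The loop itself is elided by the hunk; per the description it
	 * is presumably a clflush walk over the affected lines. A sketch,
	 * not the tree's exact code:
	 *
	 *     VIRT_ADDR end = virt + size;
	 *
	 *     while (virt < end) {
	 *             __asm__ volatile("clflush %0"
	 *                              : "+m"(*(volatile char *)virt));
	 *             virt += CONFIG_CACHE_LINE_SIZE;
	 *     }
	 */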

View file

@@ -32,7 +32,7 @@
/*
DESCRIPTION
This module contains functions for manipulating caches.
*/
*/
#ifndef CONFIG_CLFLUSH_INSTRUCTION_SUPPORTED
@@ -42,20 +42,20 @@ This module contains functions for manipulating caches.
/* externs (internal APIs) */
GTEXT(_SysCacheFlush)
/*******************************************************************************
*
* _SysCacheFlush - flush a page to main memory
*
* This implementation flushes the whole cache.
*
* C signature:
*
* void _SysCacheFlush (VIRT_ADDR virt, size_t size)
*
* Both parameters are ignored in this implementation.
*
* RETURNS: N/A
*/
/**
*
* _SysCacheFlush - flush a page to main memory
*
* This implementation flushes the whole cache.
*
* C signature:
*
* void _SysCacheFlush (VIRT_ADDR virt, size_t size)
*
* Both parameters are ignored in this implementation.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _SysCacheFlush)
wbinvd

View file

@@ -41,7 +41,7 @@ service routines, and to operations performed by peer processors.
INTERNAL
These operators are currently unavailable to user space applications,
as there is no requirement for this capability.
*/
*/
/* includes */
@@ -66,33 +66,33 @@ as there is no requirement for this capability.
GTEXT(atomic_and)
GTEXT(atomic_nand)
/*******************************************************************************
*
* atomic_cas - atomic compare-and-set primitive
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* RETURNS: 1 if <newValue> is written, 0 otherwise.
*
* int atomic_cas
* (
* atomic_t * target, /@ address to be tested @/
* atomic_val_t oldValue, /@ value to compare against @/
* atomic_val_t newValue /@ value to swap in @/
* )
*
* INTERNAL
* The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/
/**
*
* atomic_cas - atomic compare-and-set primitive
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* RETURNS: 1 if <newValue> is written, 0 otherwise.
*
* int atomic_cas
* (
* atomic_t * target, /@ address to be tested @/
* atomic_val_t oldValue, /@ value to compare against @/
* atomic_val_t newValue /@ value to swap in @/
* )
*
* INTERNAL
* The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/
SECTION_FUNC(TEXT, atomic_cas)
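
As a usage illustration of the contract documented above (everything except atomic_cas is hypothetical):

/* take a simple lock by atomically flipping 0 -> 1 */
static atomic_t my_lock; /* 0 = free, 1 = held */

static void my_lock_take(void)
{
	while (atomic_cas(&my_lock, 0, 1) == 0) {
		/* spin: another context owns the lock */
	}
}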
@@ -113,25 +113,25 @@ BRANCH_LABEL(atomic_cas1)
ret
/*******************************************************************************
*
* atomic_add - atomic add primitive
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_add
* (
* atomic_t * target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*
* INTERNAL
* The 'xadd' instruction is NOT supported on processors prior to the 80486
*/
/**
*
* atomic_add - atomic add primitive
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_add
* (
* atomic_t * target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*
* INTERNAL
* The 'xadd' instruction is NOT supported on processors prior to the 80486
*/
SECTION_FUNC(TEXT, atomic_add)
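
The 'lock xadd' idiom the INTERNAL note refers to looks roughly like this in GNU C; a sketch of the technique, not the assembly in this file:

/* atomically add 'value' to *target and return the previous value */
static inline int lock_xadd(volatile int *target, int value)
{
	__asm__ volatile("lock; xaddl %0, %1"
			 : "+r"(value), "+m"(*target)
			 :
			 : "memory");
	return value; /* xadd leaves the old *target in the source register */
}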
@@ -145,25 +145,25 @@ SECTION_FUNC(TEXT, atomic_add)
ret
/*******************************************************************************
*
* atomic_sub - atomic subtraction primitive
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_sub
* (
* atomic_t * target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
* INTERNAL
* The 'xadd' instruction is NOT supported on processors prior to the 80486
*/
/**
*
* atomic_sub - atomic subtraction primitive
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_sub
* (
* atomic_t * target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
* INTERNAL
* The 'xadd' instruction is NOT supported on processors prior to the 80486
*/
SECTION_FUNC(TEXT, atomic_sub)
@@ -178,23 +178,23 @@ SECTION_FUNC(TEXT, atomic_sub)
ret
/*******************************************************************************
*
* atomic_inc - atomic increment primitive
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* RETURNS: The value from <target> before the increment
*
* atomic_val_t atomic_inc
* (
* atomic_t *target /@ memory location to increment @/
* )
*
* INTERNAL
* The 'xadd' instruction is NOT supported on processors prior to the 80486
*/
/**
*
* atomic_inc - atomic increment primitive
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* RETURNS: The value from <target> before the increment
*
* atomic_val_t atomic_inc
* (
* atomic_t *target /@ memory location to increment @/
* )
*
* INTERNAL
* The 'xadd' instruction is NOT supported on processors prior to the 80486
*/
SECTION_FUNC(TEXT, atomic_inc)
@@ -210,23 +210,23 @@ SECTION_FUNC(TEXT, atomic_inc)
ret
/*******************************************************************************
*
* atomic_dec - atomic decrement primitive
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* RETURNS: The value from <target> prior to the decrement
*
* atomic_val_t atomic_dec
* (
* atomic_t *target /@ memory location to decrement @/
* )
*
* INTERNAL
* The 'xadd' instruction is NOT supported on processors prior to the 80486
*/
/**
*
* atomic_dec - atomic decrement primitive
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* RETURNS: The value from <target> prior to the decrement
*
* atomic_val_t atomic_dec
* (
* atomic_t *target /@ memory location to decrement @/
* )
*
* INTERNAL
* The 'xadd' instruction is NOT supported on processors prior to the 80486
*/
SECTION_FUNC(TEXT, atomic_dec)
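
Because each routine returns the value before the operation, a caller can detect, say, the release of a final reference; the refcount names are illustrative:

/* old value 1 on decrement means we dropped the last reference */
static atomic_t my_refcount;

static void my_ref_put(void)
{
	if (atomic_dec(&my_refcount) == 1) {
		/* ... safe to clean the object up here ... */
	}
}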
@@ -240,22 +240,22 @@ SECTION_FUNC(TEXT, atomic_dec)
ret
/*******************************************************************************
*
* atomic_get - atomic get primitive
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* RETURNS: The value read from <target>
*
* atomic_val_t atomic_get
* (
* atomic_t *target /@ memory location to read from @/
* )
*
*/
/**
*
* atomic_get - atomic get primitive
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* RETURNS: The value read from <target>
*
* atomic_val_t atomic_get
* (
* atomic_t *target /@ memory location to read from @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_get)
@@ -264,25 +264,25 @@ SECTION_FUNC(TEXT, atomic_get)
ret
/*******************************************************************************
*
* atomic_set - atomic get-and-set primitive
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to write to @/
* atomic_val_t value /@ value to set @/
* )
*
* INTERNAL
* The XCHG instruction is executed on the specified address to
* swap in value. The value swapped out is returned by this function.
*/
/**
*
* atomic_set - atomic get-and-set primitive
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to write to @/
* atomic_val_t value /@ value to set @/
* )
*
* INTERNAL
* The XCHG instruction is executed on the specified address to
* swap in value. The value swapped out is returned by this function.
*/
SECTION_FUNC(TEXT, atomic_set)
@@ -304,22 +304,22 @@ SECTION_FUNC(TEXT, atomic_set)
ret
/*******************************************************************************
*
* atomic_clear - atomic clear primitive
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to write to @/
* )
*
*/
/**
*
* atomic_clear - atomic clear primitive
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to write to @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_clear)
@@ -341,25 +341,25 @@ SECTION_FUNC(TEXT, atomic_clear)
ret
/*******************************************************************************
*
* atomic_or - atomic bitwise inclusive OR primitive
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to be modified @/
* atomic_val_t value /@ value to OR @/
* )
*
* INTERNAL
* The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/
/**
*
* atomic_or - atomic bitwise inclusive OR primitive
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to be modified @/
* atomic_val_t value /@ value to OR @/
* )
*
* INTERNAL
* The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/
SECTION_FUNC(TEXT, atomic_or)
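
The read-modify-cmpxchg retry pattern shared by atomic_or/xor/and/nand can be written in C as follows; a sketch of the technique only, built on the atomic_get and atomic_cas contracts above:

static atomic_val_t my_atomic_or(atomic_t *target, atomic_val_t value)
{
	atomic_val_t old;

	do {
		old = atomic_get(target);       /* snapshot current value */
	} while (atomic_cas(target, old, old | value) == 0); /* lost a race */

	return old; /* previous value, per the contract */
}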
@@ -379,25 +379,25 @@ BRANCH_LABEL(atomic_or_retry)
ret
/*******************************************************************************
*
* atomic_xor - atomic bitwise exclusive OR (XOR) primitive
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to be modified @/
* atomic_val_t value /@ value to XOR @/
* )
*
* INTERNAL
* The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/
/**
*
* atomic_xor - atomic bitwise exclusive OR (XOR) primitive
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to be modified @/
* atomic_val_t value /@ value to XOR @/
* )
*
* INTERNAL
* The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/
SECTION_FUNC(TEXT, atomic_xor)
@@ -417,25 +417,25 @@ BRANCH_LABEL(atomic_xor_retry)
ret
/*******************************************************************************
*
* atomic_and - atomic bitwise AND primitive
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to be modified @/
* atomic_val_t value /@ value to AND @/
* )
*
* INTERNAL
* The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/
/**
*
* atomic_and - atomic bitwise AND primitive
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to be modified @/
* atomic_val_t value /@ value to AND @/
* )
*
* INTERNAL
* The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/
SECTION_FUNC(TEXT, atomic_and)
@@ -455,25 +455,25 @@ BRANCH_LABEL(atomic_and_retry)
ret
/*******************************************************************************
*
* atomic_nand - atomic bitwise NAND primitive
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_nand
* (
* atomic_t * target, /@ memory location to be modified @/
* atomic_val_t value /@ value to NAND @/
* )
*
* INTERNAL
* The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/
/**
*
* atomic_nand - atomic bitwise NAND primitive
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*
* atomic_val_t atomic_nand
* (
* atomic_t * target, /@ memory location to be modified @/
* atomic_val_t value /@ value to NAND @/
* )
*
* INTERNAL
* The 'cmpxchg' instruction is NOT supported on processors prior to the 80486
*/
SECTION_FUNC(TEXT, atomic_nand)

View file

@@ -43,30 +43,30 @@ operators that do utilize the LOCK prefix instruction.
INTERNAL
These operators are currently unavailable to user space applications
as there is no requirement for this capability.
*/
*/
#if defined(CONFIG_LOCK_INSTRUCTION_UNSUPPORTED)
#include <nanokernel.h>
#include <arch/cpu.h>
/*******************************************************************************
*
* atomic_cas - atomic compare-and-set primitive
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* RETURNS: 1 if <newValue> is written, 0 otherwise.
*/
/**
*
* atomic_cas - atomic compare-and-set primitive
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* RETURNS: 1 if <newValue> is written, 0 otherwise.
*/
int atomic_cas(
atomic_t *target, /* address to be tested */
@@ -88,16 +88,16 @@ int atomic_cas(
return 1;
}
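
Every operator in this file follows the same shape: lock interrupts, perform the read-modify-write, unlock, return the old value. Sketched below with an assumed irq_lock()/irq_unlock() pair (the exact lock API in the tree may differ), using addition as the example:

	atomic_val_t ovalue;
	int key = irq_lock();           /* assumed interrupt-lock API   */

	ovalue = *target;               /* read                         */
	*target = ovalue + value;       /* modify and write             */

	irq_unlock(key);                /* restore prior lockout state  */
	return ovalue;                  /* old value, per the contract  */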
/*******************************************************************************
*
* atomic_add - atomic addition primitive
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* RETURNS: The previous value from <target>
*/
/**
*
* atomic_add - atomic addition primitive
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* RETURNS: The previous value from <target>
*/
atomic_val_t atomic_add(
atomic_t *target, /* memory location to add to */
@@ -114,16 +114,16 @@ atomic_val_t atomic_add(
return ovalue;
}
/*******************************************************************************
*
* atomic_sub - atomic subtraction primitive
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* RETURNS: The previous value from <target>
*/
/**
*
* atomic_sub - atomic subtraction primitive
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* RETURNS: The previous value from <target>
*/
atomic_val_t atomic_sub(
atomic_t *target, /* memory location to subtract from */
@@ -140,15 +140,15 @@ atomic_val_t atomic_sub(
return ovalue;
}
/*******************************************************************************
*
* atomic_inc - atomic increment primitive
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* RETURNS: The value from <target> before the increment
*/
/**
*
* atomic_inc - atomic increment primitive
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* RETURNS: The value from <target> before the increment
*/
atomic_val_t atomic_inc(
atomic_t *target /* memory location to increment */
@@ -164,15 +164,15 @@ atomic_val_t atomic_inc(
return ovalue;
}
/*******************************************************************************
*
* atomic_dec - atomic decrement primitive
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* RETURNS: The value from <target> prior to the decrement
*/
/**
*
* atomic_dec - atomic decrement primitive
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* RETURNS: The value from <target> prior to the decrement
*/
atomic_val_t atomic_dec(
atomic_t *target /* memory location to decrement */
@@ -188,16 +188,16 @@ atomic_val_t atomic_dec(
return ovalue;
}
/*******************************************************************************
*
* atomic_get - atomic get primitive
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* RETURNS: The value read from <target>
*/
/**
*
* atomic_get - atomic get primitive
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* RETURNS: The value read from <target>
*/
atomic_val_t atomic_get(const atomic_t *target /* memory location to read from */
)
@@ -205,15 +205,15 @@ atomic_val_t atomic_get(const atomic_t *target /* memory location to read from *
return *target;
}
/*******************************************************************************
*
* atomic_set - atomic get-and-set primitive
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*/
/**
*
* atomic_set - atomic get-and-set primitive
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*/
atomic_val_t atomic_set(
atomic_t *target, /* memory location to write to */
@@ -230,16 +230,16 @@ atomic_val_t atomic_set(
return ovalue;
}
/*******************************************************************************
*
* atomic_clear - atomic clear primitive
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* RETURNS: The previous value from <target>
*/
/**
*
* atomic_clear - atomic clear primitive
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* RETURNS: The previous value from <target>
*/
atomic_val_t atomic_clear(
atomic_t *target /* memory location to write to */
@@ -255,16 +255,16 @@ atomic_val_t atomic_clear(
return ovalue;
}
/*******************************************************************************
*
* atomic_or - atomic bitwise inclusive OR primitive
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*/
/**
*
* atomic_or - atomic bitwise inclusive OR primitive
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*/
atomic_val_t atomic_or(
atomic_t *target, /* memory location to be modified */
@@ -281,16 +281,16 @@ atomic_val_t atomic_or(
return ovalue;
}
/*******************************************************************************
*
* atomic_xor - atomic bitwise exclusive OR (XOR) primitive
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*/
/**
*
* atomic_xor - atomic bitwise exclusive OR (XOR) primitive
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*/
atomic_val_t atomic_xor(
atomic_t *target, /* memory location to be modified */
@@ -307,16 +307,16 @@ atomic_val_t atomic_xor(
return ovalue;
}
/*******************************************************************************
*
* atomic_and - atomic bitwise AND primitive
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*/
/**
*
* atomic_and - atomic bitwise AND primitive
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*/
atomic_val_t atomic_and(
atomic_t *target, /* memory location to be modified */
@@ -333,16 +333,16 @@ atomic_val_t atomic_and(
return ovalue;
}
/*******************************************************************************
*
* atomic_nand - atomic bitwise NAND primitive
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*/
/**
*
* atomic_nand - atomic bitwise NAND primitive
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* RETURNS: The previous value from <target>
*/
atomic_val_t atomic_nand(
atomic_t *target, /* memory location to be modified */

View file

@@ -34,7 +34,7 @@
DESCRIPTION
This module provides core nanokernel fiber related primitives for the IA-32
processor architecture.
*/
*/
#ifdef CONFIG_MICROKERNEL
#include <microkernel.h>
@@ -57,20 +57,20 @@ tNANO _nanokernel = {0};
void _ContextEntryWrapper(_ContextEntry, _ContextArg, _ContextArg, _ContextArg);
#endif /* CONFIG_GDB_INFO */
/*******************************************************************************
*
* _NewContextInternal - initialize a new execution context
*
* This function is utilized to initialize all execution contexts (both fiber
* and task). The 'priority' parameter will be set to -1 for the creation of
* a task context.
*
* This function is called by _NewContext() to initialize task contexts.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* _NewContextInternal - initialize a new execution context
*
* This function is utilized to initialize all execution contexts (both fiber
* and task). The 'priority' parameter will be set to -1 for the creation of
* a task context.
*
* This function is called by _NewContext() to initialize task contexts.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _NewContextInternal(
char *pStackMem, /* pointer to context stack memory */
@@ -206,64 +206,64 @@ static void _NewContextInternal(
}
#ifdef CONFIG_GDB_INFO
/*******************************************************************************
*
* _ContextEntryWrapper - adjust stack before invoking _context_entry
*
* This function adjusts the initial stack frame created by _NewContext()
* such that the GDB stack frame unwinders recognize it as the outermost frame
* in the context's stack. The function then jumps to _context_entry().
*
* GDB normally stops unwinding a stack when it detects that it has
* reached a function called main(). Kernel tasks, however, do not have
* a main() function, and there does not appear to be a simple way of stopping
* the unwinding of the stack.
*
* Given the initial context created by _NewContext(), GDB expects to find a
* return address on the stack immediately above the context entry routine
* _context_entry, in the location occupied by the initial EFLAGS.
* GDB attempts to examine the memory at this return address, which typically
* results in an invalid access to page 0 of memory.
*
* This function overwrites the initial EFLAGS with zero. When GDB subsequently
* attempts to examine memory at address zero, the PeekPoke driver detects
* an invalid access to address zero and returns an error, which causes the
* GDB stack unwinder to stop somewhat gracefully.
*
* __________________
* | param3 | <------ Top of the stack
* |__________________|
* | param2 | Stack Grows Down
* |__________________| |
* | param1 | V
* |__________________|
* | pEntry |
* |__________________|
* | initial EFLAGS | <---- ESP when invoked by _Swap()
* |__________________| (Zeroed by this routine)
* | entryRtn | <----- Context Entry Routine invoked by _Swap()
* |__________________| (This routine if GDB_INFO)
* | <edi> | \
* |__________________| |
* | <esi> | |
* |__________________| |
* | <ebx> | |---- Initial registers restored by _Swap()
* |__________________| |
* | <ebp> | |
* |__________________| |
* | <eax> | /
* |__________________|
*
*
* The initial EFLAGS cannot be overwritten until after _Swap() has swapped in
* the new context for the first time. This routine is called by _Swap() the
* first time that the new context is swapped in, and it jumps to
* _context_entry after it has done its work.
*
* RETURNS: this routine does NOT return.
*
* \NOMANUAL
*/
/**
*
* _ContextEntryWrapper - adjust stack before invoking _context_entry
*
* This function adjusts the initial stack frame created by _NewContext()
* such that the GDB stack frame unwinders recognize it as the outermost frame
* in the context's stack. The function then jumps to _context_entry().
*
* GDB normally stops unwinding a stack when it detects that it has
* reached a function called main(). Kernel tasks, however, do not have
* a main() function, and there does not appear to be a simple way of stopping
* the unwinding of the stack.
*
* Given the initial context created by _NewContext(), GDB expects to find a
* return address on the stack immediately above the context entry routine
* _context_entry, in the location occupied by the initial EFLAGS.
* GDB attempts to examine the memory at this return address, which typically
* results in an invalid access to page 0 of memory.
*
* This function overwrites the initial EFLAGS with zero. When GDB subsequently
* attempts to examine memory at address zero, the PeekPoke driver detects
* an invalid access to address zero and returns an error, which causes the
* GDB stack unwinder to stop somewhat gracefully.
*
* __________________
* | param3 | <------ Top of the stack
* |__________________|
* | param2 | Stack Grows Down
* |__________________| |
* | param1 | V
* |__________________|
* | pEntry |
* |__________________|
* | initial EFLAGS | <---- ESP when invoked by _Swap()
* |__________________| (Zeroed by this routine)
* | entryRtn | <----- Context Entry Routine invoked by _Swap()
* |__________________| (This routine if GDB_INFO)
* | <edi> | \
* |__________________| |
* | <esi> | |
* |__________________| |
* | <ebx> | |---- Initial registers restored by _Swap()
* |__________________| |
* | <ebp> | |
* |__________________| |
* | <eax> | /
* |__________________|
*
*
* The initial EFLAGS cannot be overwritten until after _Swap() has swapped in
* the new context for the first time. This routine is called by _Swap() the
* first time that the new context is swapped in, and it jumps to
* _context_entry after it has done its work.
*
* RETURNS: this routine does NOT return.
*
* \NOMANUAL
*/
__asm__("\t.globl _context_entry\n"
"\t.section .text\n"
@@ -273,20 +273,20 @@ __asm__("\t.globl _context_entry\n"
"\tjmp _context_entry\n");
#endif /* CONFIG_GDB_INFO */
/*******************************************************************************
*
* _NewContext - create a new kernel execution context
*
* This function is utilized to create execution contexts for both fiber
* contexts and kernel task contexts.
*
* The "context control block" (CCS) is carved from the "end" of the specified
* context stack memory.
*
* RETURNS: opaque pointer to initialized CCS structure
*
* \NOMANUAL
*/
/**
*
* _NewContext - create a new kernel execution context
*
* This function is utilized to create execution contexts for both fiber
* contexts and kernel task contexts.
*
* The "context control block" (CCS) is carved from the "end" of the specified
* context stack memory.
*
* RETURNS: opaque pointer to initialized CCS structure
*
* \NOMANUAL
*/
void _NewContext(
char *pStackMem, /* pointer to aligned stack memory */
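/* (The rest of the parameter list is elided by the hunk.) The "carving"
 * described above presumably places the tCCS at the highest addresses of
 * the supplied stack; a one-line sketch, with 'stackSize' as an assumed
 * parameter name:
 *
 *     tCCS *ccs = (tCCS *)(pStackMem + stackSize - sizeof(tCCS));
 */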

View file

@@ -47,7 +47,7 @@ supports the execution of the 'hlt' instruction from a guest (results in a
VM exit), and more importantly, the Hypervisor will respect the
single instruction delay slot after the 'sti' instruction as required
by nano_cpu_atomic_idle().
*/
*/
#define _ASMLANGUAGE
@@ -64,21 +64,21 @@ by nano_cpu_atomic_idle().
#ifndef CONFIG_NO_ISRS
/*******************************************************************************
*
* nano_cpu_idle - power save idle routine for IA-32
*
* This function will be called by the nanokernel idle loop or possibly within
* an implementation of _sys_power_save_idle in the microkernel when the
* '_sys_power_save_flag' variable is non-zero. The IA-32 'hlt' instruction
* will be issued causing a low-power consumption sleep mode.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_idle (void);
*/
/**
*
* nano_cpu_idle - power save idle routine for IA-32
*
* This function will be called by the nanokernel idle loop or possibly within
* an implementation of _sys_power_save_idle in the microkernel when the
* '_sys_power_save_flag' variable is non-zero. The IA-32 'hlt' instruction
* will be issued causing a low-power consumption sleep mode.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_idle (void);
*/
SECTION_FUNC(TEXT, nano_cpu_idle)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
@@ -94,31 +94,31 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
ret /* return after processing ISR */
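
The essential sequence is the classic enable-then-halt pair; a C inline-asm sketch of the technique (benchmark hooks omitted):

static inline void cpu_idle_core(void) /* sketch, not the routine above */
{
	/* 'sti' takes effect only after the next instruction, so the
	 * CPU is guaranteed to reach 'hlt' before servicing anything
	 */
	__asm__ volatile("sti; hlt" ::: "memory");
}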
/*******************************************************************************
*
* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
*
* This function is utilized by the nanokernel object "wait" APIs for task
* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
* and nano_task_fifo_get_wait().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
* in nano_task_lifo_get_wait(), for example, of the race condition that occurs
* if this requirement is not met.
*
* 2) After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'imask' input parameter.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_atomic_idle (unsigned int imask);
*/
/**
*
* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
*
* This function is utilized by the nanokernel object "wait" APIs for task
* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
* and nano_task_fifo_get_wait().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
* in nano_task_lifo_get_wait(), for example, of the race condition that occurs
* if this requirement is not met.
*
* 2) After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'imask' input parameter.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_atomic_idle (unsigned int imask);
*/
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
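/* Requirement 1 is met by the one-instruction 'sti' interrupt shadow
 * mentioned in the file header; a C sketch (imask restore only outlined):
 *
 *     static inline void cpu_atomic_idle_core(unsigned int imask)
 *     {
 *             // no interrupt can land between the enable and the halt
 *             __asm__ volatile("sti; hlt" ::: "memory");
 *             // on wakeup: re-establish the lockout state in 'imask'
 *     }
 */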

View file

@@ -74,7 +74,7 @@ an error code is present on the stack or not.
NOTE: Be sure to update the arch specific definition of the _EXC_STUB_SIZE
macro to reflect the size of the full exception stub (as shown above).
The _EXC_STUB_SIZE macro is defined in arch/x86/include/nano_private.h.
*/
*/
#include <nanokernel.h>
@@ -86,37 +86,37 @@ void _NanoCpuExcConnectAtDpl(unsigned int vector,
NANO_EXC_STUB pExcStubMem,
unsigned int dpl);
/*******************************************************************************
*
* nanoCpuExcConnect - connect a C routine to an exception
*
* This routine connects an exception handler coded in C to the specified
* interrupt vector. An exception is defined as a synchronous interrupt, i.e.
* an interrupt asserted as a direct result of program execution as opposed
* to a hardware device asserting an interrupt.
*
* When the exception specified by <vector> is asserted, the current context
* is saved on the current stack, i.e. a switch to some other stack is not
* performed, followed by executing <routine> which has the following signature:
*
* void (*routine) (NANO_ESF *pEsf)
*
* The <pExcStubMem> argument points to memory that the system can use to
* synthesize the exception stub that calls <routine>. The memory need not be
* initialized, but must be persistent (i.e. it cannot be on the caller's stack).
* Declaring a global or static variable of type NANO_EXC_STUB will provide a
* suitable area of the proper size.
*
* The handler is connected via an interrupt-gate descriptor having a
* descriptor privilege level (DPL) equal to zero.
*
* RETURNS: N/A
*
* INTERNAL
* The function prototype for nanoCpuExcConnect() only exists in nano_private.h,
* in other words, it's still considered private since the definitions for
* the NANO_ESF structures have not been completed.
*/
/**
*
* nanoCpuExcConnect - connect a C routine to an exception
*
* This routine connects an exception handler coded in C to the specified
* interrupt vector. An exception is defined as a synchronous interrupt, i.e.
* an interrupt asserted as a direct result of program execution as opposed
* to a hardware device asserting an interrupt.
*
* When the exception specified by <vector> is asserted, the current context
* is saved on the current stack, i.e. a switch to some other stack is not
* performed, followed by executing <routine> which has the following signature:
*
* void (*routine) (NANO_ESF *pEsf)
*
* The <pExcStubMem> argument points to memory that the system can use to
* synthesize the exception stub that calls <routine>. The memory need not be
* initialized, but must be persistent (i.e. it cannot be on the caller's stack).
* Declaring a global or static variable of type NANO_EXC_STUB will provide a
* suitable area of the proper size.
*
* The handler is connected via an interrupt-gate descriptor having a
* descriptor privilege level (DPL) equal to zero.
*
* RETURNS: N/A
*
* INTERNAL
* The function prototype for nanoCpuExcConnect() only exists in nano_private.h,
* in other words, it's still considered private since the definitions for
* the NANO_ESF structures have not been completed.
*/
void nanoCpuExcConnect(unsigned int vector, /* interrupt vector: 0 to 255 on
IA-32 */
@@ -126,37 +126,37 @@ void nanoCpuExcConnect(unsigned int vector, /* interrupt vector: 0 to 255 on
_NanoCpuExcConnectAtDpl(vector, routine, pExcStubMem, 0);
}
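
A hedged usage sketch of the API as documented above; the handler, stub variable and chosen vector are illustrative:

static NANO_EXC_STUB myDivZeroStub; /* persistent stub memory, per above */

static void myDivZeroHandler(NANO_ESF *pEsf)
{
	/* examine pEsf, then report, recover or abort */
}

static void myInstallHandlers(void)
{
	nanoCpuExcConnect(0, myDivZeroHandler, myDivZeroStub); /* vector 0: #DE */
}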
/*******************************************************************************
*
* _NanoCpuExcConnectAtDpl - connect a C routine to an exception
*
* This routine connects an exception handler coded in C to the specified
* interrupt vector. An exception is defined as a synchronous interrupt, i.e.
* an interrupt asserted as a direct result of program execution as opposed
* to a hardware device asserting an interrupt.
*
* When the exception specified by <vector> is asserted, the current context
* is saved on the current stack, i.e. a switch to some other stack is not
* performed, followed by executing <routine> which has the following signature:
*
* void (*routine) (NANO_ESF *pEsf)
*
* The <pExcStubMem> argument points to memory that the system can use to
* synthesize the exception stub that calls <routine>. The memory need not be
* initialized, but must be persistent (i.e. it cannot be on the caller's stack).
* Declaring a global or static variable of type NANO_EXC_STUB will provide a
* suitable area of the proper size.
*
* The handler is connected via an interrupt-gate descriptor having the supplied
* descriptor privilege level (DPL).
*
* RETURNS: N/A
*
* INTERNAL
* The function prototype for nanoCpuExcConnect() only exists in nano_private.h,
* in other words, it's still considered private since the definitions for
* the NANO_ESF structures have not been completed.
*/
/**
*
* _NanoCpuExcConnectAtDpl - connect a C routine to an exception
*
* This routine connects an exception handler coded in C to the specified
* interrupt vector. An exception is defined as a synchronous interrupt, i.e.
* an interrupt asserted as a direct result of program execution as opposed
* to a hardware device asserting an interrupt.
*
* When the exception specified by <vector> is asserted, the current context
* is saved on the current stack, i.e. a switch to some other stack is not
* performed, followed by executing <routine> which has the following signature:
*
* void (*routine) (NANO_ESF *pEsf)
*
* The <pExcStubMem> argument points to memory that the system can use to
* synthesize the exception stub that calls <routine>. The memory need not be
* initialized, but must be persistent (i.e. it cannot be on the caller's stack).
* Declaring a global or static variable of type NANO_EXC_STUB will provide a
* suitable area of the proper size.
*
* The handler is connected via an interrupt-gate descriptor having the supplied
* descriptor privilege level (DPL).
*
* RETURNS: N/A
*
* INTERNAL
* The function prototype for nanoCpuExcConnect() only exists in nano_private.h,
* in other words, it's still considered private since the definitions for
* the NANO_ESF structures have not been completed.
*/
void _NanoCpuExcConnectAtDpl(
unsigned int vector, /* interrupt vector: 0 to 255 on IA-32 */

View file

@@ -36,7 +36,7 @@ This module implements assembly routines to manage exceptions (synchronous
interrupts) on the Intel IA-32 architecture. More specifically,
exceptions are implemented in this module. The stubs are invoked when entering
and exiting a C exception handler.
*/
*/
#define _ASMLANGUAGE
@@ -57,31 +57,31 @@ and exiting a C exception handler.
/*******************************************************************************
*
* _ExcEnt - inform the kernel of an exception
*
* This function is called from the exception stub created by nanoCpuExcConnect()
* to inform the kernel of an exception. This routine currently does
* _not_ increment a context/interrupt specific exception count. Also,
* execution of the exception handler occurs on the current stack, i.e.
* _ExcEnt() does not switch to another stack. The volatile integer
* registers are saved on the stack, and control is returned to the
* exception stub.
*
* WARNINGS
*
* Host-based tools and the target-based GDB agent depend on the stack frame
* created by this routine to determine the locations of volatile registers.
* These tools must be updated to reflect any changes to the stack frame.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _ExcEnt (void);
*
*/
/**
*
* _ExcEnt - inform the kernel of an exception
*
* This function is called from the exception stub created by nanoCpuExcConnect()
* to inform the kernel of an exception. This routine currently does
* _not_ increment a context/interrupt specific exception count. Also,
* execution of the exception handler occurs on the current stack, i.e.
* _ExcEnt() does not switch to another stack. The volatile integer
* registers are saved on the stack, and control is returned back to the
* exception stub.
*
* WARNINGS
*
* Host-based tools and the target-based GDB agent depend on the stack frame
* created by this routine to determine the locations of volatile registers.
* These tools must be updated to reflect any changes to the stack frame.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _ExcEnt (void);
*
*/
SECTION_FUNC(TEXT, _ExcEnt)
@@ -215,22 +215,22 @@ BRANCH_LABEL(allDone)
jmp *%eax /* "return" back to stub */
/*******************************************************************************
*
* _ExcExit - inform the kernel of an exception exit
*
* This function is called from the exception stub created by nanoCpuExcConnect()
* to inform the kernel that the processing of an exception has
* completed. This routine restores the volatile integer registers and
* then control is returned to the interrupted context or ISR.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _ExcExit (void);
*
*/
/**
*
* _ExcExit - inform the kernel of an exception exit
*
* This function is called from the exception stub created by nanoCpuExcConnect()
* to inform the kernel that the processing of an exception has
* completed. This routine restores the volatile integer registers and
* then control is returned to the interrupted context or ISR.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _ExcExit (void);
*
*/
SECTION_FUNC(TEXT, _ExcExit)

View file

@@ -33,7 +33,7 @@
/*
DESCRIPTION
This module provides the _NanoFatalErrorHandler() routine.
*/
*/
#include <toolchain.h>
#include <sections.h>
@@ -65,21 +65,21 @@ const NANO_ESF _default_esf = {
0xdeaddead /* SS */
};
/*******************************************************************************
*
* _NanoFatalErrorHandler - nanokernel fatal error handler
*
* This routine is called when a fatal error condition is detected by either
* hardware or software.
*
* The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
/**
*
* _NanoFatalErrorHandler - nanokernel fatal error handler
*
* This routine is called when a fatal error condition is detected by either
* hardware or software.
*
* The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _NanoFatalErrorHandler(
unsigned int reason, /* reason that handler was called */

View file

@ -38,7 +38,7 @@ architecture.
INTERNAL
Inline versions of these APIs, find_last_set_inline() and find_first_set_inline(),
are defined in arch.h.
*/
*/
#define _ASMLANGUAGE
@ -51,24 +51,24 @@ are defined in arch.h.
GTEXT(find_last_set)
GTEXT(find_first_set)
/*******************************************************************************
*
* find_first_set - find first set bit searching from the LSB
*
* This routine finds the first bit set in the passed argument and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit to 32 for the most significant bit.
* A return value of zero indicates that the value passed is zero.
*
* RETURNS: bit position from 1 to 32, or 0 if the argument is zero.
*
* INTERNAL
* For Intel64 (x86_64) architectures, the 'cmovz' can be removed by
* leveraging the fact that the 'bsfl' doesn't modify the destination operand
* when the source operand is zero. The "bitpos" variable can be preloaded
* into the destination register, and the unconditional ++bitpos performed
* after the 'cmovz' then yields the correct result.
*/
/**
*
* find_first_set - find first set bit searching from the LSB
*
* This routine finds the first bit set in the passed argument and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit to 32 for the most significant bit.
* A return value of zero indicates that the value passed is zero.
*
* RETURNS: bit position from 1 to 32, or 0 if the argument is zero.
*
* INTERNAL
 * For Intel64 (x86_64) architectures, the 'cmovz' can be removed by
 * leveraging the fact that the 'bsfl' doesn't modify the destination operand
 * when the source operand is zero. The "bitpos" variable can be preloaded
 * into the destination register, and the unconditional ++bitpos performed
 * after the 'cmovz' then yields the correct result.
*/
SECTION_FUNC(TEXT, find_first_set)
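
For reference, the documented numbering convention can be modeled in C. This is an illustrative sketch, not the assembly implementation; it relies on GCC's __builtin_ffs(), which uses the same 1-based, zero-means-no-bit convention:

unsigned int find_first_set_model(unsigned int op)
{
    /* __builtin_ffs(): 1-based index of the least significant set bit,
     * or 0 when no bit is set, matching the convention documented above
     */
    return (unsigned int)__builtin_ffs((int)op);
}
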
@ -94,24 +94,24 @@ BRANCH_LABEL(ffsLsb_argNotZero) /* this label serves find_first_set() & find_las
#endif /* !CONFIG_CMOV_UNSUPPORTED */
/*******************************************************************************
*
* find_last_set - find first set bit searching from the MSB
*
* This routine finds the first bit set in the passed argument and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit to 32 for the most significant bit.
* A return value of zero indicates that the value passed is zero.
*
* RETURNS: bit position from 1 to 32, or 0 if the argument is zero.
*
* INTERNAL
* For Intel64 (x86_64) architectures, the 'cmovz' can be removed by
* leveraging the fact that the 'bsrl' doesn't modify the destination operand
* when the source operand is zero. The "bitpos" variable can be preloaded
* into the destination register, and the unconditional ++bitpos performed
* after the 'cmovz' then yields the correct result.
*/
/**
*
* find_last_set - find first set bit searching from the MSB
*
* This routine finds the first bit set in the passed argument and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit to 32 for the most significant bit.
* A return value of zero indicates that the value passed is zero.
*
* RETURNS: bit position from 1 to 32, or 0 if the argument is zero.
*
* INTERNAL
 * For Intel64 (x86_64) architectures, the 'cmovz' can be removed by
 * leveraging the fact that the 'bsrl' doesn't modify the destination operand
 * when the source operand is zero. The "bitpos" variable can be preloaded
 * into the destination register, and the unconditional ++bitpos performed
 * after the 'cmovz' then yields the correct result.
*/
SECTION_FUNC(TEXT, find_last_set)
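
A matching C sketch for the MSB search, again illustrative only; __builtin_clz() is undefined for a zero argument, so that case is guarded explicitly:

unsigned int find_last_set_model(unsigned int op)
{
    /* 32 - clz(op) is the 1-based index of the most significant set bit */
    return (op == 0) ? 0 : 32 - (unsigned int)__builtin_clz(op);
}
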

View file

@ -93,7 +93,7 @@ FP operations. All other tasks and fibers have CR0[TS] = 1 so that an attempt
to perform an FP operation will cause an exception, allowing the system to
enable FP resource sharing on its behalf.
*/
*/
#ifdef CONFIG_MICROKERNEL
#include <microkernel.h>
@ -112,74 +112,74 @@ enable FP resource sharing on its behalf.
extern uint32_t _sse_mxcsr_default_value; /* SSE control/status register default value */
#endif /* CONFIG_SSE */
/*******************************************************************************
*
* _FpCtxSave - save non-integer context information
*
* This routine saves the system's "live" non-integer context into the
* specified CCS. If the specified task or fiber supports SSE then
* x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved.
*
* RETURNS: N/A
*/
/**
*
* _FpCtxSave - save non-integer context information
*
* This routine saves the system's "live" non-integer context into the
* specified CCS. If the specified task or fiber supports SSE then
* x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved.
*
* RETURNS: N/A
*/
static void _FpCtxSave(tCCS *ccs)
{
_do_fp_ctx_save(ccs->flags & USE_SSE, &ccs->preempFloatReg);
}
/*******************************************************************************
*
* _FpCtxInit - initialize non-integer context information
*
* This routine initializes the system's "live" non-integer context.
*
* RETURNS: N/A
*/
/**
*
* _FpCtxInit - initialize non-integer context information
*
* This routine initializes the system's "live" non-integer context.
*
* RETURNS: N/A
*/
static inline void _FpCtxInit(tCCS *ccs)
{
_do_fp_ctx_init(ccs->flags & USE_SSE);
}
/*******************************************************************************
*
* _FpEnable - enable preservation of non-integer context information
*
* This routine allows the specified task/fiber (which may be the active
* task/fiber) to safely share the system's floating point registers with
* other tasks/fibers. The <options> parameter indicates which floating point
* register sets will be used by the specified task/fiber:
*
* a) USE_FP indicates x87 FPU and MMX registers only
* b) USE_SSE indicates x87 FPU and MMX and SSEx registers
*
* Invoking this routine creates a floating point context for the task/fiber
* that corresponds to an FPU that has been reset. The system will thereafter
* protect the task/fiber's FP context so that it is not altered during
* a pre-emptive context switch.
*
* WARNING
* This routine should only be used to enable floating point support for a
* task/fiber that does not currently have such support enabled already.
*
* RETURNS: N/A
*
* INTERNAL
* Since the transition from "non-FP supporting" to "FP supporting" must be done
* atomically to avoid confusing the floating point logic used by _Swap(),
* this routine locks interrupts to ensure that a context switch does not occur.
* The locking isn't really needed when the routine is called by a fiber
* (since context switching can't occur), but it is harmless and allows a single
* routine to be called by both tasks and fibers (thus saving code space).
*
* If necessary, the interrupt latency impact of calling this routine from a
* fiber could be lessened by re-designing things so that only task-type callers
* locked interrupts (i.e. move the locking to task_float_enable()). However,
* all calls to fiber_float_enable() would need to be reviewed to ensure they
* are only used from a fiber, rather than from "generic" code used by both
* tasks and fibers.
*/
/**
*
* _FpEnable - enable preservation of non-integer context information
*
* This routine allows the specified task/fiber (which may be the active
* task/fiber) to safely share the system's floating point registers with
* other tasks/fibers. The <options> parameter indicates which floating point
* register sets will be used by the specified task/fiber:
*
* a) USE_FP indicates x87 FPU and MMX registers only
* b) USE_SSE indicates x87 FPU and MMX and SSEx registers
*
* Invoking this routine creates a floating point context for the task/fiber
* that corresponds to an FPU that has been reset. The system will thereafter
* protect the task/fiber's FP context so that it is not altered during
* a pre-emptive context switch.
*
* WARNING
* This routine should only be used to enable floating point support for a
* task/fiber that does not currently have such support enabled already.
*
* RETURNS: N/A
*
* INTERNAL
* Since the transition from "non-FP supporting" to "FP supporting" must be done
* atomically to avoid confusing the floating point logic used by _Swap(),
 * this routine locks interrupts to ensure that a context switch does not occur.
* The locking isn't really needed when the routine is called by a fiber
* (since context switching can't occur), but it is harmless and allows a single
* routine to be called by both tasks and fibers (thus saving code space).
*
* If necessary, the interrupt latency impact of calling this routine from a
* fiber could be lessened by re-designing things so that only task-type callers
* locked interrupts (i.e. move the locking to task_float_enable()). However,
* all calls to fiber_float_enable() would need to be reviewed to ensure they
* are only used from a fiber, rather than from "generic" code used by both
* tasks and fibers.
*/
void _FpEnable(tCCS *ccs,
unsigned int options /* USE_FP or USE_SSE */
@ -287,63 +287,63 @@ void _FpEnable(tCCS *ccs,
irq_unlock_inline(imask);
}
/*******************************************************************************
*
* fiber_float_enable - enable preservation of non-integer context information
*
* This routine allows a fiber to permit a task/fiber (including itself) to
* safely share the system's floating point registers with other tasks/fibers.
*
* See the description of _FpEnable() for further details.
*
* RETURNS: N/A
*/
/**
*
* fiber_float_enable - enable preservation of non-integer context information
*
* This routine allows a fiber to permit a task/fiber (including itself) to
* safely share the system's floating point registers with other tasks/fibers.
*
* See the description of _FpEnable() for further details.
*
* RETURNS: N/A
*/
FUNC_ALIAS(_FpEnable, fiber_float_enable, void);
/*******************************************************************************
*
* task_float_enable - enable preservation of non-integer context information
*
* This routine allows a task to permit a task/fiber (including itself) to
* safely share the system's floating point registers with other tasks/fibers.
*
* See the description of _FpEnable() for further details.
*
* RETURNS: N/A
*/
/**
*
* task_float_enable - enable preservation of non-integer context information
*
* This routine allows a task to permit a task/fiber (including itself) to
* safely share the system's floating point registers with other tasks/fibers.
*
* See the description of _FpEnable() for further details.
*
* RETURNS: N/A
*/
FUNC_ALIAS(_FpEnable, task_float_enable, void);
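
A hypothetical usage sketch follows. The helper current_context_get() is an assumed way of obtaining the caller's tCCS pointer, not an API defined in this module:

extern tCCS *current_context_get(void);	/* assumed helper, for illustration */

void fp_task_entry(void)
{
    /* let this task share the x87/MMX and SSEx registers before using them */
    task_float_enable(current_context_get(), USE_SSE);

    /* ... floating point work can now survive preemptive switches ... */
}
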
/*******************************************************************************
*
* _FpDisable - disable preservation of non-integer context information
*
* This routine prevents the specified task/fiber (which may be the active
* task/fiber) from safely sharing any of the system's floating point registers
* with other tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* RETURNS: N/A
*
* INTERNAL
* Since the transition from "FP supporting" to "non-FP supporting" must be done
* atomically to avoid confusing the floating point logic used by _Swap(),
* this routine locks interrupts to ensure that a context switch does not occur.
* The locking isn't really needed when the routine is called by a fiber
* (since context switching can't occur), but it is harmless and allows a single
* routine to be called by both tasks and fibers (thus saving code space).
*
* If necessary, the interrupt latency impact of calling this routine from a
* fiber could be lessened by re-designing things so that only task-type callers
* locked interrupts (i.e. move the locking to task_float_disable()). However,
* all calls to fiber_float_disable() would need to be reviewed to ensure they
* are only used from a fiber, rather than from "generic" code used by both
* tasks and fibers.
*/
/**
*
* _FpDisable - disable preservation of non-integer context information
*
* This routine prevents the specified task/fiber (which may be the active
* task/fiber) from safely sharing any of the system's floating point registers
* with other tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* RETURNS: N/A
*
* INTERNAL
* Since the transition from "FP supporting" to "non-FP supporting" must be done
* atomically to avoid confusing the floating point logic used by _Swap(),
 * this routine locks interrupts to ensure that a context switch does not occur.
* The locking isn't really needed when the routine is called by a fiber
* (since context switching can't occur), but it is harmless and allows a single
* routine to be called by both tasks and fibers (thus saving code space).
*
* If necessary, the interrupt latency impact of calling this routine from a
* fiber could be lessened by re-designing things so that only task-type callers
* locked interrupts (i.e. move the locking to task_float_disable()). However,
* all calls to fiber_float_disable() would need to be reviewed to ensure they
* are only used from a fiber, rather than from "generic" code used by both
* tasks and fibers.
*/
void _FpDisable(tCCS *ccs)
{
@ -376,58 +376,58 @@ void _FpDisable(tCCS *ccs)
irq_unlock_inline(imask);
}
/*******************************************************************************
*
* fiber_float_disable - disable preservation of non-integer context information
*
* This routine allows a fiber to disallow a task/fiber (including itself) from
* safely sharing any of the system's floating point registers with other
* tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* RETURNS: N/A
*/
/**
*
 * fiber_float_disable - disable preservation of non-integer context information
*
* This routine allows a fiber to disallow a task/fiber (including itself) from
* safely sharing any of the system's floating point registers with other
* tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* RETURNS: N/A
*/
FUNC_ALIAS(_FpDisable, fiber_float_disable, void);
/*******************************************************************************
*
* task_float_disable - disable preservation of non-integer context information
*
* This routine allows a task to disallow a task/fiber (including itself) from
* safely sharing any of the system's floating point registers with other
* tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* RETURNS: N/A
*/
/**
*
* task_float_disable - disable preservation of non-integer context information
*
* This routine allows a task to disallow a task/fiber (including itself) from
* safely sharing any of the system's floating point registers with other
* tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* RETURNS: N/A
*/
FUNC_ALIAS(_FpDisable, task_float_disable, void);
#ifdef CONFIG_AUTOMATIC_FP_ENABLING
/*******************************************************************************
*
* _FpNotAvailableExcHandler - handler for "device not available" exception
*
* This routine is registered to handle the "device not available" exception
* (vector = 7) when the AUTOMATIC_FP_ENABLING configuration option has been
* selected.
*
* The processor will generate this exception if any x87 FPU, MMX, or SSEx
* instruction is executed while CR0[TS]=1. The handler then enables the
* current task or fiber with the USE_FP option (or the USE_SSE option if the
* SSE configuration option has been enabled).
*
* RETURNS: N/A
*/
/**
*
* _FpNotAvailableExcHandler - handler for "device not available" exception
*
* This routine is registered to handle the "device not available" exception
 * (vector = 7) when the AUTOMATIC_FP_ENABLING configuration option has been
 * selected.
*
* The processor will generate this exception if any x87 FPU, MMX, or SSEx
* instruction is executed while CR0[TS]=1. The handler then enables the
* current task or fiber with the USE_FP option (or the USE_SSE option if the
* SSE configuration option has been enabled).
*
* RETURNS: N/A
*/
void _FpNotAvailableExcHandler(NANO_ESF * pEsf /* not used */
)

View file

@ -34,7 +34,7 @@
DESCRIPTION
This module contains routines for updating the global descriptor table (GDT)
for the IA-32 architecture.
*/
*/
#include <linker-defs.h>
#include <toolchain.h>

View file

@ -49,7 +49,7 @@ The _IntBoiExit() routine is provided in a separate module so that it gets
included in the final image only if an interrupt controller driver utilizing
_IntBoiExit() is present.
*/
*/
#define _ASMLANGUAGE
#include <arch/x86/asm.h>
@ -64,18 +64,18 @@ _IntBoiExit() is present.
GTEXT(_IntExit)
/*******************************************************************************
*
* _IntBoiExit - exit interrupt handler stub without invoking ISR
*
* This routine exits an interrupt handler stub without invoking the associated
* ISR handler (or the EOI handler, if present). It should only be jumped to
* by an interrupt controller driver's BOI routine, and only if the BOI routine
* is passed a single parameter by the interrupt stub.
*
* \INTERNAL
* A BOI routine that has no parameters can jump directly to _IntExit().
*/
/**
*
* _IntBoiExit - exit interrupt handler stub without invoking ISR
*
* This routine exits an interrupt handler stub without invoking the associated
* ISR handler (or the EOI handler, if present). It should only be jumped to
* by an interrupt controller driver's BOI routine, and only if the BOI routine
* is passed a single parameter by the interrupt stub.
*
* \INTERNAL
* A BOI routine that has no parameters can jump directly to _IntExit().
*/
SECTION_FUNC(TEXT, _IntBoiExit)
addl $4, %esp /* pop off the $BoiParameter */

View file

@ -85,7 +85,7 @@ NOTE: Be sure to update the arch specific definition of the _INT_STUB_SIZE macro
to reflect the maximum potential size of the interrupt stub (as shown above).
The _INT_STUB_SIZE macro is defined in include/nanokernel/x86/arch.h.
*/
*/
#ifndef CONFIG_NO_ISRS
@ -159,7 +159,7 @@ static NANO_INT_STUB dynamic_stubs[ALL_DYNAMIC_STUBS] = {
[0 ... (ALL_DYNAMIC_STUBS - 1)] = { _STUB_AVAIL, }
};
/*******************************************************************************
/**
* _int_stub_alloc - allocate dynamic interrupt stub
*
* RETURNS: index of the first available element of the STUB array or -1
@ -179,28 +179,28 @@ static int _int_stub_alloc(void)
}
#endif /* ALL_DYNAMIC_STUBS > 0 */
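
The first-fit scan described above can be sketched in C as follows; this assumes, as the initializer suggests, that an available stub is marked by _STUB_AVAIL in its first byte:

static int stub_alloc_model(void)
{
    for (int i = 0; i < ALL_DYNAMIC_STUBS; i++) {
        if (dynamic_stubs[i][0] == _STUB_AVAIL) {
            return i;	/* index of the first available stub */
        }
    }
    return -1;		/* no free stub remains */
}
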
/*******************************************************************************
*
* _IntVecSet - connect a routine to an interrupt vector
*
* This routine "connects" the specified <routine> to the specified interrupt
* <vector>. On the IA-32 architecture, an interrupt vector is a value from
* 0 to 255. This routine merely fills in the appropriate interrupt
* descriptor table (IDT) with an interrupt-gate descriptor such that <routine>
* is invoked when interrupt <vector> is asserted. The <dpl> argument specifies
* the privilege level for the interrupt-gate descriptor; (hardware) interrupts
* and exceptions should specify a level of 0, whereas handlers for user-mode
* software generated interrupts should specify 3.
*
* RETURNS: N/A
*
* INTERNAL
* Unlike nanoCpuExcConnect() and irq_connect(), the _IntVecSet() routine
* is a very basic API that simply updates the appropriate entry in Interrupt
* Descriptor Table (IDT) such that the specified routine is invoked when the
* specified interrupt vector is asserted.
*
*/
/**
*
* _IntVecSet - connect a routine to an interrupt vector
*
* This routine "connects" the specified <routine> to the specified interrupt
* <vector>. On the IA-32 architecture, an interrupt vector is a value from
* 0 to 255. This routine merely fills in the appropriate interrupt
* descriptor table (IDT) with an interrupt-gate descriptor such that <routine>
* is invoked when interrupt <vector> is asserted. The <dpl> argument specifies
* the privilege level for the interrupt-gate descriptor; (hardware) interrupts
* and exceptions should specify a level of 0, whereas handlers for user-mode
* software generated interrupts should specify 3.
*
* RETURNS: N/A
*
* INTERNAL
* Unlike nanoCpuExcConnect() and irq_connect(), the _IntVecSet() routine
* is a very basic API that simply updates the appropriate entry in Interrupt
* Descriptor Table (IDT) such that the specified routine is invoked when the
* specified interrupt vector is asserted.
*
*/
void _IntVecSet(
unsigned int vector, /* interrupt vector: 0 to 255 on IA-32 */
@ -233,53 +233,53 @@ void _IntVecSet(
* generates an error
*/
#if ALL_DYNAMIC_STUBS > 0
/*******************************************************************************
*
* irq_connect - connect a C routine to a hardware interrupt
*
* This routine connects an interrupt service routine (ISR) coded in C to
* the specified hardware <irq>. An interrupt vector will be allocated to
* satisfy the specified <priority>. If the interrupt service routine is being
* connected to a software generated interrupt, then <irq> must be set to
* NANO_SOFT_IRQ.
*
* The specified <irq> represents a virtualized IRQ, i.e. it does not
* necessarily represent a specific IRQ line on a given interrupt controller
* device. The BSP presents a virtualized set of IRQs from 0 to N, where N
* is the total number of IRQs supported by all the interrupt controller devices
* on the board. See the BSP's documentation for the mapping of virtualized
* IRQ to physical IRQ.
*
* When the device asserts an interrupt on the specified <irq>, a switch to
* the interrupt stack is performed (if not already executing on the interrupt
* stack), followed by saving the integer (i.e. non-floating point) context of
* the currently executing task, fiber, or ISR. The ISR specified by <routine>
* will then be invoked with the single <parameter>. When the ISR returns, a
* context switch may occur.
*
* The routine searches for the first available element in the dynamic_stubs
* array and uses it for the stub.
*
* RETURNS: the allocated interrupt vector
*
* WARNINGS
* Some boards utilize interrupt controllers where the interrupt vector
* cannot be programmed on an IRQ basis; as a result, the vector assigned
* to the <irq> during interrupt controller initialization will be returned.
* In these cases, the requested <priority> is not honoured since the interrupt
* prioritization is fixed by the interrupt controller (e.g. IRQ0 will always
* be the highest priority interrupt regardless of what interrupt vector
* was assigned to IRQ0).
*
* This routine does not perform range checking on the requested <priority>
* and thus, depending on the underlying interrupt controller, may result
* in the assignment of an interrupt vector located in the reserved range of
* the processor.
*
* INTERNAL
* For debug kernels, this routine shall return -1 when there are no
* vectors remaining in the specified <priority> level.
*/
/**
*
* irq_connect - connect a C routine to a hardware interrupt
*
* This routine connects an interrupt service routine (ISR) coded in C to
* the specified hardware <irq>. An interrupt vector will be allocated to
* satisfy the specified <priority>. If the interrupt service routine is being
* connected to a software generated interrupt, then <irq> must be set to
* NANO_SOFT_IRQ.
*
* The specified <irq> represents a virtualized IRQ, i.e. it does not
* necessarily represent a specific IRQ line on a given interrupt controller
* device. The BSP presents a virtualized set of IRQs from 0 to N, where N
* is the total number of IRQs supported by all the interrupt controller devices
* on the board. See the BSP's documentation for the mapping of virtualized
* IRQ to physical IRQ.
*
* When the device asserts an interrupt on the specified <irq>, a switch to
* the interrupt stack is performed (if not already executing on the interrupt
* stack), followed by saving the integer (i.e. non-floating point) context of
* the currently executing task, fiber, or ISR. The ISR specified by <routine>
* will then be invoked with the single <parameter>. When the ISR returns, a
* context switch may occur.
*
 * The routine searches for the first available element in the dynamic_stubs
* array and uses it for the stub.
*
* RETURNS: the allocated interrupt vector
*
* WARNINGS
* Some boards utilize interrupt controllers where the interrupt vector
* cannot be programmed on an IRQ basis; as a result, the vector assigned
* to the <irq> during interrupt controller initialization will be returned.
* In these cases, the requested <priority> is not honoured since the interrupt
* prioritization is fixed by the interrupt controller (e.g. IRQ0 will always
* be the highest priority interrupt regardless of what interrupt vector
* was assigned to IRQ0).
*
* This routine does not perform range checking on the requested <priority>
* and thus, depending on the underlying interrupt controller, may result
* in the assignment of an interrupt vector located in the reserved range of
* the processor.
*
* INTERNAL
* For debug kernels, this routine shall return -1 when there are no
* vectors remaining in the specified <priority> level.
*/
int irq_connect(
unsigned int irq, /* virtualized IRQ to connect to */
@ -478,36 +478,36 @@ int irq_connect(
}
#endif /* ALL_DYNAMIC_STUBS > 0 */
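
A hypothetical usage sketch, assuming the elided parameters follow the order implied by the description above (priority, ISR routine, ISR parameter); the device names and IRQ number are placeholders:

#define MY_DEV_IRQ 5			/* placeholder virtualized IRQ */

static void my_dev_isr(void *arg)
{
    /* acknowledge and service the device here */
}

void my_dev_init(void)
{
    int vector = irq_connect(MY_DEV_IRQ, 3, my_dev_isr, (void *)0);
    /* <vector> is the interrupt vector that was actually allocated */
}
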
/*******************************************************************************
*
* _IntVecAlloc - allocate a free interrupt vector given <priority>
*
* This routine scans the interrupt_vectors_allocated[] array for a free vector that
* satisfies the specified <priority>. It is a utility function for use only
* by a BSP's _SysIntVecAlloc() routine.
*
* This routine assumes that the relationship between interrupt priority and
* interrupt vector is:
*
* priority = vector / 16;
*
* Since vectors 0 to 31 are reserved by the IA-32 architecture, the priorities
* of user defined interrupts range from 2 to 15. Each interrupt priority level
* contains 16 vectors, and the prioritization of interrupts within a priority
* level is determined by the vector number; the higher the vector number, the
* higher the priority within that priority level.
*
* It is also assumed that the interrupt controllers are capable of managing
* interrupt requests on a per-vector level as opposed to a per-priority level.
* For example, on the local APIC of Pentium4 and later processors, the in-service
* register (ISR) and the interrupt request register (IRR) are 256 bits wide.
*
* RETURNS: allocated interrupt vector
*
* INTERNAL
* For debug kernels, this routine shall return -1 when there are no
* vectors remaining in the specified <priority> level.
*/
/**
*
* _IntVecAlloc - allocate a free interrupt vector given <priority>
*
* This routine scans the interrupt_vectors_allocated[] array for a free vector that
* satisfies the specified <priority>. It is a utility function for use only
* by a BSP's _SysIntVecAlloc() routine.
*
* This routine assumes that the relationship between interrupt priority and
 * interrupt vector is:
*
* priority = vector / 16;
*
* Since vectors 0 to 31 are reserved by the IA-32 architecture, the priorities
* of user defined interrupts range from 2 to 15. Each interrupt priority level
* contains 16 vectors, and the prioritization of interrupts within a priority
* level is determined by the vector number; the higher the vector number, the
* higher the priority within that priority level.
*
* It is also assumed that the interrupt controllers are capable of managing
* interrupt requests on a per-vector level as opposed to a per-priority level.
 * For example, on the local APIC of Pentium4 and later processors, the in-service
* register (ISR) and the interrupt request register (IRR) are 256 bits wide.
*
* RETURNS: allocated interrupt vector
*
* INTERNAL
* For debug kernels, this routine shall return -1 when there are no
* vectors remaining in the specified <priority> level.
*/
int _IntVecAlloc(unsigned int priority)
{
@ -601,18 +601,18 @@ int _IntVecAlloc(unsigned int priority)
return vector;
}
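
The documented priority/vector relationship implies that each priority level owns a block of 16 vectors; a small illustration:

/* priority p maps to vectors p*16 .. p*16 + 15, e.g. priority 2 -> 32..47;
 * within a block, a higher vector number means higher priority
 */
unsigned int lowest_vector_for(unsigned int priority)
{
    return priority * 16;
}

unsigned int highest_vector_for(unsigned int priority)
{
    return priority * 16 + 15;
}
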
/*******************************************************************************
*
* _IntVecMarkAllocated - mark interrupt vector as allocated
*
* This routine is used to "reserve" an interrupt vector that is allocated
* or assigned by any means other than _IntVecAllocate(). This marks the vector
* as allocated so that any future invocations of _IntVecAllocate() will not
* return that vector.
*
* RETURNS: N/A
*
*/
/**
*
* _IntVecMarkAllocated - mark interrupt vector as allocated
*
* This routine is used to "reserve" an interrupt vector that is allocated
* or assigned by any means other than _IntVecAllocate(). This marks the vector
* as allocated so that any future invocations of _IntVecAllocate() will not
* return that vector.
*
* RETURNS: N/A
*
*/
void _IntVecMarkAllocated(unsigned int vector)
{
@ -625,15 +625,15 @@ void _IntVecMarkAllocated(unsigned int vector)
irq_unlock(imask);
}
/*******************************************************************************
*
* _IntVecMarkFree - mark interrupt vector as free
*
* This routine is used to "free" an interrupt vector that is allocated
* or assigned using _IntVecAllocate() or _IntVecMarkAllocated(). This marks the
* vector as available so that any future allocations can return that vector.
*
*/
/**
*
* _IntVecMarkFree - mark interrupt vector as free
*
* This routine is used to "free" an interrupt vector that is allocated
* or assigned using _IntVecAllocate() or _IntVecMarkAllocated(). This marks the
* vector as available so that any future allocations can return that vector.
*
*/
void _IntVecMarkFree(unsigned int vector)
{

View file

@ -31,15 +31,15 @@
*/
/*
* DESCRIPTION
* This module contains the irq_handler_set() API. This routine is closely
* associated with irq_connect(), and any changes to the layout of the
* constructed interrupt stub must be reflected in both places.
*
* INTERNAL
* This routine is defined here, rather than in intconnect.c, so that it can be
* omitted from a system image if it isn't required.
*/
* DESCRIPTION
* This module contains the irq_handler_set() API. This routine is closely
* associated with irq_connect(), and any changes to the layout of the
* constructed interrupt stub must be reflected in both places.
*
* INTERNAL
* This routine is defined here, rather than in intconnect.c, so that it can be
* omitted from a system image if it isn't required.
*/
#include <nano_private.h>
@ -57,28 +57,28 @@ extern unsigned char _idt_base_address[];
#define FIRST_OPT_OPCODE_OFF 5
/*******************************************************************************
*
* irq_handler_set - set the handler in an already connected stub
*
* This routine is used to modify an already fully constructed interrupt stub
* to specify a new <routine> and/or <parameter>.
*
* WARNINGS:
*
* A fully constructed interrupt stub is generated via irq_connect(), i.e.
* the irq_handler_set() function must only be called after invoking
* irq_connect().
*
* The caller must ensure that the associated interrupt does not occur while
* this routine is executing, otherwise race conditions may arise that could
* cause the interrupt stub to invoke the handler using an incorrect routine
* and/or parameter. If possible, silence the source of the associated interrupt
* only, rather than locking out all interrupts.
*
* RETURNS: N/A
*
*/
/**
*
* irq_handler_set - set the handler in an already connected stub
*
* This routine is used to modify an already fully constructed interrupt stub
* to specify a new <routine> and/or <parameter>.
*
* WARNINGS:
*
* A fully constructed interrupt stub is generated via irq_connect(), i.e.
* the irq_handler_set() function must only be called after invoking
* irq_connect().
*
* The caller must ensure that the associated interrupt does not occur while
* this routine is executing, otherwise race conditions may arise that could
* cause the interrupt stub to invoke the handler using an incorrect routine
* and/or parameter. If possible, silence the source of the associated interrupt
* only, rather than locking out all interrupts.
*
* RETURNS: N/A
*
*/
void irq_handler_set(unsigned int vector,
void (*oldRoutine)(void *parameter),

View file

@ -36,7 +36,7 @@ This module implements assembly routines to manage interrupts on
the Intel IA-32 architecture. More specifically, the interrupt (asynchronous
exception) stubs are implemented in this module. The stubs are invoked when
entering and exiting a C interrupt handler.
*/
*/
#define _ASMLANGUAGE
@ -74,41 +74,41 @@ entering and exiting a C interrupt handler.
GTEXT(_int_latency_start)
GTEXT(_int_latency_stop)
#endif
/*******************************************************************************
*
* _IntEnt - inform the kernel of an interrupt
*
* This function is called from the interrupt stub created by irq_connect()
* to inform the kernel of an interrupt. This routine increments
* _nanokernel.nested (to support interrupt nesting), switches to the
* base of the interrupt stack, if not already on the interrupt stack, and then
* saves the volatile integer registers onto the stack. Finally, control is
* returned back to the interrupt stub code (which will then invoke the
* "application" interrupt service routine).
*
* Only the volatile integer registers are saved since ISRs are assumed not to
* utilize floating point (or SSE) instructions. If an ISR requires the usage
* of floating point (or SSE) instructions, it must first invoke nanoCpuFpSave()
* (or nanoCpuSseSave()) at the beginning of the ISR. A subsequent
* nanoCpuFpRestore() (or nanoCpuSseRestore()) is needed just prior to returning
* from the ISR. Note that the nanoCpuFpSave(), nanoCpuSseSave(),
* nanoCpuFpRestore(), and nanoCpuSseRestore() APIs have not been
* implemented yet.
*
* WARNINGS
*
* Host-based tools and the target-based GDB agent depend on the stack frame
* created by this routine to determine the locations of volatile registers.
* These tools must be updated to reflect any changes to the stack frame.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _IntEnt (void);
*
* NOMANUAL
*/
/**
*
* _IntEnt - inform the kernel of an interrupt
*
* This function is called from the interrupt stub created by irq_connect()
* to inform the kernel of an interrupt. This routine increments
* _nanokernel.nested (to support interrupt nesting), switches to the
* base of the interrupt stack, if not already on the interrupt stack, and then
* saves the volatile integer registers onto the stack. Finally, control is
* returned back to the interrupt stub code (which will then invoke the
* "application" interrupt service routine).
*
* Only the volatile integer registers are saved since ISRs are assumed not to
* utilize floating point (or SSE) instructions. If an ISR requires the usage
* of floating point (or SSE) instructions, it must first invoke nanoCpuFpSave()
* (or nanoCpuSseSave()) at the beginning of the ISR. A subsequent
* nanoCpuFpRestore() (or nanoCpuSseRestore()) is needed just prior to returning
* from the ISR. Note that the nanoCpuFpSave(), nanoCpuSseSave(),
* nanoCpuFpRestore(), and nanoCpuSseRestore() APIs have not been
* implemented yet.
*
* WARNINGS
*
* Host-based tools and the target-based GDB agent depend on the stack frame
* created by this routine to determine the locations of volatile registers.
* These tools must be updated to reflect any changes to the stack frame.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _IntEnt (void);
*
* NOMANUAL
*/
SECTION_FUNC(TEXT, _IntEnt)
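
The entry bookkeeping described above can be expressed as C pseudocode; _nanokernel.nested is the field named in the comment, and the actual stack switch is of course done in assembly:

static void int_ent_model(void)
{
    if (_nanokernel.nested++ == 0) {
        /* outermost interrupt: switch to the base of the interrupt
         * stack before saving the volatile integer registers
         */
    }
    /* save the volatile registers, then return to the stub, which
     * invokes the application ISR
     */
}
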
@ -240,29 +240,29 @@ BRANCH_LABEL(_HandleIdle)
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
/*******************************************************************************
*
* _IntExit - inform the kernel of an interrupt exit
*
* This function is called from the interrupt stub created by irq_connect()
* to inform the kernel that the processing of an interrupt has
* completed. This routine decrements _nanokernel.nested (to support interrupt
* nesting), restores the volatile integer registers, and then switches
* back to the interrupted context's stack, if this isn't a nested interrupt.
*
* Finally, control is returned back to the interrupted fiber context or ISR.
* A context switch _may_ occur if the interrupted context was a task context,
* in which case one or more other fiber and task contexts will execute before
* this routine resumes and control gets returned to the interrupted task.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _IntExit (void);
*
* NOMANUAL
*/
/**
*
* _IntExit - inform the kernel of an interrupt exit
*
* This function is called from the interrupt stub created by irq_connect()
* to inform the kernel that the processing of an interrupt has
* completed. This routine decrements _nanokernel.nested (to support interrupt
* nesting), restores the volatile integer registers, and then switches
* back to the interrupted context's stack, if this isn't a nested interrupt.
*
* Finally, control is returned back to the interrupted fiber context or ISR.
* A context switch _may_ occur if the interrupted context was a task context,
* in which case one or more other fiber and task contexts will execute before
* this routine resumes and control gets returned to the interrupted task.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _IntExit (void);
*
* NOMANUAL
*/
SECTION_FUNC(TEXT, _IntExit)
@ -388,38 +388,38 @@ BRANCH_LABEL(nestedInterrupt)
iret
/*******************************************************************************
*
* _SpuriousIntHandler -
* _SpuriousIntNoErrCodeHandler - spurious interrupt handler stubs
*
* Interrupt-gate descriptors are statically created for all slots in the IDT
* that point to _SpuriousIntHandler() or _SpuriousIntNoErrCodeHandler(). The
* former stub is connected to exception vectors where the processor pushes an
* error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP
* records.
*
* A spurious interrupt is considered a fatal condition, thus this routine
* merely sets up the 'reason' and 'pEsf' parameters to the BSP provided
* routine: _SysFatalHwErrorHandler(). In other words, there is no provision
* to return to the interrupted context and thus the volatile registers
* are not saved.
*
* RETURNS: Never returns
*
* C function prototype:
*
* void _SpuriousIntHandler (void);
*
* INTERNAL
* The _IntVecSet() routine creates an interrupt-gate descriptor for all
* connections. The processor will automatically clear the IF bit
* in the EFLAGS register upon execution of the handler,
* thus _SpuriousIntNoErrCodeHandler()/_SpuriousIntHandler() shall be
* invoked with interrupts disabled.
*
* NOMANUAL
*/
/**
*
* _SpuriousIntHandler -
* _SpuriousIntNoErrCodeHandler - spurious interrupt handler stubs
*
* Interrupt-gate descriptors are statically created for all slots in the IDT
* that point to _SpuriousIntHandler() or _SpuriousIntNoErrCodeHandler(). The
* former stub is connected to exception vectors where the processor pushes an
* error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP
* records.
*
* A spurious interrupt is considered a fatal condition, thus this routine
* merely sets up the 'reason' and 'pEsf' parameters to the BSP provided
* routine: _SysFatalHwErrorHandler(). In other words, there is no provision
* to return to the interrupted context and thus the volatile registers
* are not saved.
*
* RETURNS: Never returns
*
* C function prototype:
*
* void _SpuriousIntHandler (void);
*
* INTERNAL
* The _IntVecSet() routine creates an interrupt-gate descriptor for all
* connections. The processor will automatically clear the IF bit
* in the EFLAGS register upon execution of the handler,
* thus _SpuriousIntNoErrCodeHandler()/_SpuriousIntHandler() shall be
* invoked with interrupts disabled.
*
* NOMANUAL
*/
SECTION_FUNC(TEXT, _SpuriousIntNoErrCodeHandler)
@ -462,34 +462,34 @@ BRANCH_LABEL(callFatalHandler)
jmp callFatalHandler
/*******************************************************************************
*
* irq_lock - disable interrupts on the local CPU
*
* This routine disables interrupts. It can be called from either interrupt
* or context level. This routine returns an architecture-dependent
* lock-out key representing the "interrupt disable state" prior to the call;
* this key can be passed to fiber_enable_ints() to re-enable interrupts.
*
* The lock-out key should only be used as the argument to the
* fiber_enable_ints() API. It should never be used to manually re-enable
* interrupts or to inspect or manipulate the contents of the source register.
*
* WARNINGS
* Invoking a kernel routine with interrupts locked may result in
* interrupts being re-enabled for an unspecified period of time. If the
* called routine blocks, interrupts will be re-enabled while another
* context executes, or while the system is idle.
*
* The "interrupt disable state" is an attribute of a context, i.e. it's part
* of the context's state. Thus, if a context disables interrupts and
* subsequently invokes a kernel routine that causes the calling context
* to block, the interrupt disable state will be restored when the context is
* later rescheduled for execution.
*
* RETURNS: An architecture-dependent lock-out key representing the
* "interrupt disable state" prior to the call.
*/
/**
*
* irq_lock - disable interrupts on the local CPU
*
* This routine disables interrupts. It can be called from either interrupt
* or context level. This routine returns an architecture-dependent
* lock-out key representing the "interrupt disable state" prior to the call;
* this key can be passed to fiber_enable_ints() to re-enable interrupts.
*
* The lock-out key should only be used as the argument to the
* fiber_enable_ints() API. It should never be used to manually re-enable
* interrupts or to inspect or manipulate the contents of the source register.
*
* WARNINGS
* Invoking a kernel routine with interrupts locked may result in
* interrupts being re-enabled for an unspecified period of time. If the
* called routine blocks, interrupts will be re-enabled while another
* context executes, or while the system is idle.
*
* The "interrupt disable state" is an attribute of a context, i.e. it's part
 * of the context's state. Thus, if a context disables interrupts and
* subsequently invokes a kernel routine that causes the calling context
* to block, the interrupt disable state will be restored when the context is
* later rescheduled for execution.
*
* RETURNS: An architecture-dependent lock-out key representing the
* "interrupt disable state" prior to the call.
*/
SECTION_FUNC(TEXT, irq_lock)
pushfl
@ -501,16 +501,16 @@ SECTION_FUNC(TEXT, irq_lock)
ret
/*******************************************************************************
*
* irq_unlock - enable interrupts on the local CPU
*
* This routine re-enables interrupts on the local CPU. The <key> parameter
* is an architecture-dependent lock-out key that is returned by a previous
* invocation of irq_lock().
*
* This routine can be called from either a context or ISR context.
*/
/**
*
* irq_unlock - enable interrupts on the local CPU
*
* This routine re-enables interrupts on the local CPU. The <key> parameter
* is an architecture-dependent lock-out key that is returned by a previous
* invocation of irq_lock().
*
* This routine can be called from either a context or ISR context.
*/
SECTION_FUNC(TEXT, irq_unlock)
testl $0x200, SP_ARG1(%esp)
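
Taken together, irq_lock() and irq_unlock() bracket a critical section. A usage sketch, with shared_counter standing in for state that an ISR also touches (the key is shown as unsigned int, matching its description as an opaque lock-out key):

volatile int shared_counter;		/* placeholder shared state */

void bump_shared_counter(void)
{
    unsigned int key = irq_lock();
    shared_counter++;		/* cannot be interrupted here */
    irq_unlock(key);		/* restore the previous disable state */
}
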

View file

@ -34,7 +34,7 @@
DESCRIPTION
This module provides the implementation of the _MsrWrite() and _MsrRead()
utilities.
*/
*/
#define _ASMLANGUAGE
@ -45,28 +45,28 @@ utilities.
GTEXT(_MsrWrite)
GTEXT(_MsrRead)
/*******************************************************************************
*
* _MsrWrite - write to a model specific register (MSR)
*
* This function is used to write to an MSR.
*
* C function prototype:
*
* void _MsrWrite (unsigned int msr, uint64_t msrData);
*
* The definitions of the so-called "Architectural MSRs" are contained
* in nano_private.h and have the format: IA32_XXX_MSR
*
* INTERNAL
* 1) The 'wrmsr' instruction was introduced in the Pentium processor; executing
* this instruction on an earlier IA-32 processor will result in an invalid
* opcode exception.
* 2) The 'wrmsr' uses the ECX, EDX, and EAX registers which matches the set of
* volatile registers!
*
* RETURNS: N/A
*/
/**
*
* _MsrWrite - write to a model specific register (MSR)
*
* This function is used to write to an MSR.
*
* C function prototype:
*
* void _MsrWrite (unsigned int msr, uint64_t msrData);
*
* The definitions of the so-called "Architectural MSRs" are contained
* in nano_private.h and have the format: IA32_XXX_MSR
*
* INTERNAL
* 1) The 'wrmsr' instruction was introduced in the Pentium processor; executing
* this instruction on an earlier IA-32 processor will result in an invalid
* opcode exception.
* 2) The 'wrmsr' uses the ECX, EDX, and EAX registers which matches the set of
* volatile registers!
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _MsrWrite)
movl SP_ARG1(%esp), %ecx /* load ECX with <msr> */
@ -76,28 +76,28 @@ SECTION_FUNC(TEXT, _MsrWrite)
ret
/*******************************************************************************
*
* _MsrRead - read from a model specific register (MSR)
*
* This function is used to read from an MSR.
*
* C function prototype:
*
* uint64_t _MsrRead (unsigned int msr);
*
* The definitions of the so-called "Architectural MSRs" are contained
* in nano_private.h and have the format: IA32_XXX_MSR
*
* INTERNAL
* 1) The 'rdmsr' instruction was introduced in the Pentium processor; executing
* this instruction on an earlier IA-32 processor will result in an invalid
* opcode exception.
* 2) The 'rdmsr' uses the ECX, EDX, and EAX registers which matches the set of
* volatile registers!
*
* RETURNS: the 64-bit value read from the specified MSR
*/
/**
*
* _MsrRead - read from a model specific register (MSR)
*
* This function is used to read from an MSR.
*
* C function prototype:
*
* uint64_t _MsrRead (unsigned int msr);
*
* The definitions of the so-called "Architectural MSRs" are contained
* in nano_private.h and have the format: IA32_XXX_MSR
*
* INTERNAL
* 1) The 'rdmsr' instruction was introduced in the Pentium processor; executing
* this instruction on an earlier IA-32 processor will result in an invalid
* opcode exception.
* 2) The 'rdmsr' uses the ECX, EDX, and EAX registers which matches the set of
* volatile registers!
*
 * RETURNS: the 64-bit value read from the specified MSR
*/
SECTION_FUNC(TEXT, _MsrRead)
movl SP_ARG1(%esp), %ecx /* load ECX with <msr> */
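
A usage sketch built on the two documented prototypes; IA32_EXAMPLE_MSR is a placeholder for one of the IA32_XXX_MSR definitions in nano_private.h, not a real name from that header:

#include <stdint.h>

#define IA32_EXAMPLE_MSR 0x10		/* placeholder MSR number */

void msr_roundtrip(void)
{
    uint64_t value = _MsrRead(IA32_EXAMPLE_MSR);
    _MsrWrite(IA32_EXAMPLE_MSR, value);	/* write the same value back */
}
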

View file

@ -45,7 +45,7 @@ Typically, only those members that are accessed by assembly language routines
are defined; however, it doesn't hurt to define all fields for the sake of
completeness.
*/
*/
#include <gen_offset.h> /* located in kernel/arch/common/include */

View file

@ -39,7 +39,7 @@ a representation of the save stack frame generated by _Swap() in order
to generate offsets (in the form of absolute symbols) for consumption by
host tools. Please update swapstk.h if changing the structure of the
save frame on the stack.
*/
*/
#define _ASMLANGUAGE
@ -54,58 +54,58 @@ save frame on the stack.
/* externs */
/*******************************************************************************
*
* _Swap - initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
* a cooperative context switch. Prior to invoking _Swap(), the
* caller disables interrupts (via irq_lock) and the return 'key'
* is passed as a parameter to _Swap(). The 'key' actually represents
* the EFLAGS register prior to disabling interrupts via a 'cli' instruction.
*
* Given that _Swap() is called to effect a cooperative context switch,
* only the non-volatile integer registers need to be saved in the tCCS of the
* outgoing context. The restoration of the integer registers of the incoming
* context depends on whether that context was preemptively context switched
* out. The INT_ACTIVE and EXC_ACTIVE bits in the tCCS->flags field will signify
* that the context was preemptively context switched out, and thus both the
* volatile and non-volatile integer registers need to be restored.
*
* The volatile registers need to be scrubbed to ensure they contain no
* sensitive information that could compromise system security. This is to
* make sure that information will not be leaked from one application to
* another via these volatile registers.
*
* Here, the integer registers (EAX, ECX, EDX) have been scrubbed. Any changes
* to this routine that alter the values of these registers MUST be reviewed
* for potential security impacts.
*
* Floating point registers are handled using a lazy save/restore
* mechanism since it's expected relatively few contexts will be created
* with the USE_FP or USE_SSE option bits. The nanokernel data structure
* maintains a 'current_fp' field to keep track of the context that "owns"
* the floating point registers. Floating point registers consist of
* ST0->ST7 (x87 FPU and MMX registers) and XMM0 -> XMM7.
*
* All floating point registers are considered 'volatile' thus they will
* only be saved/restored when a preemptive context switch occurs.
*
* Floating point registers are currently NOT scrubbed, and are subject to
* potential security leaks.
*
* The scheduling algorithm is simple: schedule the head of the runnable
* FIBER context list, which is represented by _nanokernel.fiber. If there are
* no runnable FIBER contexts, then schedule the TASK context represented
* by _nanokernel.task. The _nanokernel.task field will never be NULL.
*
* RETURNS: may contain a return value set up by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int eflags);
*
*/
/**
*
* _Swap - initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
 * a cooperative context switch. Prior to invoking _Swap(), the
* caller disables interrupts (via irq_lock) and the return 'key'
* is passed as a parameter to _Swap(). The 'key' actually represents
* the EFLAGS register prior to disabling interrupts via a 'cli' instruction.
*
 * Given that _Swap() is called to effect a cooperative context switch,
* only the non-volatile integer registers need to be saved in the tCCS of the
* outgoing context. The restoration of the integer registers of the incoming
* context depends on whether that context was preemptively context switched
* out. The INT_ACTIVE and EXC_ACTIVE bits in the tCCS->flags field will signify
* that the context was preemptively context switched out, and thus both the
* volatile and non-volatile integer registers need to be restored.
*
 * The volatile registers need to be scrubbed to ensure they contain no
* sensitive information that could compromise system security. This is to
* make sure that information will not be leaked from one application to
* another via these volatile registers.
*
* Here, the integer registers (EAX, ECX, EDX) have been scrubbed. Any changes
* to this routine that alter the values of these registers MUST be reviewed
* for potential security impacts.
*
* Floating point registers are handled using a lazy save/restore
* mechanism since it's expected relatively few contexts will be created
* with the USE_FP or USE_SSE option bits. The nanokernel data structure
* maintains a 'current_fp' field to keep track of the context that "owns"
* the floating point registers. Floating point registers consist of
* ST0->ST7 (x87 FPU and MMX registers) and XMM0 -> XMM7.
*
* All floating point registers are considered 'volatile' thus they will
 * only be saved/restored when a preemptive context switch occurs.
*
* Floating point registers are currently NOT scrubbed, and are subject to
* potential security leaks.
*
* The scheduling algorithm is simple: schedule the head of the runnable
* FIBER context list, which is represented by _nanokernel.fiber. If there are
* no runnable FIBER contexts, then schedule the TASK context represented
* by _nanokernel.task. The _nanokernel.task field will never be NULL.
*
 * RETURNS: may contain a return value set up by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int eflags);
*
*/
SECTION_FUNC(TEXT, _Swap)
movl $_nanokernel, %eax
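
The scheduling rule stated in the comment, as C pseudocode; _nanokernel.fiber and _nanokernel.task are the fields named above:

static tCCS *next_context_to_run(void)
{
    /* the head of the runnable fiber list wins; per the comment,
     * _nanokernel.task is never NULL
     */
    return _nanokernel.fiber ? _nanokernel.fiber : _nanokernel.task;
}
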

View file

@ -36,7 +36,7 @@ This module contains utilities to perform unaligned reads/writes from/to a
32-bit quantity. Some memory subsystems do not support the IA-32 byte
enable lines, and thus accessing an unaligned 32-bit quantity is
performed byte-by-byte.
*/
*/
#define _ASMLANGUAGE
@ -49,31 +49,31 @@ performed byte-by-byte.
GTEXT(_Unaligned32Write)
GTEXT(_Unaligned32Read)
/*******************************************************************************
*
* _Unaligned32Write - perform an unaligned 32-bit write operation
*
* This function is used during the interrupt and exception stub code
* synthesis step when writing out the 32-bit relative jmp/branch
* offsets.
*
* Generally, the 32-bit offsets are located at an odd memory address. For
* target hardware that doesn't fully (or properly) decode the byte enable
* lines from the IA-32 processor, this function shall be utilized to write out
* the data byte-by-byte.
*
* The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED
* shall be defined when the kernel is built for target hardware that cannot
* support unaligned double word (32-bit) write operations.
*
* C function prototype:
*
* void _Unaligned32Write
* (
* unsigned int * ptr,
* unsigned int val
* );
*/
/**
*
* _Unaligned32Write - perform an unaligned 32-bit write operation
*
* This function is used during the interrupt and exception stub code
* synthesis step when writing out the 32-bit relative jmp/branch
* offsets.
*
* Generally, the 32-bit offsets are located at an odd memory address. For
 * target hardware that doesn't fully (or properly) decode the byte enable
* lines from the IA-32 processor, this function shall be utilized to write out
* the data byte-by-byte.
*
* The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED
* shall be defined when the kernel is built for target hardware that cannot
* support unaligned double word (32-bit) write operations.
*
* C function prototype:
*
* void _Unaligned32Write
* (
* unsigned int * ptr,
* unsigned int val
* );
*/
SECTION_FUNC(TEXT, _Unaligned32Write)
movl 0x4(%esp), %edx /* fetch ptr argument */
@ -86,30 +86,30 @@ SECTION_FUNC(TEXT, _Unaligned32Write)
ret
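
The byte-by-byte access pattern is equivalent to the following C, shown for illustration; copying through unsigned char pointers never issues an unaligned 32-bit bus cycle:

void unaligned32_write_model(unsigned int *ptr, unsigned int val)
{
    unsigned char *dst = (unsigned char *)ptr;
    unsigned char *src = (unsigned char *)&val;

    dst[0] = src[0];
    dst[1] = src[1];
    dst[2] = src[2];
    dst[3] = src[3];
}
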
/*******************************************************************************
*
* _Unaligned32Read - perform an unaligned 32-bit read operation
*
* This function is used during the interrupt and exception stub code
* synthesis step when reading the 32-bit relative jmp/branch
* offsets.
*
* Generally, the 32-bit offsets are located at an odd memory address. For
* target hardware that doesn't fully (or properly) decode the byte enable
* lines from the IA-32 processor, this function shall be utilized to read
* the data byte-by-byte.
*
* The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED
* shall be defined when the kernel is built for target hardware that cannot
* support unaligned double word (32-bit) write operations.
*
* C function prototype:
*
* unsigned int _Unaligned32Read
* (
* unsigned int * ptr
* );
*/
/**
*
* _Unaligned32Read - perform an unaligned 32-bit read operation
*
* This function is used during the interrupt and exception stub code
* synthesis step when reading the 32-bit relative jmp/branch
* offsets.
*
* Generally, the 32-bit offsets are located at an odd memory address. For
 * target hardware that doesn't fully (or properly) decode the byte enable
* lines from the IA-32 processor, this function shall be utilized to read
* the data byte-by-byte.
*
* The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED
* shall be defined when the kernel is built for target hardware that cannot
* support unaligned double word (32-bit) write operations.
*
* C function prototype:
*
* unsigned int _Unaligned32Read
* (
* unsigned int * ptr
* );
*/
SECTION_FUNC(TEXT, _Unaligned32Read)
movl 0x4(%esp), %edx /* fetch ptr argument */

View file

@ -40,7 +40,7 @@ booting scenarios (e.g. via GRUB or any other multiboot compliant bootloader)
now assume that the system is already in 32-bit protected mode and address line
A20 is enabled. However, the code associated with CONFIG_PROT_MODE_SWITCH has
been left in place should future booting scenarios arise which require its use.
*/
*/
#define _ASMLANGUAGE

View file

@ -34,7 +34,7 @@
DESCRIPTION
This module contains the static interrupt stubs for the various drivers employed
by x86 BSPs.
*/
*/
#define _ASMLANGUAGE

View file

@ -34,7 +34,7 @@
DESCRIPTION
This header file is used to specify and describe board-level aspects for
the 'generic_pc' BSP.
*/
*/
#ifndef __INCboardh
#define __INCboardh

View file

@ -33,7 +33,7 @@
/*
DESCRIPTION
This is the linker script for both standard images and XIP images.
*/
*/
#include <autoconf.h>

View file

@ -34,7 +34,7 @@
DESCRIPTION
This module provides routines to initialize and support board-level hardware
for the generic_pc BSP.
*/
*/
#include <nanokernel.h>
#include "board.h"
@ -86,13 +86,13 @@ static inline void ioapicInit(void)
#ifdef DO_CONSOLE_INIT
/*******************************************************************************
*
* uart_generic_info_init - initialize initialization information for one UART
*
* RETURNS: N/A
*
*/
/**
*
* uart_generic_info_init - initialize initialization information for one UART
*
* RETURNS: N/A
*
*/
void uart_generic_info_init(struct uart_init_info *p_info)
{
@ -106,15 +106,15 @@ void uart_generic_info_init(struct uart_init_info *p_info)
#if defined(DO_CONSOLE_INIT)
/*******************************************************************************
*
* consoleInit - initialize target-only console
*
* Only used for debugging.
*
* RETURNS: N/A
*
*/
/**
*
* consoleInit - initialize target-only console
*
* Only used for debugging.
*
* RETURNS: N/A
*
*/
#include <console/uart_console.h>
@ -149,16 +149,16 @@ static void bluetooth_init(void)
} while ((0))
#endif /* CONFIG_BLUETOOTH */
/*******************************************************************************
*
* _InitHardware - perform basic hardware initialization
*
* Initialize the Intel 8259A interrupt controller device driver and the
* Intel 8250 UART device driver.
* Also initialize the timer device driver, if required.
*
* RETURNS: N/A
*/
/**
*
* _InitHardware - perform basic hardware initialization
*
* Initialize the Intel 8259A interrupt controller device driver and the
* Intel 8250 UART device driver.
* Also initialize the timer device driver, if required.
*
* RETURNS: N/A
*/
void _InitHardware(void)
{

View file

@ -63,33 +63,33 @@ extern "C" {
call h; \
jmp _ExcExit;
/*******************************************************************************
*
* NANO_CPU_EXC_CONNECT - generate and register an exception stub
*
* Generates an exception stub for the handler, <h>. It is registered
* on the vector given by <v> with the privilege level <d>; <d> should always
* be 0.
*
* Use this version of the macro if the processor pushes an error code for the
* given exception.
*/
/**
*
* NANO_CPU_EXC_CONNECT - generate and register an exception stub
*
* Generates an exception stub for the handler, <h>. It is registered
* on the vector given by <v> with the privilege level <d>; <d> should always
* be 0.
*
* Use this version of the macro if the processor pushes an error code for the
* given exception.
*/
#define NANO_CPU_EXC_CONNECT(h, v, d) \
NANO_CPU_INT_REGISTER_ASM(h, v, d) GTEXT(MK_STUB_NAME(h)); \
SECTION_FUNC(TEXT, MK_STUB_NAME(h)) NANO_CPU_EXC_CONNECT_CODE(h)
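A hedged usage sketch: on IA-32 the general protection fault (vector 13) pushes an error code, so a stub for it would use this variant. The handler name below is hypothetical, not taken from this tree:

/* hypothetical handler; vector 13 (#GP) pushes an error code */
NANO_CPU_EXC_CONNECT(_HypotheticalGpFaultHandler, 13, 0)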
/*******************************************************************************
*
* NANO_CPU_EXC_CONNECT_NO_ERR - generate and register an exception stub
*
* Generates an exception stub for the handler, <h>. It is registered
* on the vector given by <v> with the privilege level <d>; <d> should always
* be 0.
*
* Use this version of the macro if the processor doesn't push an error code for
* the given exception. The created stub pushes a dummy value of 0 to keep the
* exception stack frame the same.
/**
*
* NANO_CPU_EXC_CONNECT_NO_ERR - generate and register an exception stub
*
* Generates an exception stub for the handler, <h>. It is registered
* on the vector given by <v> with the privilege level <d>; <d> should always
* be 0.
*
* Use this version of the macro if the processor doesn't push an error code for
* the given exception. The created stub pushes a dummy value of 0 to keep the
* exception stack frame the same.
*/
#define NANO_CPU_EXC_CONNECT_NO_ERR(h, v, d) \
NANO_CPU_INT_REGISTER_ASM(h, v, d) GTEXT(MK_STUB_NAME(h)); \

View file

@ -45,14 +45,14 @@ NANO_CPU_EXC_CONNECT_NO_ERR(handler,vector,0)
#else /* !_ASMLANGUAGE */
/*******************************************************************************
*
* EflagsGet - return the current value of the EFLAGS register
*
* RETURNS: the EFLAGS register.
*
* \NOMANUAL
*/
/**
*
* EflagsGet - return the current value of the EFLAGS register
*
* RETURNS: the EFLAGS register.
*
* \NOMANUAL
*/
static inline unsigned int EflagsGet(void)
{
@ -70,15 +70,15 @@ static inline unsigned int EflagsGet(void)
#ifdef CONFIG_FP_SHARING
/*******************************************************************************
*
* _FpAccessDisable - disallow use of floating point capabilities
*
* This routine sets CR0[TS] to 1, which disallows the use of FP instructions
* by the currently executing context.
*
* RETURNS: N/A
*/
/**
*
* _FpAccessDisable - disallow use of floating point capabilities
*
* This routine sets CR0[TS] to 1, which disallows the use of FP instructions
* by the currently executing context.
*
* RETURNS: N/A
*/
static inline void _FpAccessDisable(void)
{
@ -94,17 +94,17 @@ static inline void _FpAccessDisable(void)
}
/*******************************************************************************
*
* _do_fp_ctx_save - save non-integer context information
*
* This routine saves the system's "live" non-integer context into the
* specified area. If the specified task or fiber supports SSE then
* x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved.
* Function is invoked by _FpCtxSave(tCCS *ccs)
*
* RETURNS: N/A
*/
/**
*
* _do_fp_ctx_save - save non-integer context information
*
* This routine saves the system's "live" non-integer context into the
* specified area. If the specified task or fiber supports SSE then
* x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved.
* Function is invoked by _FpCtxSave(tCCS *ccs)
*
* RETURNS: N/A
*/
static inline void _do_fp_ctx_save(int flags, void *preemp_float_reg)
{
@ -126,15 +126,15 @@ static inline void _do_fp_ctx_save(int flags, void *preemp_float_reg)
}
}
/*******************************************************************************
*
* _do_fp_ctx_init - initialize non-integer context information
*
* This routine initializes the system's "live" non-integer context.
* Function is invoked by _FpCtxInit(tCCS *ccs)
*
* RETURNS: N/A
*/
/**
*
* _do_fp_ctx_init - initialize non-integer context information
*
* This routine initializes the system's "live" non-integer context.
* Function is invoked by _FpCtxInit(tCCS *ccs)
*
* RETURNS: N/A
*/
static inline void _do_fp_ctx_init(int flags)
{

View file

@ -34,7 +34,7 @@
DESCRIPTION
This file provides definitions for the Global Descriptor Table (GDT) for the
IA-32 architecture.
*/
*/
#ifndef _GDT_H
#define _GDT_H

View file

@ -42,7 +42,7 @@ This file is also included by assembly language files which must #define
_ASMLANGUAGE before including this header file. Note that nanokernel assembly
source files obtain structure offset values via "absolute symbols" in the
offsets.o module.
*/
*/
#ifndef _NANO_PRIVATE_H
#define _NANO_PRIVATE_H
@ -760,18 +760,18 @@ extern tNANO _nanokernel;
/* inline function definitions */
/*******************************************************************************
*
* nanoArchInit - performs architecture-specific initialization
*
* This routine performs architecture-specific initialization of the nanokernel.
* Trivial stuff is done inline; more complex initialization is done via
* function calls.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* nanoArchInit - performs architecture-specific initialization
*
* This routine performs architecture-specific initialization of the nanokernel.
* Trivial stuff is done inline; more complex initialization is done via
* function calls.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static inline void nanoArchInit(void)
{
@ -809,18 +809,18 @@ static inline void nanoArchInit(void)
}
/*******************************************************************************
*
* fiberRtnValueSet - set the return value for the specified fiber (inline)
*
* The register used to store the return value from a function call invocation is
* set to <value>. It is assumed that the specified <fiber> is pending, and
* thus the fiber's context is stored in its tCCS structure.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
/**
*
* fiberRtnValueSet - set the return value for the specified fiber (inline)
*
* The register used to store the return value from a function call invocation is
* set to <value>. It is assumed that the specified <fiber> is pending, and
* thus the fiber's context is stored in its tCCS structure.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static inline void fiberRtnValueSet(
tCCS *fiber, /* pointer to fiber */

View file

@ -33,7 +33,7 @@
/*
DESCRIPTION
Intel-specific parts of start_task(). Only FP functionality currently.
*/
*/
#ifndef _START_TASK_ARCH__H_
#define _START_TASK_ARCH__H_

View file

@ -39,7 +39,7 @@ NOTE: _Swap() does not use this file as it uses the push instruction to save a
context. Changes to the file will not automatically be picked up by _Swap().
Conversely, changes to _Swap() should be mirrored here if the stack frame is
modified.
*/
*/
#ifndef _SWAPSTK_H
#define _SWAPSTK_H

View file

@ -34,7 +34,7 @@
DESCRIPTION
This header file is used to specify and describe board-level aspects for
the 'Quark' BSP.
*/
*/
#ifndef __INCboardh
#define __INCboardh
@ -152,31 +152,31 @@ the 'Quark' BSP.
sys_out8(data, (unsigned int)address)
#define PLB_BYTE_REG_READ(address) sys_in8((unsigned int)address)
/*******************************************************************************
*
* outByte - output byte to memory location
*
* RETURNS: N/A
*
* NOMANUAL
*/
/**
*
* outByte - output byte to memory location
*
* RETURNS: N/A
*
* NOMANUAL
*/
static inline void outByte(uint8_t data, uint32_t addr)
{
*(volatile uint8_t *)addr = data;
}
/*******************************************************************************
*
* inByte - obtain byte value from memory location
*
* This function issues the 'move' instruction to read a byte from the specified
* memory address.
*
* RETURNS: the byte read from the specified memory address
*
* NOMANUAL
*/
/**
*
* inByte - obtain byte value from memory location
*
* This function issues the 'move' instruction to read a byte from the specified
* memory address.
*
* RETURNS: the byte read from the specified memory address
*
* NOMANUAL
*/
static inline uint8_t inByte(uint32_t addr)
{
@ -194,31 +194,31 @@ static inline uint8_t inByte(uint32_t addr)
sys_out16(data, (unsigned int)address)
#define PLB_WORD_REG_READ(address) sys_in16((unsigned int)address)
/*******************************************************************************
*
* outWord - output word to memory location
*
* RETURNS: N/A
*
* NOMANUAL
*/
/**
*
* outWord - output word to memory location
*
* RETURNS: N/A
*
* NOMANUAL
*/
static inline void outWord(uint16_t data, uint32_t addr)
{
*(volatile uint16_t *)addr = data;
}
/*******************************************************************************
*
* inWord - obtain word value from memory location
*
* This function issues the 'move' instruction to read a word from the specified
* memory address.
*
* RETURNS: the word read from the specified memory address
*
* NOMANUAL
*/
/**
*
* inWord - obtain word value from memory location
*
* This function issues the 'move' instruction to read a word from the specified
* memory address.
*
* RETURNS: the word read from the specified memory address
*
* NOMANUAL
*/
static inline uint16_t inWord(uint32_t addr)
{
@ -236,31 +236,31 @@ static inline uint16_t inWord(uint32_t addr)
sys_out32(data, (unsigned int)address)
#define PLB_LONG_REG_READ(address) sys_in32((unsigned int)address)
/*******************************************************************************
*
* outLong - output long word to memory location
*
* RETURNS: N/A
*
* NOMANUAL
*/
/**
*
* outLong - output long word to memory location
*
* RETURNS: N/A
*
* NOMANUAL
*/
static inline void outLong(uint32_t data, uint32_t addr)
{
*(volatile uint32_t *)addr = data;
}
/*******************************************************************************
*
* inLong - obtain long word value from memory location
*
* This function issues the 'move' instruction to read a long word from the
* specified memory address.
*
* RETURNS: the long word read from the specified memory address
*
* NOMANUAL
*/
/**
*
* inLong - obtain long word value from memory location
*
* This function issues the 'move' instruction to read a long word from the
* specified memory address.
*
* RETURNS: the long word read from the specified memory address
*
* NOMANUAL
*/
static inline uint32_t inLong(uint32_t addr)
{
@ -268,19 +268,19 @@ static inline uint32_t inLong(uint32_t addr)
}
#endif /* !_ASMLANGUAGE */
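A hedged usage sketch of these accessors; DEVICE_REG is an assumed memory-mapped register address (not defined by this BSP), and <stdint.h> types are taken as already in scope:

#define DEVICE_REG 0xA0001000	/* hypothetical device register */

static void accessor_example(void)
{
	outByte(0x5a, DEVICE_REG);		/* volatile 8-bit store */
	uint8_t v8 = inByte(DEVICE_REG);	/* volatile 8-bit load */

	outLong(0xdeadbeef, DEVICE_REG);	/* volatile 32-bit store */
	uint32_t v32 = inLong(DEVICE_REG);	/* volatile 32-bit load */

	(void)v8;
	(void)v32;
}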
/*******************************************************************************
*
* pci_pin2irq - convert PCI interrupt PIN to IRQ
*
* The routine follows the "standard design consideration" and assumes that
* INTA (pin 1) -> IRQ 16
* INTB (pin 2) -> IRQ 17
* INTC (pin 3) -> IRQ 18
* INTD (pin 4) -> IRQ 19
*
* RETURNS: IRQ number, or -1 if the pin number is invalid
*
*/
/**
*
* pci_pin2irq - convert PCI interrupt PIN to IRQ
*
* The routine follows the "standard design consideration" and assumes that
* INTA (pin 1) -> IRQ 16
* INTB (pin 2) -> IRQ 17
* INTC (pin 3) -> IRQ 18
* INTD (pin 4) -> IRQ 19
*
* RETURNS: IRQ number, or -1 if the pin number is invalid
*
*/
static inline int pci_pin2irq(int pin)
{
@ -289,13 +289,13 @@ static inline int pci_pin2irq(int pin)
return N_PIC_IRQS + pin - 1;
}
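A hedged sketch of the round trip with pci_irq2pin() (shown below), assuming N_PIC_IRQS is 16 so that the INTA -> IRQ 16 mapping documented above holds:

static void pin_map_example(void)
{
	int irq = pci_pin2irq(1);	/* INTA (pin 1) -> IRQ 16 */
	int pin = pci_irq2pin(irq);	/* IRQ 16 -> pin 1 */

	(void)pin;
}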
/*******************************************************************************
*
* pci_irq2pin - convert IRQ to PCI interrupt pin
*
* RETURNS: pin number, or -1 if the IRQ number is invalid
*
*/
/**
*
* pci_irq2pin - convert IRQ to PCI interrupt pin
*
* RETURNS: pin number, or -1 if the IRQ number is invalid
*
*/
static inline int pci_irq2pin(int irq)
{

View file

@ -33,7 +33,7 @@
/*
DESCRIPTION
This is the linker script for both standard images and XIP images.
*/
*/
#include <autoconf.h>

View file

@ -37,7 +37,7 @@ for the Quark BSP.
Implementation Remarks:
Handlers for the secondary serial port have not been added.
*/
*/
#include <nanokernel.h>
#include <misc/printk.h>
@ -55,13 +55,13 @@ Handlers for the secondary serial port have not been added.
#if defined(DO_CONSOLE_INIT)
/*******************************************************************************
*
* uart_generic_info_init - initialize initialization information for one UART
*
* RETURNS: N/A
*
*/
/**
*
* uart_generic_info_init - initialize initialization information for one UART
*
* RETURNS: N/A
*
*/
void uart_generic_info_init(struct uart_init_info *p_info)
{
@ -74,15 +74,15 @@ void uart_generic_info_init(struct uart_init_info *p_info)
#if defined(DO_CONSOLE_INIT)
/*******************************************************************************
*
* consoleInit - initialize target-only console
*
* Only used for debugging.
*
* RETURNS: N/A
*
*/
/**
*
* consoleInit - initialize target-only console
*
* Only used for debugging.
*
* RETURNS: N/A
*
*/
#include <console/uart_console.h>
@ -101,16 +101,16 @@ static void consoleInit(void)
} while ((0))
#endif /* DO_CONSOLE_INIT */
/*******************************************************************************
*
* _InitHardware - perform basic hardware initialization
*
* Initialize the Intel LOAPIC and IOAPIC device driver and the
* Intel 8250 UART device driver.
* Also initialize the timer device driver, if required.
*
* RETURNS: N/A
*/
/**
*
* _InitHardware - perform basic hardware initialization
*
* Initialize the Intel LOAPIC and IOAPIC device driver and the
* Intel 8250 UART device driver.
* Also initialize the timer device driver, if required.
*
* RETURNS: N/A
*/
void _InitHardware(void)
{

View file

@ -34,7 +34,7 @@
DESCRIPTION
This module provides the _SysFatalErrorHandler() routine which is common to
supported BSPs.
*/
*/
#include <nanokernel.h>
#include <toolchain.h>
@ -47,25 +47,25 @@ supported BSPs.
#define PRINTK(...)
#endif /* CONFIG_PRINTK */
/*******************************************************************************
*
* _SysFatalErrorHandler - fatal error handler
*
* This routine implements the corrective action to be taken when the system
* detects a fatal error.
*
* This sample implementation attempts to abort the current context and allow
* the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities.
*
* System designers may wish to enhance or substitute this sample
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
/**
*
* _SysFatalErrorHandler - fatal error handler
*
* This routine implements the corrective action to be taken when the system
* detects a fatal error.
*
* This sample implementation attempts to abort the current context and allow
* the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities.
*
* System designers may wish to enhance or substitute this sample
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _SysFatalErrorHandler(
unsigned int reason, /* fatal error reason */

View file

@ -33,7 +33,7 @@
/*
DESCRIPTION
Intel-specific parts of start_task(). Only FP functionality currently.
*/
*/
#ifdef CONFIG_MICROKERNEL
@ -51,12 +51,12 @@ Intel-specific parts of start_task(). Only FP functionality currently.
#define SSE_GROUP 0x10
/*******************************************************************************
*
* _StartTaskArch - Intel-specific parts of task initialization
*
* RETURNS: N/A
*/
/**
*
* _StartTaskArch - Intel-specific parts of task initialization
*
* RETURNS: N/A
*/
void _StartTaskArch(
struct k_proc *X, /* ptr to task control block */

View file

@ -35,7 +35,7 @@
Serial console driver.
Hooks into the printk and fputc (for printf) modules. Poll driven.
*/
*/
#include <nanokernel.h>
#include <arch/cpu.h>
@ -56,7 +56,7 @@
#endif
#if 0 /* NOTUSED */
/******************************************************************************
/**
*
* consoleIn - get a character from UART
*
@ -74,7 +74,7 @@ static int consoleIn(void)
#endif
#if defined(CONFIG_PRINTK) || defined(CONFIG_STDOUT_CONSOLE)
/******************************************************************************
/**
*
* consoleOut - output one character to UART
*
@ -209,7 +209,7 @@ void uart_register_input(struct nano_fifo *avail, struct nano_fifo *lines)
} while ((0))
#endif
/******************************************************************************
/**
*
* uart_console_init - initialize one UART as the console/debug port
*

View file

@ -61,7 +61,7 @@ command is issued, the 8259A will automatically reset the highest IS bit of
those that are set, since in the fully nested mode the highest IS level is
the last level acknowledged and serviced.
*/
*/
/*
* A board support package's board.h header must provide definitions for the
@ -116,15 +116,15 @@ FUNC_ALIAS(_i8259_irq_enable, irq_enable, void);
FUNC_ALIAS(_i8259_irq_disable, irq_disable, void);
#endif /* CONFIG_SHUTOFF_PIC */
/*******************************************************************************
*
* _i8259_init - initialize the Intel 8259A PIC device driver
*
* This routine initializes the Intel 8259A PIC device driver and the device
* itself.
*
* RETURNS: N/A
*/
/**
*
* _i8259_init - initialize the Intel 8259A PIC device driver
*
* This routine initializes the Intel 8259A PIC device driver and the device
* itself.
*
* RETURNS: N/A
*/
void _i8259_init(void)
{
@ -185,16 +185,16 @@ void _i8259_init(void)
}
#ifndef CONFIG_SHUTOFF_PIC
/*******************************************************************************
*
* _i8259_eoi_master - send EOI (end of interrupt) signal to the master PIC.
*
* This routine is called at the end of the interrupt handler.
*
* RETURNS: N/A
*
* ERRNO
*/
/**
*
* _i8259_eoi_master - send EOI (end of interrupt) signal to the master PIC.
*
* This routine is called at the end of the interrupt handler.
*
* RETURNS: N/A
*
* ERRNO
*/
void _i8259_eoi_master(unsigned int irq /* IRQ number to
send EOI: unused */
@ -207,17 +207,17 @@ void _i8259_eoi_master(unsigned int irq /* IRQ number to
PLB_BYTE_REG_WRITE(I8259_EOI, PIC_IACK(PIC_MASTER_BASE_ADRS));
}
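For context, a hedged C sketch of the non-specific EOI being issued here: the 8259A non-specific EOI command is the byte 0x20 written to the command port. Port 0x20 is the conventional PC master command port and the I/O callback is a stand-in, neither taken from this driver:

static void pic_master_eoi_sketch(void (*out8)(uint16_t port, uint8_t val))
{
	out8(0x20, 0x20);	/* OCW2: non-specific EOI to the master PIC */
}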
/*******************************************************************************
*
* _i8259_eoi_slave - send EOI (end of interrupt) signal to the slave PIC.
*
* This routine is called at the end of the interrupt handler in the Normal
* Fully Nested Mode.
*
* RETURNS: N/A
*
* ERRNO
*/
/**
*
* _i8259_eoi_slave - send EOI (end of interrupt) signal to the slave PIC.
*
* This routine is called at the end of the interrupt handler in the Normal
* Fully Nested Mode.
*
* RETURNS: N/A
*
* ERRNO
*/
void _i8259_eoi_slave(unsigned int irq /* IRQ number to
send EOI: unused */
@ -239,22 +239,22 @@ void _i8259_eoi_slave(unsigned int irq /* IRQ number to
__asm__ volatile("popfl;\n\t");
}
/*******************************************************************************
*
* __I8259IntEnable - enable/disable a specified PIC interrupt input line
*
* This routine enables or disables a specified PIC interrupt input line. To
* enable an interrupt input line, the parameter <enable> must be non-zero.
*
* The nanokernel exports the irq_enable() and irq_disable()
* APIs (mapped to _i8259_irq_enable() and _i8259_irq_disable(), respectively).
* This function is called by _i8259_irq_enable() and _i8259_irq_disable() to
* perform the actual enabling/disabling of an IRQ to minimize footprint.
*
* RETURNS: N/A
*
* SEE ALSO: _i8259_irq_disable(), _i8259_irq_enable()
*/
/**
*
* __I8259IntEnable - enable/disable a specified PIC interrupt input line
*
* This routine enables or disables a specified PIC interrupt input line. To
* enable an interrupt input line, the parameter <enable> must be non-zero.
*
* The nanokernel exports the irq_enable() and irq_disable()
* APIs (mapped to _i8259_irq_enable() and _i8259_irq_disable(), respectively).
* This function is called by _i8259_irq_enable() and _i8259_irq_disable() to
* perform the actual enabling/disabling of an IRQ to minimize footprint.
*
* RETURNS: N/A
*
* SEE ALSO: _i8259_irq_disable(), _i8259_irq_enable()
*/
static void __I8259IntEnable(
unsigned int irq, /* IRQ number to enable */
@ -290,16 +290,16 @@ static void __I8259IntEnable(
}
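A hedged sketch of the masking such a routine performs, using the standard 8259A OCW1 (interrupt mask) ports; the 0x21/0xa1 port numbers are the conventional PC values and the I/O callbacks are stand-ins, not this driver's API:

static void pic_int_enable_sketch(unsigned int irq, unsigned int enable,
				  uint8_t (*in8)(uint16_t port),
				  void (*out8)(uint16_t port, uint8_t val))
{
	uint16_t port = (irq < 8) ? 0x21 : 0xa1;	/* master : slave OCW1 */
	uint8_t bit = 1 << (irq & 7);
	uint8_t mask = in8(port);

	if (enable)
		mask &= ~bit;	/* a 0 bit in OCW1 unmasks the line */
	else
		mask |= bit;	/* a 1 bit masks it */

	out8(port, mask);
}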
/*******************************************************************************
*
* _i8259_irq_disable - disable a specified PIC interrupt input line
*
* This routine disables a specified PIC interrupt input line.
*
* RETURNS: N/A
*
* SEE ALSO: _i8259_irq_enable()
*/
/**
*
* _i8259_irq_disable - disable a specified PIC interrupt input line
*
* This routine disables a specified PIC interrupt input line.
*
* RETURNS: N/A
*
* SEE ALSO: _i8259_irq_enable()
*/
void _i8259_irq_disable(unsigned int irq /* IRQ number to disable */
)
@ -307,16 +307,16 @@ void _i8259_irq_disable(unsigned int irq /* IRQ number to disable */
return __I8259IntEnable(irq, 0);
}
/*******************************************************************************
*
* _i8259_irq_enable - enable a specified PIC interrupt input line
*
* This routine enables a specified PIC interrupt input line.
*
* RETURNS: N/A
*
* SEE ALSO: _i8259_irq_disable()
*/
/**
*
* _i8259_irq_enable - enable a specified PIC interrupt input line
*
* This routine enables a specified PIC interrupt input line.
*
* RETURNS: N/A
*
* SEE ALSO: _i8259_irq_disable()
*/
void _i8259_irq_enable(unsigned int irq /* IRQ number to enable */
)

View file

The distinction between a spurious interrupt and a real one is made by
looking at the in-service register (ISR): bit 7 will be 1, indicating that
a real IRQ has been asserted.
*/
*/
/* includes */
#define _ASMLANGUAGE
@ -59,20 +59,20 @@ a real IRQ has been inserted.
GDATA(_i8259_spurious_interrupt_count)
/*******************************************************************************
*
* _i8259_boi_master - detect whether it is a spurious interrupt or not
*
* This routine is called before the user's interrupt handler to detect the
* spurious interrupt on the master PIC. If a spurious interrupt condition is
* detected, a global variable is incremented and the execution of the interrupt
* stub is "short circuited", i.e. a return to the interrupted context
* occurs.
*
* void _i8259_boi_master (void)
*
* RETURNS: N/A
*/
/**
*
* _i8259_boi_master - detect whether it is a spurious interrupt or not
*
* This routine is called before the user's interrupt handler to detect the
* spurious interrupt on the master PIC. If a spurious interrupt condition is
* detected, a global variable is incremented and the execution of the interrupt
* stub is "short circuited", i.e. a return to the interrupted context
* occurs.
*
* void _i8259_boi_master (void)
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _i8259_boi_master)
/* disable interrupts */
@ -93,20 +93,20 @@ SECTION_FUNC(TEXT, _i8259_boi_master)
ret
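A hedged C sketch of the test the stub performs: select the in-service register with OCW3 (0x0b), read it back, and treat a clear bit 7 as a spurious IRQ7. Port 0x20 is the conventional master command port and the I/O callbacks are stand-ins, not values read from this file:

static int irq7_is_spurious_sketch(uint8_t (*in8)(uint16_t port),
				   void (*out8)(uint16_t port, uint8_t val))
{
	out8(0x20, 0x0b);		/* OCW3: next read returns the ISR */
	return (in8(0x20) & 0x80) == 0;	/* bit 7 clear -> spurious */
}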
/*******************************************************************************
*
* _i8259_boi_slave - detect whether it is a spurious interrupt or not
*
* This routine is called before the user's interrupt handler to detect the
* spurious interrupt on the slave PIC. If a spurious interrupt condition is
* detected, a global variable is incremented and the execution of the interrupt
* stub is "short circuited", i.e. a return to the interrupted context
* occurs.
*
* void _i8259_boi_slave (void)
*
* RETURNS: N/A
*/
/**
*
* _i8259_boi_slave - detect whether it is a spurious interrupt or not
*
* This routine is called before the user's interrupt handler to detect the
* spurious interrupt on the slave PIC. If a spurious interrupt condition is
* detected, a global variable is incremented and the execution of the interrupt
* stub is "short circuited", i.e. a return to the interrupted context
* occurs.
*
* void _i8259_boi_slave (void)
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _i8259_boi_slave)
/* disable interrupts */

View file

@ -73,7 +73,7 @@ This implementation doesn't support multiple IO APICs.
INCLUDE FILES: ioapic.h loapic.h
SEE ALSO: loApicIntr.c
*/
*/
#include <nanokernel.h>
#include <arch/cpu.h>
@ -209,14 +209,14 @@ static void _IoApicRedUpdateLo(unsigned int irq, uint32_t value,
* IRQ virtualization imposed by the BSP.
*/
/*******************************************************************************
*
* _ioapic_init - initialize the IO APIC or xAPIC
*
* This routine initializes the IO APIC or xAPIC.
*
* RETURNS: N/A
*/
/**
*
* _ioapic_init - initialize the IO APIC or xAPIC
*
* This routine initializes the IO APIC or xAPIC.
*
* RETURNS: N/A
*/
void _ioapic_init(void)
{
@ -261,14 +261,14 @@ void _ioapic_init(void)
}
}
/*******************************************************************************
*
* _ioapic_eoi - send EOI (End Of Interrupt) signal to IO APIC
*
* This routine sends an EOI signal to the IO APIC's interrupting source.
*
* RETURNS: N/A
*/
/**
*
* _ioapic_eoi - send EOI (End Of Interrupt) signal to IO APIC
*
* This routine sends an EOI signal to the IO APIC's interrupting source.
*
* RETURNS: N/A
*/
void _ioapic_eoi(unsigned int irq /* INT number to send EOI */
)
@ -277,16 +277,16 @@ void _ioapic_eoi(unsigned int irq /* INT number to send EOI */
*(volatile unsigned int *)(LOAPIC_BASE_ADRS + LOAPIC_EOI) = 0;
}
/*******************************************************************************
*
* _ioapic_eoi_get - get EOI (End Of Interrupt) information
*
* This routine returns EOI signalling information for a specific IRQ.
*
* RETURNS: address of routine to be called to signal EOI;
* as a side effect, also passes back an indication of whether the routine
* requires an interrupt vector argument and what the argument value should be
*/
/**
*
* _ioapic_eoi_get - get EOI (End Of Interrupt) information
*
* This routine returns EOI signalling information for a specific IRQ.
*
* RETURNS: address of routine to be called to signal EOI;
* as a side effect, also passes back an indication of whether the routine
* requires an interrupt vector argument and what the argument value should be
*/
void *_ioapic_eoi_get(unsigned int irq, /* INTIN number of interest */
char *argRequired, /* ptr to "argument required" result
@ -317,14 +317,14 @@ void *_ioapic_eoi_get(unsigned int irq, /* INTIN number of interest */
return _ioapic_eoi;
}
/*******************************************************************************
*
* _ioapic_irq_enable - enable a specified APIC interrupt input line
*
* This routine enables a specified APIC interrupt input line.
*
* RETURNS: N/A
*/
/**
*
* _ioapic_irq_enable - enable a specified APIC interrupt input line
*
* This routine enables a specified APIC interrupt input line.
*
* RETURNS: N/A
*/
void _ioapic_irq_enable(unsigned int irq /* INTIN number to enable */
)
@ -332,14 +332,14 @@ void _ioapic_irq_enable(unsigned int irq /* INTIN number to enable */
_IoApicRedUpdateLo(irq, 0, IOAPIC_INT_MASK);
}
/*******************************************************************************
*
* _ioapic_irq_disable - disable a specified APIC interrupt input line
*
* This routine disables a specified APIC interrupt input line.
*
* RETURNS: N/A
*/
/**
*
* _ioapic_irq_disable - disable a specified APIC interrupt input line
*
* This routine disables a specified APIC interrupt input line.
*
* RETURNS: N/A
*/
void _ioapic_irq_disable(unsigned int irq /* INTIN number to disable */
)
@ -347,14 +347,14 @@ void _ioapic_irq_disable(unsigned int irq /* INTIN number to disable */
_IoApicRedUpdateLo(irq, IOAPIC_INT_MASK, IOAPIC_INT_MASK);
}
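Both routines funnel into a masked read-modify-write of the RTE low word; its update rule, as a one-line C sketch (the name is illustrative):

static uint32_t red_update_lo_sketch(uint32_t old, uint32_t value, uint32_t mask)
{
	return (old & ~mask) | (value & mask);	/* change only the masked bits */
}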
/*******************************************************************************
*
* _ioapic_irq_set - programs the interrupt redirection table
*
* This routine sets up the redirection table entry for the specified IRQ.
*
* RETURNS: N/A
*/
/**
*
* _ioapic_irq_set - programs the interrupt redirection table
*
* This routine sets up the redirection table entry for the specified IRQ.
*
* RETURNS: N/A
*/
void _ioapic_irq_set(unsigned int irq, /* virtualized IRQ */
unsigned int vector, /* vector number */
uint32_t flags /* interrupt flags */
@ -368,15 +368,15 @@ void _ioapic_irq_set(unsigned int irq, /* virtualized IRQ */
ioApicRedSetLo(irq, rteValue);
}
/*******************************************************************************
*
* _ioapic_int_vec_set - program interrupt vector for specified irq
*
* The routine writes the interrupt vector into the Interrupt Redirection
* Table entry for the specified IRQ number.
*
* RETURNS: N/A
*/
/**
*
* _ioapic_int_vec_set - program interrupt vector for specified irq
*
* The routine writes the interrupt vector into the Interrupt Redirection
* Table entry for the specified IRQ number.
*
* RETURNS: N/A
*/
void _ioapic_int_vec_set(unsigned int irq, /* INT number */
unsigned int vector /* vector number */
)
@ -386,14 +386,14 @@ void _ioapic_int_vec_set(unsigned int irq, /* INT number */
#ifndef XIOAPIC_DIRECT_ADDRESSING
/*******************************************************************************
*
* __IoApicGet - read a 32 bit IO APIC register
*
* This routine reads the specified IO APIC register using indirect addressing.
*
* RETURNS: register value
*/
/**
*
* __IoApicGet - read a 32 bit IO APIC register
*
* This routine reads the specified IO APIC register using indirect addressing.
*
* RETURNS: register value
*/
static uint32_t __IoApicGet(
int32_t offset /* register offset (8 bits) */
@ -414,14 +414,14 @@ static uint32_t __IoApicGet(
return value;
}
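A hedged sketch of the indirect addressing scheme these routines use: write the register offset to IOREGSEL (base + 0x00), then access the data through IOWIN (base + 0x10). The 0x00/0x10 offsets are the standard IO APIC register layout, not values read from this driver, and a real driver must also guard this non-atomic two-step sequence (e.g. by locking interrupts):

static uint32_t ioapic_indirect_read_sketch(volatile uint8_t *base,
					    uint8_t offset)
{
	*(volatile uint32_t *)(base + 0x00) = offset;	/* IOREGSEL */
	return *(volatile uint32_t *)(base + 0x10);	/* IOWIN */
}

static void ioapic_indirect_write_sketch(volatile uint8_t *base,
					 uint8_t offset, uint32_t value)
{
	*(volatile uint32_t *)(base + 0x00) = offset;	/* IOREGSEL */
	*(volatile uint32_t *)(base + 0x10) = value;	/* IOWIN */
}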
/*******************************************************************************
*
* __IoApicSet - write a 32 bit IO APIC register
*
* This routine writes the specified IO APIC register using indirect addressing.
*
* RETURNS: N/A
*/
/**
*
* __IoApicSet - write a 32 bit IO APIC register
*
* This routine writes the specified IO APIC register using indirect addressing.
*
* RETURNS: N/A
*/
static void __IoApicSet(
int32_t offset, /* register offset (8 bits) */
@ -442,14 +442,14 @@ static void __IoApicSet(
#endif
/*******************************************************************************
*
* ioApicRedGetLo - get low 32 bits of Redirection Table entry
*
* This routine reads the low-order 32 bits of a Redirection Table entry.
*
* RETURNS: 32 low-order bits
*/
/**
*
* ioApicRedGetLo - get low 32 bits of Redirection Table entry
*
* This routine reads the low-order 32 bits of a Redirection Table entry.
*
* RETURNS: 32 low-order bits
*/
static uint32_t ioApicRedGetLo(unsigned int irq /* INTIN number */
)
@ -468,14 +468,14 @@ static uint32_t ioApicRedGetLo(unsigned int irq /* INTIN number */
#endif
}
/*******************************************************************************
*
* ioApicRedSetLo - set low 32 bits of Redirection Table entry
*
* This routine writes the low-order 32 bits of a Redirection Table entry.
*
* RETURNS: N/A
*/
/**
*
* ioApicRedSetLo - set low 32 bits of Redirection Table entry
*
* This routine writes the low-order 32 bits of a Redirection Table entry.
*
* RETURNS: N/A
*/
static void ioApicRedSetLo(unsigned int irq, /* INTIN number */
uint32_t lower32 /* value to be written */
@ -495,14 +495,14 @@ static void ioApicRedSetLo(unsigned int irq, /* INTIN number */
#endif
}
/*******************************************************************************
*
* ioApicRedSetHi - set high 32 bits of Redirection Table entry
*
* This routine writes the high-order 32 bits of a Redirection Table entry.
*
* RETURNS: N/A
*/
/**
*
* ioApicRedSetHi - set high 32 bits of Redirection Table entry
*
* This routine writes the high-order 32 bits of a Redirection Table entry.
*
* RETURNS: N/A
*/
static void ioApicRedSetHi(unsigned int irq, /* INTIN number */
uint32_t upper32 /* value to be written */
@ -522,15 +522,15 @@ static void ioApicRedSetHi(unsigned int irq, /* INTIN number */
#endif
}
/*******************************************************************************
*
* _IoApicRedUpdateLo - modify low 32 bits of Redirection Table entry
*
* This routine modifies selected portions of the low-order 32 bits of a
* Redirection Table entry, as indicated by the associated bit mask.
*
* RETURNS: N/A
*/
/**
*
* _IoApicRedUpdateLo - modify low 32 bits of Redirection Table entry
*
* This routine modifies selected portions of the low-order 32 bits of a
* Redirection Table entry, as indicated by the associated bit mask.
*
* RETURNS: N/A
*/
static void _IoApicRedUpdateLo(
unsigned int irq, /* INTIN number */
@ -548,15 +548,15 @@ static void _IoApicRedUpdateLo(
* macro if the I/O APIC supports the MSI redirect capability.
*/
/*******************************************************************************
*
* _IoApicRteConfigSet - write to the RTE config register for specified IRQ
*
* This routine writes the specified 32-bit <value> into the RTE configuration
* register for the specified <irq> (0 to (IOAPIC_NUM_RTES - 1))
*
* RETURNS: void
*/
/**
*
* _IoApicRteConfigSet - write to the RTE config register for specified IRQ
*
* This routine writes the specified 32-bit <value> into the RTE configuration
* register for the specified <irq> (0 to (IOAPIC_NUM_RTES - 1))
*
* RETURNS: void
*/
static void _IoApicRteConfigSet(unsigned int irq, /* INTIN number */
uint32_t value /* value to be written */
@ -576,15 +576,15 @@ static void _IoApicRteConfigSet(unsigned int irq, /* INTIN number */
*((volatile uint32_t *)(IOAPIC_BASE_ADRS + offset)) = value;
}
/*******************************************************************************
*
* _IoApicRedirRegSet - write to the specified MSI redirection register
*
* This routine writes the 32-bit <value> into the redirection register
* specified by <reg>.
*
* RETURNS: void
*/
/**
*
* _IoApicRedirRegSet - write to the specified MSI redirection register
*
* This routine writes the 32-bit <value> into the redirection register
* specified by <reg>.
*
* RETURNS: void
*/
static void _IoApicRedirRegSet(unsigned int reg, uint32_t value)
{

Some files were not shown because too many files have changed in this diff.