Adding in doxygen comment headers

Moving many of the functions from the old format of inline comments to
the newer doxygen format.

Change-Id: Ib0fe0d8627d7cd90219385a3ab627da8f9637d98
Signed-off-by: Dan Kalowsky <daniel.kalowsky@intel.com>
commit 3a109b1f00
Dan Kalowsky, 2015-10-20 09:42:33 -07:00; committed by Anas Nashif
43 changed files with 372 additions and 372 deletions


@@ -61,15 +61,14 @@ static inline void nonEssentialTaskAbort(void)
  * implementation to take other actions, such as logging error (or debug)
  * information to a persistent repository and/or rebooting the system.
  *
+ * @param reason the fatal error reason
+ * @param pEsf pointer to exception stack frame
+ *
  * @return N/A
  *
  * \NOMANUAL
  */
-void _SysFatalErrorHandler(
-	unsigned int reason,	/* fatal error reason */
-	const NANO_ESF * pEsf	/* pointer to exception stack frame */
-	)
+void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF * pEsf)
 {
 	nano_context_type_t curCtx = sys_execution_context_type_get();


@@ -90,19 +90,20 @@ static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
  *
  * <options> is currently unused.
  *
+ * @param pStackMem the pointer to aligned stack memory
+ * @param stackSize the stack size in bytes
+ * @param pEntry thread entry point routine
+ * @param parameter1 first param to entry point
+ * @param parameter2 second param to entry point
+ * @param parameter3 third param to entry point
+ * @param priority fiber priority, -1 for task
+ * @param options is unused (saved for future expansion)
+ *
  * @return N/A
  */
-void _new_thread(
-	char *pStackMem,	/* pointer to aligned stack memory */
-	unsigned stackSize,	/* stack size in bytes */
-	_thread_entry_t pEntry,	/* thread entry point routine */
-	void *parameter1,	/* first param to entry point */
-	void *parameter2,	/* second param to entry point */
-	void *parameter3,	/* third param to entry point */
-	int priority,		/* fiber priority, -1 for task */
-	unsigned options	/* unused, for expansion */
-	)
+void _new_thread(char *pStackMem, unsigned stackSize, _thread_entry_t pEntry,
+		 void *parameter1, void *parameter2, void *parameter3,
+		 int priority, unsigned options)
 {
 	char *stackEnd = pStackMem + stackSize;
 	struct init_stack_frame *pInitCtx;


@@ -38,7 +38,6 @@ definitions and more complex routines, if needed.
  *
  * @return N/A
  */
-
 void _ScbSystemReset(void)
 {
 	union __aircr reg;
@@ -51,8 +50,8 @@ void _ScbSystemReset(void)

 /**
  *
- * @brief Set the number of priority groups based on the number
- * of exception priorities desired
+ * @brief Set the number of priority groups based on the number of exception
+ * priorities desired
  *
  * Exception priorities can be divided in priority groups, inside which there is
  * no preemption. The priorities inside a group are only used to decide which
@@ -60,11 +59,11 @@ void _ScbSystemReset(void)
  *
  * The number of priorities has to be a power of two, from 1 to 128.
  *
+ * @param n the number of priorities
+ *
  * @return N/A
  */
-void _ScbNumPriGroupSet(unsigned int n	/* number of priorities */
-			)
+void _ScbNumPriGroupSet(unsigned int n)
 {
 	unsigned int set;
 	union __aircr reg;


@ -61,15 +61,16 @@ const NANO_ESF _default_esf = {0xdeaddead, /* a1 */
* fatal error does not have a hardware generated ESF, the caller should either * fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>. * create its own or use a pointer to the global default ESF <_default_esf>.
* *
* @param reason the reason that the handler was called
* @param pEsf pointer to the exception stack frame
*
* @return This function does not return. * @return This function does not return.
* *
* \NOMANUAL * \NOMANUAL
*/ */
FUNC_NORETURN void _NanoFatalErrorHandler( FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
unsigned int reason, /* reason that handler was called */ const NANO_ESF *pEsf)
const NANO_ESF *pEsf /* pointer to exception stack frame */
)
{ {
switch (reason) { switch (reason) {
case _NANO_ERR_INVALID_TASK_EXIT: case _NANO_ERR_INVALID_TASK_EXIT:


@@ -134,8 +134,7 @@ static void _FaultThreadShow(const NANO_ESF *esf)
  * \NOMANUAL
  */
-static void _MpuFault(const NANO_ESF *esf,
-		      int fromHardFault)
+static void _MpuFault(const NANO_ESF *esf, int fromHardFault)
 {
 	PR_EXC("***** MPU FAULT *****\n");
@@ -169,8 +168,7 @@ static void _MpuFault(const NANO_ESF *esf,
  * \NOMANUAL
  */
-static void _BusFault(const NANO_ESF *esf,
-		      int fromHardFault)
+static void _BusFault(const NANO_ESF *esf, int fromHardFault)
 {
 	PR_EXC("***** BUS FAULT *****\n");
@@ -294,8 +292,7 @@ static void _DebugMonitor(const NANO_ESF *esf)
  * \NOMANUAL
  */
-static void _ReservedException(const NANO_ESF *esf,
-			       int fault)
+static void _ReservedException(const NANO_ESF *esf, int fault)
 {
 	PR_EXC("***** %s %d) *****\n",
 	       fault < 16 ? "Reserved Exception (" : "Spurious interrupt (IRQ ",
@@ -362,15 +359,15 @@ static void _FaultDump(const NANO_ESF *esf, int fault)
  * interrupt was already being handled, it is passed a pointer to both and has
  * to find out on which the ESP is present.
  *
+ * @param msp pointer to potential ESF on MSP
+ * @param psp pointer to potential ESF on PSP
+ *
  * @return This function does not return.
  *
  * \NOMANUAL
  */
-void _Fault(
-	const NANO_ESF *msp,	/* pointer to potential ESF on MSP */
-	const NANO_ESF *psp	/* pointer to potential ESF on PSP */
-	)
+void _Fault(const NANO_ESF *msp, const NANO_ESF *psp)
 {
 	const NANO_ESF *esf = _ScbIsNestedExc() ? msp : psp;
 	int fault = _ScbActiveVectorGet();


@@ -45,7 +45,6 @@ extern void __reserved(void);
  *
  * @return N/A
  */
-
 void _irq_handler_set(unsigned int irq,
 		      void (*old)(void *arg),
 		      void (*new)(void *arg),
@@ -73,7 +72,6 @@ void _irq_handler_set(unsigned int irq,
  *
  * @return N/A
  */
-
 void irq_enable(unsigned int irq)
 {
 	/* before enabling interrupts, ensure that interrupt is cleared */
@@ -90,7 +88,6 @@ void irq_enable(unsigned int irq)
  *
  * @return N/A
  */
-
 void irq_disable(unsigned int irq)
 {
 	_NvicIrqDisable(irq);
@@ -111,7 +108,6 @@ void irq_disable(unsigned int irq)
  *
  * @return N/A
  */
-
 void _irq_priority_set(unsigned int irq,
 		       unsigned int prio)
 {
@@ -130,7 +126,6 @@ void _irq_priority_set(unsigned int irq,
  *
  * @return N/A
  */
-
 void _irq_spurious(void *unused)
 {
 	ARG_UNUSED(unused);
@@ -149,7 +144,6 @@ void _irq_spurious(void *unused)
  *
  * @return the interrupt line number
  */
-
 int irq_connect(unsigned int irq,
 		unsigned int prio,
 		void (*isr)(void *arg),
@@ -172,7 +166,6 @@ int irq_connect(unsigned int irq,
  *
  * @return N/A
  */
-
 void _irq_disconnect(unsigned int irq)
 {
 	_irq_handler_set(irq, _sw_isr_table[irq].isr, _irq_spurious, NULL);


@@ -61,15 +61,15 @@ static inline void nonEssentialTaskAbort(void)
  * implementation to take other actions, such as logging error (or debug)
  * information to a persistent repository and/or rebooting the system.
  *
+ * @param reason fatal error reason
+ * @param pEsf pointer to exception stack frame
+ *
  * @return N/A
  *
  * \NOMANUAL
  */
-void _SysFatalErrorHandler(
-	unsigned int reason,	/* fatal error reason */
-	const NANO_ESF * pEsf	/* pointer to exception stack frame */
-	)
+void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF * pEsf)
 {
 	nano_context_type_t curCtx = sys_execution_context_type_get();


@@ -87,19 +87,21 @@ static ALWAYS_INLINE void _thread_monitor_init(struct tcs *tcs /* thread */
  *
  * <options> is currently unused.
  *
+ * @param pStackMem the aligned stack memory
+ * @param stackSize stack size in bytes
+ * @param pEntry the entry point
+ * @param parameter1 first param to entry point
+ * @param parameter2 second param to entry point
+ * @param parameter3 third param to entry point
+ * @param priority thread priority (-1 for tasks)
+ * @param options misc options (future use)
+ *
  * @return N/A
  */
-void _new_thread(
-	char *pStackMem,	/* aligned stack memory */
-	unsigned stackSize,	/* stack size in bytes */
-	_thread_entry_t pEntry,	/* entry point */
-	void *parameter1,	/* entry point first param */
-	void *parameter2,	/* entry point second param */
-	void *parameter3,	/* entry point third param */
-	int priority,		/* thread priority (-1 for tasks) */
-	unsigned options	/* misc options (future) */
-	)
+void _new_thread(char *pStackMem, unsigned stackSize, _thread_entry_t pEntry,
+		 void *parameter1, void *parameter2, void *parameter3,
+		 int priority, unsigned options)
 {
 	char *stackEnd = pStackMem + stackSize;
 	struct __esf *pInitCtx;


@@ -175,15 +175,16 @@ static ALWAYS_INLINE void nanoArchInit(void)
  * to <value>. It is assumed that the specified <fiber> is pending, and thus
  * the fiber's thread is stored in its struct tcs structure.
  *
+ * @param fiber pointer to the fiber
+ * @param value is the value to set as a return value
+ *
  * @return N/A
  *
  * \NOMANUAL
  */
-static ALWAYS_INLINE void fiberRtnValueSet(
-	struct tcs *fiber,	/* pointer to fiber */
-	unsigned int value	/* value to set as return value */
-	)
+static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber,
+					   unsigned int value)
 {
 	tESF *pEsf = (void *)fiber->preempReg.psp;


@@ -51,14 +51,14 @@ as there is no requirement for this capability.
  * and the write of the new value (if it occurs) all happen atomically with
  * respect to both interrupts and accesses of other processors to <target>.
  *
+ * @param target the address to be tested
+ * @param oldValue the value to be compared against
+ * @param newValue the value to be set to
+ *
  * @return Returns 1 if <newValue> is written, 0 otherwise.
  */
-int atomic_cas(
-	atomic_t *target,	/* address to be tested */
-	atomic_val_t oldValue,	/* value to compare against */
-	atomic_val_t newValue	/* value to set to */
-	)
+int atomic_cas(atomic_t *target, atomic_val_t oldValue, atomic_val_t newValue)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* temporary storage */
@@ -82,13 +82,13 @@ int atomic_cas(
  * atomically added to the value at <target>, placing the result at <target>,
  * and the old value from <target> is returned.
  *
+ * @param target memory location to add to
+ * @param value the value to add
+ *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_add(
-	atomic_t *target,	/* memory location to add to */
-	atomic_val_t value	/* value to add */
-	)
+atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* previous value from <target> */
@@ -108,14 +108,13 @@ atomic_val_t atomic_add(
  * atomically subtracted from the value at <target>, placing the result at
  * <target>, and the old value from <target> is returned.
  *
+ * @param target the memory location to subtract from
+ * @param value the value to subtract
+ *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_sub(
-	atomic_t *target,	/* memory location to subtract from */
-	atomic_val_t value	/* value to subtract */
-	)
+atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* previous value from <target> */
@@ -130,15 +129,15 @@ atomic_val_t atomic_sub(
  *
  * @brief Atomic increment primitive
  *
+ * @param target memory location to increment
+ *
  * This routine provides the atomic increment operator. The value at <target>
  * is atomically incremented by 1, and the old value from <target> is returned.
  *
  * @return The value from <target> before the increment
  */
-atomic_val_t atomic_inc(
-	atomic_t *target	/* memory location to increment */
-	)
+atomic_val_t atomic_inc(atomic_t *target)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* value from <target> before the increment */
@@ -154,15 +153,15 @@ atomic_val_t atomic_inc(
  *
  * @brief Atomic decrement primitive
  *
+ * @param target memory location to decrement
+ *
  * This routine provides the atomic decrement operator. The value at <target>
  * is atomically decremented by 1, and the old value from <target> is returned.
  *
  * @return The value from <target> prior to the decrement
  */
-atomic_val_t atomic_dec(
-	atomic_t *target	/* memory location to decrement */
-	)
+atomic_val_t atomic_dec(atomic_t *target)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* value from <target> prior to the decrement */
@@ -178,6 +177,8 @@ atomic_val_t atomic_dec(
  *
  * @brief Atomic get primitive
  *
+ * @param target memory location to read from
+ *
  * This routine provides the atomic get primitive to atomically read
  * a value from <target>. It simply does an ordinary load. Note that <target>
  * is expected to be aligned to a 4-byte boundary.
@@ -185,8 +186,7 @@ atomic_val_t atomic_dec(
  * @return The value read from <target>
  */
-atomic_val_t atomic_get(const atomic_t *target	/* memory location to read from */
-			)
+atomic_val_t atomic_get(const atomic_t *target)
 {
 	return *target;
 }
@@ -198,13 +198,13 @@ atomic_val_t atomic_get(const atomic_t *target /* memory location to read from *
  * This routine provides the atomic set operator. The <value> is atomically
  * written at <target> and the previous value at <target> is returned.
  *
+ * @param target the memory location to write to
+ * @param value the value to write
+ *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_set(
-	atomic_t *target,	/* memory location to write to */
-	atomic_val_t value	/* value to write */
-	)
+atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* previous value from <target> */
@@ -224,12 +224,12 @@ atomic_val_t atomic_set(
  * written at <target> and the previous value at <target> is returned. (Hence,
  * atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
  *
+ * @param target the memory location to write
+ *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_clear(
-	atomic_t *target	/* memory location to write to */
-	)
+atomic_val_t atomic_clear(atomic_t *target)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* previous value from <target> */
@@ -249,13 +249,13 @@ atomic_val_t atomic_clear(
  * is atomically bitwise OR'ed with the value at <target>, placing the result
  * at <target>, and the previous value at <target> is returned.
  *
+ * @param target the memory location to be modified
+ * @param value the value to OR
+ *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_or(
-	atomic_t *target,	/* memory location to be modified */
-	atomic_val_t value	/* value to OR */
-	)
+atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* previous value from <target> */
@@ -275,13 +275,13 @@ atomic_val_t atomic_or(
  * is atomically bitwise XOR'ed with the value at <target>, placing the result
  * at <target>, and the previous value at <target> is returned.
  *
+ * @param target the memory location to be modified
+ * @param value the value to XOR
+ *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_xor(
-	atomic_t *target,	/* memory location to be modified */
-	atomic_val_t value	/* value to XOR */
-	)
+atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* previous value from <target> */
@@ -301,13 +301,13 @@ atomic_val_t atomic_xor(
  * atomically bitwise AND'ed with the value at <target>, placing the result
  * at <target>, and the previous value at <target> is returned.
  *
+ * @param target the memory location to be modified
+ * @param value the value to AND
+ *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_and(
-	atomic_t *target,	/* memory location to be modified */
-	atomic_val_t value	/* value to AND */
-	)
+atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* previous value from <target> */
@@ -327,13 +327,13 @@ atomic_val_t atomic_and(
  * atomically bitwise NAND'ed with the value at <target>, placing the result
  * at <target>, and the previous value at <target> is returned.
  *
+ * @param target the memory location to be modified
+ * @param value the value to NAND
+ *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_nand(
-	atomic_t *target,	/* memory location to be modified */
-	atomic_val_t value	/* value to NAND */
-	)
+atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
 {
 	int key;		/* interrupt lock level */
 	atomic_val_t ovalue;	/* previous value from <target> */
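
The compare-and-swap primitive documented above is the building block for
lock-free updates that the simpler operators do not cover. A minimal sketch of
the usual retry loop, using only atomic_get() and atomic_cas() as declared in
this file; the capped-add semantics and the helper name are illustrative, not
part of the API:

	/* Atomically add <delta> to <target> without exceeding <limit>.
	 * atomic_cas() returns 1 only if <target> still held <oldValue> and
	 * the new value was written, so a concurrent update forces a retry.
	 */
	static atomic_val_t atomic_add_capped(atomic_t *target,
					      atomic_val_t delta,
					      atomic_val_t limit)
	{
		atomic_val_t oldValue, newValue;

		do {
			oldValue = atomic_get(target);
			newValue = (oldValue + delta > limit)
				   ? limit : oldValue + delta;
		} while (!atomic_cas(target, oldValue, newValue));

		return oldValue; /* previous value, like the other atomic_* ops */
	}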


@@ -63,15 +63,16 @@ const NANO_ESF _default_esf = {
  * fatal error does not have a hardware generated ESF, the caller should either
  * create its own or use a pointer to the global default ESF <_default_esf>.
  *
+ * @param reason the reason that the handler was called
+ * @param pEsf pointer to the exception stack frame
+ *
  * @return This function does not return.
  *
  * \NOMANUAL
  */
-FUNC_NORETURN void _NanoFatalErrorHandler(
-	unsigned int reason,	/* reason that handler was called */
-	const NANO_ESF *pEsf	/* pointer to exception stack frame */
-	)
+FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+					  const NANO_ESF *pEsf)
 {
 #ifdef CONFIG_PRINTK


@@ -99,9 +99,10 @@ extern uint32_t _sse_mxcsr_default_value; /* SSE control/status register default
  * specified TCS. If the specified task or fiber supports SSE then
  * x87/MMX/SSEx thread info is saved, otherwise only x87/MMX thread is saved.
  *
+ * @param tcs TBD
+ *
  * @return N/A
  */
-
 static void _FpCtxSave(struct tcs *tcs)
 {
 	_do_fp_ctx_save(tcs->flags & USE_SSE, &tcs->preempFloatReg);
@@ -113,9 +114,10 @@ static void _FpCtxSave(struct tcs *tcs)
  *
  * This routine initializes the system's "live" non-integer context.
  *
+ * @param tcs TBD
+ *
  * @return N/A
  */
-
 static inline void _FpCtxInit(struct tcs *tcs)
 {
 	_do_fp_ctx_init(tcs->flags & USE_SSE);
@@ -142,6 +144,9 @@ static inline void _FpCtxInit(struct tcs *tcs)
  * This routine should only be used to enable floating point support for a
  * task/fiber that does not currently have such support enabled already.
  *
+ * @param tcs TBD
+ * @param options set to either USE_FP or USE_SSE
+ *
  * @return N/A
  *
  * INTERNAL
@@ -159,10 +164,7 @@ static inline void _FpCtxInit(struct tcs *tcs)
  * are only used from a fiber, rather than from "generic" code used by both
  * tasks and fibers.
  */
-
-void _FpEnable(struct tcs *tcs,
-	       unsigned int options	/* USE_FP or USE_SSE */
-	       )
+void _FpEnable(struct tcs *tcs, unsigned int options)
 {
 	unsigned int imask;
 	struct tcs *fp_owner;
@@ -272,7 +274,6 @@ void _FpEnable(struct tcs *tcs,
  *
  * @return N/A
  */
-
 FUNC_ALIAS(_FpEnable, fiber_float_enable, void);

 /**
@@ -286,7 +287,6 @@ FUNC_ALIAS(_FpEnable, fiber_float_enable, void);
  *
  * @return N/A
  */
-
 FUNC_ALIAS(_FpEnable, task_float_enable, void);

 /**
@@ -301,6 +301,8 @@ FUNC_ALIAS(_FpEnable, task_float_enable, void);
  * This routine should only be used to disable floating point support for
  * a task/fiber that currently has such support enabled.
  *
+ * @param tcs TBD
+ *
  * @return N/A
  *
  * INTERNAL
@@ -318,7 +320,6 @@ FUNC_ALIAS(_FpEnable, task_float_enable, void);
  * are only used from a fiber, rather than from "generic" code used by both
  * tasks and fibers.
  */
-
 void _FpDisable(struct tcs *tcs)
 {
 	unsigned int imask;
@@ -362,7 +363,6 @@ void _FpDisable(struct tcs *tcs)
  *
  * @return N/A
  */
-
 FUNC_ALIAS(_FpDisable, fiber_float_disable, void);

 /**
@@ -379,7 +379,6 @@ FUNC_ALIAS(_FpDisable, fiber_float_disable, void);
  *
  * @return N/A
  */
-
 FUNC_ALIAS(_FpDisable, task_float_disable, void);

@@ -395,11 +394,11 @@ FUNC_ALIAS(_FpDisable, task_float_disable, void);
  * current task or fiber with the USE_FP option (or the USE_SSE option if the
  * SSE configuration option has been enabled).
  *
+ * @param pEsf this value is not used for this architecture
+ *
  * @return N/A
  */
-
-void _FpNotAvailableExcHandler(NANO_ESF * pEsf	/* not used */
-			       )
+void _FpNotAvailableExcHandler(NANO_ESF * pEsf)
 {
 	unsigned int enableOption;


@@ -161,6 +161,10 @@ static int _int_stub_alloc(void)
  *
  * @brief Connect a routine to an interrupt vector
  *
+ * @param vector interrupt vector: 0 to 255 on IA-32
+ * @param routine a function pointer to the interrupt routine
+ * @param dpl priv level for interrupt-gate descriptor
+ *
  * This routine "connects" the specified <routine> to the specified interrupt
  * <vector>. On the IA-32 architecture, an interrupt vector is a value from
  * 0 to 255. This routine merely fills in the appropriate interrupt
@@ -180,11 +184,7 @@ static int _int_stub_alloc(void)
  *
  */
-void _IntVecSet(
-	unsigned int vector,		/* interrupt vector: 0 to 255 on IA-32 */
-	void (*routine)(void *),
-	unsigned int dpl		/* priv level for interrupt-gate descriptor */
-	)
+void _IntVecSet(unsigned int vector, void (*routine)(void *), unsigned int dpl)
 {
 	unsigned long long *pIdtEntry;
 	unsigned int key;
@@ -198,9 +198,9 @@ void _IntVecSet(
 	pIdtEntry = (unsigned long long *)(_idt_base_address + (vector << 3));

 	/*
-	 * Lock interrupts to protect the IDT entry to which _IdtEntryCreate() will
-	 * write. They must be locked here because the _IdtEntryCreate() code is
-	 * shared with the 'gen_idt' host tool.
+	 * Lock interrupts to protect the IDT entry to which _IdtEntryCreate()
+	 * will write. They must be locked here because the _IdtEntryCreate()
+	 * code is shared with the 'gen_idt' host tool.
 	 */

 	key = irq_lock();
@@ -221,6 +221,11 @@ void _IntVecSet(
  *
  * @brief Connect a C routine to a hardware interrupt
  *
+ * @param irq virtualized IRQ to connect to
+ * @param priority requested priority of interrupt
+ * @param routine the C interrupt handler
+ * @param parameter parameter passed to C routine
+ *
  * This routine connects an interrupt service routine (ISR) coded in C to
  * the specified hardware <irq>. An interrupt vector will be allocated to
  * satisfy the specified <priority>. If the interrupt service routine is being
@@ -265,12 +270,8 @@ void _IntVecSet(
  * vectors remaining in the specified <priority> level.
  */
-int irq_connect(
-	unsigned int irq,			/* virtualized IRQ to connect to */
-	unsigned int priority,			/* requested priority of interrupt */
-	void (*routine)(void *parameter),	/* C interrupt handler */
-	void *parameter				/* parameter passed to C routine */
-	)
+int irq_connect(unsigned int irq, unsigned int priority,
+		void (*routine)(void *parameter), void *parameter)
 {
 	unsigned char offsetAdjust;
 	unsigned char numParameters = 1;	/* stub always pushes ISR parameter */
@@ -388,10 +389,8 @@ int irq_connect(
 	/*
 	 * Poke in the stack popping related opcode. Do it a byte at a time
-	 * because
-	 * &STUB_PTR[offsetAdjust] may not be aligned which does not work for
-	 * all
-	 * targets.
+	 * because &STUB_PTR[offsetAdjust] may not be aligned which does not
+	 * work for all targets.
 	 */

 	STUB_PTR[offsetAdjust] = IA32_ADD_OPCODE & 0xFF;
@@ -401,9 +400,10 @@ int irq_connect(
 	offsetAdjust += 3;

 	/*
-	 * generate code that invokes _IntExit(); note that a jump is used, since
-	 * _IntExit() takes care of returning back to the execution context that
-	 * experienced the interrupt (i.e. branch tail optimization)
+	 * generate code that invokes _IntExit(); note that a jump is used,
+	 * since _IntExit() takes care of returning back to the execution
+	 * context that experienced the interrupt (i.e. branch tail
+	 * optimization)
 	 */

 	STUB_PTR[offsetAdjust] = IA32_JMP_OPCODE;
@@ -416,8 +416,8 @@ int irq_connect(
 	 * There is no need to explicitly synchronize or flush the instruction
 	 * cache due to the above code synthesis. See the Intel 64 and IA-32
 	 * Architectures Software Developer's Manual: Volume 3A: System
-	 *Programming
-	 * Guide; specifically the section titled "Self Modifying Code".
+	 * Programming Guide; specifically the section titled "Self Modifying
+	 * Code".
 	 *
 	 * Cache synchronization/flushing is not required for the i386 as it
 	 * does not contain any on-chip I-cache; likewise, post-i486 processors
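
The flattened signature makes the call pattern clearer: irq_connect() takes
the virtualized IRQ, a priority, the C handler, and an opaque parameter, and
returns the interrupt vector it allocated. A minimal hookup sketch; the device
structure, IRQ number, priority, and the negative failure value are
assumptions for illustration:

	struct my_dev;                          /* hypothetical device context */
	extern void my_dev_ack(struct my_dev *dev);

	static void my_isr(void *arg)
	{
		struct my_dev *dev = arg;       /* parameter registered below */

		my_dev_ack(dev);  /* the generated stub invokes _IntExit() */
	}

	void my_dev_init(struct my_dev *dev)
	{
		/* hypothetical IRQ 11 at priority 2: a vector is allocated
		 * from that priority group and returned; assume a negative
		 * result when no vectors remain in the group
		 */
		int vector = irq_connect(11, 2, my_isr, dev);

		if (vector < 0) {
			/* no free vectors at this priority level */
		}
	}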


@@ -38,24 +38,23 @@ Intel-specific parts of start_task(). Only FP functionality currently.
 #define SSE_GROUP 0x10

 /**
- *
  * @brief Intel-specific parts of task initialization
  *
+ * @param X pointer to task control block
+ * @param pOpt thread options container
+ *
  * @return N/A
  */
-void _StartTaskArch(
-	struct k_task *X,	/* ptr to task control block */
-	unsigned int *pOpt	/* thread options container */
-	)
+void _StartTaskArch(struct k_task *X, unsigned int *pOpt)
 {
 	/*
 	 * The IA-32 nanokernel implementation uses the USE_FP bit in the
-	 * struct tcs->flags structure as a "dirty bit". The USE_FP flag bit will be
-	 * set whenever a thread uses any non-integer capability, whether it's
-	 * just the x87 FPU capability, SSE instructions, or a combination of
-	 * both. The USE_SSE flag bit will only be set if a thread uses SSE
-	 * instructions.
+	 * struct tcs->flags structure as a "dirty bit". The USE_FP flag bit
+	 * will be set whenever a thread uses any non-integer capability,
+	 * whether it's just the x87 FPU capability, SSE instructions, or a
+	 * combination of both. The USE_SSE flag bit will only be set if a
+	 * thread uses SSE instructions.
 	 *
 	 * However, callers of fiber_fiber_start(), task_fiber_start(), or even
 	 * _new_thread() don't need to follow the protocol used by the IA-32
@@ -68,11 +67,10 @@ void _StartTaskArch(
 	 * Likewise, the placement of tasks into "groups" doesn't need to follow
 	 * the protocol used by the IA-32 nanokernel w.r.t. managing the
 	 * struct tcs->flags field. If a task will utilize just the x87 FPU
-	 *capability,
-	 * then the task only needs to be placed in the FPU_GROUP group.
-	 * If a task utilizes SSE instructions (and possibly x87 FPU
-	 *capability),
-	 * then the task only needs to be placed in the SSE_GROUP group.
+	 * capability, then the task only needs to be placed in the FPU_GROUP
+	 * group. If a task utilizes SSE instructions (and possibly x87 FPU
+	 * capability), then the task only needs to be placed in the SSE_GROUP
+	 * group.
 	 */

 	*pOpt |= (X->group & SSE_GROUP) ? USE_SSE


@@ -48,15 +48,16 @@ supported platforms.
  * implementation to take other actions, such as logging error (or debug)
  * information to a persistent repository and/or rebooting the system.
  *
+ * @param reason the fatal error reason
+ * @param pEsf the pointer to the exception stack frame
+ *
  * @return This function does not return.
  *
  * \NOMANUAL
  */
-FUNC_NORETURN void _SysFatalErrorHandler(
-	unsigned int reason,	/* fatal error reason */
-	const NANO_ESF * pEsf	/* pointer to exception stack frame */
-	)
+FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
+					 const NANO_ESF * pEsf)
 {
 	nano_context_type_t curCtx = sys_execution_context_type_get();


@@ -43,7 +43,7 @@ tNANO _nanokernel = {0};

 #ifdef CONFIG_GDB_INFO
 void _thread_entry_wrapper(_thread_entry_t, _thread_arg_t,
-		_thread_arg_t, _thread_arg_t);
+			   _thread_arg_t, _thread_arg_t);
 #endif /* CONFIG_GDB_INFO */

 /**
@@ -56,20 +56,22 @@ void _thread_entry_wrapper(_thread_entry_t, _thread_arg_t,
  *
  * This function is called by _new_thread() to initialize tasks.
  *
+ * @param pStackMem pointer to thread stack memory
+ * @param stackSize size of a stack in bytes
+ * @param priority thread priority
+ * @param options thread options: USE_FP, USE_SSE
+ *
  * @return N/A
  *
  * \NOMANUAL
  */
-static void _new_thread_internal(
-	char *pStackMem,	/* pointer to thread stack memory */
-	unsigned stackSize,	/* size of stack in bytes */
-	int priority,		/* thread priority */
-	unsigned options	/* thread options: USE_FP, USE_SSE */
-	)
+static void _new_thread_internal(char *pStackMem, unsigned stackSize,
+				 int priority, unsigned options)
 {
 	unsigned long *pInitialCtx;
-	struct tcs *tcs = (struct tcs *)pStackMem; /* ptr to the new task's tcs */
+	/* ptr to the new task's tcs */
+	struct tcs *tcs = (struct tcs *)pStackMem;

 #ifndef CONFIG_FP_SHARING
 	ARG_UNUSED(options);
@@ -270,21 +272,24 @@ __asm__("\t.globl _thread_entry\n"
  * The "thread control block" (TCS) is carved from the "end" of the specified
  * thread stack memory.
  *
+ * @param pStackMem the pointer to aligned stack memory
+ * @param stackSize the stack size in bytes
+ * @param pEntry thread entry point routine
+ * @param parameter1 first param to entry point
+ * @param parameter2 second param to entry point
+ * @param parameter3 third param to entry point
+ * @param priority thread priority
+ * @param options thread options: USE_FP, USE_SSE
+ *
  * @return opaque pointer to initialized TCS structure
  *
  * \NOMANUAL
  */
-void _new_thread(
-	char *pStackMem,	/* pointer to aligned stack memory */
-	unsigned stackSize,	/* size of stack in bytes */
-	_thread_entry_t pEntry,	/* thread entry point function */
-	void *parameter1,	/* first parameter to thread entry point function */
-	void *parameter2,	/* second parameter to thread entry point function */
-	void *parameter3,	/* third parameter to thread entry point function */
-	int priority,		/* thread priority */
-	unsigned options	/* thread options: USE_FP, USE_SSE */
-	)
+void _new_thread(char *pStackMem, unsigned stackSize, _thread_entry_t pEntry,
+		 void *parameter1, void *parameter2, void *parameter3,
+		 int priority, unsigned options)
 {
 	unsigned long *pInitialThread;


@@ -49,14 +49,14 @@ extern int _AdvIdleCheckSleep(void);
  * passes to the _AdvIdleFunc() that put the system to sleep, which then
  * finishes executing.
  *
+ * @param _Cstart the address of the _Cstart function
+ * @param _gdt the address of the global descriptor table in RAM
+ * @param _GlobalTss the address of the TSS descriptor
+ *
  * @return does not return to caller
  */
-extern void _AdvIdleStart(
-	void (*_Cstart)(void),	/* addr of _Cstart function */
-	void *_gdt,		/* addr of global descriptor table in RAM */
-	void *_GlobalTss	/* addr of TSS descriptor */
-	);
+extern void _AdvIdleStart(void (*_Cstart)(void), void *_gdt, void *_GlobalTss);

 /*
  * @brief Perform advanced sleep
@@ -66,11 +66,12 @@ extern void _AdvIdleStart(
  * to sleep and then later allows it to resume processing; if not, the routine
  * returns immediately without sleeping.
  *
+ * @param ticks the upcoming kernel idle time
+ *
  * @return non-zero if advanced sleep occurred; otherwise zero
  */
-extern int _AdvIdleFunc(int32_t ticks	/* upcoming kernel idle time */
-			);
+extern int _AdvIdleFunc(int32_t ticks);

 #endif /* CONFIG_ADVANCED_IDLE */


@@ -808,6 +808,9 @@ static inline void nanoArchInit(void)
  *
  * @brief Set the return value for the specified fiber (inline)
  *
+ * @param fiber pointer to fiber
+ * @param value value to set as return value
+ *
  * The register used to store the return value from a function call
  * invocation is set to <value>. It is assumed that the specified <fiber> is
  * pending, and thus the fiber's context is stored in its TCS.
@@ -817,10 +820,7 @@ static inline void nanoArchInit(void)
  *
  * \NOMANUAL
  */
-static inline void fiberRtnValueSet(
-	struct tcs *fiber,	/* pointer to fiber */
-	unsigned int value	/* value to set as return value */
-	)
+static inline void fiberRtnValueSet(struct tcs *fiber, unsigned int value)
 {
 	/* write into 'eax' slot created in _Swap() entry */


@@ -347,14 +347,14 @@ void _loapic_int_vec_set(unsigned int irq, /* IRQ number of the interrupt */
  *
  * @brief Enable an individual LOAPIC interrupt (IRQ)
  *
+ * @param irq the IRQ number of the interrupt
+ *
  * This routine clears the interrupt mask bit in the LVT for the specified IRQ
  *
  * @return N/A
  */
-void _loapic_irq_enable(unsigned int irq	/* IRQ number of
-						   the interrupt */
-			)
+void _loapic_irq_enable(unsigned int irq)
 {
 	volatile int *pLvt;	/* pointer to local vector table */
 	int32_t oldLevel;	/* previous interrupt lock level */
@@ -378,14 +378,14 @@ void _loapic_irq_enable(unsigned int irq /* IRQ number of
  *
  * @brief Disable an individual LOAPIC interrupt (IRQ)
  *
+ * @param irq the IRQ number of the interrupt
+ *
  * This routine clears the interrupt mask bit in the LVT for the specified IRQ
  *
  * @return N/A
  */
-void _loapic_irq_disable(unsigned int irq	/* IRQ number of the
-						   interrupt */
-			 )
+void _loapic_irq_disable(unsigned int irq)
 {
 	volatile int *pLvt;	/* pointer to local vector table */
 	int32_t oldLevel;	/* previous interrupt lock level */


@@ -65,6 +65,15 @@
  * IRQ28 -> LOAPIC_LINT1
  * IRQ29 -> LOAPIC_ERROR
  *
+ * @param irq virtualized IRQ
+ * @param priority get vector from <priority> group
+ * @param boiRtn pointer to the BOI routine; NULL if none
+ * @param eoiRtn pointer to the EOI routine; NULL if none
+ * @param boiRtnParm the BOI routine parameter, if any
+ * @param eoiRtnParm the EOI routine parameter, if any
+ * @param boiParamRequired the BOI routine parameter req?
+ * @param eoiParamRequired the EOI routine parameter req?
+ *
  * @return the allocated interrupt vector
  *
  * @internal
@@ -73,16 +82,10 @@
  * parameters are invalid.
  * @endinternal
  */
-int _SysIntVecAlloc(
-	unsigned int irq,			/* virtualized IRQ */
-	unsigned int priority,			/* get vector from <priority> group */
-	NANO_EOI_GET_FUNC * boiRtn,		/* ptr to BOI routine; NULL if none */
-	NANO_EOI_GET_FUNC * eoiRtn,		/* ptr to EOI routine; NULL if none */
-	void **boiRtnParm,			/* BOI routine parameter, if any */
-	void **eoiRtnParm,			/* EOI routine parameter, if any */
-	unsigned char *boiParamRequired,	/* BOI routine parameter req? */
-	unsigned char *eoiParamRequired		/* BOI routine parameter req? */
-	)
+int _SysIntVecAlloc(unsigned int irq, unsigned int priority,
+		    NANO_EOI_GET_FUNC * boiRtn, NANO_EOI_GET_FUNC * eoiRtn,
+		    void **boiRtnParm, void **eoiRtnParm,
+		    unsigned char *boiParamRequired,
+		    unsigned char *eoiParamRequired)
 {
 	int vector;

@@ -108,13 +111,11 @@ int _SysIntVecAlloc(
 	 * Set up the appropriate interrupt controller to generate the allocated
 	 * interrupt vector for the specified IRQ. Also, provide the required
 	 * EOI and BOI related information for the interrupt stub code
-	 *generation
-	 * step.
+	 * generation step.
 	 *
 	 * For software interrupts (NANO_SOFT_IRQ), skip the interrupt
-	 *controller
-	 * programming step, and indicate that a BOI and EOI handler is not
-	 * required.
+	 * controller programming step, and indicate that a BOI and EOI handler
+	 * is not required.
 	 *
 	 * Skip both steps if a vector could not be allocated.
 	 */
@@ -133,9 +134,8 @@ int _SysIntVecAlloc(
 		/*
 		 * query IOAPIC driver to obtain EOI handler information
-		 * for the
-		 * interrupt vector that was just assigned to the
-		 * specified IRQ
+		 * for the interrupt vector that was just assigned to
+		 * the specified IRQ
 		 */

 		*eoiRtn = (NANO_EOI_GET_FUNC)_ioapic_eoi_get(
@@ -170,10 +170,12 @@ int _SysIntVecAlloc(
  *   IOAPIC is programmed for these IRQs
  * - The remaining IRQs are provided by the LOAPIC and hence the LOAPIC is
  *   programmed.
+ *
+ * @param vector the vector number
+ * @param irq the virtualized IRQ
+ *
  */
-void _SysIntVecProgram(unsigned int vector,	/* vector number */
-		       unsigned int irq		/* virtualized IRQ */
-		       )
+void _SysIntVecProgram(unsigned int vector, unsigned int irq)
 {
 	if (irq < CONFIG_IOAPIC_NUM_RTES) {
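
With the parameters now listed in the header, a call site is easier to read.
A minimal sketch; the IRQ and priority values are hypothetical, and the
failure handling is left abstract because the @internal note above does not
name a specific error value:

	NANO_EOI_GET_FUNC boiRtn, eoiRtn;
	void *boiRtnParm, *eoiRtnParm;
	unsigned char boiParamRequired, eoiParamRequired;

	/* allocate a vector for (hypothetical) hardware IRQ 10, priority 3 */
	int vector = _SysIntVecAlloc(10, 3, &boiRtn, &eoiRtn,
				     &boiRtnParm, &eoiRtnParm,
				     &boiParamRequired, &eoiParamRequired);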


@@ -42,6 +42,10 @@ This module implements the PCI H/W access functions.
  *
  * @brief Read a PCI controller register
  *
+ * @param reg PCI register to read
+ * @param data where to put the data
+ * @param size size of the data to read (8/16/32 bits)
+ *
  * This routine reads the specified register from the PCI controller and
  * places the data into the provided buffer.
  *
@@ -49,10 +53,7 @@ This module implements the PCI H/W access functions.
  *
  */
-static void pci_ctrl_read(uint32_t reg,		/* PCI register to read */
-			  uint32_t *data,	/* where to put the data */
-			  uint32_t size		/* size of the data to read (8/16/32 bits) */
-			  )
+static void pci_ctrl_read(uint32_t reg, uint32_t *data, uint32_t size)
 {
 	/* read based on the size requested */
@@ -76,6 +77,10 @@ static void pci_ctrl_read(uint32_t reg, /* PCI register to read */
  *
  * @brief Write a PCI controller register
  *
+ * @param reg PCI register to write
+ * @param data data to write
+ * @param size size of the data to write (8/16/32 bits)
+ *
  * This routine writes the provided data to the specified register in the PCI
  * controller.
  *
@@ -83,11 +88,7 @@ static void pci_ctrl_read(uint32_t reg, /* PCI register to read */
  *
  */
-static void pci_ctrl_write(uint32_t reg,	/* PCI register to write */
-			   uint32_t data,	/* data to write */
-			   uint32_t size	/* size of the data to write (8/16/32 bits)
-						 */
-			   )
+static void pci_ctrl_write(uint32_t reg, uint32_t data, uint32_t size)
 {
 	/* write based on the size requested */
@@ -111,22 +112,25 @@ static void pci_ctrl_write(uint32_t reg, /* PCI register to write */
  *
  * @brief Read the PCI controller data register
  *
+ * @param controller controller number
+ * @param offset is the offset within the data region
+ * @param data is the returned data
+ * @param size is the size of the data to read
+ *
  * This routine reads the data register of the specified PCI controller.
  *
  * @return 0 or -1
  *
  */
-static int pci_ctrl_data_read(uint32_t controller,	/* controller number */
-			      uint32_t offset,		/* offset within data region */
-			      uint32_t *data,		/* returned data */
-			      uint32_t size		/* size of data to read */
-			      )
+static int pci_ctrl_data_read(uint32_t controller, uint32_t offset,
+			      uint32_t *data, uint32_t size)
 {
 	/* we only support one controller */
-	if (controller != DEFAULT_PCI_CONTROLLER)
+	if (controller != DEFAULT_PCI_CONTROLLER) {
 		return (-1);
+	}

 	pci_ctrl_read(PCI_CTRL_DATA_REG + offset, data, size);
@@ -137,6 +141,11 @@ static int pci_ctrl_data_read(uint32_t controller, /* controller number */
  *
  * @brief Write the PCI controller data register
  *
+ * @param controller the controller number
+ * @param offset is the offset within the address register
+ * @param data is the data to write
+ * @param size is the size of the data
+ *
  * This routine writes the provided data to the data register of the
  * specified PCI controller.
  *
@@ -144,16 +153,14 @@ static int pci_ctrl_data_read(uint32_t controller, /* controller number */
  *
  */
-static int pci_ctrl_data_write(uint32_t controller,	/* controller number */
-			       uint32_t offset,		/* offset within address register */
-			       uint32_t data,		/* data to write */
-			       uint32_t size		/* size of data */
-			       )
+static int pci_ctrl_data_write(uint32_t controller, uint32_t offset,
+			       uint32_t data, uint32_t size)
 {
 	/* we only support one controller */
-	if (controller != DEFAULT_PCI_CONTROLLER)
+	if (controller != DEFAULT_PCI_CONTROLLER) {
 		return (-1);
+	}

 	pci_ctrl_write(PCI_CTRL_DATA_REG + offset, data, size);
@@ -164,6 +171,11 @@ static int pci_ctrl_data_write(uint32_t controller, /* controller number */
  *
  * @brief Write the PCI controller address register
  *
+ * @param controller is the controller number
+ * @param offset is the offset within the address register
+ * @param data is the data to write
+ * @param size is the size of the data
+ *
  * This routine writes the provided data to the address register of the
  * specified PCI controller.
  *
@@ -171,16 +183,14 @@ static int pci_ctrl_data_write(uint32_t controller, /* controller number */
  *
  */
-static int pci_ctrl_addr_write(uint32_t controller,	/* controller number */
-			       uint32_t offset,		/* offset within address register */
-			       uint32_t data,		/* data to write */
-			       uint32_t size		/* size of data */
-			       )
+static int pci_ctrl_addr_write(uint32_t controller, uint32_t offset,
+			       uint32_t data, uint32_t size)
 {
 	/* we only support one controller */
-	if (controller != DEFAULT_PCI_CONTROLLER)
+	if (controller != DEFAULT_PCI_CONTROLLER) {
 		return (-1);
+	}

 	pci_ctrl_write(PCI_CTRL_ADDR_REG + offset, data, size);

 	return 0;
@@ -229,15 +239,17 @@ static int pci_ctrl_addr_write(uint32_t controller, /* controller number */
  * Reading of PCI data must be performed as an atomic operation. It is up to
  * the caller to enforce this.
  *
+ * @param controller is the PCI controller number to use
+ * @param addr is the PCI address to read
+ * @param size is the size of the data in bytes
+ * @param data is a pointer to the data read from the device
+ *
  * @return N/A
  *
  */
-void pci_read(uint32_t controller,	/* PCI controller to use */
-	      union pci_addr_reg addr,	/* PCI address to read */
-	      uint32_t size,		/* size of data in bytes */
-	      uint32_t *data		/* data read from device */
-	      )
+void pci_read(uint32_t controller, union pci_addr_reg addr,
+	      uint32_t size, uint32_t *data)
 {
 	uint32_t access_size;
 	uint32_t access_offset;
@@ -318,16 +330,17 @@ void pci_read(uint32_t controller, /* PCI controller to use */
  * Writing of PCI data must be performed as an atomic operation. It is up to
  * the caller to enforce this.
  *
+ * @param controller is the PCI controller to use
+ * @param addr is the PCI address to write
+ * @param size is the size in bytes to write
+ * @param data is the data to write
  *
  * @return N/A
  *
  */
-void pci_write(uint32_t controller,	/* controller to use */
-	       union pci_addr_reg addr,	/* PCI address to write */
-	       uint32_t size,		/* size in bytes */
-	       uint32_t data		/* data to write */
-	       )
+void pci_write(uint32_t controller, union pci_addr_reg addr,
+	       uint32_t size, uint32_t data)
 {
 	uint32_t access_size;
 	uint32_t access_offset;
@@ -376,8 +389,8 @@ void pci_write(uint32_t controller, /* controller to use */
  */
 void pci_header_get(uint32_t controller,
 		    union pci_addr_reg pci_ctrl_addr,
-			union pci_dev *pci_dev_header)
+		    union pci_dev *pci_dev_header)
 {
 	uint32_t i;
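
A short sketch of the public entry point ties the layers together: pci_read()
resolves the access size and ultimately lands in pci_ctrl_read() above. The
field names of union pci_addr_reg are assumptions based on conventional PCI
configuration addressing, not confirmed by this hunk:

	uint32_t vendor_device;
	union pci_addr_reg addr = {0};

	/* hypothetical field names: bus 0, device 3, function 0, register 0 */
	addr.field.bus = 0;
	addr.field.device = 3;
	addr.field.func = 0;
	addr.field.reg = 0;

	/* read 4 bytes from the default controller; per the comment above,
	 * the caller must make the whole access atomic
	 */
	pci_read(DEFAULT_PCI_CONTROLLER, addr, 4, &vendor_device);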


@@ -145,10 +145,14 @@ void _k_fifo_enque_request(struct k_args *A)
 	}
 }

-int _task_fifo_put(kfifo_t queue,	/* FIFO queue */
-		   void *data,		/* ptr to data to add to queue */
-		   int32_t time		/* maximum number of ticks to wait */
-		   )
+/**
+ * @brief Adds data to the FIFO queue
+ *
+ * @param queue is the FIFO queue to add data to
+ * @param data is a pointer to the data to add
+ * @param time is the maximum number of ticks to wait
+ */
+int _task_fifo_put(kfifo_t queue, void *data, int32_t time)
 {
 	struct k_args A;
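
The new header documents the tick-budget pattern shared by these microkernel
services. A minimal usage sketch, assuming RC_OK/RC_TIME result codes as used
elsewhere in this commit and a hypothetical queue handle MY_QUEUE:

	uint32_t item = 42;

	/* wait up to 10 ticks for room in the (hypothetical) MY_QUEUE FIFO */
	int rc = _task_fifo_put(MY_QUEUE, &item, 10);

	if (rc == RC_TIME) {
		/* the queue stayed full for the whole 10-tick budget */
	}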


@@ -64,7 +64,6 @@ extern int _k_kernel_idle(void);
  *
  * @return N/A
  */
-
 void _main(void)
 {
 	_sys_device_do_config_level(_SYS_INIT_LEVEL_NANOKERNEL);


@@ -134,6 +134,7 @@ void task_irq_free(kirq_t irq_obj)
  *
  * This re-enables the interrupt for a task IRQ object.
  * @param irq_obj IRQ object identifier
+ *
  * @return N/A
  */
 void task_irq_ack(kirq_t irq_obj)
@@ -152,6 +153,7 @@ void task_irq_ack(kirq_t irq_obj)
  * This tests a task IRQ object to see if it has signaled an interrupt.
  * @param irq_obj IRQ object identifier
  * @param time Time to wait (in ticks)
+ *
  * @return RC_OK, RC_FAIL, or RC_TIME
  */
 int _task_irq_test(kirq_t irq_obj, int32_t time)
@@ -169,6 +171,7 @@ int _task_irq_test(kirq_t irq_obj, int32_t time)
  *
  * This routine allocates a task IRQ object to a task.
  * @param arg Pointer to registration request arguments
+ *
  * @return ptr to allocated task IRQ object if successful, NULL if not
  */
 static int _k_task_irq_alloc(void *arg)
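
Taken together, the routines above form a wait/acknowledge loop. A minimal
sketch, assuming a kirq_t handle obtained earlier from the task IRQ
allocation API:

	void my_irq_loop(kirq_t irq_obj)
	{
		for (;;) {
			/* block up to 100 ticks for the IRQ to signal */
			if (_task_irq_test(irq_obj, 100) == RC_OK) {
				/* service the device, then re-enable the
				 * interrupt for the next round
				 */
				task_irq_ack(irq_obj);
			}
		}
	}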


@@ -39,6 +39,9 @@
  *
  * @brief Copy a packet
  *
+ * @param in the packet to be copied
+ * @param out the packet to copy to
+ *
  * @return N/A
  */
 static void copy_packet(struct k_args **out, struct k_args *in)


@@ -530,17 +530,18 @@ void _k_mem_pool_block_get(struct k_args *A)
  * This routine allocates a free block from the specified memory pool, ensuring
  * that its size is at least as big as the size requested (in bytes).
  *
+ * @param blockptr pointer to requested block
+ * @param pool_id pool from which to get block
+ * @param reqsize requested block size
+ * @param time maximum number of ticks to wait
+ *
  * @return RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively
  */
-
-int _task_mem_pool_alloc(struct k_block *blockptr, /* ptr to requested block */
-			 kmemory_pool_t pool_id, /* pool from which to get block */
-			 int reqsize, /* requested block size */
-			 int32_t time /* maximum number of ticks to wait */
-			 )
+int _task_mem_pool_alloc(struct k_block *blockptr, kmemory_pool_t pool_id,
+			 int reqsize, int32_t time)
 {
 	struct k_args A;

 	A.Comm = _K_SVC_MEM_POOL_BLOCK_GET;
 	A.Time.ticks = time;
 	A.args.p1.pool_id = pool_id;
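
Usage sketch (not part of the commit); the pool id, block size, and timeout values are illustrative.

static void pool_alloc_sketch(kmemory_pool_t pool_id)
{
	struct k_block block;

	/* request at least 256 bytes, waiting up to 5 ticks */
	if (_task_mem_pool_alloc(&block, pool_id, 256, 5) == RC_OK) {
		/* ... use the block, then return it to the pool ... */
	} else {
		/* RC_FAIL or RC_TIME: no suitable block became free */
	}
}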

View file

@@ -28,7 +28,6 @@
  *
  * @return N/A
  */
-
 static void mvdreq_docont(struct k_args *Cont)
 {
 	struct k_args *next;
@@ -46,7 +45,6 @@ static void mvdreq_docont(struct k_args *Cont)
  *
  * @return N/A
  */
-
 static void mvdreq_copy(struct moved_req *ReqArgs)
 {
 	memcpy(ReqArgs->destination, ReqArgs->source,
@@ -64,7 +62,6 @@ static void mvdreq_copy(struct moved_req *ReqArgs)
  *
  * @return N/A
  */
-
 void _k_movedata_request(struct k_args *Req)
 {
 	struct moved_req *ReqArgs;

View file

@@ -264,14 +264,11 @@ void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock
  * This routine is the entry to the mutex lock kernel service.
  *
  * @param mutex Mutex object
- * @param time Timeout value (in ticks)
+ * @param time The maximum timeout value (in ticks)
  *
  * @return RC_OK on success, RC_FAIL on error, RC_TIME on timeout
  */
-int _task_mutex_lock(
-	kmutex_t mutex, /* mutex to lock */
-	int32_t time /* max # of ticks to wait for mutex */
-	)
+int _task_mutex_lock(kmutex_t mutex, int32_t time)
 {
 	struct k_args A; /* argument packet */
@@ -291,13 +288,11 @@ int _task_mutex_lock(
  * of the current owner to the priority level it had when it acquired the
  * mutex.
  *
- * @param A k_args
+ * @param A pointer to mutex unlock request arguments
  *
  * @return N/A
  */
-void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock
-					 request arguments */
-	)
+void _k_mutex_unlock(struct k_args *A)
 {
 	struct _k_mutex_struct *Mutex; /* pointer to internal mutex structure */
 	int MutexId; /* mutex ID obtained from unlock request */
@@ -380,12 +375,11 @@ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock
  *
  * This routine is the entry to the mutex unlock kernel service.
  *
- * @param mutex Mutex
+ * @param mutex mutex to unlock
  *
  * @return N/A
  */
-void _task_mutex_unlock(kmutex_t mutex /* mutex to unlock */
-	)
+void _task_mutex_unlock(kmutex_t mutex)
 {
 	struct k_args A; /* argument packet */
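
Usage sketch (not part of the commit) pairing the two entry points; the 100-tick timeout is illustrative.

static void mutex_sketch(kmutex_t mutex)
{
	/* block for up to 100 ticks waiting for ownership */
	if (_task_mutex_lock(mutex, 100) == RC_OK) {
		/* ... critical section ... */

		_task_mutex_unlock(mutex);
	}
}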

View file

@@ -36,7 +36,6 @@ a task to measure the overhead involved in issuing a kernel service request.
  *
  * @return N/A
  */
-
 void _k_nop(struct k_args *A)
 {
 	ARG_UNUSED(A);
@@ -50,7 +49,6 @@ void _k_nop(struct k_args *A)
  *
  * @return N/A
  */
-
 void _task_nop(void)
 {
 	struct k_args A;
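
Sketch of the overhead measurement the surrounding comment describes (not part of the commit); read_cycles() is a placeholder for whatever cycle counter the platform provides, not an API from this tree.

static void nop_overhead_sketch(void)
{
	uint32_t start = read_cycles();	/* placeholder, not a real API */
	uint32_t per_request;
	int i;

	for (i = 0; i < 1000; i++) {
		_task_nop();	/* round trip to the kernel server */
	}

	/* average cost of one no-op kernel service request */
	per_request = (read_cycles() - start) / 1000;
	(void)per_request;
}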

View file

@@ -28,7 +28,6 @@
  *
  * @return N/A
  */
-
 void _k_pipe_get_request(struct k_args *RequestOrig)
 {
 	struct k_args *Request;
@@ -176,7 +175,6 @@ void _k_pipe_get_request(struct k_args *RequestOrig)
  *
  * @return N/A
  */
-
 void _k_pipe_get_timeout(struct k_args *ReqProc)
 {
 	__ASSERT_NO_MSG(NULL != ReqProc->Time.timer);
@@ -196,7 +194,6 @@ void _k_pipe_get_timeout(struct k_args *ReqProc)
  *
  * @return N/A
  */
-
 void _k_pipe_get_reply(struct k_args *ReqProc)
 {
 	__ASSERT_NO_MSG(
@@ -248,7 +245,6 @@ void _k_pipe_get_reply(struct k_args *ReqProc)
  *
  * @return N/A
  */
-
 void _k_pipe_get_ack(struct k_args *Request)
 {
 	struct k_args *LocalReq;

View file

@@ -29,7 +29,6 @@
  *
  * @return N/A
  */
-
 void _k_pipe_put_request(struct k_args *RequestOrig)
 {
 	struct k_args *Request;
@@ -195,7 +194,6 @@ void _k_pipe_put_request(struct k_args *RequestOrig)
  *
  * @return N/A
  */
-
 void _k_pipe_put_timeout(struct k_args *ReqProc)
 {
 	__ASSERT_NO_MSG(NULL != ReqProc->Time.timer);
@@ -215,7 +213,6 @@ void _k_pipe_put_timeout(struct k_args *ReqProc)
  *
  * @return N/A
  */
-
 void _k_pipe_put_reply(struct k_args *ReqProc)
 {
 	__ASSERT_NO_MSG(
@@ -269,7 +266,6 @@ void _k_pipe_put_reply(struct k_args *ReqProc)
  *
  * @return N/A
  */
-
 void _k_pipe_put_ack(struct k_args *Request)
 {
 	if (_ASYNCREQ == _k_pipe_request_type_get(&Request->args)) {

View file

@@ -46,7 +46,6 @@ possibly copy the remaining data
  *
  * @return N/A
  */
-
 void _k_pipe_movedata_ack(struct k_args *pEOXfer)
 {
 	struct _pipe_xfer_ack_arg *pipe_xfer_ack = &pEOXfer->args.pipe_xfer_ack;
@@ -202,9 +201,8 @@ void _k_pipe_movedata_ack(struct k_args *pEOXfer)
  *
  * @return N/A
  */
-
 static kpriority_t move_priority_compute(struct k_args *writer_ptr,
 					 struct k_args *reader_ptr)
 {
 	kpriority_t move_priority;
@@ -226,12 +224,11 @@ static kpriority_t move_priority_compute(struct k_args *writer_ptr,
  *
  * @return N/A
  */
-
 static void setup_movedata(struct k_args *A,
 			   struct _k_pipe_struct *pipe_ptr, XFER_TYPE xfer_type,
 			   struct k_args *writer_ptr, struct k_args *reader_ptr,
 			   void *destination, void *source,
 			   uint32_t size, int XferID)
 {
 	struct k_args *pContSend;
 	struct k_args *pContRecv;
@@ -239,7 +236,10 @@ static void setup_movedata(struct k_args *A,
 	A->Comm = _K_SVC_MOVEDATA_REQ;
 	A->Ctxt.task = NULL;
-	/* this caused problems when != NULL related to set/reset of state bits */
+	/*
+	 * this caused problems when != NULL related to set/reset of
+	 * state bits
+	 */
 	A->args.moved_req.action = (MovedAction)(MVDACT_SNDACK | MVDACT_RCVACK);
 	A->args.moved_req.source = source;
@@ -306,7 +306,7 @@ static void setup_movedata(struct k_args *A,
 }

 static int ReaderInProgressIsBlocked(struct _k_pipe_struct *pipe_ptr,
 				     struct k_args *reader_ptr)
 {
 	int iSizeSpaceInReader;
 	int iAvailBufferData;
@@ -352,7 +352,7 @@ static int ReaderInProgressIsBlocked(struct _k_pipe_struct *pipe_ptr,
 }

 static int WriterInProgressIsBlocked(struct _k_pipe_struct *pipe_ptr,
 				     struct k_args *writer_ptr)
 {
 	int iSizeDataInWriter;
 	int iFreeBufferSpace;
@@ -407,8 +407,8 @@ static int WriterInProgressIsBlocked(struct _k_pipe_struct *pipe_ptr,
  *
  * @return N/A
  */
-
-static void pipe_read(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewReader)
+static void pipe_read(struct _k_pipe_struct *pipe_ptr,
+		      struct k_args *pNewReader)
 {
 	struct k_args *reader_ptr;
 	struct _pipe_xfer_req_arg *pipe_read_req;
@@ -422,13 +422,13 @@ static void pipe_read(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewReader
 	reader_ptr = (pNewReader != NULL) ? pNewReader : pipe_ptr->readers;
 	__ASSERT_NO_MSG((pipe_ptr->readers == pNewReader) ||
 			(NULL == pipe_ptr->readers) || (NULL == pNewReader));
 	pipe_read_req = &reader_ptr->args.pipe_xfer_req;
 	do {
 		size = min(pipe_ptr->desc.available_data_count,
 			   pipe_read_req->total_size - pipe_read_req->xferred_size);
 		if (size == 0) {
 			return;
@@ -453,11 +453,14 @@ static void pipe_read(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewReader
 		pipe_read_req->xferred_size += ret;
 		if (pipe_read_req->xferred_size == pipe_read_req->total_size) {
-			_k_pipe_request_status_set(pipe_read_req, TERM_SATISFIED);
+			_k_pipe_request_status_set(pipe_read_req,
+						   TERM_SATISFIED);
 			if (reader_ptr->head != NULL) {
 				DeListWaiter(reader_ptr);
 				myfreetimer(&reader_ptr->Time.timer);
 			}
 			return;
 		} else {
 			_k_pipe_request_status_set(pipe_read_req, XFER_BUSY);
@@ -476,8 +479,8 @@ static void pipe_read(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewReader
  *
  * @return N/A
  */
-
-static void pipe_write(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewWriter)
+static void pipe_write(struct _k_pipe_struct *pipe_ptr,
+		       struct k_args *pNewWriter)
 {
 	struct k_args *writer_ptr;
 	struct _pipe_xfer_req_arg *pipe_write_req;
@@ -491,14 +494,14 @@ static void pipe_write(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewWrite
 	writer_ptr = (pNewWriter != NULL) ? pNewWriter : pipe_ptr->writers;
 	__ASSERT_NO_MSG(!((pipe_ptr->writers != pNewWriter) &&
 			  (NULL != pipe_ptr->writers) && (NULL != pNewWriter)));
 	pipe_write_req = &writer_ptr->args.pipe_xfer_req;
 	do {
 		size = min((numIterations == 2) ? pipe_ptr->desc.free_space_count
 				: pipe_ptr->desc.free_space_post_wrap_around,
 			   pipe_write_req->total_size - pipe_write_req->xferred_size);
 		if (size == 0) {
 			continue;
@@ -512,8 +515,8 @@ static void pipe_write(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewWrite
 	}

 	GETARGS(Moved_req);
-	setup_movedata(Moved_req, pipe_ptr, XFER_W2B, writer_ptr, NULL, write_ptr,
-		       (char *)(pipe_write_req->data_ptr) +
+	setup_movedata(Moved_req, pipe_ptr, XFER_W2B, writer_ptr, NULL,
+		       write_ptr, (char *)(pipe_write_req->data_ptr) +
 		       OCTET_TO_SIZEOFUNIT(pipe_write_req->xferred_size),
 		       ret, (numIterations == 2) ? id : -1);
 	_k_movedata_request(Moved_req);
@@ -523,7 +526,8 @@ static void pipe_write(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewWrite
 	pipe_write_req->xferred_size += ret;
 	if (pipe_write_req->xferred_size == pipe_write_req->total_size) {
-		_k_pipe_request_status_set(pipe_write_req, TERM_SATISFIED);
+		_k_pipe_request_status_set(pipe_write_req,
+					   TERM_SATISFIED);
 		if (writer_ptr->head != NULL) {
 			/* only listed requests have a timer */
 			DeListWaiter(writer_ptr);
@@ -538,17 +542,17 @@ static void pipe_write(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewWrite
 }

 /**
- *
  * @brief Update the pipe transfer status
  *
+ * @param pActor pointer to struct k_args to be used by actor
+ * @param pipe_xfer_req pointer to actor's pipe process structure
+ * @param bytesXferred number of bytes transferred
+ *
  * @return N/A
  */
-
-static void pipe_xfer_status_update(
-	struct k_args *pActor, /* ptr to struct k_args to be used by actor */
-	struct _pipe_xfer_req_arg *pipe_xfer_req, /* ptr to actor's pipe process structure */
-	int bytesXferred /* # of bytes transferred */
-	)
+static void pipe_xfer_status_update(struct k_args *pActor,
+				    struct _pipe_xfer_req_arg *pipe_xfer_req,
+				    int bytesXferred)
 {
 	pipe_xfer_req->num_pending_xfers++;
 	pipe_xfer_req->xferred_size += bytesXferred;
@@ -565,22 +569,26 @@ static void pipe_xfer_status_update(
 }

 /**
- *
  * @brief Read and/or write from/to the pipe
  *
+ * @param pipe_ptr pointer to pipe structure
+ * @param pNewWriter pointer to new writer struct k_args
+ * @param pNewReader pointer to new reader struct k_args
+ *
  * @return N/A
  */
-
-static void pipe_read_write(
-	struct _k_pipe_struct *pipe_ptr, /* ptr to pipe structure */
-	struct k_args *pNewWriter, /* ptr to new writer struct k_args */
-	struct k_args *pNewReader /* ptr to new reader struct k_args */
-	)
+static void pipe_read_write(struct _k_pipe_struct *pipe_ptr,
+			    struct k_args *pNewWriter,
+			    struct k_args *pNewReader)
 {
-	struct k_args *reader_ptr; /* ptr to struct k_args to be used by reader */
-	struct k_args *writer_ptr; /* ptr to struct k_args to be used by writer */
-	struct _pipe_xfer_req_arg *pipe_write_req; /* ptr to writer's pipe process structure */
-	struct _pipe_xfer_req_arg *pipe_read_req; /* ptr to reader's pipe process structure */
+	/* ptr to struct k_args to be used by reader */
+	struct k_args *reader_ptr;
+	/* ptr to struct k_args to be used by writer */
+	struct k_args *writer_ptr;
+	/* ptr to writer's pipe process structure */
+	struct _pipe_xfer_req_arg *pipe_write_req;
+	/* ptr to reader's pipe process structure */
+	struct _pipe_xfer_req_arg *pipe_read_req;
 	int iT1;
 	int iT2;
@@ -589,12 +597,12 @@ static void pipe_read_write(
 	writer_ptr = (pNewWriter != NULL) ? pNewWriter : pipe_ptr->writers;
 	__ASSERT_NO_MSG((pipe_ptr->writers == pNewWriter) ||
 			(NULL == pipe_ptr->writers) || (NULL == pNewWriter));
 	reader_ptr = (pNewReader != NULL) ? pNewReader : pipe_ptr->readers;
 	__ASSERT_NO_MSG((pipe_ptr->readers == pNewReader) ||
 			(NULL == pipe_ptr->readers) || (NULL == pNewReader));

 	/* Preparation */
 	pipe_write_req = &writer_ptr->args.pipe_xfer_req;
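
Illustration (not kernel code) of the per-iteration sizing rule that pipe_read() and pipe_write() apply above: each pass moves the smaller of what the pipe buffer can supply and what the request still needs, restated here as a standalone helper.

static int next_chunk_size(int avail_in_buffer, int total_size,
			   int xferred_size)
{
	int remaining = total_size - xferred_size;

	/* same effect as the min() calls in pipe_read()/pipe_write() */
	return (avail_in_buffer < remaining) ? avail_in_buffer : remaining;
}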

View file

@@ -46,7 +46,6 @@ extern const kernelfunc _k_server_dispatch_table[];
  *
  * @return pointer to selected task
  */
-
 static struct k_task *next_task_select(void)
 {
 	int K_PrioListIdx;
@@ -82,7 +81,6 @@ static struct k_task *next_task_select(void)
  *
  * @return Does not return.
  */
-
 FUNC_NORETURN void _k_server(int unused1, int unused2)
 {
 	struct k_args *pArgs;

View file

@@ -146,7 +146,6 @@ static inline void _TimeSliceUpdate(void)
  *
  * @return number of ticks to process
  */
-
 static inline int32_t _SysIdleElapsedTicksGet(void)
 {
 #ifdef CONFIG_TICKLESS_IDLE

View file

@@ -152,7 +152,6 @@ void _k_timeout_free(struct k_timer *T)
  * @param ticks Number of ticks
  * @return N/A
  */
-
 void _k_timer_list_update(int ticks)
 {
 	struct k_timer *T;

View file

@@ -42,7 +42,6 @@ function __stack_chk_fail and global variable __stack_chk_guard.
  *
  * @return Does not return
  */
-
 void FUNC_NORETURN _StackCheckHandler(void)
 {
 	/* Stack canary error is a software fatal condition; treat it as such.
@@ -61,7 +60,6 @@ void FUNC_NORETURN _StackCheckHandler(void)
  * Symbol referenced by GCC compiler generated code for canary value.
  * The canary value gets initialized in _Cstart().
  */
-
 void __noinit *__stack_chk_guard;

 /**
@@ -71,6 +69,5 @@ void __noinit *__stack_chk_guard;
  * This routine is invoked when a stack canary error is detected, indicating
  * a buffer overflow or stack corruption problem.
  */
-
 FUNC_ALIAS(_StackCheckHandler, __stack_chk_fail, void);

 #endif
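
Illustration (not part of the commit): with stack protection enabled, the compiler emits checks roughly equivalent to the following around functions with on-stack buffers, which is how control reaches __stack_chk_fail and, via the alias above, _StackCheckHandler.

void guarded_function_sketch(void)
{
	void *canary = __stack_chk_guard;	/* prologue: stash guard */
	char buf[64];

	/* ... body that could overflow buf and clobber the canary ... */
	(void)buf;

	if (canary != __stack_chk_guard) {	/* epilogue: verify */
		__stack_chk_fail();		/* never returns */
	}
}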

View file

@@ -45,7 +45,6 @@ extern CtorFuncPtr __CTOR_END__[];
  * This routine is invoked by the nanokernel routine _Cstart() after the basic
  * hardware has been initialized.
  */
-
 void _Ctors(void)
 {
 	unsigned int nCtors;

View file

@@ -35,11 +35,12 @@ data structure.
  *
  * Sets the nanokernel data structure idle field to a non-zero value.
  *
+ * @param ticks the number of ticks to idle
+ *
  * @return N/A
  *
  * \NOMANUAL
  */
-
 void nano_cpu_set_idle(int32_t ticks)
 {
 	extern tNANO _nanokernel;
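
Usage sketch (not part of the commit): a tickless-idle policy handing the nanokernel an idle budget before the CPU sleeps; the 100-tick figure is illustrative.

static void idle_budget_sketch(void)
{
	/* the CPU may remain idle for up to 100 ticks */
	nano_cpu_set_idle(100);
}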

View file

@@ -62,7 +62,6 @@ uint32_t _hw_irq_to_c_handler_latency = ULONG_MAX;
  * @return N/A
  *
  */
-
 void _int_latency_start(void)
 {
 	/* when interrupts are not already locked, take time stamp */
@@ -82,7 +81,6 @@ void _int_latency_start(void)
  * @return N/A
  *
  */
-
 void _int_latency_stop(void)
 {
 	uint32_t delta;
@@ -133,7 +131,6 @@ void _int_latency_stop(void)
  * @return N/A
  *
  */
-
 void int_latency_init(void)
 {
 	uint32_t timeToReadTime;
@@ -186,7 +183,6 @@ void int_latency_init(void)
  * @return N/A
  *
  */
-
 void int_latency_show(void)
 {
 	uint32_t intHandlerLatency = 0;
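
Usage sketch (not part of the commit) of how these hooks fit together: _int_latency_start() and _int_latency_stop() are driven from the interrupt lock/unlock paths, so instrumented code only initializes and reports.

static void latency_report_sketch(void)
{
	int_latency_init();	/* calibrate and reset the counters */

	/* ... run a workload that locks/unlocks interrupts ... */

	int_latency_show();	/* report the collected figures */
}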

View file

@@ -98,10 +98,12 @@ int _is_thread_essential(struct tcs *pCtx /* pointer to thread */
  * current Zephyr SDK use non-Thumb code that isn't supported on Cortex-M CPUs.
  * For the time being any ARM-based application that attempts to use this API
  * will get a link error (which is preferable to a mysterious exception).
+ *
+ * @param usec_to_wait the number of microseconds to busy wait
+ *
+ * @return N/A
  */
-
 #ifndef CONFIG_ARM
 void sys_thread_busy_wait(uint32_t usec_to_wait)
 {
 	/* use 64-bit math to prevent overflow when multiplying */
@@ -121,7 +123,6 @@ void sys_thread_busy_wait(uint32_t usec_to_wait)
 		}
 	}
 }
-
 #endif /* CONFIG_ARM */
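
Sketch (not part of the commit) of the overflow-safe scaling the in-function comment refers to: widen to 64 bits before multiplying so microseconds times cycles-per-second cannot wrap in 32 bits. The function and parameter names are illustrative.

static uint32_t usec_to_cycles_sketch(uint32_t usec,
				      uint32_t cycles_per_sec)
{
	/* 64-bit intermediate keeps usec * cycles_per_sec from wrapping */
	return (uint32_t)(((uint64_t)usec * cycles_per_sec) / 1000000U);
}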
 #ifdef CONFIG_THREAD_CUSTOM_DATA
@@ -207,6 +208,11 @@ void _thread_exit(struct tcs *thread)
  * passes it three arguments. It also handles graceful termination of the
  * task or fiber if the entry point function ever returns.
  *
+ * @param pEntry address of the app entry point function
+ * @param parameter1 1st arg to the app entry point function
+ * @param parameter2 2nd arg to the app entry point function
+ * @param parameter3 3rd arg to the app entry point function
+ *
  * @internal
  * The 'noreturn' attribute is applied to this function so that the compiler
  * can dispense with generating the usual preamble that is only required for
@@ -215,12 +221,10 @@ void _thread_exit(struct tcs *thread)
  * @return Does not return
  *
  */
-FUNC_NORETURN void _thread_entry(
-	_thread_entry_t pEntry, /* address of app entry point function */
-	_thread_arg_t parameter1, /* 1st arg to app entry point function */
-	_thread_arg_t parameter2, /* 2nd arg to app entry point function */
-	_thread_arg_t parameter3 /* 3rd arg to app entry point function */
-	)
+FUNC_NORETURN void _thread_entry(_thread_entry_t pEntry,
+				 _thread_arg_t parameter1,
+				 _thread_arg_t parameter2,
+				 _thread_arg_t parameter3)
 {
 	/* Execute the "application" entry point function */
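
Conceptual sketch (not the actual body): the wrapper calls the entry point with its three arguments and, should the entry point ever return, parks the thread instead of running off the end; the real code performs graceful task/fiber termination at that point.

static FUNC_NORETURN void thread_entry_sketch(_thread_entry_t pEntry,
					      _thread_arg_t parameter1,
					      _thread_arg_t parameter2,
					      _thread_arg_t parameter3)
{
	pEntry(parameter1, parameter2, parameter3);

	/* reached only if the entry point returns; the real wrapper
	 * terminates the task or fiber here instead of spinning */
	for (;;) {
	}
}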

View file

@@ -105,7 +105,6 @@ extern void _Ctors(void);
  *
  * @return N/A
  */
-
 static void _main(void)
 {
 	_sys_device_do_config_level(_SYS_INIT_LEVEL_NANOKERNEL);
@@ -132,7 +131,6 @@ extern void _main(void);
  *
  * @return N/A
  */
-
 static void nano_init(struct tcs *dummyOutContext)
 {
 	/*
@@ -240,7 +238,6 @@ extern void *__stack_chk_guard;
  *
  * @return Does not return
  */
-
 FUNC_NORETURN void _Cstart(void)
 {
 	/* floating point operations are NOT performed during nanokernel init */

View file

@@ -25,6 +25,9 @@
  * nano_fiber_stack_pop, nano_task_stack_pop, nano_isr_stack_pop
  * nano_fiber_stack_pop_wait, nano_task_stack_pop_wait
  *
+ * @param stack the stack to initialize
+ * @param data pointer to the container for the stack
+ *
  * @internal
  * In some cases the compiler "alias" attribute is used to map two or more
  * APIs to the same function, since they have identical implementations.
@@ -37,10 +40,7 @@
 #include <sections.h>

-void nano_stack_init(
-	struct nano_stack *stack, /* stack to initialize */
-	uint32_t *data /* container for stack */
-	)
+void nano_stack_init(struct nano_stack *stack, uint32_t *data)
 {
 	stack->next = stack->base = data;
 	stack->fiber = (struct tcs *)0;
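
Usage sketch (not part of the commit): the data array is the storage the stack pushes into, one uint32_t per element; the depth of 16 is illustrative.

static uint32_t stack_storage[16];
static struct nano_stack demo_stack;

static void stack_init_sketch(void)
{
	nano_stack_init(&demo_stack, stack_storage);
}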

View file

@@ -104,7 +104,6 @@ int64_t nano_tick_get(void)
  * NOTE: We use inline function for both 64-bit and 32-bit functions.
  * Compiler optimizes out 64-bit result handling in 32-bit version.
  */
-
 static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
 {
 	int64_t delta;
@@ -192,7 +191,6 @@ static inline void handle_expired_nano_timers(int ticks)
  *
  * @return N/A
  */
-
 void _nano_sys_clock_tick_announce(uint32_t ticks)
 {
 	_nano_ticks += ticks;
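
Usage sketch (not part of the commit): a system clock driver's interrupt handler reports elapsed ticks so the kernel can advance _nano_ticks and expire nano timers; one tick per interrupt and the handler signature are illustrative.

static void timer_isr_sketch(void *unused)
{
	(void)unused;

	_nano_sys_clock_tick_announce(1);	/* one tick elapsed */
}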