Add doxygen comment headers

Move many of the functions from the old format of inline comments to
the newer doxygen format.

Change-Id: Ib0fe0d8627d7cd90219385a3ab627da8f9637d98
Signed-off-by: Dan Kalowsky <daniel.kalowsky@intel.com>
Dan Kalowsky 2015-10-20 09:42:33 -07:00 committed by Anas Nashif
commit 3a109b1f00
43 changed files with 372 additions and 372 deletions

View file

@ -61,15 +61,14 @@ static inline void nonEssentialTaskAbort(void)
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* @param reason the fatal error reason
* @param pEsf pointer to exception stack frame
*
* @return N/A
*
* \NOMANUAL
*/
void _SysFatalErrorHandler(
unsigned int reason, /* fatal error reason */
const NANO_ESF * pEsf /* pointer to exception stack frame */
)
void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
{
nano_context_type_t curCtx = sys_execution_context_type_get();

View file

@ -90,19 +90,20 @@ static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
*
* <options> is currently unused.
*
* @param pStackMem pointer to aligned stack memory
* @param stackSize the stack size in bytes
* @param pEntry thread entry point routine
* @param parameter1 first param to entry point
* @param parameter2 second param to entry point
* @param parameter3 third param to entry point
* @param priority fiber priority, -1 for task
* @param options unused (saved for future expansion)
*
* @return N/A
*/
void _new_thread(
char *pStackMem, /* pointer to aligned stack memory */
unsigned stackSize, /* stack size in bytes */
_thread_entry_t pEntry, /* thread entry point routine */
void *parameter1, /* first param to entry point */
void *parameter2, /* second param to entry point */
void *parameter3, /* third param to entry point */
int priority, /* fiber priority, -1 for task */
unsigned options /* unused, for expansion */
)
void _new_thread(char *pStackMem, unsigned stackSize, _thread_entry_t pEntry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned options)
{
char *stackEnd = pStackMem + stackSize;
struct init_stack_frame *pInitCtx;

View file

@ -38,7 +38,6 @@ definitions and more complex routines, if needed.
*
* @return N/A
*/
void _ScbSystemReset(void)
{
union __aircr reg;
@ -51,8 +50,8 @@ void _ScbSystemReset(void)
/**
*
* @brief Set the number of priority groups based on the number
* of exception priorities desired
* @brief Set the number of priority groups based on the number of exception
* priorities desired
*
* Exception priorities can be divided into priority groups, inside which there is
* no preemption. The priorities inside a group are only used to decide which
@ -60,11 +59,11 @@ void _ScbSystemReset(void)
*
* The number of priorities has to be a power of two, from 1 to 128.
*
* @param n the number of priorities
*
* @return N/A
*/
void _ScbNumPriGroupSet(unsigned int n /* number of priorities */
)
void _ScbNumPriGroupSet(unsigned int n)
{
unsigned int set;
union __aircr reg;

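For illustration, a minimal usage sketch of the routine above (not from this
diff); the argument must be a power of two from 1 to 128, per the description:

/* Early platform interrupt init: allow 8 preemption priority groups. */
_ScbNumPriGroupSet(8);
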
View file

@ -61,15 +61,16 @@ const NANO_ESF _default_esf = {0xdeaddead, /* a1 */
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>.
*
* @param reason the reason that the handler was called
* @param pEsf pointer to the exception stack frame
*
* @return This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _NanoFatalErrorHandler(
unsigned int reason, /* reason that handler was called */
const NANO_ESF *pEsf /* pointer to exception stack frame */
)
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf)
{
switch (reason) {
case _NANO_ERR_INVALID_TASK_EXIT:

View file

@ -134,8 +134,7 @@ static void _FaultThreadShow(const NANO_ESF *esf)
* \NOMANUAL
*/
static void _MpuFault(const NANO_ESF *esf,
int fromHardFault)
static void _MpuFault(const NANO_ESF *esf, int fromHardFault)
{
PR_EXC("***** MPU FAULT *****\n");
@ -169,8 +168,7 @@ static void _MpuFault(const NANO_ESF *esf,
* \NOMANUAL
*/
static void _BusFault(const NANO_ESF *esf,
int fromHardFault)
static void _BusFault(const NANO_ESF *esf, int fromHardFault)
{
PR_EXC("***** BUS FAULT *****\n");
@ -294,8 +292,7 @@ static void _DebugMonitor(const NANO_ESF *esf)
* \NOMANUAL
*/
static void _ReservedException(const NANO_ESF *esf,
int fault)
static void _ReservedException(const NANO_ESF *esf, int fault)
{
PR_EXC("***** %s %d) *****\n",
fault < 16 ? "Reserved Exception (" : "Spurious interrupt (IRQ ",
@ -362,15 +359,15 @@ static void _FaultDump(const NANO_ESF *esf, int fault)
* interrupt was already being handled, it is passed a pointer to both and has
* to find out on which the ESP is present.
*
* @param msp pointer to potential ESF on MSP
* @param psp pointer to potential ESF on PSP
*
* @return This function does not return.
*
* \NOMANUAL
*/
void _Fault(
const NANO_ESF *msp, /* pointer to potential ESF on MSP */
const NANO_ESF *psp /* pointer to potential ESF on PSP */
)
void _Fault(const NANO_ESF *msp, const NANO_ESF *psp)
{
const NANO_ESF *esf = _ScbIsNestedExc() ? msp : psp;
int fault = _ScbActiveVectorGet();

View file

@ -45,7 +45,6 @@ extern void __reserved(void);
*
* @return N/A
*/
void _irq_handler_set(unsigned int irq,
void (*old)(void *arg),
void (*new)(void *arg),
@ -73,7 +72,6 @@ void _irq_handler_set(unsigned int irq,
*
* @return N/A
*/
void irq_enable(unsigned int irq)
{
/* before enabling interrupts, ensure that interrupt is cleared */
@ -90,7 +88,6 @@ void irq_enable(unsigned int irq)
*
* @return N/A
*/
void irq_disable(unsigned int irq)
{
_NvicIrqDisable(irq);
@ -111,7 +108,6 @@ void irq_disable(unsigned int irq)
*
* @return N/A
*/
void _irq_priority_set(unsigned int irq,
unsigned int prio)
{
@ -130,7 +126,6 @@ void _irq_priority_set(unsigned int irq,
*
* @return N/A
*/
void _irq_spurious(void *unused)
{
ARG_UNUSED(unused);
@ -149,7 +144,6 @@ void _irq_spurious(void *unused)
*
* @return the interrupt line number
*/
int irq_connect(unsigned int irq,
unsigned int prio,
void (*isr)(void *arg),
@ -172,7 +166,6 @@ int irq_connect(unsigned int irq,
*
* @return N/A
*/
void _irq_disconnect(unsigned int irq)
{
_irq_handler_set(irq, _sw_isr_table[irq].isr, _irq_spurious, NULL);

View file

@ -61,15 +61,15 @@ static inline void nonEssentialTaskAbort(void)
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* @param reason fatal error reason
* @param pEsf pointer to exception stack frame
*
* @return N/A
*
* \NOMANUAL
*/
void _SysFatalErrorHandler(
unsigned int reason, /* fatal error reason */
const NANO_ESF * pEsf /* pointer to exception stack frame */
)
void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
{
nano_context_type_t curCtx = sys_execution_context_type_get();

View file

@ -87,19 +87,21 @@ static ALWAYS_INLINE void _thread_monitor_init(struct tcs *tcs /* thread */
*
* <options> is currently unused.
*
* @param pStackMem the aligned stack memory
* @param stackSize stack size in bytes
* @param pEntry the entry point
* @param parameter1 first param to entry point
* @param parameter2 second param to entry point
* @param parameter3 third param to entry point
* @param priority thread priority (-1 for tasks)
* @param options misc options (future use)
*
* @return N/A
*/
void _new_thread(
char *pStackMem, /* aligned stack memory */
unsigned stackSize, /* stack size in bytes */
_thread_entry_t pEntry, /* entry point */
void *parameter1, /* entry point first param */
void *parameter2, /* entry point second param */
void *parameter3, /* entry point third param */
int priority, /* thread priority (-1 for tasks) */
unsigned options /* misc options (future) */
)
void _new_thread(char *pStackMem, unsigned stackSize, _thread_entry_t pEntry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned options)
{
char *stackEnd = pStackMem + stackSize;
struct __esf *pInitCtx;

View file

@ -175,15 +175,16 @@ static ALWAYS_INLINE void nanoArchInit(void)
* to <value>. It is assumed that the specified <fiber> is pending, and thus
* the fiber's thread is stored in its struct tcs structure.
*
* @param fiber pointer to the fiber
* @param value is the value to set as a return value
*
* @return N/A
*
* \NOMANUAL
*/
static ALWAYS_INLINE void fiberRtnValueSet(
struct tcs *fiber, /* pointer to fiber */
unsigned int value /* value to set as return value */
)
static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber,
unsigned int value)
{
tESF *pEsf = (void *)fiber->preempReg.psp;

View file

@ -51,14 +51,14 @@ as there is no requirement for this capability.
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* @param target the address to be tested
* @param oldValue the value to be compared against
* @param newValue the value to be set to
*
* @return Returns 1 if <newValue> is written, 0 otherwise.
*/
int atomic_cas(
atomic_t *target, /* address to be tested */
atomic_val_t oldValue, /* value to compare against */
atomic_val_t newValue /* value to set to */
)
int atomic_cas(atomic_t *target, atomic_val_t oldValue, atomic_val_t newValue)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* temporary storage */
@ -82,13 +82,13 @@ int atomic_cas(
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* @param target memory location to add to
* @param value the value to add
*
* @return The previous value from <target>
*/
atomic_val_t atomic_add(
atomic_t *target, /* memory location to add to */
atomic_val_t value /* value to add */
)
atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* previous value from <target> */
@ -108,14 +108,13 @@ atomic_val_t atomic_add(
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* @param target the memory location to subtract from
* @param value the value to subtract
*
* @return The previous value from <target>
*/
atomic_val_t atomic_sub(
atomic_t *target, /* memory location to subtract from */
atomic_val_t value /* value to subtract */
)
{
atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* previous value from <target> */
@ -130,15 +129,15 @@ atomic_val_t atomic_sub(
*
* @brief Atomic increment primitive
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* @param target memory location to increment
*
* @return The value from <target> before the increment
*/
atomic_val_t atomic_inc(
atomic_t *target /* memory location to increment */
)
atomic_val_t atomic_inc(atomic_t *target)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* value from <target> before the increment */
@ -154,15 +153,15 @@ atomic_val_t atomic_inc(
*
* @brief Atomic decrement primitive
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* @param target memory location to decrement
*
* @return The value from <target> prior to the decrement
*/
atomic_val_t atomic_dec(
atomic_t *target /* memory location to decrement */
)
atomic_val_t atomic_dec(atomic_t *target)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* value from <target> prior to the decrement */
@ -178,6 +177,8 @@ atomic_val_t atomic_dec(
*
* @brief Atomic get primitive
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* @param target memory location to read from
@ -185,8 +186,7 @@ atomic_val_t atomic_dec(
* @return The value read from <target>
*/
atomic_val_t atomic_get(const atomic_t *target /* memory location to read from */
)
atomic_val_t atomic_get(const atomic_t *target)
{
return *target;
}
@ -198,13 +198,13 @@ atomic_val_t atomic_get(const atomic_t *target /* memory location to read from *
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* @param target the memory location to write to
* @param value the value to write
*
* @return The previous value from <target>
*/
atomic_val_t atomic_set(
atomic_t *target, /* memory location to write to */
atomic_val_t value /* value to write */
)
atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* previous value from <target> */
@ -224,12 +224,12 @@ atomic_val_t atomic_set(
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* @param target the memory location to write to
*
* @return The previous value from <target>
*/
atomic_val_t atomic_clear(
atomic_t *target /* memory location to write to */
)
atomic_val_t atomic_clear(atomic_t *target)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* previous value from <target> */
@ -249,13 +249,13 @@ atomic_val_t atomic_clear(
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to OR
*
* @return The previous value from <target>
*/
atomic_val_t atomic_or(
atomic_t *target, /* memory location to be modified */
atomic_val_t value /* value to OR */
)
atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* previous value from <target> */
@ -275,13 +275,13 @@ atomic_val_t atomic_or(
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to XOR
*
* @return The previous value from <target>
*/
atomic_val_t atomic_xor(
atomic_t *target, /* memory location to be modified */
atomic_val_t value /* value to XOR */
)
atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* previous value from <target> */
@ -301,13 +301,13 @@ atomic_val_t atomic_xor(
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to AND
*
* @return The previous value from <target>
*/
atomic_val_t atomic_and(
atomic_t *target, /* memory location to be modified */
atomic_val_t value /* value to AND */
)
atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* previous value from <target> */
@ -327,13 +327,13 @@ atomic_val_t atomic_and(
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to NAND
*
* @return The previous value from <target>
*/
atomic_val_t atomic_nand(
atomic_t *target, /* memory location to be modified */
atomic_val_t value /* value to NAND */
)
atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
int key; /* interrupt lock level */
atomic_val_t ovalue; /* previous value from <target> */

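For illustration, atomic_cas() as documented above is the building block for
lock-free retry loops. A minimal sketch (not from this diff; try_take() and
the initial count are illustrative):

static atomic_t avail = 4; /* illustrative resource count */

/* Decrement <avail> without going below zero, retrying if another
 * context modifies it between the read and the compare-and-swap. */
static int try_take(void)
{
	atomic_val_t old;

	do {
		old = atomic_get(&avail);
		if (old == 0) {
			return 0; /* nothing left to take */
		}
	} while (!atomic_cas(&avail, old, old - 1));

	return 1; /* took one unit */
}
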
View file

@ -63,15 +63,16 @@ const NANO_ESF _default_esf = {
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>.
*
* @param reason the reason that the handler was called
* @param pEsf pointer to the exception stack frame
*
* @return This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _NanoFatalErrorHandler(
unsigned int reason, /* reason that handler was called */
const NANO_ESF *pEsf /* pointer to exception stack frame */
)
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf)
{
#ifdef CONFIG_PRINTK

View file

@ -99,9 +99,10 @@ extern uint32_t _sse_mxcsr_default_value; /* SSE control/status register default
* specified TCS. If the specified task or fiber supports SSE then
* x87/MMX/SSEx thread info is saved, otherwise only x87/MMX thread is saved.
*
* @param tcs pointer to the TCS whose FP context is to be saved
*
* @return N/A
*/
static void _FpCtxSave(struct tcs *tcs)
{
_do_fp_ctx_save(tcs->flags & USE_SSE, &tcs->preempFloatReg);
@ -113,9 +114,10 @@ static void _FpCtxSave(struct tcs *tcs)
*
* This routine initializes the system's "live" non-integer context.
*
* @param tcs pointer to the TCS of the thread whose context is initialized
*
* @return N/A
*/
static inline void _FpCtxInit(struct tcs *tcs)
{
_do_fp_ctx_init(tcs->flags & USE_SSE);
@ -142,6 +144,9 @@ static inline void _FpCtxInit(struct tcs *tcs)
* This routine should only be used to enable floating point support for a
* task/fiber that does not currently have such support enabled already.
*
* @param tcs pointer to the TCS of the task or fiber for which to enable FP support
* @param options set to either USE_FP or USE_SSE
*
* @return N/A
*
* INTERNAL
@ -159,10 +164,7 @@ static inline void _FpCtxInit(struct tcs *tcs)
* are only used from a fiber, rather than from "generic" code used by both
* tasks and fibers.
*/
void _FpEnable(struct tcs *tcs,
unsigned int options /* USE_FP or USE_SSE */
)
void _FpEnable(struct tcs *tcs, unsigned int options)
{
unsigned int imask;
struct tcs *fp_owner;
@ -272,7 +274,6 @@ void _FpEnable(struct tcs *tcs,
*
* @return N/A
*/
FUNC_ALIAS(_FpEnable, fiber_float_enable, void);
/**
@ -286,7 +287,6 @@ FUNC_ALIAS(_FpEnable, fiber_float_enable, void);
*
* @return N/A
*/
FUNC_ALIAS(_FpEnable, task_float_enable, void);
/**
@ -301,6 +301,8 @@ FUNC_ALIAS(_FpEnable, task_float_enable, void);
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* @param tcs pointer to the TCS of the task or fiber whose FP support is disabled
*
* @return N/A
*
* INTERNAL
@ -318,7 +320,6 @@ FUNC_ALIAS(_FpEnable, task_float_enable, void);
* are only used from a fiber, rather than from "generic" code used by both
* tasks and fibers.
*/
void _FpDisable(struct tcs *tcs)
{
unsigned int imask;
@ -362,7 +363,6 @@ void _FpDisable(struct tcs *tcs)
*
* @return N/A
*/
FUNC_ALIAS(_FpDisable, fiber_float_disable, void);
/**
@ -379,7 +379,6 @@ FUNC_ALIAS(_FpDisable, fiber_float_disable, void);
*
* @return N/A
*/
FUNC_ALIAS(_FpDisable, task_float_disable, void);
@ -395,11 +394,11 @@ FUNC_ALIAS(_FpDisable, task_float_disable, void);
* current task or fiber with the USE_FP option (or the USE_SSE option if the
* SSE configuration option has been enabled).
*
* @param pEsf this value is not used for this architecture
*
* @return N/A
*/
void _FpNotAvailableExcHandler(NANO_ESF * pEsf /* not used */
)
void _FpNotAvailableExcHandler(NANO_ESF *pEsf)
{
unsigned int enableOption;

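For illustration, a usage sketch of the aliased enable/disable APIs above
(not from this diff; my_fiber_tcs is an assumed handle to a fiber's
struct tcs, and <options> is either USE_FP or USE_SSE per the text):

extern struct tcs *my_fiber_tcs; /* assumed fiber handle */

/* Preserve x87 FPU context across context switches for this fiber. */
fiber_float_enable(my_fiber_tcs, USE_FP);

/* ... fiber performs floating point work ... */

/* Stop preserving FP context once the fiber no longer needs it. */
fiber_float_disable(my_fiber_tcs);
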
View file

@ -161,6 +161,10 @@ static int _int_stub_alloc(void)
*
* @brief Connect a routine to an interrupt vector
*
* @param vector interrupt vector: 0 to 255 on IA-32
* @param routine a function pointer to the interrupt routine
* @param dpl priv level for interrupt-gate descriptor
*
* This routine "connects" the specified <routine> to the specified interrupt
* <vector>. On the IA-32 architecture, an interrupt vector is a value from
* 0 to 255. This routine merely fills in the appropriate interrupt
@ -180,11 +184,7 @@ static int _int_stub_alloc(void)
*
*/
void _IntVecSet(
unsigned int vector, /* interrupt vector: 0 to 255 on IA-32 */
void (*routine)(void *),
unsigned int dpl /* priv level for interrupt-gate descriptor */
)
void _IntVecSet(unsigned int vector, void (*routine)(void *), unsigned int dpl)
{
unsigned long long *pIdtEntry;
unsigned int key;
@ -198,9 +198,9 @@ void _IntVecSet(
pIdtEntry = (unsigned long long *)(_idt_base_address + (vector << 3));
/*
* Lock interrupts to protect the IDT entry to which _IdtEntryCreate() will
* write. They must be locked here because the _IdtEntryCreate() code is
* shared with the 'gen_idt' host tool.
* Lock interrupts to protect the IDT entry to which _IdtEntryCreate()
* will write. They must be locked here because the _IdtEntryCreate()
* code is shared with the 'gen_idt' host tool.
*/
key = irq_lock();
@ -221,6 +221,11 @@ void _IntVecSet(
*
* @brief Connect a C routine to a hardware interrupt
*
* @param irq virtualized IRQ to connect to
* @param priority requested priority of interrupt
* @param routine the C interrupt handler
* @param parameter parameter passed to C routine
*
* This routine connects an interrupt service routine (ISR) coded in C to
* the specified hardware <irq>. An interrupt vector will be allocated to
* satisfy the specified <priority>. If the interrupt service routine is being
@ -265,12 +270,8 @@ void _IntVecSet(
* vectors remaining in the specified <priority> level.
*/
int irq_connect(
unsigned int irq, /* virtualized IRQ to connect to */
unsigned int priority, /* requested priority of interrupt */
void (*routine)(void *parameter), /* C interrupt handler */
void *parameter /* parameter passed to C routine */
)
int irq_connect(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter), void *parameter)
{
unsigned char offsetAdjust;
unsigned char numParameters = 1; /* stub always pushes ISR parameter */
@ -388,10 +389,8 @@ int irq_connect(
/*
* Poke in the stack popping related opcode. Do it a byte at a time
* because
* &STUB_PTR[offsetAdjust] may not be aligned which does not work for
* all
* targets.
* because &STUB_PTR[offsetAdjust] may not be aligned which does not
* work for all targets.
*/
STUB_PTR[offsetAdjust] = IA32_ADD_OPCODE & 0xFF;
@ -401,9 +400,10 @@ int irq_connect(
offsetAdjust += 3;
/*
* generate code that invokes _IntExit(); note that a jump is used, since
* _IntExit() takes care of returning back to the execution context that
* experienced the interrupt (i.e. branch tail optimization)
* generate code that invokes _IntExit(); note that a jump is used,
* since _IntExit() takes care of returning back to the execution
* context that experienced the interrupt (i.e. branch tail
* optimization)
*/
STUB_PTR[offsetAdjust] = IA32_JMP_OPCODE;
@ -416,8 +416,8 @@ int irq_connect(
* There is no need to explicitly synchronize or flush the instruction
* cache due to the above code synthesis. See the Intel 64 and IA-32
* Architectures Software Developer's Manual: Volume 3A: System
*Programming
* Guide; specifically the section titled "Self Modifying Code".
* Programming Guide; specifically the section titled "Self Modifying
* Code".
*
* Cache synchronization/flushing is not required for the i386 as it
* does not contain any on-chip I-cache; likewise, post-i486 processors

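For illustration, the connect-then-enable flow described above might look as
follows (a sketch, not from this diff; MY_DEVICE_IRQ is a placeholder, and
irq_enable() plus the negative failure return are assumptions):

#define MY_DEVICE_IRQ 11 /* placeholder virtualized IRQ number */

/* ISR coded in C; the generated stub pushes its one parameter. */
static void my_dev_isr(void *arg)
{
	/* minimal interrupt-level work here */
}

void my_dev_init(void)
{
	int vector = irq_connect(MY_DEVICE_IRQ, 2, my_dev_isr, NULL);

	if (vector >= 0) {
		irq_enable(MY_DEVICE_IRQ); /* assumed enable call */
	}
}
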
View file

@ -38,24 +38,23 @@ Intel-specific parts of start_task(). Only FP functionality currently.
#define SSE_GROUP 0x10
/**
*
* @brief Intel-specific parts of task initialization
*
* @param X pointer to task control block
* @param pOpt thread options container
*
* @return N/A
*/
void _StartTaskArch(
struct k_task *X, /* ptr to task control block */
unsigned int *pOpt /* thread options container */
)
void _StartTaskArch(struct k_task *X, unsigned int *pOpt)
{
/*
* The IA-32 nanokernel implementation uses the USE_FP bit in the
* struct tcs->flags structure as a "dirty bit". The USE_FP flag bit will be
* set whenever a thread uses any non-integer capability, whether it's
* just the x87 FPU capability, SSE instructions, or a combination of
* both. The USE_SSE flag bit will only be set if a thread uses SSE
* instructions.
* struct tcs->flags structure as a "dirty bit". The USE_FP flag bit
* will be set whenever a thread uses any non-integer capability,
* whether it's just the x87 FPU capability, SSE instructions, or a
* combination of both. The USE_SSE flag bit will only be set if a
* thread uses SSE instructions.
*
* However, callers of fiber_fiber_start(), task_fiber_start(), or even
* _new_thread() don't need to follow the protocol used by the IA-32
@ -68,11 +67,10 @@ void _StartTaskArch(
* Likewise, the placement of tasks into "groups" doesn't need to follow
* the protocol used by the IA-32 nanokernel w.r.t. managing the
* struct tcs->flags field. If a task will utilize just the x87 FPU
*capability,
* then the task only needs to be placed in the FPU_GROUP group.
* If a task utilizes SSE instructions (and possibly x87 FPU
*capability),
* then the task only needs to be placed in the SSE_GROUP group.
* capability, then the task only needs to be placed in the FPU_GROUP
* group. If a task utilizes SSE instructions (and possibly x87 FPU
* capability), then the task only needs to be placed in the SSE_GROUP
* group.
*/
*pOpt |= (X->group & SSE_GROUP) ? USE_SSE

View file

@ -48,15 +48,16 @@ supported platforms.
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* @param reason the fatal error reason
* @param pEsf the pointer to the exception stack frame
*
* @return This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _SysFatalErrorHandler(
unsigned int reason, /* fatal error reason */
const NANO_ESF * pEsf /* pointer to exception stack frame */
)
FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf)
{
nano_context_type_t curCtx = sys_execution_context_type_get();

View file

@ -56,20 +56,22 @@ void _thread_entry_wrapper(_thread_entry_t, _thread_arg_t,
*
* This function is called by _new_thread() to initialize tasks.
*
* @param pStackMem pointer to thread stack memory
* @param stackSize size of stack in bytes
* @param priority thread priority
* @param options thread options: USE_FP, USE_SSE
*
* @return N/A
*
* \NOMANUAL
*/
static void _new_thread_internal(
char *pStackMem, /* pointer to thread stack memory */
unsigned stackSize, /* size of stack in bytes */
int priority, /* thread priority */
unsigned options /* thread options: USE_FP, USE_SSE */
)
static void _new_thread_internal(char *pStackMem, unsigned stackSize,
int priority, unsigned options)
{
unsigned long *pInitialCtx;
struct tcs *tcs = (struct tcs *)pStackMem; /* ptr to the new task's tcs */
/* ptr to the new task's tcs */
struct tcs *tcs = (struct tcs *)pStackMem;
#ifndef CONFIG_FP_SHARING
ARG_UNUSED(options);
@ -270,21 +272,24 @@ __asm__("\t.globl _thread_entry\n"
* The "thread control block" (TCS) is carved from the "end" of the specified
* thread stack memory.
*
* @param pStackMem pointer to aligned stack memory
* @param stackSize the stack size in bytes
* @param pEntry thread entry point routine
* @param parameter1 first param to entry point
* @param parameter2 second param to entry point
* @param parameter3 third param to entry point
* @param priority thread priority
* @param options thread options: USE_FP, USE_SSE
*
* @return opaque pointer to initialized TCS structure
*
* \NOMANUAL
*/
void _new_thread(
char *pStackMem, /* pointer to aligned stack memory */
unsigned stackSize, /* size of stack in bytes */
_thread_entry_t pEntry, /* thread entry point function */
void *parameter1, /* first parameter to thread entry point function */
void *parameter2, /* second parameter to thread entry point function */
void *parameter3, /* third parameter to thread entry point function */
int priority, /* thread priority */
unsigned options /* thread options: USE_FP, USE_SSE */
)
void _new_thread(char *pStackMem, unsigned stackSize, _thread_entry_t pEntry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned options)
{
unsigned long *pInitialThread;

View file

@ -49,14 +49,14 @@ extern int _AdvIdleCheckSleep(void);
* passes to the _AdvIdleFunc() that put the system to sleep, which then
* finishes executing.
*
* @param _Cstart the address of the _Cstart function
* @param _gdt the address of the global descriptor table in RAM
* @param _GlobalTss the address of the TSS descriptor
*
* @return does not return to caller
*/
extern void _AdvIdleStart(
void (*_Cstart)(void), /* addr of _Cstart function */
void *_gdt, /* addr of global descriptor table in RAM */
void *_GlobalTss /* addr of TSS descriptor */
);
extern void _AdvIdleStart(void (*_Cstart)(void), void *_gdt, void *_GlobalTss);
/*
* @brief Perform advanced sleep
@ -66,11 +66,12 @@ extern void _AdvIdleStart(
* to sleep and then later allows it to resume processing; if not, the routine
* returns immediately without sleeping.
*
* @param ticks the upcoming kernel idle time
*
* @return non-zero if advanced sleep occurred; otherwise zero
*/
extern int _AdvIdleFunc(int32_t ticks /* upcoming kernel idle time */
);
extern int _AdvIdleFunc(int32_t ticks);
#endif /* CONFIG_ADVANCED_IDLE */

View file

@ -808,6 +808,9 @@ static inline void nanoArchInit(void)
*
* @brief Set the return value for the specified fiber (inline)
*
* @param fiber pointer to fiber
* @param value value to set as return value
*
* The register used to store the return value from a function call invocation is
* set to <value>. It is assumed that the specified <fiber> is pending, and
* thus the fiber's context is stored in its TCS.
@ -817,10 +820,7 @@ static inline void nanoArchInit(void)
* \NOMANUAL
*/
static inline void fiberRtnValueSet(
struct tcs *fiber, /* pointer to fiber */
unsigned int value /* value to set as return value */
)
static inline void fiberRtnValueSet(struct tcs *fiber, unsigned int value)
{
/* write into 'eax' slot created in _Swap() entry */

View file

@ -347,14 +347,14 @@ void _loapic_int_vec_set(unsigned int irq, /* IRQ number of the interrupt */
*
* @brief Enable an individual LOAPIC interrupt (IRQ)
*
* @param irq the IRQ number of the interrupt
*
* This routine clears the interrupt mask bit in the LVT for the specified IRQ
*
* @return N/A
*/
void _loapic_irq_enable(unsigned int irq /* IRQ number of
the interrupt */
)
void _loapic_irq_enable(unsigned int irq)
{
volatile int *pLvt; /* pointer to local vector table */
int32_t oldLevel; /* previous interrupt lock level */
@ -378,14 +378,14 @@ void _loapic_irq_enable(unsigned int irq /* IRQ number of
*
* @brief Disable an individual LOAPIC interrupt (IRQ)
*
* @param irq the IRQ number of the interrupt
*
* This routine sets the interrupt mask bit in the LVT for the specified IRQ
*
* @return N/A
*/
void _loapic_irq_disable(unsigned int irq /* IRQ number of the
interrupt */
)
void _loapic_irq_disable(unsigned int irq)
{
volatile int *pLvt; /* pointer to local vector table */
int32_t oldLevel; /* previous interrupt lock level */

View file

@ -65,6 +65,15 @@
* IRQ28 -> LOAPIC_LINT1
* IRQ29 -> LOAPIC_ERROR
*
* @param irq virtualized IRQ
* @param priority get vector from <priority> group
* @param boiRtn pointer to the BOI routine; NULL if none
* @param eoiRtn pointer to the EOI routine; NULL if none
* @param boiRtnParm the BOI routine parameter if any
* @param eoiRtnParm the EOI routine parameter if any
* @param boiParamRequired whether the BOI routine requires a parameter
* @param eoiParamRequired whether the EOI routine requires a parameter
*
* @return the allocated interrupt vector
*
* @internal
@ -73,16 +82,10 @@
* parameters are invalid.
* @endinternal
*/
int _SysIntVecAlloc(
unsigned int irq, /* virtualized IRQ */
unsigned int priority, /* get vector from <priority> group */
NANO_EOI_GET_FUNC * boiRtn, /* ptr to BOI routine; NULL if none */
NANO_EOI_GET_FUNC * eoiRtn, /* ptr to EOI routine; NULL if none */
void **boiRtnParm, /* BOI routine parameter, if any */
void **eoiRtnParm, /* EOI routine parameter, if any */
unsigned char *boiParamRequired, /* BOI routine parameter req? */
unsigned char *eoiParamRequired /* BOI routine parameter req? */
)
int _SysIntVecAlloc(unsigned int irq, unsigned int priority,
NANO_EOI_GET_FUNC *boiRtn, NANO_EOI_GET_FUNC *eoiRtn,
void **boiRtnParm, void **eoiRtnParm,
unsigned char *boiParamRequired, unsigned char *eoiParamRequired)
{
int vector;
@ -108,13 +111,11 @@ int _SysIntVecAlloc(
* Set up the appropriate interrupt controller to generate the allocated
* interrupt vector for the specified IRQ. Also, provide the required
* EOI and BOI related information for the interrupt stub code
*generation
* step.
* generation step.
*
* For software interrupts (NANO_SOFT_IRQ), skip the interrupt
*controller
* programming step, and indicate that a BOI and EOI handler is not
* required.
* controller programming step, and indicate that a BOI and EOI handler
* is not required.
*
* Skip both steps if a vector could not be allocated.
*/
@ -133,9 +134,8 @@ int _SysIntVecAlloc(
/*
* query IOAPIC driver to obtain EOI handler information
* for the
* interrupt vector that was just assigned to the
* specified IRQ
* for the interrupt vector that was just assigned to
* the specified IRQ
*/
*eoiRtn = (NANO_EOI_GET_FUNC)_ioapic_eoi_get(
@ -170,10 +170,12 @@ int _SysIntVecAlloc(
* IOAPIC is programmed for these IRQs
* - The remaining IRQs are provided by the LOAPIC and hence the LOAPIC is
* programmed.
*
* @param vector the vector number
* @param irq the virtualized IRQ
*
*/
void _SysIntVecProgram(unsigned int vector, /* vector number */
unsigned int irq /* virtualized IRQ */
)
void _SysIntVecProgram(unsigned int vector, unsigned int irq)
{
if (irq < CONFIG_IOAPIC_NUM_RTES) {

View file

@ -42,6 +42,10 @@ This module implements the PCI H/W access functions.
*
* @brief Read a PCI controller register
*
* @param reg PCI register to read
* @param data where to put the data
* @param size size of the data to read (8/16/32 bits)
*
* This routine reads the specified register from the PCI controller and
* places the data into the provided buffer.
*
@ -49,10 +53,7 @@ This module implements the PCI H/W access functions.
*
*/
static void pci_ctrl_read(uint32_t reg, /* PCI register to read */
uint32_t *data, /* where to put the data */
uint32_t size /* size of the data to read (8/16/32 bits) */
)
static void pci_ctrl_read(uint32_t reg, uint32_t *data, uint32_t size)
{
/* read based on the size requested */
@ -76,6 +77,10 @@ static void pci_ctrl_read(uint32_t reg, /* PCI register to read */
*
* @brief Write a PCI controller register
*
* @param reg PCI register to write
* @param data data to write
* @param size size of the data to write (8/16/32 bits)
*
* This routine writes the provided data to the specified register in the PCI
* controller.
*
@ -83,11 +88,7 @@ static void pci_ctrl_read(uint32_t reg, /* PCI register to read */
*
*/
static void pci_ctrl_write(uint32_t reg, /* PCI register to write */
uint32_t data, /* data to write */
uint32_t size /* size of the data to write (8/16/32 bits)
*/
)
static void pci_ctrl_write(uint32_t reg, uint32_t data, uint32_t size)
{
/* write based on the size requested */
@ -111,22 +112,25 @@ static void pci_ctrl_write(uint32_t reg, /* PCI register to write */
*
* @brief Read the PCI controller data register
*
* @param controller controller number
* @param offset is the offset within the data region
* @param data is the returned data
* @param size is the size of the data to read
*
* This routine reads the data register of the specified PCI controller.
*
* @return 0 or -1
*
*/
static int pci_ctrl_data_read(uint32_t controller, /* controller number */
uint32_t offset, /* offset within data region */
uint32_t *data, /* returned data */
uint32_t size /* size of data to read */
)
static int pci_ctrl_data_read(uint32_t controller, uint32_t offset,
uint32_t *data, uint32_t size)
{
/* we only support one controller */
if (controller != DEFAULT_PCI_CONTROLLER)
if (controller != DEFAULT_PCI_CONTROLLER) {
return (-1);
}
pci_ctrl_read(PCI_CTRL_DATA_REG + offset, data, size);
@ -137,6 +141,11 @@ static int pci_ctrl_data_read(uint32_t controller, /* controller number */
*
* @brief Write the PCI controller data register
*
* @param controller the controller number
* @param offset is the offset within the address register
* @param data is the data to write
* @param size is the size of the data
*
* This routine writes the provided data to the data register of the
* specified PCI controller.
*
@ -144,16 +153,14 @@ static int pci_ctrl_data_read(uint32_t controller, /* controller number */
*
*/
static int pci_ctrl_data_write(uint32_t controller, /* controller number */
uint32_t offset, /* offset within address register */
uint32_t data, /* data to write */
uint32_t size /* size of data */
)
static int pci_ctrl_data_write(uint32_t controller, uint32_t offset,
uint32_t data, uint32_t size)
{
/* we only support one controller */
if (controller != DEFAULT_PCI_CONTROLLER)
if (controller != DEFAULT_PCI_CONTROLLER) {
return (-1);
}
pci_ctrl_write(PCI_CTRL_DATA_REG + offset, data, size);
@ -164,6 +171,11 @@ static int pci_ctrl_data_write(uint32_t controller, /* controller number */
*
* @brief Write the PCI controller address register
*
* @param controller is the controller number
* @param offset is the offset within the address register
* @param data is the data to write
* @param size is the size of the data
*
* This routine writes the provided data to the address register of the
* specified PCI controller.
*
@ -171,16 +183,14 @@ static int pci_ctrl_data_write(uint32_t controller, /* controller number */
*
*/
static int pci_ctrl_addr_write(uint32_t controller, /* controller number */
uint32_t offset, /* offset within address register */
uint32_t data, /* data to write */
uint32_t size /* size of data */
)
static int pci_ctrl_addr_write(uint32_t controller, uint32_t offset,
uint32_t data, uint32_t size)
{
/* we only support one controller */
if (controller != DEFAULT_PCI_CONTROLLER)
if (controller != DEFAULT_PCI_CONTROLLER) {
return (-1);
}
pci_ctrl_write(PCI_CTRL_ADDR_REG + offset, data, size);
return 0;
@ -229,15 +239,17 @@ static int pci_ctrl_addr_write(uint32_t controller, /* controller number */
* Reading of PCI data must be performed as an atomic operation. It is up to
* the caller to enforce this.
*
* @param controller is the PCI controller number to use
* @param addr is the PCI address to read
* @param size is the size of the data in bytes
* @param data is a pointer to the data read from the device
*
* @return N/A
*
*/
void pci_read(uint32_t controller, /* PCI controller to use */
union pci_addr_reg addr, /* PCI address to read */
uint32_t size, /* size of data in bytes */
uint32_t *data /* data read from device */
)
void pci_read(uint32_t controller, union pci_addr_reg addr,
uint32_t size, uint32_t *data)
{
uint32_t access_size;
uint32_t access_offset;
@ -318,16 +330,17 @@ void pci_read(uint32_t controller, /* PCI controller to use */
* Writing of PCI data must be performed as an atomic operation. It is up to
* the caller to enforce this.
*
* @param controller is the PCI controller to use
* @param addr is the PCI address to write
* @param size is the size in bytes to write
* @param data is the data to write
*
* @return N/A
*
*/
void pci_write(uint32_t controller, /* controller to use */
union pci_addr_reg addr, /* PCI address to read */
uint32_t size, /* size in bytes */
uint32_t data /* data to write */
)
void pci_write(uint32_t controller, union pci_addr_reg addr,
uint32_t size, uint32_t data)
{
uint32_t access_size;
uint32_t access_offset;

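For illustration, reading a device's 32-bit vendor/device ID word with
pci_read() might look as follows (a sketch, not from this diff; the
union pci_addr_reg member names used here are assumptions):

union pci_addr_reg addr;
uint32_t id;

addr.value = 0;        /* assumed raw-value member */
addr.field.bus = 0;    /* assumed bitfield names */
addr.field.device = 3;
addr.field.func = 0;
addr.field.reg = 0;    /* config offset 0: vendor/device ID */
addr.field.enable = 1; /* enable configuration access */

/* 4-byte read; the caller must guarantee atomicity, per the text above. */
pci_read(DEFAULT_PCI_CONTROLLER, addr, sizeof(id), &id);
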
View file

@ -145,10 +145,14 @@ void _k_fifo_enque_request(struct k_args *A)
}
}
int _task_fifo_put(kfifo_t queue, /* FIFO queue */
void *data, /* ptr to data to add to queue */
int32_t time /* maximum number of ticks to wait */
)
/**
* @brief Add data to the FIFO queue
*
* @param queue is the FIFO queue to add data to
* @param data is a pointer to the data to add
* @param time is the maximum number of ticks to wait
*
* @return RC_OK, RC_FAIL, or RC_TIME
*/
int _task_fifo_put(kfifo_t queue, void *data, int32_t time)
{
struct k_args A;

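For illustration, a non-blocking call to the routine above (a sketch, not
from this diff; MY_FIFO, TICKS_NONE, and RC_OK are the usual microkernel
object and return conventions, assumed here):

uint32_t item = 42;

/* Try to enqueue one element without waiting; anything other than
 * RC_OK means the element was not queued. */
if (_task_fifo_put(MY_FIFO, &item, TICKS_NONE) != RC_OK) {
	/* queue full: drop the element or retry later */
}
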
View file

@ -64,7 +64,6 @@ extern int _k_kernel_idle(void);
*
* @return N/A
*/
void _main(void)
{
_sys_device_do_config_level(_SYS_INIT_LEVEL_NANOKERNEL);

View file

@ -134,6 +134,7 @@ void task_irq_free(kirq_t irq_obj)
*
* This re-enables the interrupt for a task IRQ object.
* @param irq_obj IRQ object identifier
*
* @return N/A
*/
void task_irq_ack(kirq_t irq_obj)
@ -152,6 +153,7 @@ void task_irq_ack(kirq_t irq_obj)
* This tests a task IRQ object to see if it has signaled an interrupt.
* @param irq_obj IRQ object identifier
* @param time Time to wait (in ticks)
*
* @return RC_OK, RC_FAIL, or RC_TIME
*/
int _task_irq_test(kirq_t irq_obj, int32_t time)
@ -169,6 +171,7 @@ int _task_irq_test(kirq_t irq_obj, int32_t time)
*
* This routine allocates a task IRQ object to a task.
* @param arg Pointer to registration request arguments
*
* @return ptr to allocated task IRQ object if successful, NULL if not
*/
static int _k_task_irq_alloc(void *arg)

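For illustration, the task IRQ calls above combine as follows (a sketch,
not from this diff; MY_IRQ_OBJ is assumed to have been allocated through
the task IRQ registration API beforehand):

/* Wait for the device interrupt, service it at task level, then
 * re-enable the interrupt with task_irq_ack(). */
if (_task_irq_test(MY_IRQ_OBJ, TICKS_UNLIMITED) == RC_OK) {
	/* ... handle the device ... */
	task_irq_ack(MY_IRQ_OBJ);
}
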
View file

@ -39,6 +39,9 @@
*
* @brief Copy a packet
*
* @param in the packet to be copied
* @param out the packet to copy to
*
* @return N/A
*/
static void copy_packet(struct k_args **out, struct k_args *in)

View file

@ -530,17 +530,18 @@ void _k_mem_pool_block_get(struct k_args *A)
* This routine allocates a free block from the specified memory pool, ensuring
* that its size is at least as big as the size requested (in bytes).
*
* @param blockptr pointer to requested block
* @param pool_id pool from which to get block
* @param reqsize requested block size
* @param time maximum number of ticks to wait
*
* @return RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively
*/
int _task_mem_pool_alloc(struct k_block *blockptr, /* ptr to requested block */
kmemory_pool_t pool_id, /* pool from which to get block */
int reqsize, /* requested block size */
int32_t time /* maximum number of ticks to wait */
)
int _task_mem_pool_alloc(struct k_block *blockptr, kmemory_pool_t pool_id,
int reqsize, int32_t time)
{
struct k_args A;
A.Comm = _K_SVC_MEM_POOL_BLOCK_GET;
A.Time.ticks = time;
A.args.p1.pool_id = pool_id;

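For illustration, an allocate/use/free cycle around the routine above (a
sketch, not from this diff; MY_POOL, TICKS_NONE, and the
task_mem_pool_free() counterpart are assumptions):

struct k_block block;

/* Request at least 64 bytes; give up immediately if none are free. */
if (_task_mem_pool_alloc(&block, MY_POOL, 64, TICKS_NONE) == RC_OK) {
	/* ... use the block's data pointer ... */
	task_mem_pool_free(&block); /* assumed release call */
}
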
View file

@ -28,7 +28,6 @@
*
* @return N/A
*/
static void mvdreq_docont(struct k_args *Cont)
{
struct k_args *next;
@ -46,7 +45,6 @@ static void mvdreq_docont(struct k_args *Cont)
*
* @return N/A
*/
static void mvdreq_copy(struct moved_req *ReqArgs)
{
memcpy(ReqArgs->destination, ReqArgs->source,
@ -64,7 +62,6 @@ static void mvdreq_copy(struct moved_req *ReqArgs)
*
* @return N/A
*/
void _k_movedata_request(struct k_args *Req)
{
struct moved_req *ReqArgs;

View file

@ -264,14 +264,11 @@ void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock
* This routine is the entry to the mutex lock kernel service.
*
* @param mutex Mutex object
* @param time Timeout value (in ticks)
* @param time the maximum timeout value (in ticks)
*
* @return RC_OK on success, RC_FAIL on error, RC_TIME on timeout
*/
int _task_mutex_lock(
kmutex_t mutex, /* mutex to lock */
int32_t time /* max # of ticks to wait for mutex */
)
int _task_mutex_lock(kmutex_t mutex, int32_t time)
{
struct k_args A; /* argument packet */
@ -291,13 +288,11 @@ int _task_mutex_lock(
* of the current owner to the priority level it had when it acquired the
* mutex.
*
* @param A k_args
* @param A pointer to mutex unlock request arguments
*
* @return N/A
*/
void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock
request arguments */
)
void _k_mutex_unlock(struct k_args *A)
{
struct _k_mutex_struct *Mutex; /* pointer internal mutex structure */
int MutexId; /* mutex ID obtained from unlock request */
@ -380,12 +375,11 @@ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock
*
* This routine is the entry to the mutex unlock kernel service.
*
* @param mutex Mutex
* @param mutex mutex to unlock
*
* @return N/A
*/
void _task_mutex_unlock(kmutex_t mutex /* mutex to unlock */
)
void _task_mutex_unlock(kmutex_t mutex)
{
struct k_args A; /* argument packet */

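For illustration, the lock/unlock pair above brackets a critical section
(a sketch, not from this diff; MY_MUTEX and TICKS_UNLIMITED are assumed
object/constant names):

/* Block until the mutex is acquired, then release it when done. */
if (_task_mutex_lock(MY_MUTEX, TICKS_UNLIMITED) == RC_OK) {
	/* ... critical section ... */
	_task_mutex_unlock(MY_MUTEX);
}
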
View file

@ -36,7 +36,6 @@ a task to measure the overhead involved in issuing a kernel service request.
*
* @return N/A
*/
void _k_nop(struct k_args *A)
{
ARG_UNUSED(A);
@ -50,7 +49,6 @@ void _k_nop(struct k_args *A)
*
* @return N/A
*/
void _task_nop(void)
{
struct k_args A;

View file

@ -28,7 +28,6 @@
*
* @return N/A
*/
void _k_pipe_get_request(struct k_args *RequestOrig)
{
struct k_args *Request;
@ -176,7 +175,6 @@ void _k_pipe_get_request(struct k_args *RequestOrig)
*
* @return N/A
*/
void _k_pipe_get_timeout(struct k_args *ReqProc)
{
__ASSERT_NO_MSG(NULL != ReqProc->Time.timer);
@ -196,7 +194,6 @@ void _k_pipe_get_timeout(struct k_args *ReqProc)
*
* @return N/A
*/
void _k_pipe_get_reply(struct k_args *ReqProc)
{
__ASSERT_NO_MSG(
@ -248,7 +245,6 @@ void _k_pipe_get_reply(struct k_args *ReqProc)
*
* @return N/A
*/
void _k_pipe_get_ack(struct k_args *Request)
{
struct k_args *LocalReq;

View file

@ -29,7 +29,6 @@
*
* @return N/A
*/
void _k_pipe_put_request(struct k_args *RequestOrig)
{
struct k_args *Request;
@ -195,7 +194,6 @@ void _k_pipe_put_request(struct k_args *RequestOrig)
*
* @return N/A
*/
void _k_pipe_put_timeout(struct k_args *ReqProc)
{
__ASSERT_NO_MSG(NULL != ReqProc->Time.timer);
@ -215,7 +213,6 @@ void _k_pipe_put_timeout(struct k_args *ReqProc)
*
* @return N/A
*/
void _k_pipe_put_reply(struct k_args *ReqProc)
{
__ASSERT_NO_MSG(
@ -269,7 +266,6 @@ void _k_pipe_put_reply(struct k_args *ReqProc)
*
* @return N/A
*/
void _k_pipe_put_ack(struct k_args *Request)
{
if (_ASYNCREQ == _k_pipe_request_type_get(&Request->args)) {

View file

@ -46,7 +46,6 @@ possibly copy the remaining data
*
* @return N/A
*/
void _k_pipe_movedata_ack(struct k_args *pEOXfer)
{
struct _pipe_xfer_ack_arg *pipe_xfer_ack = &pEOXfer->args.pipe_xfer_ack;
@ -202,7 +201,6 @@ void _k_pipe_movedata_ack(struct k_args *pEOXfer)
*
* @return N/A
*/
static kpriority_t move_priority_compute(struct k_args *writer_ptr,
struct k_args *reader_ptr)
{
@ -226,7 +224,6 @@ static kpriority_t move_priority_compute(struct k_args *writer_ptr,
*
* @return N/A
*/
static void setup_movedata(struct k_args *A,
struct _k_pipe_struct *pipe_ptr, XFER_TYPE xfer_type,
struct k_args *writer_ptr, struct k_args *reader_ptr,
@ -239,7 +236,10 @@ static void setup_movedata(struct k_args *A,
A->Comm = _K_SVC_MOVEDATA_REQ;
A->Ctxt.task = NULL;
/* this caused problems when != NULL related to set/reset of state bits */
/*
* this caused problems when != NULL related to set/reset of
* state bits
*/
A->args.moved_req.action = (MovedAction)(MVDACT_SNDACK | MVDACT_RCVACK);
A->args.moved_req.source = source;
@ -407,8 +407,8 @@ static int WriterInProgressIsBlocked(struct _k_pipe_struct *pipe_ptr,
*
* @return N/A
*/
static void pipe_read(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewReader)
static void pipe_read(struct _k_pipe_struct *pipe_ptr,
struct k_args *pNewReader)
{
struct k_args *reader_ptr;
struct _pipe_xfer_req_arg *pipe_read_req;
@ -453,11 +453,14 @@ static void pipe_read(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewReader
pipe_read_req->xferred_size += ret;
if (pipe_read_req->xferred_size == pipe_read_req->total_size) {
_k_pipe_request_status_set(pipe_read_req, TERM_SATISFIED);
_k_pipe_request_status_set(pipe_read_req,
TERM_SATISFIED);
if (reader_ptr->head != NULL) {
DeListWaiter(reader_ptr);
myfreetimer(&reader_ptr->Time.timer);
}
return;
} else {
_k_pipe_request_status_set(pipe_read_req, XFER_BUSY);
@ -476,8 +479,8 @@ static void pipe_read(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewReader
*
* @return N/A
*/
static void pipe_write(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewWriter)
static void pipe_write(struct _k_pipe_struct *pipe_ptr,
struct k_args *pNewWriter)
{
struct k_args *writer_ptr;
struct _pipe_xfer_req_arg *pipe_write_req;
@ -512,8 +515,8 @@ static void pipe_write(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewWrite
}
GETARGS(Moved_req);
setup_movedata(Moved_req, pipe_ptr, XFER_W2B, writer_ptr, NULL, write_ptr,
(char *)(pipe_write_req->data_ptr) +
setup_movedata(Moved_req, pipe_ptr, XFER_W2B, writer_ptr, NULL,
write_ptr, (char *)(pipe_write_req->data_ptr) +
OCTET_TO_SIZEOFUNIT(pipe_write_req->xferred_size),
ret, (numIterations == 2) ? id : -1);
_k_movedata_request(Moved_req);
@ -523,7 +526,8 @@ static void pipe_write(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewWrite
pipe_write_req->xferred_size += ret;
if (pipe_write_req->xferred_size == pipe_write_req->total_size) {
_k_pipe_request_status_set(pipe_write_req, TERM_SATISFIED);
_k_pipe_request_status_set(pipe_write_req,
TERM_SATISFIED);
if (writer_ptr->head != NULL) {
/* only listed requests have a timer */
DeListWaiter(writer_ptr);
@ -538,17 +542,17 @@ static void pipe_write(struct _k_pipe_struct *pipe_ptr, struct k_args *pNewWrite
}
/**
*
* @brief Update the pipe transfer status
*
* @param pActor pointer to struct k_args to be used by actor
* @param pipe_xfer_req pointer to actor's pipe process structure
* @param bytesXferred number of bytes transferred
*
* @return N/A
*/
static void pipe_xfer_status_update(
struct k_args *pActor, /* ptr to struct k_args to be used by actor */
struct _pipe_xfer_req_arg *pipe_xfer_req, /* ptr to actor's pipe process structure */
int bytesXferred /* # of bytes transferred */
)
static void pipe_xfer_status_update(struct k_args *pActor,
struct _pipe_xfer_req_arg *pipe_xfer_req,
int bytesXferred)
{
pipe_xfer_req->num_pending_xfers++;
pipe_xfer_req->xferred_size += bytesXferred;
@ -565,22 +569,26 @@ static void pipe_xfer_status_update(
}
/**
*
* @brief Read and/or write from/to the pipe
*
* @param pipe_ptr pointer to pipe structure
* @param pNewWriter pointer to new writer struct k_args
* @param pNewReader pointer to new reader struct k_args
*
* @return N/A
*/
static void pipe_read_write(
struct _k_pipe_struct *pipe_ptr, /* ptr to pipe structure */
struct k_args *pNewWriter, /* ptr to new writer struct k_args */
struct k_args *pNewReader /* ptr to new reader struct k_args */
)
static void pipe_read_write(struct _k_pipe_struct *pipe_ptr,
struct k_args *pNewWriter,
struct k_args *pNewReader)
{
struct k_args *reader_ptr; /* ptr to struct k_args to be used by reader */
struct k_args *writer_ptr; /* ptr to struct k_args to be used by writer */
struct _pipe_xfer_req_arg *pipe_write_req; /* ptr to writer's pipe process structure */
struct _pipe_xfer_req_arg *pipe_read_req; /* ptr to reader's pipe process structure */
/* ptr to struct k_args to be used by reader */
struct k_args *reader_ptr;
/* ptr to struct k_args to be used by writer */
struct k_args *writer_ptr;
/* ptr to writer's pipe process structure */
struct _pipe_xfer_req_arg *pipe_write_req;
/* ptr to reader's pipe process structure */
struct _pipe_xfer_req_arg *pipe_read_req;
int iT1;
int iT2;

View file

@ -46,7 +46,6 @@ extern const kernelfunc _k_server_dispatch_table[];
*
* @return pointer to selected task
*/
static struct k_task *next_task_select(void)
{
int K_PrioListIdx;
@ -82,7 +81,6 @@ static struct k_task *next_task_select(void)
*
* @return Does not return.
*/
FUNC_NORETURN void _k_server(int unused1, int unused2)
{
struct k_args *pArgs;

View file

@ -146,7 +146,6 @@ static inline void _TimeSliceUpdate(void)
*
* @return number of ticks to process
*/
static inline int32_t _SysIdleElapsedTicksGet(void)
{
#ifdef CONFIG_TICKLESS_IDLE

View file

@ -152,7 +152,6 @@ void _k_timeout_free(struct k_timer *T)
* @param ticks Number of ticks
* @return N/A
*/
void _k_timer_list_update(int ticks)
{
struct k_timer *T;

View file

@ -42,7 +42,6 @@ function __stack_chk_fail and global variable __stack_chk_guard.
*
* @return Does not return
*/
void FUNC_NORETURN _StackCheckHandler(void)
{
/* Stack canary error is a software fatal condition; treat it as such.
@ -61,7 +60,6 @@ void FUNC_NORETURN _StackCheckHandler(void)
* Symbol referenced by GCC compiler generated code for canary value.
* The canary value gets initialized in _Cstart().
*/
void __noinit *__stack_chk_guard;
/**
@ -71,6 +69,5 @@ void __noinit *__stack_chk_guard;
* This routine is invoked when a stack canary error is detected, indicating
* a buffer overflow or stack corruption problem.
*/
FUNC_ALIAS(_StackCheckHandler, __stack_chk_fail, void);
#endif

View file

@ -45,7 +45,6 @@ extern CtorFuncPtr __CTOR_END__[];
* This routine is invoked by the nanokernel routine _Cstart() after the basic
* hardware has been initialized.
*/
void _Ctors(void)
{
unsigned int nCtors;

View file

@ -35,11 +35,12 @@ data structure.
*
* Sets the nanokernel data structure idle field to a non-zero value.
*
* @param ticks the number of ticks to idle
*
* @return N/A
*
* \NOMANUAL
*/
void nano_cpu_set_idle(int32_t ticks)
{
extern tNANO _nanokernel;

View file

@ -62,7 +62,6 @@ uint32_t _hw_irq_to_c_handler_latency = ULONG_MAX;
* @return N/A
*
*/
void _int_latency_start(void)
{
/* when interrupts are not already locked, take time stamp */
@ -82,7 +81,6 @@ void _int_latency_start(void)
* @return N/A
*
*/
void _int_latency_stop(void)
{
uint32_t delta;
@ -133,7 +131,6 @@ void _int_latency_stop(void)
* @return N/A
*
*/
void int_latency_init(void)
{
uint32_t timeToReadTime;
@ -186,7 +183,6 @@ void int_latency_init(void)
* @return N/A
*
*/
void int_latency_show(void)
{
uint32_t intHandlerLatency = 0;

View file

@ -98,10 +98,12 @@ int _is_thread_essential(struct tcs *pCtx /* pointer to thread */
* current Zephyr SDK use non-Thumb code that isn't supported on Cortex-M CPUs.
* For the time being any ARM-based application that attempts to use this API
* will get a link error (which is preferable to a mysterious exception).
*
* @param usec_to_wait the number of microseconds to busy wait
*
* @return N/A
*/
#ifndef CONFIG_ARM
void sys_thread_busy_wait(uint32_t usec_to_wait)
{
/* use 64-bit math to prevent overflow when multiplying */
@ -121,7 +123,6 @@ void sys_thread_busy_wait(uint32_t usec_to_wait)
}
}
}
#endif /* CONFIG_ARM */
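
For illustration, a call to the busy-wait helper above (a sketch, not from
this diff; note the text's warning that ARM builds cannot currently link
this API):

/* Give a slow peripheral ~200 microseconds to settle without
 * yielding the CPU; 64-bit math internally prevents overflow. */
sys_thread_busy_wait(200);
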
#ifdef CONFIG_THREAD_CUSTOM_DATA
@ -207,6 +208,11 @@ void _thread_exit(struct tcs *thread)
* passes it three arguments. It also handles graceful termination of the
* task or fiber if the entry point function ever returns.
*
* @param pEntry address of the app entry point function
* @param parameter1 1st arg to the app entry point function
* @param parameter2 2nd arg to the app entry point function
* @param parameter3 3rd arg to the app entry point function
*
* @internal
* The 'noreturn' attribute is applied to this function so that the compiler
* can dispense with generating the usual preamble that is only required for
@ -215,12 +221,10 @@ void _thread_exit(struct tcs *thread)
* @return Does not return
*
*/
FUNC_NORETURN void _thread_entry(
_thread_entry_t pEntry, /* address of app entry point function */
_thread_arg_t parameter1, /* 1st arg to app entry point function */
_thread_arg_t parameter2, /* 2nd arg to app entry point function */
_thread_arg_t parameter3 /* 3rd arg to app entry point function */
)
FUNC_NORETURN void _thread_entry(_thread_entry_t pEntry,
_thread_arg_t parameter1,
_thread_arg_t parameter2,
_thread_arg_t parameter3)
{
/* Execute the "application" entry point function */

View file

@ -105,7 +105,6 @@ extern void _Ctors(void);
*
* @return N/A
*/
static void _main(void)
{
_sys_device_do_config_level(_SYS_INIT_LEVEL_NANOKERNEL);
@ -132,7 +131,6 @@ extern void _main(void);
*
* @return N/A
*/
static void nano_init(struct tcs *dummyOutContext)
{
/*
@ -240,7 +238,6 @@ extern void *__stack_chk_guard;
*
* @return Does not return
*/
FUNC_NORETURN void _Cstart(void)
{
/* floating point operations are NOT performed during nanokernel init */

View file

@ -25,6 +25,9 @@
* nano_fiber_stack_pop, nano_task_stack_pop, nano_isr_stack_pop
* nano_fiber_stack_pop_wait, nano_task_stack_pop_wait
*
* @param stack the stack to initialize
* @param data pointer to the container for the stack
*
* @internal
* In some cases the compiler "alias" attribute is used to map two or more
* APIs to the same function, since they have identical implementations.
@ -37,10 +40,7 @@
#include <sections.h>
void nano_stack_init(
struct nano_stack *stack, /* stack to initialize */
uint32_t *data /* container for stack */
)
void nano_stack_init(struct nano_stack *stack, uint32_t *data)
{
stack->next = stack->base = data;
stack->fiber = (struct tcs *)0;

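For illustration, initializing and using a nanokernel stack object (a
sketch, not from this diff; the push/pop calls are assumed members of the
API family listed above):

static uint32_t stack_data[16]; /* container for stack elements */
static struct nano_stack my_stack;

void stack_example(void)
{
	uint32_t value;

	nano_stack_init(&my_stack, stack_data);

	nano_task_stack_push(&my_stack, 0xbeef); /* assumed push call */
	if (nano_task_stack_pop(&my_stack, &value)) { /* assumed pop call */
		/* value == 0xbeef */
	}
}
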
View file

@ -104,7 +104,6 @@ int64_t nano_tick_get(void)
* NOTE: We use inline function for both 64-bit and 32-bit functions.
* Compiler optimizes out 64-bit result handling in 32-bit version.
*/
static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
{
int64_t delta;
@ -192,7 +191,6 @@ static inline void handle_expired_nano_timers(int ticks)
*
* @return N/A
*/
void _nano_sys_clock_tick_announce(uint32_t ticks)
{
_nano_ticks += ticks;