nanokernel : clean up comments

Moving comments back to the 70 character limit.  Also moving some of the
comments up to be included in the doxygen headers instead of inline.

Change-Id: I56a6015e5fd6da81e9a06701217e62e899b6aa62
Signed-off-by: Dan Kalowsky <daniel.kalowsky@intel.com>
Dan Kalowsky 2016-02-10 16:38:50 -08:00 committed by Gerrit Code Review
commit 21a99e639d
9 changed files with 67 additions and 54 deletions

View file

@@ -85,10 +85,11 @@ void _thread_essential_clear(void)
* thread. A NULL thread pointer indicates that the current thread is
* to be queried.
*
* @param pCtx Pointer to the thread
*
* @return Non-zero if specified thread is essential, zero if it is not
*/
int _is_thread_essential(struct tcs *pCtx /* pointer to thread */
)
int _is_thread_essential(struct tcs *pCtx)
{
return ((pCtx == NULL) ? _nanokernel.current : pCtx)->flags & ESSENTIAL;
}
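
The cleanup above keeps the behavior of _is_thread_essential(): passing NULL asks about the currently running thread. A minimal compilable sketch of that idiom follows; the struct layout, the flag value, and the nano_sketch global are placeholders, not the kernel's real definitions.

#include <stddef.h>

#define ESSENTIAL 0x1                 /* placeholder bit, not the kernel's value */

struct tcs {                          /* trimmed-down stand-in for the real TCS */
    unsigned int flags;
};

static struct {
    struct tcs *current;              /* thread that is running right now */
} nano_sketch;

int is_thread_essential_sketch(struct tcs *pCtx)
{
    /* NULL means "query the currently running thread" */
    struct tcs *t = (pCtx == NULL) ? nano_sketch.current : pCtx;

    return (t->flags & ESSENTIAL) != 0;
}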

View file

@@ -121,8 +121,7 @@ void fiber_yield(void)
(_nanokernel.current->prio >= _nanokernel.fiber->prio)) {
/*
* Reinsert current thread into the list of runnable threads,
* and
* then swap to the thread at the head of the fiber list.
* and then swap to the thread at the head of the fiber list.
*/
_nano_fiber_ready(_nanokernel.current);
@@ -158,8 +157,8 @@ FUNC_NORETURN void _nano_fiber_swap(void)
/*
* Compiler can't know that _Swap() won't return and will issue a
* warning
* unless we explicitly tell it that control never gets this far.
* warning unless we explicitly tell it that control never gets this
* far.
*/
CODE_UNREACHABLE;
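
For context on the reworded comment: _Swap() is an ordinary function as far as the compiler is concerned, so a function declared FUNC_NORETURN that ends with it would otherwise draw a compiler warning. A hedged sketch of the idiom, using stand-in names and the GCC/Clang __builtin_unreachable() builtin (the real CODE_UNREACHABLE macro may be defined differently):

#define MY_CODE_UNREACHABLE __builtin_unreachable()

extern unsigned int irq_lock_stub(void);         /* stand-in for irq_lock()      */
extern unsigned int swap_stub(unsigned int key); /* stand-in for _Swap(); the    */
                                                 /* compiler assumes it returns  */

__attribute__((noreturn)) void fiber_swap_sketch(void)
{
    unsigned int key = irq_lock_stub();

    (void)swap_stub(key);     /* on this path it never actually returns...   */
    MY_CODE_UNREACHABLE;      /* ...so state that explicitly to the compiler */
}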

View file

@@ -48,19 +48,20 @@
void nano_fifo_init(struct nano_fifo *fifo)
{
/*
* The wait queue and data queue occupy the same space since there cannot
* be both queued data and pending fibers in the FIFO. Care must be taken
* that, when one of the queues becomes empty, it is reset to a state
* that reflects an empty queue to both the data and wait queues.
* The wait queue and data queue occupy the same space since there
* cannot be both queued data and pending fibers in the FIFO. Care
* must be taken that, when one of the queues becomes empty, it is
* reset to a state that reflects an empty queue to both the data and
* wait queues.
*/
_nano_wait_q_init(&fifo->wait_q);
/*
* If the 'stat' field is a positive value, it indicates how many data
* elements reside in the FIFO. If the 'stat' field is a negative value,
* its absolute value indicates how many fibers are pending on the LIFO
* object. Thus a value of '0' indicates that there are no data elements
* in the LIFO _and_ there are no pending fibers.
* elements reside in the FIFO. If the 'stat' field is a negative
* value, its absolute value indicates how many fibers are pending on
* the LIFO object. Thus a value of '0' indicates that there are no
* data elements in the LIFO _and_ there are no pending fibers.
*/
fifo->stat = 0;
@@ -171,8 +172,8 @@ static inline void *dequeue_data(struct nano_fifo *fifo)
if (fifo->stat == 0) {
/*
* The data_q and wait_q occupy the same space and have the same
* format, and there is already an API for resetting the wait_q, so
* use it.
* format, and there is already an API for resetting the wait_q,
* so use it.
*/
_nano_wait_q_reset(&fifo->wait_q);
} else {
@@ -221,8 +222,9 @@ void *nano_task_fifo_get(struct nano_fifo *fifo, int32_t timeout_in_ticks)
do {
/*
* Predict that the branch will be taken to break out of the loop.
* There is little cost to a misprediction since that leads to idle.
* Predict that the branch will be taken to break out of the
* loop. There is little cost to a misprediction since that
* leads to idle.
*/
if (likely(fifo->stat > 0)) {
@@ -238,7 +240,9 @@ void *nano_task_fifo_get(struct nano_fifo *fifo, int32_t timeout_in_ticks)
_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);
/* see explanation in nano_stack.c:nano_task_stack_pop() */
/* see explanation in
* nano_stack.c:nano_task_stack_pop()
*/
nano_cpu_atomic_idle(key);
key = irq_lock();
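
The 'stat' sign convention described in the rewrapped nano_fifo_init() comment can be summarized with a few helpers. This is an illustrative sketch only; these functions are not part of the kernel API.

static inline int obj_has_data(int stat)        { return stat > 0; }
static inline int obj_has_waiters(int stat)     { return stat < 0; }
static inline int obj_pending_fibers(int stat)  { return (stat < 0) ? -stat : 0; }

/*
 * For example: two puts with nobody waiting leave stat == 2, two fibers
 * pending on an empty object leave stat == -2, and stat == 0 means there
 * is neither queued data nor a pending fiber.
 */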

View file

@@ -140,17 +140,18 @@ extern void _main(void);
static void nano_init(struct tcs *dummyOutContext)
{
/*
* Initialize the current execution thread to permit a level of debugging
* output if an exception should happen during nanokernel initialization.
* However, don't waste effort initializing the fields of the dummy thread
* beyond those needed to identify it as a dummy thread.
* Initialize the current execution thread to permit a level of
* debugging output if an exception should happen during nanokernel
* initialization.
* However, don't waste effort initializing the fields of the dummy
* thread beyond those needed to identify it as a dummy thread.
*/
_nanokernel.current = dummyOutContext;
/*
* Do not insert dummy execution context in the list of fibers, so that it
* does not get scheduled back in once context-switched out.
* Do not insert dummy execution context in the list of fibers, so that
* it does not get scheduled back in once context-switched out.
*/
dummyOutContext->link = (struct tcs *)NULL;
@@ -185,7 +186,9 @@ static void nano_init(struct tcs *dummyOutContext)
0 /* options */
);
/* indicate that failure of this task may be fatal to the entire system */
/* indicate that failure of this task may be fatal to the entire
* system
*/
_nanokernel.task->flags |= ESSENTIAL;
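
The comment being rewrapped above describes why the dummy context is never linked into the fiber list. A rough sketch of the idea, with made-up types and names (the real ready queue is priority ordered and more involved):

struct tcs_sketch {
    struct tcs_sketch *link;     /* next runnable thread; NULL ends the list */
};

struct ready_q_sketch {
    struct tcs_sketch *head;
};

static void ready_q_add(struct ready_q_sketch *q, struct tcs_sketch *t)
{
    t->link = q->head;           /* simple LIFO insert, enough for the sketch */
    q->head = t;
}

/*
 * The dummy context gets link = NULL and ready_q_add() is simply never
 * called on it, so once the kernel switches away from it nothing can
 * ever schedule it back in.
 */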

View file

@@ -148,8 +148,9 @@ void *nano_task_lifo_get(struct nano_lifo *lifo, int32_t timeout_in_ticks)
do {
/*
* Predict that the branch will be taken to break out of the loop.
* There is little cost to a misprediction since that leads to idle.
* Predict that the branch will be taken to break out of the
* loop. There is little cost to a misprediction since that
* leads to idle.
*/
if (likely(lifo->list != NULL)) {
@@ -165,7 +166,9 @@ void *nano_task_lifo_get(struct nano_lifo *lifo, int32_t timeout_in_ticks)
_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);
/* see explanation in nano_stack.c:nano_task_stack_pop() */
/* see explanation in
* nano_stack.c:nano_task_stack_pop()
*/
nano_cpu_atomic_idle(imask);
imask = irq_lock();
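
The "predict that the branch will be taken" comments refer to the kernel's likely() hint. A hedged sketch of how such a hint is commonly defined on GCC/Clang (the macro names here are illustrative and the real definition may differ):

#define my_likely(x)   __builtin_expect(!!(x), 1)
#define my_unlikely(x) __builtin_expect(!!(x), 0)

int have_item_sketch(volatile int *count)
{
    if (my_likely(*count > 0)) {   /* lay out the "data available" path as   */
        return 1;                  /* the expected, fall-through case        */
    }
    return 0;                      /* a miss leads to idling anyway, so a    */
                                   /* wrong prediction costs little          */
}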

View file

@@ -163,8 +163,9 @@ int nano_task_sem_take(struct nano_sem *sem, int32_t timeout_in_ticks)
do {
/*
* Predict that the branch will be taken to break out of the loop.
* There is little cost to a misprediction since that leads to idle.
* Predict that the branch will be taken to break out of the
* loop. There is little cost to a misprediction since that
* leads to idle.
*/
if (likely(sem->nsig > 0)) {
@@ -177,7 +178,9 @@ int nano_task_sem_take(struct nano_sem *sem, int32_t timeout_in_ticks)
_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);
/* see explanation in nano_stack.c:nano_task_stack_pop() */
/* see explanation in
* nano_stack.c:nano_task_stack_pop()
*/
nano_cpu_atomic_idle(key);
key = irq_lock();
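
The loops being rewrapped in the FIFO, LIFO, and semaphore task-level get/take paths all share one shape. The sketch below mirrors that shape with invented helper names (fake_*); it is not kernel code, just an outline of the control flow the comments describe.

extern unsigned int fake_irq_lock(void);
extern void fake_irq_unlock(unsigned int key);
extern void fake_set_timeout(void);
extern void fake_atomic_idle(unsigned int key);   /* re-enable IRQs and halt, atomically */

int task_take_sketch(volatile int *count)
{
    unsigned int key = fake_irq_lock();

    for (;;) {
        if (*count > 0) {          /* expected case: resource is available */
            (*count)--;
            break;
        }
        fake_set_timeout();        /* arm the optional timeout             */
        fake_atomic_idle(key);     /* sleep until an interrupt arrives     */
        key = fake_irq_lock();     /* woken up; re-check under the lock    */
    }
    fake_irq_unlock(key);
    return 1;
}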

View file

@@ -174,8 +174,9 @@ int nano_task_stack_pop(struct nano_stack *stack, uint32_t *pData, int32_t timeo
while (1) {
/*
* Predict that the branch will be taken to break out of the loop.
* There is little cost to a misprediction since that leads to idle.
* Predict that the branch will be taken to break out of the
* loop. There is little cost to a misprediction since that
* leads to idle.
*/
if (likely(stack->next > stack->base)) {
@@ -190,17 +191,17 @@ int nano_task_stack_pop(struct nano_stack *stack, uint32_t *pData, int32_t timeo
}
/*
* Invoke nano_cpu_atomic_idle() with interrupts still disabled to
* prevent the scenario where an interrupt fires after re-enabling
* interrupts and before executing the "halt" instruction. If the
* ISR performs a nano_isr_stack_push() on the same stack object,
* the subsequent execution of the "halt" instruction will result
* in the queued data being ignored until the next interrupt, if
* any.
* Invoke nano_cpu_atomic_idle() with interrupts still disabled
* to prevent the scenario where an interrupt fires after
* re-enabling interrupts and before executing the "halt"
* instruction. If the ISR performs a nano_isr_stack_push() on
* the same stack object, the subsequent execution of the "halt"
* instruction will result in the queued data being ignored
* until the next interrupt, if any.
*
* Thus it should be clear that an architectures implementation
* of nano_cpu_atomic_idle() must be able to atomically re-enable
* interrupts and enter a low-power mode.
* of nano_cpu_atomic_idle() must be able to atomically
* re-enable interrupts and enter a low-power mode.
*
* This explanation is valid for all nanokernel objects: stacks,
* FIFOs, LIFOs, and semaphores, for their
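
The race this comment describes is easier to see side by side. In the sketch below, cpu_halt() is an invented stand-in for the "halt" instruction, and the other prototypes are written from how the quoted code uses (or pairs with) them; treat the exact signatures as assumptions.

extern unsigned int irq_lock(void);
extern void irq_unlock(unsigned int key);
extern void nano_cpu_atomic_idle(unsigned int key);
extern void cpu_halt(void);                 /* stand-in for the halt instruction */

/* Racy version, shown only for contrast: */
void wait_racy_sketch(volatile int *stat, unsigned int key)
{
    while (*stat == 0) {
        irq_unlock(key);     /* (1) interrupts come back on                 */
                             /* (2) if the ISR fires right here and pushes  */
                             /*     data, its wakeup is already spent...    */
        cpu_halt();          /* (3) ...yet the CPU halts anyway and sleeps  */
                             /*     until some unrelated interrupt arrives  */
        key = irq_lock();
    }
}

/* Safe version, matching the quoted loops: re-enable and halt in one step */
void wait_safe_sketch(volatile int *stat, unsigned int key)
{
    while (*stat == 0) {
        nano_cpu_atomic_idle(key);   /* the ISR can only be taken once the  */
                                     /* halt is in effect, so its return    */
                                     /* wakes the CPU immediately           */
        key = irq_lock();
    }
}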

View file

@@ -70,9 +70,9 @@ int64_t sys_tick_get(void)
{
int64_t tmp_sys_clock_tick_count;
/*
* Lock the interrupts when reading _sys_clock_tick_count 64-bit variable.
* Some architectures (x86) do not handle 64-bit atomically, so
* we have to lock the timer interrupt that causes change of
* Lock the interrupts when reading _sys_clock_tick_count 64-bit
* variable. Some architectures (x86) do not handle 64-bit atomically,
* so we have to lock the timer interrupt that causes change of
* _sys_clock_tick_count
*/
unsigned int imask = irq_lock();
@@ -116,9 +116,9 @@ static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
int64_t saved;
/*
* Lock the interrupts when reading _sys_clock_tick_count 64-bit variable.
* Some architectures (x86) do not handle 64-bit atomically, so
* we have to lock the timer interrupt that causes change of
* Lock the interrupts when reading _sys_clock_tick_count 64-bit
* variable. Some architectures (x86) do not handle 64-bit atomically,
* so we have to lock the timer interrupt that causes change of
* _sys_clock_tick_count
*/
unsigned int imask = irq_lock();
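
As the rewrapped comment explains, a 64-bit counter cannot be read atomically on a 32-bit machine, so the read is bracketed by interrupt locking. A small sketch of that pattern (the variable and function names are placeholders; the irq_lock()/irq_unlock() prototypes are assumed from how the quoted code uses them):

#include <stdint.h>

extern unsigned int irq_lock(void);
extern void irq_unlock(unsigned int key);

static volatile int64_t tick_count_sketch;   /* updated from the timer ISR */

int64_t ticks_get_sketch(void)
{
    int64_t snapshot;
    unsigned int key = irq_lock();     /* keep the tick ISR out...            */

    snapshot = tick_count_sketch;      /* ...while the two 32-bit halves load */
    irq_unlock(key);

    return snapshot;
}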

View file

@@ -41,13 +41,12 @@ FUNC_ALIAS(_timer_start, nano_timer_start, void);
* This function starts a previously initialized nanokernel timer object.
* The timer will expire in <ticks> system clock ticks.
*
* @param timer The Timer to start
* @param ticks The number of system ticks before expiration
*
* @return N/A
*/
void _timer_start(struct nano_timer *timer, /* timer to start */
int ticks /* number of system ticks
* before expiry
*/
)
void _timer_start(struct nano_timer *timer, int ticks)
{
unsigned int imask;
struct nano_timer *cur;
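
For reference, the doxygen style this commit converges on looks like the template below. The names are placeholders; only the layout matters, with one @param tag per argument instead of comments next to the parameters, wrapped at the 70-character limit named in the commit message.

struct example_timer;

/**
 * @brief Start a previously initialized timer.
 *
 * Longer description, wrapped to stay within the 70-character
 * comment limit.
 *
 * @param timer The timer to start
 * @param ticks Number of system clock ticks before expiration
 *
 * @return N/A
 */
void example_timer_start(struct example_timer *timer, int ticks);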