nanokernel: clean up comments

Moving comments back to the 70-character limit. Also moving some of the
comments up to be included in the doxygen headers instead of inline.

Change-Id: I56a6015e5fd6da81e9a06701217e62e899b6aa62
Signed-off-by: Dan Kalowsky <daniel.kalowsky@intel.com>
Dan Kalowsky 2016-02-10 16:38:50 -08:00 committed by Gerrit Code Review
commit 21a99e639d
9 changed files with 67 additions and 54 deletions


@@ -85,10 +85,11 @@ void _thread_essential_clear(void)
  * thread. A NULL thread pointer indicates that the current thread is
  * to be queried.
  *
+ * @param pCtx Pointer to the thread
+ *
  * @return Non-zero if specified thread is essential, zero if it is not
  */
-int _is_thread_essential(struct tcs *pCtx /* pointer to thread */
-			 )
+int _is_thread_essential(struct tcs *pCtx)
 {
 	return ((pCtx == NULL) ? _nanokernel.current : pCtx)->flags & ESSENTIAL;
 }
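
The query above follows a small convention worth seeing in isolation: a NULL
thread pointer means "ask about the currently running thread", and the answer
is a single flag-bit test. The following standalone sketch mirrors that
pattern with simplified stand-ins; the struct, the `current` pointer, and the
ESSENTIAL value below are illustrative only, not the real struct tcs,
_nanokernel, or kernel flag value.

    #include <stdio.h>

    #define ESSENTIAL 0x1                    /* illustrative flag bit */

    struct fake_tcs {
        int flags;
    };

    static struct fake_tcs main_thread = { .flags = ESSENTIAL };
    static struct fake_tcs *current = &main_thread;  /* stand-in for _nanokernel.current */

    static int is_essential(struct fake_tcs *tcs)
    {
        /* NULL means "the currently running thread" */
        return ((tcs == NULL) ? current : tcs)->flags & ESSENTIAL;
    }

    int main(void)
    {
        struct fake_tcs worker = { .flags = 0 };

        printf("current: %d\n", is_essential(NULL) != 0);    /* prints 1 */
        printf("worker:  %d\n", is_essential(&worker) != 0); /* prints 0 */
        return 0;
    }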


@@ -121,8 +121,7 @@ void fiber_yield(void)
 	    (_nanokernel.current->prio >= _nanokernel.fiber->prio)) {
 		/*
 		 * Reinsert current thread into the list of runnable threads,
-		 * and
-		 * then swap to the thread at the head of the fiber list.
+		 * and then swap to the thread at the head of the fiber list.
 		 */
 		_nano_fiber_ready(_nanokernel.current);

@@ -158,8 +157,8 @@ FUNC_NORETURN void _nano_fiber_swap(void)
 	/*
 	 * Compiler can't know that _Swap() won't return and will issue a
-	 * warning
-	 * unless we explicitly tell it that control never gets this far.
+	 * warning unless we explicitly tell it that control never gets this
+	 * far.
 	 */
 	CODE_UNREACHABLE;
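
For readers unfamiliar with this idiom, here is a standalone sketch of what
CODE_UNREACHABLE accomplishes. The macro name is Zephyr's; the sketch assumes
a GCC/Clang-style __builtin_unreachable() as its expansion, which is an
assumption made for illustration rather than a statement about the kernel's
actual definition.

    #include <stdlib.h>

    static void swap_away(void)
    {
        /* Stand-in for _Swap(): control leaves and never comes back,
         * but the compiler cannot know that from the prototype alone.
         */
        exit(0);
    }

    __attribute__((noreturn)) static void fiber_swap_sketch(void)
    {
        swap_away();

        /* Mirrors CODE_UNREACHABLE: promises the compiler that control
         * never reaches this point, silencing the warning mentioned in
         * the comment above.
         */
        __builtin_unreachable();
    }

    int main(void)
    {
        fiber_swap_sketch();
    }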


@@ -48,19 +48,20 @@
 void nano_fifo_init(struct nano_fifo *fifo)
 {
 	/*
-	 * The wait queue and data queue occupy the same space since there cannot
-	 * be both queued data and pending fibers in the FIFO. Care must be taken
-	 * that, when one of the queues becomes empty, it is reset to a state
-	 * that reflects an empty queue to both the data and wait queues.
+	 * The wait queue and data queue occupy the same space since there
+	 * cannot be both queued data and pending fibers in the FIFO. Care
+	 * must be taken that, when one of the queues becomes empty, it is
+	 * reset to a state that reflects an empty queue to both the data and
+	 * wait queues.
 	 */
 	_nano_wait_q_init(&fifo->wait_q);
 	/*
 	 * If the 'stat' field is a positive value, it indicates how many data
-	 * elements reside in the FIFO. If the 'stat' field is a negative value,
-	 * its absolute value indicates how many fibers are pending on the LIFO
-	 * object. Thus a value of '0' indicates that there are no data elements
-	 * in the LIFO _and_ there are no pending fibers.
+	 * elements reside in the FIFO. If the 'stat' field is a negative
+	 * value, its absolute value indicates how many fibers are pending on
+	 * the LIFO object. Thus a value of '0' indicates that there are no
+	 * data elements in the LIFO _and_ there are no pending fibers.
 	 */
 	fifo->stat = 0;
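
The signed 'stat' counter described in that comment is easy to misread, so
here is a standalone sketch of the bookkeeping it implies: a positive value
counts queued data elements, a negative value counts fibers blocked waiting
for data, and zero means both sides are empty. The struct and helper below
are illustrative stand-ins only; the real object also manages the shared
data/wait queues.

    #include <stdio.h>

    struct stat_sketch {
        int stat;   /* > 0: queued data, < 0: pending fibers, 0: empty */
    };

    static const char *describe(int stat)
    {
        if (stat > 0)
            return "data element(s) queued, no waiters";
        if (stat < 0)
            return "fiber(s) pending, no data";
        return "empty: no data and no waiters";
    }

    int main(void)
    {
        struct stat_sketch f = { .stat = 0 };

        f.stat--;      /* a fiber blocks waiting for data  -> -1 */
        printf("stat=%2d: %s\n", f.stat, describe(f.stat));

        f.stat++;      /* a producer satisfies that fiber  ->  0 */
        printf("stat=%2d: %s\n", f.stat, describe(f.stat));

        f.stat += 2;   /* two elements queued, nobody waits ->  2 */
        printf("stat=%2d: %s\n", f.stat, describe(f.stat));
        return 0;
    }
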
@@ -171,8 +172,8 @@ static inline void *dequeue_data(struct nano_fifo *fifo)
 	if (fifo->stat == 0) {
 		/*
 		 * The data_q and wait_q occupy the same space and have the same
-		 * format, and there is already an API for resetting the wait_q, so
-		 * use it.
+		 * format, and there is already an API for resetting the wait_q,
+		 * so use it.
 		 */
 		_nano_wait_q_reset(&fifo->wait_q);
 	} else {
@@ -221,8 +222,9 @@ void *nano_task_fifo_get(struct nano_fifo *fifo, int32_t timeout_in_ticks)
 	do {
 		/*
-		 * Predict that the branch will be taken to break out of the loop.
-		 * There is little cost to a misprediction since that leads to idle.
+		 * Predict that the branch will be taken to break out of the
+		 * loop. There is little cost to a misprediction since that
+		 * leads to idle.
 		 */
 		if (likely(fifo->stat > 0)) {
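
The likely() hint in that test is the standard compiler branch-prediction
idiom. As a standalone illustration, the sketch below defines the macro the
common way, on top of __builtin_expect(); this definition is an assumption
for the example, and the kernel's own header may differ. The hint only biases
code layout toward the predicted branch; a wrong guess still produces correct
behavior, just a little slower, which is why the comment notes the cost is
small when the fallback path merely idles.

    #include <stdio.h>

    /* Typical definitions; assumed here for illustration */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    int main(void)
    {
        int stat = 3;   /* stand-in for fifo->stat */

        do {
            if (likely(stat > 0)) {
                /* Predicted path: data available, consume one element */
                stat--;
                printf("consumed, %d left\n", stat);
            } else {
                /* Mispredicted path: nothing queued; the kernel would
                 * idle here waiting for a producer.
                 */
                break;
            }
        } while (stat > 0);

        return 0;
    }
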
@@ -238,7 +240,9 @@ void *nano_task_fifo_get(struct nano_fifo *fifo, int32_t timeout_in_ticks)
 		_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);
-		/* see explanation in nano_stack.c:nano_task_stack_pop() */
+		/* see explanation in
+		 * nano_stack.c:nano_task_stack_pop()
+		 */
 		nano_cpu_atomic_idle(key);
 		key = irq_lock();


@@ -140,17 +140,18 @@ extern void _main(void);
 static void nano_init(struct tcs *dummyOutContext)
 {
 	/*
-	 * Initialize the current execution thread to permit a level of debugging
-	 * output if an exception should happen during nanokernel initialization.
-	 * However, don't waste effort initializing the fields of the dummy thread
-	 * beyond those needed to identify it as a dummy thread.
+	 * Initialize the current execution thread to permit a level of
+	 * debugging output if an exception should happen during nanokernel
+	 * initialization.
+	 * However, don't waste effort initializing the fields of the dummy
+	 * thread beyond those needed to identify it as a dummy thread.
 	 */
 	_nanokernel.current = dummyOutContext;
 	/*
-	 * Do not insert dummy execution context in the list of fibers, so that it
-	 * does not get scheduled back in once context-switched out.
+	 * Do not insert dummy execution context in the list of fibers, so that
+	 * it does not get scheduled back in once context-switched out.
 	 */
 	dummyOutContext->link = (struct tcs *)NULL;

@@ -185,7 +186,9 @@ static void nano_init(struct tcs *dummyOutContext)
 			   0 /* options */
 			   );
-	/* indicate that failure of this task may be fatal to the entire system */
+	/* indicate that failure of this task may be fatal to the entire
+	 * system
+	 */
 	_nanokernel.task->flags |= ESSENTIAL;


@@ -148,8 +148,9 @@ void *nano_task_lifo_get(struct nano_lifo *lifo, int32_t timeout_in_ticks)
 	do {
 		/*
-		 * Predict that the branch will be taken to break out of the loop.
-		 * There is little cost to a misprediction since that leads to idle.
+		 * Predict that the branch will be taken to break out of the
+		 * loop. There is little cost to a misprediction since that
+		 * leads to idle.
 		 */
 		if (likely(lifo->list != NULL)) {

@@ -165,7 +166,9 @@ void *nano_task_lifo_get(struct nano_lifo *lifo, int32_t timeout_in_ticks)
 		_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);
-		/* see explanation in nano_stack.c:nano_task_stack_pop() */
+		/* see explanation in
+		 * nano_stack.c:nano_task_stack_pop()
+		 */
 		nano_cpu_atomic_idle(imask);
 		imask = irq_lock();


@@ -163,8 +163,9 @@ int nano_task_sem_take(struct nano_sem *sem, int32_t timeout_in_ticks)
 	do {
 		/*
-		 * Predict that the branch will be taken to break out of the loop.
-		 * There is little cost to a misprediction since that leads to idle.
+		 * Predict that the branch will be taken to break out of the
+		 * loop. There is little cost to a misprediction since that
+		 * leads to idle.
 		 */
 		if (likely(sem->nsig > 0)) {

@@ -177,7 +178,9 @@ int nano_task_sem_take(struct nano_sem *sem, int32_t timeout_in_ticks)
 		_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);
-		/* see explanation in nano_stack.c:nano_task_stack_pop() */
+		/* see explanation in
+		 * nano_stack.c:nano_task_stack_pop()
+		 */
 		nano_cpu_atomic_idle(key);
 		key = irq_lock();


@@ -174,8 +174,9 @@ int nano_task_stack_pop(struct nano_stack *stack, uint32_t *pData, int32_t timeo
 	while (1) {
 		/*
-		 * Predict that the branch will be taken to break out of the loop.
-		 * There is little cost to a misprediction since that leads to idle.
+		 * Predict that the branch will be taken to break out of the
+		 * loop. There is little cost to a misprediction since that
+		 * leads to idle.
 		 */
 		if (likely(stack->next > stack->base)) {

@@ -190,17 +191,17 @@ int nano_task_stack_pop(struct nano_stack *stack, uint32_t *pData, int32_t timeo
 		}
 		/*
-		 * Invoke nano_cpu_atomic_idle() with interrupts still disabled to
-		 * prevent the scenario where an interrupt fires after re-enabling
-		 * interrupts and before executing the "halt" instruction. If the
-		 * ISR performs a nano_isr_stack_push() on the same stack object,
-		 * the subsequent execution of the "halt" instruction will result
-		 * in the queued data being ignored until the next interrupt, if
-		 * any.
+		 * Invoke nano_cpu_atomic_idle() with interrupts still disabled
+		 * to prevent the scenario where an interrupt fires after
+		 * re-enabling interrupts and before executing the "halt"
+		 * instruction. If the ISR performs a nano_isr_stack_push() on
+		 * the same stack object, the subsequent execution of the "halt"
+		 * instruction will result in the queued data being ignored
+		 * until the next interrupt, if any.
 		 *
 		 * Thus it should be clear that an architectures implementation
-		 * of nano_cpu_atomic_idle() must be able to atomically re-enable
-		 * interrupts and enter a low-power mode.
+		 * of nano_cpu_atomic_idle() must be able to atomically
+		 * re-enable interrupts and enter a low-power mode.
 		 *
 		 * This explanation is valid for all nanokernel objects: stacks,
 		 * FIFOs, LIFOs, and semaphores, for their
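
That comment describes a classic lost-wakeup race, and the task-level
get/take/pop routines touched by this commit all share the same wait-loop
shape: lock interrupts, test for data, and if there is none, atomically
re-enable interrupts and idle, then re-lock and test again. The standalone
sketch below shows only that shape; the irq and idle primitives are stubs
(assumptions for illustration) that simulate an ISR delivering one item so
the program terminates, whereas on real hardware nano_cpu_atomic_idle() must
re-enable interrupts and halt as one indivisible step.

    #include <stdio.h>

    static int items;   /* stand-in for "stack->next > stack->base" */

    static unsigned int irq_lock_stub(void)     { return 0; }
    static void irq_unlock_stub(unsigned int k) { (void)k; }

    static void cpu_atomic_idle_stub(unsigned int key)
    {
        /* Pretend an interrupt fires while we sleep and pushes one
         * item. Because re-enabling interrupts and halting happen as
         * one step in the real primitive, this push cannot land in the
         * window before the halt and be missed.
         */
        (void)key;
        items++;
    }

    int main(void)
    {
        unsigned int key = irq_lock_stub();

        while (1) {
            if (items > 0) {            /* data available: stop waiting */
                items--;
                break;
            }
            cpu_atomic_idle_stub(key);  /* wait for an ISR to produce data */
            key = irq_lock_stub();      /* re-lock before re-testing */
        }
        irq_unlock_stub(key);

        printf("popped one item, %d left\n", items);
        return 0;
    }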


@@ -70,9 +70,9 @@ int64_t sys_tick_get(void)
 {
 	int64_t tmp_sys_clock_tick_count;
 	/*
-	 * Lock the interrupts when reading _sys_clock_tick_count 64-bit variable.
-	 * Some architectures (x86) do not handle 64-bit atomically, so
-	 * we have to lock the timer interrupt that causes change of
+	 * Lock the interrupts when reading _sys_clock_tick_count 64-bit
+	 * variable. Some architectures (x86) do not handle 64-bit atomically,
+	 * so we have to lock the timer interrupt that causes change of
 	 * _sys_clock_tick_count
 	 */
 	unsigned int imask = irq_lock();

@@ -116,9 +116,9 @@ static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
 	int64_t saved;
 	/*
-	 * Lock the interrupts when reading _sys_clock_tick_count 64-bit variable.
-	 * Some architectures (x86) do not handle 64-bit atomically, so
-	 * we have to lock the timer interrupt that causes change of
+	 * Lock the interrupts when reading _sys_clock_tick_count 64-bit
+	 * variable. Some architectures (x86) do not handle 64-bit atomically,
+	 * so we have to lock the timer interrupt that causes change of
 	 * _sys_clock_tick_count
 	 */
 	unsigned int imask = irq_lock();
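
The point of that comment is that on 32-bit targets a 64-bit load can take
two instructions, so the timer interrupt must be kept from updating the
counter between the two halves. The standalone sketch below shows the read
pattern with stub lock functions (assumptions for illustration; the real code
uses irq_lock() and irq_unlock()).

    #include <stdint.h>
    #include <stdio.h>

    /* Updated by the timer ISR in the real kernel */
    static volatile int64_t sys_clock_tick_count;

    static unsigned int irq_lock_stub(void)     { return 0; }
    static void irq_unlock_stub(unsigned int k) { (void)k; }

    static int64_t tick_get_sketch(void)
    {
        int64_t snapshot;
        unsigned int imask = irq_lock_stub();

        /* With the timer interrupt masked, both 32-bit halves of the
         * counter are read from the same tick, never a torn mix of two.
         */
        snapshot = sys_clock_tick_count;
        irq_unlock_stub(imask);

        return snapshot;
    }

    int main(void)
    {
        sys_clock_tick_count = 123456789012345LL;
        printf("ticks = %lld\n", (long long)tick_get_sketch());
        return 0;
    }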


@@ -41,13 +41,12 @@ FUNC_ALIAS(_timer_start, nano_timer_start, void);
  * This function starts a previously initialized nanokernel timer object.
  * The timer will expire in <ticks> system clock ticks.
  *
+ * @param timer The Timer to start
+ * @param ticks The number of system ticks before expiration
+ *
  * @return N/A
  */
-void _timer_start(struct nano_timer *timer, /* timer to start */
-		  int ticks /* number of system ticks
-			     * before expiry
-			     */
-		  )
+void _timer_start(struct nano_timer *timer, int ticks)
 {
 	unsigned int imask;
 	struct nano_timer *cur;
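
The doxygen block now states the whole contract: a started timer expires
<ticks> system clock ticks after the call. The standalone sketch below
illustrates just that contract with stand-in types and a polling loop; it is
not the kernel's implementation, which instead links the timer into the
kernel's timer list and lets the tick handler expire it.

    #include <stdint.h>
    #include <stdio.h>

    struct timer_sketch {
        int64_t expiry_tick;   /* absolute tick at which the timer fires */
    };

    static int64_t now_tick;   /* stand-in for the system tick counter */

    static void timer_start_sketch(struct timer_sketch *timer, int ticks)
    {
        timer->expiry_tick = now_tick + ticks;
    }

    static int timer_expired_sketch(const struct timer_sketch *timer)
    {
        return now_tick >= timer->expiry_tick;
    }

    int main(void)
    {
        struct timer_sketch t;

        timer_start_sketch(&t, 10);   /* expire 10 ticks from now */
        while (!timer_expired_sketch(&t)) {
            now_tick++;               /* stand-in for the tick interrupt */
        }
        printf("timer expired at tick %lld\n", (long long)now_tick);
        return 0;
    }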