From ea0d0b220c6fd6603e6b34753a53398456c6b5c4 Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Wed, 1 Jul 2015 17:22:39 -0400 Subject: [PATCH] doxygen: change comment style to match javadoc The change replaces the run of asterisks at the beginning of each comment block with ** and adds a space before the asterisk at the beginning of each line. Change-Id: I7656bde3bf4d9a31e38941e43b580520432dabc1 Signed-off-by: Anas Nashif --- arch/arc/core/atomic.S | 498 +++--- arch/arc/core/cpu_idle.S | 2 +- arch/arc/core/fast_irq.S | 64 +- arch/arc/core/fatal.c | 36 +- arch/arc/core/ffs.S | 46 +- arch/arc/core/irq_lock.S | 88 +- arch/arc/core/isr_wrapper.S | 4 +- arch/arc/core/offsets/offsets.c | 2 +- arch/arc/core/regular_irq.S | 38 +- arch/arc/core/swap.S | 62 +- arch/arc/fatal_error.c | 40 +- arch/arc/include/nano_private.h | 42 +- arch/arc/prep_c.c | 50 +- arch/arc/reset.S | 28 +- arch/arc/vector_table.c | 2 +- arch/arc/vector_table.h | 2 +- arch/arm/bsp/CortexM/nmi.c | 76 +- arch/arm/bsp/CortexM/prep_c.c | 50 +- arch/arm/bsp/CortexM/reset.S | 46 +- arch/arm/bsp/CortexM/scb.c | 44 +- arch/arm/bsp/CortexM/scs.c | 2 +- arch/arm/bsp/CortexM/sw_isr_table.S | 2 +- arch/arm/bsp/CortexM/vector_table.S | 2 +- arch/arm/bsp/CortexM/vector_table.h | 2 +- arch/arm/bsp/sysFatalErrorHandler.c | 40 +- arch/arm/core/atomic.S | 498 +++--- arch/arm/core/basepri.S | 44 +- arch/arm/core/context.c | 58 +- arch/arm/core/cpu_idle.S | 156 +- arch/arm/core/exc_exit.S | 70 +- arch/arm/core/fatal.c | 36 +- arch/arm/core/fault.c | 274 ++-- arch/arm/core/fault_s.S | 46 +- arch/arm/core/ffs.S | 46 +- arch/arm/core/fiber_abort.c | 26 +- arch/arm/core/gdb_stub.S | 108 +- arch/arm/core/gdb_stub_irq_vector_table.c | 2 +- arch/arm/core/irq_init.c | 22 +- arch/arm/core/irq_manage.c | 158 +- arch/arm/core/isr_wrapper.S | 28 +- arch/arm/core/offsets/offsets.c | 2 +- arch/arm/core/swap.S | 116 +- arch/arm/core/task_abort.c | 30 +- arch/arm/fsl_frdm_k64f/board.h | 2 +- arch/arm/fsl_frdm_k64f/irq_vector_table.c | 2 +- arch/arm/fsl_frdm_k64f/linker.cmd | 2 +- arch/arm/fsl_frdm_k64f/nmi_on_reset.S | 2 +- arch/arm/fsl_frdm_k64f/system.c | 70 +- arch/arm/fsl_frdm_k64f/wdog.S | 4 +- arch/arm/include/CortexM/asm_inline_gcc.h | 40 +- arch/arm/include/CortexM/exc.h | 52 +- arch/arm/include/CortexM/stack.h | 24 +- arch/arm/include/nano_private.h | 26 +- arch/arm/include/start_task_arch.h | 2 +- arch/arm/ti_lm3s6965/board.h | 2 +- arch/arm/ti_lm3s6965/irq_vector_table.c | 2 +- arch/arm/ti_lm3s6965/nmi_on_reset.S | 2 +- arch/arm/ti_lm3s6965/scp.c | 14 +- arch/arm/ti_lm3s6965/scp.h | 2 +- arch/arm/ti_lm3s6965/system.c | 54 +- arch/x86/cache.c | 28 +- arch/x86/cache_s.S | 30 +- arch/x86/core/atomic.S | 454 +++--- arch/x86/core/atomic_nolock.c | 250 +-- arch/x86/core/context.c | 174 +-- arch/x86/core/cpuhalt.S | 82 +- arch/x86/core/excconnect.c | 126 +- arch/x86/core/excstub.S | 84 +- arch/x86/core/fatal.c | 32 +- arch/x86/core/ffs.S | 74 +- arch/x86/core/float.c | 304 ++-- arch/x86/core/gdt.c | 2 +- arch/x86/core/intboiexit.S | 26 +- arch/x86/core/intconnect.c | 244 +-- arch/x86/core/inthndlset.c | 62 +- arch/x86/core/intstub.S | 258 ++-- arch/x86/core/msr.S | 90 +- arch/x86/core/offsets/offsets.c | 2 +- arch/x86/core/swap.S | 106 +- arch/x86/core/unaligned.S | 100 +- arch/x86/crt0.S | 2 +- arch/x86/driver_static_irq_stubs.S | 2 +- arch/x86/generic_pc/board.h | 2 +- arch/x86/generic_pc/linker.cmd | 2 +- arch/x86/generic_pc/system.c | 54 +- arch/x86/include/asmPrv.h | 44 +- arch/x86/include/asm_inline_gcc.h | 74 +- arch/x86/include/gdt.h | 2 +- 
arch/x86/include/nano_private.h | 50 +- arch/x86/include/start_task_arch.h | 2 +- arch/x86/include/swapstk.h | 2 +- arch/x86/quark/board.h | 156 +- arch/x86/quark/linker.cmd | 2 +- arch/x86/quark/system.c | 54 +- arch/x86/sys_fatal_error_handler.c | 40 +- arch/x86/task/strtask.c | 14 +- drivers/console/uart_console.c | 8 +- drivers/interrupt_controller/i8259.c | 134 +- drivers/interrupt_controller/i8259_boi.S | 58 +- drivers/interrupt_controller/ioapic_intr.c | 254 +-- drivers/interrupt_controller/loapic_intr.c | 122 +- drivers/interrupt_controller/system_apic.c | 186 +-- drivers/interrupt_controller/system_pic.c | 86 +- drivers/pci/pci.c | 22 +- drivers/pci/pci_config.c | 186 +-- drivers/pci/pci_interface.c | 302 ++-- drivers/random/rand32-timer.c | 6 +- drivers/random/rand32-timestamp.c | 6 +- drivers/serial/k20UartDrv.c | 214 +-- drivers/serial/ns16550.c | 214 +-- drivers/serial/stellarisUartDrv.c | 290 ++-- drivers/timer/arcv2_timer0.c | 146 +- drivers/timer/cortex_m_timer.c | 286 ++-- drivers/timer/cortex_m_timer_gdb.S | 2 +- drivers/timer/hpet.c | 166 +- drivers/timer/i8253.c | 252 +-- drivers/timer/loapic_timer.c | 350 ++--- include/arch/arc/arch.h | 2 +- include/arch/arc/v2/aux_regs.h | 2 +- include/arch/arc/v2/error.h | 2 +- include/arch/arc/v2/exc.h | 2 +- include/arch/arc/v2/ffs.h | 46 +- include/arch/arc/v2/irq.h | 42 +- include/arch/arc/v2/misc.h | 2 +- include/arch/arm/CortexM/asm_inline_gcc.h | 132 +- include/arch/arm/CortexM/error.h | 2 +- include/arch/arm/CortexM/exc.h | 2 +- include/arch/arm/CortexM/ffs.h | 2 +- include/arch/arm/CortexM/gdb_stub.h | 2 +- include/arch/arm/CortexM/irq.h | 6 +- include/arch/arm/CortexM/memory_map-m3-m4.h | 2 +- include/arch/arm/CortexM/memory_map.h | 2 +- include/arch/arm/CortexM/misc.h | 2 +- include/arch/arm/CortexM/nvic.h | 156 +- include/arch/arm/CortexM/scb.h | 1362 ++++++++--------- include/arch/arm/CortexM/scripts/linker.cmd | 2 +- include/arch/arm/CortexM/scs.h | 138 +- include/arch/arm/arch.h | 2 +- include/arch/x86/arch.h | 134 +- include/arch/x86/asm_inline_gcc.h | 296 ++-- include/arch/x86/linker-common-sections.h | 2 +- include/arch/x86/linker-defs-arch.h | 2 +- include/bluetooth/conn.h | 2 +- include/drivers/k20_mcg.h | 2 +- include/drivers/k20_pcr.h | 2 +- include/drivers/k20_sim.h | 2 +- include/drivers/k20_uart.h | 2 +- include/drivers/k20_watchdog.h | 10 +- include/drivers/k6x_mpu.h | 2 +- include/drivers/k6x_pmc.h | 2 +- include/drivers/pci/pci.h | 2 +- include/drivers/pci/pci_mgr.h | 10 +- include/drivers/rand32.h | 2 +- include/drivers/system_timer.h | 2 +- include/linker-defs.h | 2 +- include/linker-tool-gcc.h | 2 +- include/linker-tool.h | 2 +- include/microkernel/command_packet.h | 40 +- include/misc/__assert.h | 2 +- include/misc/dlist.h | 2 +- include/misc/lists_c.h | 4 +- include/misc/util.h | 2 +- include/sections.h | 2 +- include/sw_isr_table.h | 2 +- include/sys_clock.h | 2 +- include/toolchain.h | 2 +- include/toolchain/common.h | 2 +- include/toolchain/gcc.h | 2 +- kernel/microkernel/include/k_pipe_util.h | 4 +- kernel/microkernel/k_command_packet.c | 16 +- kernel/microkernel/k_event.c | 140 +- kernel/microkernel/k_fifo.c | 118 +- kernel/microkernel/k_idle.c | 278 ++-- kernel/microkernel/k_irq.c | 126 +- kernel/microkernel/k_mailbox.c | 254 +-- kernel/microkernel/k_memory_map.c | 104 +- kernel/microkernel/k_memory_pool.c | 214 +-- kernel/microkernel/k_move_data.c | 36 +- kernel/microkernel/k_mutex.c | 106 +- kernel/microkernel/k_nop.c | 30 +- kernel/microkernel/k_offload.c | 42 +- kernel/microkernel/k_pipe.c | 
74 +- kernel/microkernel/k_pipe_buffer.c | 40 +- kernel/microkernel/k_pipe_get.c | 48 +- kernel/microkernel/k_pipe_put.c | 48 +- kernel/microkernel/k_pipe_xfer.c | 114 +- kernel/microkernel/k_semaphore.c | 436 +++--- kernel/microkernel/k_server.c | 44 +- kernel/microkernel/k_task.c | 354 ++--- kernel/microkernel/k_ticker.c | 288 ++-- kernel/microkernel/k_timer.c | 432 +++--- kernel/nanokernel/compiler_stack_protect.c | 32 +- kernel/nanokernel/ctors.c | 18 +- kernel/nanokernel/idle.c | 22 +- kernel/nanokernel/include/gen_offset.h | 2 +- kernel/nanokernel/include/nano_internal.h | 2 +- kernel/nanokernel/int_latency_bench.c | 70 +- kernel/nanokernel/nano_context.c | 210 +-- kernel/nanokernel/nano_fiber.c | 142 +- kernel/nanokernel/nano_fifo.c | 270 ++-- kernel/nanokernel/nano_init.c | 50 +- kernel/nanokernel/nano_lifo.c | 222 +-- kernel/nanokernel/nano_sema.c | 212 +-- kernel/nanokernel/nano_stack.c | 182 +-- kernel/nanokernel/nano_sys_clock.c | 156 +- kernel/nanokernel/nano_timer.c | 240 +-- kernel/nanokernel/version.c | 18 +- lib/libc/minimal/source/stdout/prf.c | 24 +- lib/libc/minimal/source/string/string.c | 120 +- misc/printk.c | 134 +- net/ip/contiki/ip/uip.h | 4 +- net/ip/contiki/ip/uipopt.h | 2 +- net/ip/contiki/ipv6/multicast/README.md | 8 +- net/ip/contiki/ipv6/uip-ds6.h | 2 +- net/ip/contiki/ipv6/uip-nd6.h | 2 +- net/ip/contiki/mac/frame802154.c | 10 +- net/ip/contiki/mac/frame802154.h | 6 +- .../sicslowpan/sicslowpan_compression.h | 2 +- .../sicslowpan/sicslowpan_fragmentation.c | 2 +- samples/include/irq_test_common.h | 12 +- samples/include/util_test_common.h | 2 +- .../microkernel/apps/hello_world/src/hello.c | 12 +- .../apps/philosophers/src/phil_fiber.c | 52 +- .../apps/philosophers/src/phil_task.c | 24 +- .../benchmark/app_kernel/src/event_b.c | 4 +- .../benchmark/app_kernel/src/fifo_b.c | 2 +- .../benchmark/app_kernel/src/fifo_r.c | 2 +- .../benchmark/app_kernel/src/mailbox_b.c | 4 +- .../benchmark/app_kernel/src/mailbox_r.c | 4 +- .../benchmark/app_kernel/src/master.c | 12 +- .../benchmark/app_kernel/src/memmap_b.c | 2 +- .../benchmark/app_kernel/src/mempool_b.c | 2 +- .../benchmark/app_kernel/src/mutex_b.c | 2 +- .../benchmark/app_kernel/src/nop_b.c | 2 +- .../benchmark/app_kernel/src/pipe_b.c | 4 +- .../benchmark/app_kernel/src/pipe_r.c | 4 +- .../benchmark/app_kernel/src/receiver.c | 4 +- .../benchmark/app_kernel/src/sema_b.c | 2 +- .../benchmark/app_kernel/src/sema_r.c | 2 +- .../benchmark/boot_time/src/boot_time.c | 14 +- .../footprint/src/microkernel_footprint.c | 6 +- .../benchmark/latency_measure/src/main.c | 8 +- .../latency_measure/src/micro_int_to_task.c | 44 +- .../src/micro_int_to_task_evt.c | 24 +- .../src/micro_sema_lock_release.c | 4 +- .../src/micro_task_switch_yield.c | 4 +- .../latency_measure/src/nano_ctx_switch.c | 26 +- .../benchmark/latency_measure/src/nano_int.c | 44 +- .../src/nano_int_lock_unlock.c | 2 +- .../latency_measure/src/nano_int_to_fiber.c | 44 +- .../src/nano_int_to_fiber_sem.c | 46 +- .../benchmark/latency_measure/src/raise_int.c | 4 +- .../benchmark/latency_measure/src/utils.c | 6 +- .../benchmark/latency_measure/src/utils.h | 6 +- .../test/test_critical/src/critical.c | 70 +- .../microkernel/test/test_events/src/events.c | 228 +-- .../test/test_events/src/test_fiber.c | 34 +- samples/microkernel/test/test_fifo/src/fifo.c | 182 +-- .../test_fp_sharing/src/float_regs_x86_gcc.h | 122 +- .../test/test_fp_sharing/src/main.c | 28 +- .../microkernel/test/test_fp_sharing/src/pi.c | 26 +- .../test/test_libs/src/libraries.c | 32 +- 
samples/microkernel/test/test_libs/src/main.c | 6 +- samples/microkernel/test/test_mail/src/mail.c | 124 +- samples/microkernel/test/test_mail/src/main.c | 56 +- samples/microkernel/test/test_map/src/map.c | 154 +- .../microkernel/test/test_mutex/src/mutex.c | 90 +- samples/microkernel/test/test_pipe/src/main.c | 56 +- samples/microkernel/test/test_pipe/src/pipe.c | 282 ++-- samples/microkernel/test/test_pool/src/pool.c | 182 +-- .../test/test_rand32/src/test-rand32.c | 2 +- samples/microkernel/test/test_sema/src/main.c | 154 +- samples/microkernel/test/test_sema/src/sema.c | 140 +- .../test/test_sema/src/test_fiber.c | 40 +- .../test/test_sprintf/src/test_sprintf.c | 140 +- .../test/test_stackprot/src/stackprot.c | 100 +- .../test/test_static_idt/src/static_idt.c | 102 +- .../test/test_static_idt/src/test_stubs.S | 2 +- samples/microkernel/test/test_task/src/task.c | 184 +-- .../microkernel/test/test_task_irq/src/main.c | 74 +- .../test/test_task_irq/src/raise_int.c | 4 +- .../test/test_task_irq/src/test_device.c | 42 +- .../test/test_tickless/src/test_tickless.c | 2 +- .../test/test_tickless/src/timestamps.c | 122 +- .../microkernel/test/test_timer/src/timer.c | 96 +- samples/microkernel/test/test_xip/src/test.h | 2 +- .../microkernel/test/test_xip/src/test_xip.c | 30 +- .../test/test_xip/src/test_xip_helper.c | 2 +- .../footprint/src/nanokernel_footprint.c | 8 +- .../benchmark/sys_kernel/src/lifo.c | 10 +- .../benchmark/sys_kernel/src/mwfifo.c | 10 +- .../benchmark/sys_kernel/src/sema.c | 10 +- .../benchmark/sys_kernel/src/stack.c | 10 +- .../benchmark/sys_kernel/src/syskernel.c | 12 +- .../test_arm_m3_irq_vector_table/src/main.c | 50 +- .../test/test_context/src/context.c | 324 ++-- samples/nanokernel/test/test_fifo/src/fifo.c | 194 +-- samples/nanokernel/test/test_lifo/src/lifo.c | 214 +-- samples/nanokernel/test/test_sema/src/sema.c | 210 +-- .../nanokernel/test/test_stack/src/stack.c | 230 +-- .../nanokernel/test/test_timer/src/timer.c | 228 +-- samples/network/listener/src/listener.c | 12 +- samples/network/test_15_4/src/network.c | 12 +- shared/include/nanokernel/x86/idtEnt.h | 44 +- shared/include/nanokernel/x86/segselect.h | 2 +- 305 files changed, 11249 insertions(+), 11249 deletions(-) diff --git a/arch/arc/core/atomic.S b/arch/arc/core/atomic.S index 01a5584a04d..d21d5740146 100644 --- a/arch/arc/core/atomic.S +++ b/arch/arc/core/atomic.S @@ -35,7 +35,7 @@ DESCRIPTION This library provides routines to perform a number of atomic operations on a memory location: add, subtract, increment, decrement, bitwise OR, bitwise NOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap. -*/ + */ #define _ASMLANGUAGE @@ -60,49 +60,49 @@ GTEXT(atomic_cas) .section .TEXT._Atomic, "ax" .balign 2 -/******************************************************************************* -* -* atomic_clear - atomically clear a memory location -* -* This routine atomically clears the contents of and returns the old -* value that was in . -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_clear -* ( -* atomic_t *target /@ memory location to clear @/ -* ) -*/ +/** + * + * atomic_clear - atomically clear a memory location + * + * This routine atomically clears the contents of and returns the old + * value that was in . + * + * This routine can be used from both task and interrupt level. 
+ * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_clear + * ( + * atomic_t *target /@ memory location to clear @/ + * ) + */ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear) mov_s r1, 0 /* fall through into atomic_set */ -/******************************************************************************* -* -* atomic_set - atomically set a memory location -* -* This routine atomically sets the contents of to and returns -* the old value that was in . -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_set -* ( -* atomic_t *target, /@ memory location to set @/ -* atomic_val_t value /@ set with this value @/ -* ) -* -*/ +/** + * + * atomic_set - atomically set a memory location + * + * This routine atomically sets the contents of to and returns + * the old value that was in . + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_set + * ( + * atomic_t *target, /@ memory location to set @/ + * atomic_val_t value /@ set with this value @/ + * ) + * + */ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set) @@ -111,72 +111,72 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set) j_s.d [blink] mov_s r0, r1 /* return old value */ -/****************************************************************************** -* -* atomic_get - Get the value of a shared memory atomically -* -* This routine atomically retrieves the value in *target -* -* atomic_val_t atomic_get -* ( -* atomic_t *target /@ address of atom to be retrieved @/ -* ) -* -* RETURN: value read from address target. -* -*/ +/** + * + * atomic_get - Get the value of a shared memory atomically + * + * This routine atomically retrieves the value in *target + * + * atomic_val_t atomic_get + * ( + * atomic_t *target /@ address of atom to be retrieved @/ + * ) + * + * RETURN: value read from address target. + * + */ SECTION_FUNC(TEXT, atomic_get) ld_s r0, [r0, 0] j_s [blink] -/******************************************************************************* -* -* atomic_inc - atomically increment a memory location -* -* This routine atomically increments the value in . The operation is -* done using unsigned integer arithmetic. Various CPU architectures may impose -* restrictions with regards to the alignment and cache attributes of the -* atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_inc -* ( -* atomic_t *target, /@ memory location to increment @/ -* ) -* -*/ +/** + * + * atomic_inc - atomically increment a memory location + * + * This routine atomically increments the value in . The operation is + * done using unsigned integer arithmetic. Various CPU architectures may impose + * restrictions with regards to the alignment and cache attributes of the + * atomic_t type. + * + * This routine can be used from both task and interrupt level. 
+ * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_inc + * ( + * atomic_t *target, /@ memory location to increment @/ + * ) + * + */ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc) mov_s r1, 1 /* fall through into atomic_add */ -/******************************************************************************* -* -* atomic_add - atomically add a value to a memory location -* -* This routine atomically adds the contents of and , placing -* the result in . The operation is done using signed integer arithmetic. -* Various CPU architectures may impose restrictions with regards to the -* alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_add -* ( -* atomic_t *target, /@ memory location to add to @/ -* atomic_val_t value /@ value to add @/ -* ) -*/ +/** + * + * atomic_add - atomically add a value to a memory location + * + * This routine atomically adds the contents of and , placing + * the result in . The operation is done using signed integer arithmetic. + * Various CPU architectures may impose restrictions with regards to the + * alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_add + * ( + * atomic_t *target, /@ memory location to add to @/ + * atomic_val_t value /@ value to add @/ + * ) + */ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add) @@ -191,54 +191,54 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add) j_s.d [blink] mov_s r0, r2 /* return old value */ -/******************************************************************************* -* -* atomic_dec - atomically decrement a memory location -* -* This routine atomically decrements the value in . The operation is -* done using unsigned integer arithmetic. Various CPU architectures may impose -* restrictions with regards to the alignment and cache attributes of the -* atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_dec -* ( -* atomic_t *target, /@ memory location to decrement @/ -* ) -* -*/ +/** + * + * atomic_dec - atomically decrement a memory location + * + * This routine atomically decrements the value in . The operation is + * done using unsigned integer arithmetic. Various CPU architectures may impose + * restrictions with regards to the alignment and cache attributes of the + * atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_dec + * ( + * atomic_t *target, /@ memory location to decrement @/ + * ) + * + */ SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_dec) mov_s r1, 1 /* fall through into atomic_sub */ -/******************************************************************************* -* -* atomic_sub - atomically subtract a value from a memory location -* -* This routine atomically subtracts from the contents of , -* placing the result in . The operation is done using signed integer -* arithmetic. Various CPU architectures may impose restrictions with regards to -* the alignment and cache attributes of the atomic_t type. 
-* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_sub -* ( -* atomic_t *target, /@ memory location to subtract from @/ -* atomic_val_t value /@ value to subtract @/ -* ) -* -*/ +/** + * + * atomic_sub - atomically subtract a value from a memory location + * + * This routine atomically subtracts from the contents of , + * placing the result in . The operation is done using signed integer + * arithmetic. Various CPU architectures may impose restrictions with regards to + * the alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_sub + * ( + * atomic_t *target, /@ memory location to subtract from @/ + * atomic_val_t value /@ value to subtract @/ + * ) + * + */ SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_sub) @@ -253,28 +253,28 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_sub) j_s.d [blink] mov_s r0, r2 /* return old value */ -/****************************************************************************** -* -* atomic_nand - atomically perform a bitwise NAND on a memory location -* -* This routine atomically performs a bitwise NAND operation of the contents of -* and , placing the result in . -* Various CPU architectures may impose restrictions with regards to the -* alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_nand -* ( -* atomic_t *target, /@ memory location to NAND @/ -* atomic_val_t value /@ NAND with this value @/ -* ) -* -*/ +/** + * + * atomic_nand - atomically perform a bitwise NAND on a memory location + * + * This routine atomically performs a bitwise NAND operation of the contents of + * and , placing the result in . + * Various CPU architectures may impose restrictions with regards to the + * alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_nand + * ( + * atomic_t *target, /@ memory location to NAND @/ + * atomic_val_t value /@ NAND with this value @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_nand) @@ -290,28 +290,28 @@ SECTION_FUNC(TEXT, atomic_nand) j_s.d [blink] mov_s r0, r2 /* return old value */ -/****************************************************************************** -* -* atomic_and - atomically perform a bitwise AND on a memory location -* -* This routine atomically performs a bitwise AND operation of the contents of -* and , placing the result in . -* Various CPU architectures may impose restrictions with regards to the -* alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_and -* ( -* atomic_t *target, /@ memory location to AND @/ -* atomic_val_t value /@ AND with this value @/ -* ) -* -*/ +/** + * + * atomic_and - atomically perform a bitwise AND on a memory location + * + * This routine atomically performs a bitwise AND operation of the contents of + * and , placing the result in . 
+ * Various CPU architectures may impose restrictions with regards to the + * alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_and + * ( + * atomic_t *target, /@ memory location to AND @/ + * atomic_val_t value /@ AND with this value @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_and) @@ -326,28 +326,28 @@ SECTION_FUNC(TEXT, atomic_and) j_s.d [blink] mov_s r0, r2 /* return old value */ -/******************************************************************************* -* -* atomic_or - atomically perform a bitwise OR on memory location -* -* This routine atomically performs a bitwise OR operation of the contents of -* and , placing the result in . -* Various CPU architectures may impose restrictions with regards to the -* alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_or -* ( -* atomic_t *target, /@ memory location to OR @/ -* atomic_val_t value /@ OR with this value @/ -* ) -* -*/ +/** + * + * atomic_or - atomically perform a bitwise OR on memory location + * + * This routine atomically performs a bitwise OR operation of the contents of + * and , placing the result in . + * Various CPU architectures may impose restrictions with regards to the + * alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_or + * ( + * atomic_t *target, /@ memory location to OR @/ + * atomic_val_t value /@ OR with this value @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_or) @@ -362,28 +362,28 @@ SECTION_FUNC(TEXT, atomic_or) j_s.d [blink] mov_s r0, r2 /* return old value */ -/******************************************************************************* -* -* atomic_xor - atomically perform a bitwise XOR on a memory location -* -* This routine atomically performs a bitwise XOR operation of the contents of -* and , placing the result in . -* Various CPU architectures may impose restrictions with regards to the -* alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_xor -* ( -* atomic_t *target, /@ memory location to XOR @/ -* atomic_val_t value /@ XOR with this value @/ -* ) -* -*/ +/** + * + * atomic_xor - atomically perform a bitwise XOR on a memory location + * + * This routine atomically performs a bitwise XOR operation of the contents of + * and , placing the result in . + * Various CPU architectures may impose restrictions with regards to the + * alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. 
+ * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_xor + * ( + * atomic_t *target, /@ memory location to XOR @/ + * atomic_val_t value /@ XOR with this value @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_xor) @@ -398,29 +398,29 @@ SECTION_FUNC(TEXT, atomic_xor) j_s.d [blink] mov_s r0, r2 /* return old value */ -/******************************************************************************* -* -* atomic_cas - atomically compare-and-swap the contents of a memory location -* -* This routine performs an atomic compare-and-swap. testing that the contents of -* contains , and if it does, setting the value of -* to . Various CPU architectures may impose restrictions with regards -* to the alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: 1 if the swap is actually executed, 0 otherwise. -* -* ERRNO: N/A -* -* int atomic_cas -* ( -* atomic_t *target, /@ memory location to compare-and-swap @/ -* atomic_val_t oldValue, /@ compare to this value @/ -* atomic_val_t newValue, /@ swap with this value @/ -* ) -* -*/ +/** + * + * atomic_cas - atomically compare-and-swap the contents of a memory location + * + * This routine performs an atomic compare-and-swap. testing that the contents of + * contains , and if it does, setting the value of + * to . Various CPU architectures may impose restrictions with regards + * to the alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: 1 if the swap is actually executed, 0 otherwise. + * + * ERRNO: N/A + * + * int atomic_cas + * ( + * atomic_t *target, /@ memory location to compare-and-swap @/ + * atomic_val_t oldValue, /@ compare to this value @/ + * atomic_val_t newValue, /@ swap with this value @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_cas) diff --git a/arch/arc/core/cpu_idle.S b/arch/arc/core/cpu_idle.S index a35a3741a22..d11b768fc94 100644 --- a/arch/arc/core/cpu_idle.S +++ b/arch/arc/core/cpu_idle.S @@ -33,7 +33,7 @@ /* DESCRIPTION CPU power management routines. -*/ + */ #define _ASMLANGUAGE diff --git a/arch/arc/core/fast_irq.S b/arch/arc/core/fast_irq.S index a6980ec0025..12c75736e32 100644 --- a/arch/arc/core/fast_irq.S +++ b/arch/arc/core/fast_irq.S @@ -35,7 +35,7 @@ DESCRIPTION This module implements the code for handling entry to and exit from Fast IRQs. See isr_wrapper.S for details. -*/ + */ #define _ASMLANGUAGE @@ -53,25 +53,25 @@ GDATA(_firq_stack) SECTION_VAR(NOINIT, _firq_stack) .space CONFIG_FIRQ_STACK_SIZE -/******************************************************************************* -* -* _firq_enter - work to be done before handing control to a FIRQ ISR -* -* The processor switches to a second register bank so registers from the -* current bank do not have to be preserved yet. The only issue is the LP_START/ -* LP_COUNT/LP_END registers, which are not banked. -* -* If all FIRQ ISRs are programmed such that there are no use of the LP -* registers (ie. no LPcc instruction), then the kernel can be configured to -* remove the use of _firq_enter(). -* -* When entering a FIRQ, interrupts might as well be locked: the processor is -* running at its highest priority, and cannot be preempted by anything. -* -* Assumption by _isr_demux: r3 is untouched by _firq_enter. 
-* -* RETURNS: N/A -*/ +/** + * + * _firq_enter - work to be done before handing control to a FIRQ ISR + * + * The processor switches to a second register bank so registers from the + * current bank do not have to be preserved yet. The only issue is the LP_START/ + * LP_COUNT/LP_END registers, which are not banked. + * + * If all FIRQ ISRs are programmed such that there are no use of the LP + * registers (ie. no LPcc instruction), then the kernel can be configured to + * remove the use of _firq_enter(). + * + * When entering a FIRQ, interrupts might as well be locked: the processor is + * running at its highest priority, and cannot be preempted by anything. + * + * Assumption by _isr_demux: r3 is untouched by _firq_enter. + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _firq_enter) @@ -97,12 +97,12 @@ SECTION_FUNC(TEXT, _firq_enter) j @_isr_demux -/******************************************************************************* -* -* _firq_exit - work to be done exiting a FIRQ -* -* RETURNS: N/A -*/ +/** + * + * _firq_exit - work to be done exiting a FIRQ + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _firq_exit) @@ -256,12 +256,12 @@ _firq_no_reschedule: /* LP registers are already restored, just switch back to bank 0 */ rtie -/******************************************************************************* -* -* _firq_stack_setup - install the FIRQ stack in register bank 1 -* -* RETURNS: N/A -*/ +/** + * + * _firq_stack_setup - install the FIRQ stack in register bank 1 + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _firq_stack_setup) diff --git a/arch/arc/core/fatal.c b/arch/arc/core/fatal.c index 1c8ee88e08e..36999da42c8 100644 --- a/arch/arc/core/fatal.c +++ b/arch/arc/core/fatal.c @@ -34,7 +34,7 @@ DESCRIPTION This module implements the routines necessary for handling fatal faults on ARCv2 CPUs. -*/ + */ #include #include @@ -52,23 +52,23 @@ const NANO_ESF _default_esf = { 0xdeaddead, /* placeholder */ }; -/******************************************************************************* -* -* _NanoFatalErrorHandler - nanokernel fatal error handler -* -* This routine is called when fatal error conditions are detected by software -* and is responsible only for reporting the error. Once reported, it then -* invokes the user provided routine _SysFatalErrorHandler() which is -* responsible for implementing the error handling policy. -* -* The caller is expected to always provide a usable ESF. In the event that the -* fatal error does not have a hardware generated ESF, the caller should either -* create its own or use a pointer to the global default ESF <_default_esf>. -* -* RETURNS: This function does not return. -* -* \NOMANUAL -*/ +/** + * + * _NanoFatalErrorHandler - nanokernel fatal error handler + * + * This routine is called when fatal error conditions are detected by software + * and is responsible only for reporting the error. Once reported, it then + * invokes the user provided routine _SysFatalErrorHandler() which is + * responsible for implementing the error handling policy. + * + * The caller is expected to always provide a usable ESF. In the event that the + * fatal error does not have a hardware generated ESF, the caller should either + * create its own or use a pointer to the global default ESF <_default_esf>. + * + * RETURNS: This function does not return. 
+ * + * \NOMANUAL + */ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf) diff --git a/arch/arc/core/ffs.S b/arch/arc/core/ffs.S index d7aa34c9a07..67c5ab06072 100644 --- a/arch/arc/core/ffs.S +++ b/arch/arc/core/ffs.S @@ -34,7 +34,7 @@ DESCRIPTION This library implements nanoFfsMsb() and nanoFfsLsb() which returns the most and least significant bit set respectively. -*/ + */ #define _ASMLANGUAGE @@ -46,17 +46,17 @@ most and least significant bit set respectively. GTEXT(nanoFfsMsb) GTEXT(nanoFfsLsb) -/******************************************************************************* -* -* nanoFfsMsb - find first set bit (searching from the most significant bit) -* -* This routine finds the first bit set in the argument passed it and -* returns the index of that bit. Bits are numbered starting -* at 1 from the least significant bit. A return value of zero indicates that -* the value passed is zero. -* -* RETURNS: most significant bit set -*/ +/** + * + * nanoFfsMsb - find first set bit (searching from the most significant bit) + * + * This routine finds the first bit set in the argument passed it and + * returns the index of that bit. Bits are numbered starting + * at 1 from the least significant bit. A return value of zero indicates that + * the value passed is zero. + * + * RETURNS: most significant bit set + */ SECTION_FUNC(TEXT, nanoFfsMsb) @@ -69,17 +69,17 @@ SECTION_FUNC(TEXT, nanoFfsMsb) j_s.d [blink] add.nz r0, r0, 1 -/******************************************************************************* -* -* nanoFfsLsb - find first set bit (searching from the least significant bit) -* -* This routine finds the first bit set in the argument passed it and -* returns the index of that bit. Bits are numbered starting -* at 1 from the least significant bit. A return value of zero indicates that -* the value passed is zero. -* -* RETURNS: least significant bit set -*/ +/** + * + * nanoFfsLsb - find first set bit (searching from the least significant bit) + * + * This routine finds the first bit set in the argument passed it and + * returns the index of that bit. Bits are numbered starting + * at 1 from the least significant bit. A return value of zero indicates that + * the value passed is zero. + * + * RETURNS: least significant bit set + */ SECTION_FUNC(TEXT, nanoFfsLsb) diff --git a/arch/arc/core/irq_lock.S b/arch/arc/core/irq_lock.S index cda8819e84e..7d369991990 100644 --- a/arch/arc/core/irq_lock.S +++ b/arch/arc/core/irq_lock.S @@ -40,55 +40,55 @@ #include #include -/******************************************************************************* -* -* irq_lock - disable all interrupts on the local CPU -* -* This routine disables interrupts. It can be called from either interrupt, -* task or fiber level. This routine returns an architecture-dependent -* lock-out key representing the "interrupt disable state" prior to the call; -* this key can be passed to irq_unlock() to re-enable interrupts. -* -* The lock-out key should only be used as the argument to the -* irq_unlock() API. It should never be used to manually re-enable -* interrupts or to inspect or manipulate the contents of the source register. -* -* WARNINGS -* Invoking a kernel routine with interrupts locked may result in -* interrupts being re-enabled for an unspecified period of time. If the -* called routine blocks, interrupts will be re-enabled while another -* context executes, or while the system is idle. -* -* The "interrupt disable state" is an attribute of a context. 
Thus, if a -* fiber or task disables interrupts and subsequently invokes a kernel -* routine that causes the calling context to block, the interrupt -* disable state will be restored when the context is later rescheduled -* for execution. -* -* RETURNS: An architecture-dependent lock-out key representing the -* "interrupt disable state" prior to the call. -* -* \NOMANUAL -*/ +/** + * + * irq_lock - disable all interrupts on the local CPU + * + * This routine disables interrupts. It can be called from either interrupt, + * task or fiber level. This routine returns an architecture-dependent + * lock-out key representing the "interrupt disable state" prior to the call; + * this key can be passed to irq_unlock() to re-enable interrupts. + * + * The lock-out key should only be used as the argument to the + * irq_unlock() API. It should never be used to manually re-enable + * interrupts or to inspect or manipulate the contents of the source register. + * + * WARNINGS + * Invoking a kernel routine with interrupts locked may result in + * interrupts being re-enabled for an unspecified period of time. If the + * called routine blocks, interrupts will be re-enabled while another + * context executes, or while the system is idle. + * + * The "interrupt disable state" is an attribute of a context. Thus, if a + * fiber or task disables interrupts and subsequently invokes a kernel + * routine that causes the calling context to block, the interrupt + * disable state will be restored when the context is later rescheduled + * for execution. + * + * RETURNS: An architecture-dependent lock-out key representing the + * "interrupt disable state" prior to the call. + * + * \NOMANUAL + */ SECTION_FUNC(TEXT, irq_lock) j_s.d [blink] clri r0 -/******************************************************************************* -* -* irq_unlock - enable all interrupts on the local CPU -* -* This routine re-enables interrupts on the local CPU. The parameter -* is an architecture-dependent lock-out key that is returned by a previous -* invocation of irq_lock(). -* -* This routine can be called from either interrupt, task or fiber level. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * irq_unlock - enable all interrupts on the local CPU + * + * This routine re-enables interrupts on the local CPU. The parameter + * is an architecture-dependent lock-out key that is returned by a previous + * invocation of irq_lock(). + * + * This routine can be called from either interrupt, task or fiber level. + * + * RETURNS: N/A + * + * \NOMANUAL + */ SECTION_FUNC(TEXT, irq_unlock) j_s.d [blink] diff --git a/arch/arc/core/isr_wrapper.S b/arch/arc/core/isr_wrapper.S index 3fe32346e32..3bac8e77329 100644 --- a/arch/arc/core/isr_wrapper.S +++ b/arch/arc/core/isr_wrapper.S @@ -35,7 +35,7 @@ DESCRIPTION Wrapper installed in vector table for handling dynamic interrupts that accept a parameter. -*/ + */ #define _ASMLANGUAGE @@ -207,7 +207,7 @@ From RIRQ: Both types of IRQs already have an IRQ stack frame: simply return from interrupt. -*/ + */ SECTION_FUNC(TEXT, _isr_enter) lr r0, [_ARC_V2_AUX_IRQ_ACT] diff --git a/arch/arc/core/offsets/offsets.c b/arch/arc/core/offsets/offsets.c index 510ee953e15..2000f373754 100644 --- a/arch/arc/core/offsets/offsets.c +++ b/arch/arc/core/offsets/offsets.c @@ -46,7 +46,7 @@ Typically, only those members that are accessed by assembly language routines are defined; however, it doesn't hurt to define all fields for the sake of completeness. 
-*/ + */ #include #include diff --git a/arch/arc/core/regular_irq.S b/arch/arc/core/regular_irq.S index e3dd6cf8495..81d2946307f 100644 --- a/arch/arc/core/regular_irq.S +++ b/arch/arc/core/regular_irq.S @@ -36,7 +36,7 @@ This module implements the code for handling entry to and exit from regular IRQs. See isr_wrapper.S for details. -*/ + */ #define _ASMLANGUAGE @@ -49,18 +49,18 @@ See isr_wrapper.S for details. GTEXT(_rirq_enter) GTEXT(_rirq_exit) -/******************************************************************************* -* -* _rirq_enter - work to be done before handing control to an IRQ ISR -* -* The processor pushes automatically all registers that need to be saved. -* However, since the processor always runs at kernel privilege there is no -* automatic switch to the IRQ stack: this must be done in software. -* -* Assumption by _isr_demux: r3 is untouched by _rirq_enter. -* -* RETURNS: N/A -*/ +/** + * + * _rirq_enter - work to be done before handing control to an IRQ ISR + * + * The processor pushes automatically all registers that need to be saved. + * However, since the processor always runs at kernel privilege there is no + * automatic switch to the IRQ stack: this must be done in software. + * + * Assumption by _isr_demux: r3 is untouched by _rirq_enter. + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _rirq_enter) @@ -75,12 +75,12 @@ SECTION_FUNC(TEXT, _rirq_enter) j _isr_demux -/******************************************************************************* -* -* _rirq_exit - work to be done exiting an IRQ -* -* RETURNS: N/A -*/ +/** + * + * _rirq_exit - work to be done exiting an IRQ + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _rirq_exit) diff --git a/arch/arc/core/swap.S b/arch/arc/core/swap.S index c6c91d3310e..1641645d8a3 100644 --- a/arch/arc/core/swap.S +++ b/arch/arc/core/swap.S @@ -36,7 +36,7 @@ This module implements the routines necessary for thread context switching on ARCv2 CPUs. See isr_wrapper.S for details. -*/ + */ #define _ASMLANGUAGE @@ -51,37 +51,37 @@ GTEXT(_Swap) GDATA(_nanokernel) -/******************************************************************************* -* -* _Swap - initiate a cooperative context switch -* -* The _Swap() routine is invoked by various nanokernel services to effect -* a cooperative context context switch. Prior to invoking _Swap(), the caller -* disables interrupts via nanoCpuIntLock() and the return 'key' is passed as a -* parameter to _Swap(). The key is in fact the value stored in the register -* operand of a CLRI instruction. -* -* It stores the intlock key parameter into current->intlock_key. +/** + * + * _Swap - initiate a cooperative context switch + * + * The _Swap() routine is invoked by various nanokernel services to effect + * a cooperative context context switch. Prior to invoking _Swap(), the caller + * disables interrupts via nanoCpuIntLock() and the return 'key' is passed as a + * parameter to _Swap(). The key is in fact the value stored in the register + * operand of a CLRI instruction. + * + * It stores the intlock key parameter into current->intlock_key. -* Given that _Swap() is called to effect a cooperative context context switch, -* the caller-saved integer registers are saved on the stack by the function -* call preamble to _Swap(). This creates a custom stack frame that will be -* popped when returning from _Swap(), but is not suitable for handling a return -* from an exception. 
Thus, the fact that the thread is pending because of a -* cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in -* the relinquish_cause of the context's tCCS. The _IrqExit()/_FirqExit() code -* will take care of doing the right thing to restore the thread status. -* -* When _Swap() is invoked, we know the decision to perform a context switch or -* not has already been taken and a context switch must happen. -* -* RETURNS: may contain a return value setup by a call to fiberRtnValueSet() -* -* C function prototype: -* -* unsigned int _Swap (unsigned int key); -* -*/ + * Given that _Swap() is called to effect a cooperative context context switch, + * the caller-saved integer registers are saved on the stack by the function + * call preamble to _Swap(). This creates a custom stack frame that will be + * popped when returning from _Swap(), but is not suitable for handling a return + * from an exception. Thus, the fact that the thread is pending because of a + * cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in + * the relinquish_cause of the context's tCCS. The _IrqExit()/_FirqExit() code + * will take care of doing the right thing to restore the thread status. + * + * When _Swap() is invoked, we know the decision to perform a context switch or + * not has already been taken and a context switch must happen. + * + * RETURNS: may contain a return value setup by a call to fiberRtnValueSet() + * + * C function prototype: + * + * unsigned int _Swap (unsigned int key); + * + */ SECTION_FUNC(TEXT, _Swap) diff --git a/arch/arc/fatal_error.c b/arch/arc/fatal_error.c index 6b2b5f8bfc6..e391c56c307 100644 --- a/arch/arc/fatal_error.c +++ b/arch/arc/fatal_error.c @@ -33,7 +33,7 @@ /* DESCRIPTION This module provides the _SysFatalErrorHandler() routine for ARCv2 BSPs. -*/ + */ #include #include @@ -61,25 +61,25 @@ static inline void nonEssentialTaskAbort(void) } while ((0)) #endif -/******************************************************************************* -* -* _SysFatalErrorHandler - fatal error handler -* -* This routine implements the corrective action to be taken when the system -* detects a fatal error. -* -* This sample implementation attempts to abort the current context and allow -* the system to continue executing, which may permit the system to continue -* functioning with degraded capabilities. -* -* System designers may wish to enhance or substitute this sample -* implementation to take other actions, such as logging error (or debug) -* information to a persistent repository and/or rebooting the system. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _SysFatalErrorHandler - fatal error handler + * + * This routine implements the corrective action to be taken when the system + * detects a fatal error. + * + * This sample implementation attempts to abort the current context and allow + * the system to continue executing, which may permit the system to continue + * functioning with degraded capabilities. + * + * System designers may wish to enhance or substitute this sample + * implementation to take other actions, such as logging error (or debug) + * information to a persistent repository and/or rebooting the system. 
+ * + * RETURNS: N/A + * + * \NOMANUAL + */ void _SysFatalErrorHandler( unsigned int reason, /* fatal error reason */ diff --git a/arch/arc/include/nano_private.h b/arch/arc/include/nano_private.h index bbc97282efa..4789d11b714 100644 --- a/arch/arc/include/nano_private.h +++ b/arch/arc/include/nano_private.h @@ -39,7 +39,7 @@ This file is also included by assembly language files which must #define _ASMLANGUAGE before including this header file. Note that nanokernel assembly source files obtains structure offset values via "absolute symbols" in the offsets.o module. -*/ + */ #ifndef _NANO_PRIVATE_H #define _NANO_PRIVATE_H @@ -238,32 +238,32 @@ static ALWAYS_INLINE void nanoArchInit(void) _irq_setup(); } -/******************************************************************************* -* -* fiberRtnValueSet - set the return value for the specified fiber (inline) -* -* The register used to store the return value from a function call invocation -* to . It is assumed that the specified is pending, and thus -* the fiber's context is stored in its tCCS structure. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * fiberRtnValueSet - set the return value for the specified fiber (inline) + * + * The register used to store the return value from a function call invocation + * to . It is assumed that the specified is pending, and thus + * the fiber's context is stored in its tCCS structure. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void fiberRtnValueSet(tCCS *fiber, unsigned int value) { fiber->return_value = value; } -/******************************************************************************* -* -* _IS_IN_ISR - indicates if kernel is handling interrupt -* -* RETURNS: 1 if interrupt handler is executed, 0 otherwise -* -* \NOMANUAL -*/ +/** + * + * _IS_IN_ISR - indicates if kernel is handling interrupt + * + * RETURNS: 1 if interrupt handler is executed, 0 otherwise + * + * \NOMANUAL + */ static ALWAYS_INLINE int _IS_IN_ISR(void) { diff --git a/arch/arc/prep_c.c b/arch/arc/prep_c.c index 468e08c1c84..e228cf7f452 100644 --- a/arch/arc/prep_c.c +++ b/arch/arc/prep_c.c @@ -38,20 +38,20 @@ call _Cstart(). Stack is available in this module, but not the global data/bss until their initialization is performed. -*/ + */ #include #include #include -/******************************************************************************* -* -* bssZero - clear BSS -* -* This routine clears the BSS region, so all bytes are 0. -* -* RETURNS: N/A -*/ +/** + * + * bssZero - clear BSS + * + * This routine clears the BSS region, so all bytes are 0. + * + * RETURNS: N/A + */ static void bssZero(void) { @@ -63,14 +63,14 @@ static void bssZero(void) } } -/******************************************************************************* -* -* dataCopy - copy the data section from ROM to RAM -* -* This routine copies the data section from ROM to RAM. -* -* RETURNS: N/A -*/ +/** + * + * dataCopy - copy the data section from ROM to RAM + * + * This routine copies the data section from ROM to RAM. + * + * RETURNS: N/A + */ #ifdef CONFIG_XIP static void dataCopy(void) @@ -90,14 +90,14 @@ static void dataCopy(void) #endif extern FUNC_NORETURN void _Cstart(void); -/******************************************************************************* -* -* _PrepC - prepare to and run C code -* -* This routine prepares for the execution of and runs C code. -* -* RETURNS: N/A -*/ +/** + * + * _PrepC - prepare to and run C code + * + * This routine prepares for the execution of and runs C code. 
+ * + * RETURNS: N/A + */ void _PrepC(void) { diff --git a/arch/arc/reset.S b/arch/arc/reset.S index 2da87516f42..5a38f438b70 100644 --- a/arch/arc/reset.S +++ b/arch/arc/reset.S @@ -33,7 +33,7 @@ /* DESCRIPTION Reset handler that prepares the system for running C code. -*/ + */ #define _ASMLANGUAGE @@ -46,19 +46,19 @@ Reset handler that prepares the system for running C code. GTEXT(__reset) -/******************************************************************************* -* -* __reset - reset vector -* -* Ran when the system comes out of reset. The processor is at supervisor level. -* -* Locking interrupts prevents anything from interrupting the CPU. -* -* When these steps are completed, jump to _PrepC(), which will finish setting -* up the system for running C code. -* -* RETURNS: N/A -*/ +/** + * + * __reset - reset vector + * + * Ran when the system comes out of reset. The processor is at supervisor level. + * + * Locking interrupts prevents anything from interrupting the CPU. + * + * When these steps are completed, jump to _PrepC(), which will finish setting + * up the system for running C code. + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT,__reset) diff --git a/arch/arc/vector_table.c b/arch/arc/vector_table.c index 50c9b5e4620..6a4b9f3ea95 100644 --- a/arch/arc/vector_table.c +++ b/arch/arc/vector_table.c @@ -45,7 +45,7 @@ to work around an issue with the assembler where: statements would end up with the two half-words of the functions' addresses swapped. -*/ + */ #include #include diff --git a/arch/arc/vector_table.h b/arch/arc/vector_table.h index 28f12851f07..e5ff3c9c335 100644 --- a/arch/arc/vector_table.h +++ b/arch/arc/vector_table.h @@ -40,7 +40,7 @@ System exception handler names all have the same format: __ Refer to the ARCv2 manual for an explanation of the exceptions. -*/ + */ #ifndef _VECTOR_TABLE__H_ #define _VECTOR_TABLE__H_ diff --git a/arch/arm/bsp/CortexM/nmi.c b/arch/arm/bsp/CortexM/nmi.c index c767543fab8..81e27892ed3 100644 --- a/arch/arm/bsp/CortexM/nmi.c +++ b/arch/arm/bsp/CortexM/nmi.c @@ -35,7 +35,7 @@ DESCRIPTION Provides a boot time handler that simply hangs in a sleep loop, and a run time handler that resets the CPU. Also provides a mechanism for hooking a custom run time handler. -*/ + */ #include #include @@ -52,15 +52,15 @@ extern void _SysNmiOnReset(void); typedef void (*_NmiHandler_t)(void); static _NmiHandler_t handler = _SysNmiOnReset; -/******************************************************************************* -* -* _DefaultHandler - default NMI handler installed when kernel is up -* -* The default handler outputs a error message and reboots the target. It is -* installed by calling _NmiInit(); -* -* RETURNS: N/A -*/ +/** + * + * _DefaultHandler - default NMI handler installed when kernel is up + * + * The default handler outputs a error message and reboots the target. It is + * installed by calling _NmiInit(); + * + * RETURNS: N/A + */ static void _DefaultHandler(void) { @@ -68,32 +68,32 @@ static void _DefaultHandler(void) _ScbSystemReset(); } -/******************************************************************************* -* -* _NmiInit - install default runtime NMI handler -* -* Meant to be called by BSP code if they want to install a simple NMI handler -* that reboots the target. It should be installed after the console is -* initialized. -* -* RETURNS: N/A -*/ +/** + * + * _NmiInit - install default runtime NMI handler + * + * Meant to be called by BSP code if they want to install a simple NMI handler + * that reboots the target. 
It should be installed after the console is + * initialized. + * + * RETURNS: N/A + */ void _NmiInit(void) { handler = _DefaultHandler; } -/******************************************************************************* -* -* _NmiHandlerSet - install a custom runtime NMI handler -* -* Meant to be called by BSP code if they want to install a custom NMI handler -* that reboots. It should be installed after the console is initialized if it is -* meant to output to the console. -* -* RETURNS: N/A -*/ +/** + * + * _NmiHandlerSet - install a custom runtime NMI handler + * + * Meant to be called by BSP code if they want to install a custom NMI handler + * that reboots. It should be installed after the console is initialized if it is + * meant to output to the console. + * + * RETURNS: N/A + */ void _NmiHandlerSet(void (*pHandler)(void)) { @@ -101,14 +101,14 @@ void _NmiHandlerSet(void (*pHandler)(void)) } #endif /* CONFIG_RUNTIME_NMI */ -/******************************************************************************* -* -* __nmi - handler installed in the vector table -* -* Simply call what is installed in 'static void(*handler)(void)'. -* -* RETURNS: N/A -*/ +/** + * + * __nmi - handler installed in the vector table + * + * Simply call what is installed in 'static void(*handler)(void)'. + * + * RETURNS: N/A + */ void __nmi(void) { diff --git a/arch/arm/bsp/CortexM/prep_c.c b/arch/arm/bsp/CortexM/prep_c.c index 1ceec3800bf..d8a1cfa3790 100644 --- a/arch/arm/bsp/CortexM/prep_c.c +++ b/arch/arm/bsp/CortexM/prep_c.c @@ -38,20 +38,20 @@ call _Cstart(). Stack is available in this module, but not the global data/bss until their initialization is performed. -*/ + */ #include #include #include -/******************************************************************************* -* -* bssZero - clear BSS -* -* This routine clears the BSS region, so all bytes are 0. -* -* RETURNS: N/A -*/ +/** + * + * bssZero - clear BSS + * + * This routine clears the BSS region, so all bytes are 0. + * + * RETURNS: N/A + */ static void bssZero(void) { @@ -63,14 +63,14 @@ static void bssZero(void) } } -/******************************************************************************* -* -* dataCopy - copy the data section from ROM to RAM -* -* This routine copies the data section from ROM to RAM. -* -* RETURNS: N/A -*/ +/** + * + * dataCopy - copy the data section from ROM to RAM + * + * This routine copies the data section from ROM to RAM. + * + * RETURNS: N/A + */ #ifdef CONFIG_XIP static void dataCopy(void) @@ -90,14 +90,14 @@ static void dataCopy(void) #endif extern FUNC_NORETURN void _Cstart(void); -/******************************************************************************* -* -* _PrepC - prepare to and run C code -* -* This routine prepares for the execution of and runs C code. -* -* RETURNS: N/A -*/ +/** + * + * _PrepC - prepare to and run C code + * + * This routine prepares for the execution of and runs C code. + * + * RETURNS: N/A + */ void _PrepC(void) { diff --git a/arch/arm/bsp/CortexM/reset.S b/arch/arm/bsp/CortexM/reset.S index bff1d3a2ad0..8029bf224ec 100644 --- a/arch/arm/bsp/CortexM/reset.S +++ b/arch/arm/bsp/CortexM/reset.S @@ -33,7 +33,7 @@ /* DESCRIPTION Reset handler that prepares the system for running C code. -*/ + */ #define _ASMLANGUAGE @@ -47,28 +47,28 @@ _ASM_FILE_PROLOGUE GTEXT(__reset) -/******************************************************************************* -* -* __reset - reset vector -* -* Ran when the system comes out of reset. 
The processor is in thread mode with -* privileged level. At this point, the main stack pointer (MSP) is already -* pointing to a valid area in SRAM. -* -* Locking interrupts prevents anything but NMIs and hard faults from -* interrupting the CPU. A default NMI handler is already in place in the -* vector table, and the boot code should not generate hard fault, or we're in -* deep trouble. -* -* We want to use the process stack pointer (PSP) instead of the MSP, since the -* MSP is to be set up to point to the one-and-only interrupt stack during later -* boot. That would not be possible if in use for running C code. -* -* When these steps are completed, jump to _PrepC(), which will finish setting -* up the system for running C code. -* -* RETURNS: N/A -*/ +/** + * + * __reset - reset vector + * + * Ran when the system comes out of reset. The processor is in thread mode with + * privileged level. At this point, the main stack pointer (MSP) is already + * pointing to a valid area in SRAM. + * + * Locking interrupts prevents anything but NMIs and hard faults from + * interrupting the CPU. A default NMI handler is already in place in the + * vector table, and the boot code should not generate hard fault, or we're in + * deep trouble. + * + * We want to use the process stack pointer (PSP) instead of the MSP, since the + * MSP is to be set up to point to the one-and-only interrupt stack during later + * boot. That would not be possible if in use for running C code. + * + * When these steps are completed, jump to _PrepC(), which will finish setting + * up the system for running C code. + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT,__reset) diff --git a/arch/arm/bsp/CortexM/scb.c b/arch/arm/bsp/CortexM/scb.c index 7ef417dc89c..6999645c431 100644 --- a/arch/arm/bsp/CortexM/scb.c +++ b/arch/arm/bsp/CortexM/scb.c @@ -36,7 +36,7 @@ DESCRIPTION Most of the SCB interface consists of simple bit-flipping methods, and is implemented as inline functions in scb.h. This module thus contains only data definitions and more complex routines, if needed. -*/ + */ #include #include @@ -44,14 +44,14 @@ definitions and more complex routines, if needed. #define SCB_AIRCR_VECTKEY_EN_W 0x05FA -/******************************************************************************* -* -* _ScbSystemReset - reset the system -* -* This routine resets the processor. -* -* RETURNS: N/A -*/ +/** + * + * _ScbSystemReset - reset the system + * + * This routine resets the processor. + * + * RETURNS: N/A + */ void _ScbSystemReset(void) { @@ -63,19 +63,19 @@ void _ScbSystemReset(void) __scs.scb.aircr.val = reg.val; } -/******************************************************************************* -* -* _ScbNumPriGroupSet - set the number of priority groups based on the number -* of exception priorities desired -* -* Exception priorities can be divided in priority groups, inside which there is -* no preemption. The priorities inside a group are only used to decide which -* exception will run when more than one is ready to be handled. -* -* The number of priorities has to be a power of two, from 1 to 128. -* -* RETURNS: N/A -*/ +/** + * + * _ScbNumPriGroupSet - set the number of priority groups based on the number + * of exception priorities desired + * + * Exception priorities can be divided in priority groups, inside which there is + * no preemption. The priorities inside a group are only used to decide which + * exception will run when more than one is ready to be handled. 
+ * + * The number of priorities has to be a power of two, from 1 to 128. + * + * RETURNS: N/A + */ void _ScbNumPriGroupSet(unsigned int n /* number of priorities */ ) diff --git a/arch/arm/bsp/CortexM/scs.c b/arch/arm/bsp/CortexM/scs.c index 40753c018e3..b28605cf3ea 100644 --- a/arch/arm/bsp/CortexM/scs.c +++ b/arch/arm/bsp/CortexM/scs.c @@ -35,7 +35,7 @@ DESCRIPTION Most of the SCS interface consists of simple bit-flipping methods, and is implemented as inline functions in scs.h. This module thus contains only data definitions and more complex routines, if needed. -*/ + */ #include #include diff --git a/arch/arm/bsp/CortexM/sw_isr_table.S b/arch/arm/bsp/CortexM/sw_isr_table.S index b50bd9c50e7..129f498db04 100644 --- a/arch/arm/bsp/CortexM/sw_isr_table.S +++ b/arch/arm/bsp/CortexM/sw_isr_table.S @@ -33,7 +33,7 @@ /* DESCRIPTION Software ISR table for ARM -*/ + */ #define _ASMLANGUAGE diff --git a/arch/arm/bsp/CortexM/vector_table.S b/arch/arm/bsp/CortexM/vector_table.S index f3ed7e496fc..9dcd88365c9 100644 --- a/arch/arm/bsp/CortexM/vector_table.S +++ b/arch/arm/bsp/CortexM/vector_table.S @@ -38,7 +38,7 @@ point, ie. the first instruction executed. The table is populated with all the system exception handlers. The NMI vector must be populated with a valid handler since it can happen at any time. The rest should not be triggered until the kernel is ready to handle them. -*/ + */ #define _ASMLANGUAGE diff --git a/arch/arm/bsp/CortexM/vector_table.h b/arch/arm/bsp/CortexM/vector_table.h index d5415bd1019..9412648fb38 100644 --- a/arch/arm/bsp/CortexM/vector_table.h +++ b/arch/arm/bsp/CortexM/vector_table.h @@ -40,7 +40,7 @@ System exception handler names all have the same format: __ No other symbol has the same format, so they are easy to spot. -*/ + */ #ifndef _VECTOR_TABLE__H_ #define _VECTOR_TABLE__H_ diff --git a/arch/arm/bsp/sysFatalErrorHandler.c b/arch/arm/bsp/sysFatalErrorHandler.c index fcab06f6692..2c8fc06cad3 100644 --- a/arch/arm/bsp/sysFatalErrorHandler.c +++ b/arch/arm/bsp/sysFatalErrorHandler.c @@ -33,7 +33,7 @@ /* DESCRIPTION This module provides the _SysFatalErrorHandler() routine for Cortex-M BSPs. -*/ + */ #include #include @@ -61,25 +61,25 @@ static inline void nonEssentialTaskAbort(void) } while ((0)) #endif -/******************************************************************************* -* -* _SysFatalErrorHandler - fatal error handler -* -* This routine implements the corrective action to be taken when the system -* detects a fatal error. -* -* This sample implementation attempts to abort the current context and allow -* the system to continue executing, which may permit the system to continue -* functioning with degraded capabilities. -* -* System designers may wish to enhance or substitute this sample -* implementation to take other actions, such as logging error (or debug) -* information to a persistent repository and/or rebooting the system. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _SysFatalErrorHandler - fatal error handler + * + * This routine implements the corrective action to be taken when the system + * detects a fatal error. + * + * This sample implementation attempts to abort the current context and allow + * the system to continue executing, which may permit the system to continue + * functioning with degraded capabilities. + * + * System designers may wish to enhance or substitute this sample + * implementation to take other actions, such as logging error (or debug) + * information to a persistent repository and/or rebooting the system. 
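A substituted fatal error handler of the kind suggested above might look roughly like the sketch below. The second parameter is shown as a generic pointer standing in for the ESF pointer type referenced elsewhere in this patch, and logErrorToFlash() is a hypothetical persistence hook.

extern void _ScbSystemReset(void);
extern void logErrorToFlash(unsigned int reason, const void *esf);  /* hypothetical */

void _SysFatalErrorHandler(unsigned int reason, /* fatal error reason */
			   const void *pEsf)    /* assumed: pointer to the ESF */
{
	logErrorToFlash(reason, pEsf);  /* persist error (or debug) information */
	_ScbSystemReset();              /* then reboot the system */
}
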
+ * + * RETURNS: N/A + * + * \NOMANUAL + */ void _SysFatalErrorHandler( unsigned int reason, /* fatal error reason */ diff --git a/arch/arm/core/atomic.S b/arch/arm/core/atomic.S index 56a745b41b1..815e86c8c6c 100644 --- a/arch/arm/core/atomic.S +++ b/arch/arm/core/atomic.S @@ -35,7 +35,7 @@ DESCRIPTION This library provides routines to perform a number of atomic operations on a memory location: add, subtract, increment, decrement, bitwise OR, bitwise NOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap. -*/ + */ #define _ASMLANGUAGE @@ -59,49 +59,49 @@ GTEXT(atomic_inc) GTEXT(atomic_sub) GTEXT(atomic_cas) -/******************************************************************************* -* -* atomic_clear - atomically clear a memory location -* -* This routine atomically clears the contents of and returns the old -* value that was in . -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_clear -* ( -* atomic_t *target /@ memory location to clear @/ -* ) -*/ +/** + * + * atomic_clear - atomically clear a memory location + * + * This routine atomically clears the contents of and returns the old + * value that was in . + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_clear + * ( + * atomic_t *target /@ memory location to clear @/ + * ) + */ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear) MOV r1, #0 /* fall through into atomic_set */ -/******************************************************************************* -* -* atomic_set - atomically set a memory location -* -* This routine atomically sets the contents of to and returns -* the old value that was in . -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_set -* ( -* atomic_t *target, /@ memory location to set @/ -* atomic_val_t value /@ set with this value @/ -* ) -* -*/ +/** + * + * atomic_set - atomically set a memory location + * + * This routine atomically sets the contents of to and returns + * the old value that was in . + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_set + * ( + * atomic_t *target, /@ memory location to set @/ + * atomic_val_t value /@ set with this value @/ + * ) + * + */ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set) @@ -114,72 +114,72 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set) MOV r0, r2 /* return old value */ MOV pc, lr -/****************************************************************************** -* -* atomic_get - Get the value of a shared memory atomically -* -* This routine atomically retrieves the value in *target -* -* long atomic_get -* ( -* atomic_t * target /@ address of atom to be retrieved @/ -* ) -* -* RETURN: value read from address target. -* -*/ +/** + * + * atomic_get - Get the value of a shared memory atomically + * + * This routine atomically retrieves the value in *target + * + * long atomic_get + * ( + * atomic_t * target /@ address of atom to be retrieved @/ + * ) + * + * RETURN: value read from address target. 
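Taken together, the atomic_clear(), atomic_set() and atomic_get() routines documented above support a simple flag pattern along these lines; a sketch only, with the flag and the two functions purely illustrative, and atomic_t/atomic_val_t coming from the existing atomic API headers.

static atomic_t ready_flag = 0;

void producer_signal(void)
{
	(void)atomic_set(&ready_flag, 1);     /* publish; old value not needed */
}

int consumer_poll(void)
{
	if (atomic_get(&ready_flag) != 0) {
		(void)atomic_clear(&ready_flag);  /* consume and reset the flag */
		return 1;
	}
	return 0;
}
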
+ * + */ SECTION_FUNC(TEXT, atomic_get) LDR r0, [r0] MOV pc, lr -/******************************************************************************* -* -* atomic_inc - atomically increment a memory location -* -* This routine atomically increments the value in . The operation is -* done using unsigned integer arithmetic. Various CPU architectures may impose -* restrictions with regards to the alignment and cache attributes of the -* atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_inc -* ( -* atomic_t *target, /@ memory location to increment @/ -* ) -* -*/ +/** + * + * atomic_inc - atomically increment a memory location + * + * This routine atomically increments the value in . The operation is + * done using unsigned integer arithmetic. Various CPU architectures may impose + * restrictions with regards to the alignment and cache attributes of the + * atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_inc + * ( + * atomic_t *target, /@ memory location to increment @/ + * ) + * + */ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc) MOV r1, #1 /* fall through into atomic_add */ -/******************************************************************************* -* -* atomic_add - atomically add a value to a memory location -* -* This routine atomically adds the contents of and , placing -* the result in . The operation is done using signed integer arithmetic. -* Various CPU architectures may impose restrictions with regards to the -* alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_add -* ( -* atomic_t *target, /@ memory location to add to @/ -* atomic_val_t value /@ value to add @/ -* ) -*/ +/** + * + * atomic_add - atomically add a value to a memory location + * + * This routine atomically adds the contents of and , placing + * the result in . The operation is done using signed integer arithmetic. + * Various CPU architectures may impose restrictions with regards to the + * alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_add + * ( + * atomic_t *target, /@ memory location to add to @/ + * atomic_val_t value /@ value to add @/ + * ) + */ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add) @@ -193,54 +193,54 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add) MOV r0, r2 /* return old value */ MOV pc, lr -/******************************************************************************* -* -* atomic_dec - atomically decrement a memory location -* -* This routine atomically decrements the value in . The operation is -* done using unsigned integer arithmetic. Various CPU architectures may impose -* restrictions with regards to the alignment and cache attributes of the -* atomic_t type. -* -* This routine can be used from both task and interrupt level. 
-* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_dec -* ( -* atomic_t *target, /@ memory location to decrement @/ -* ) -* -*/ +/** + * + * atomic_dec - atomically decrement a memory location + * + * This routine atomically decrements the value in . The operation is + * done using unsigned integer arithmetic. Various CPU architectures may impose + * restrictions with regards to the alignment and cache attributes of the + * atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_dec + * ( + * atomic_t *target, /@ memory location to decrement @/ + * ) + * + */ SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_dec) MOV r1, #1 /* fall through into atomic_sub */ -/******************************************************************************* -* -* atomic_sub - atomically subtract a value from a memory location -* -* This routine atomically subtracts from the contents of , -* placing the result in . The operation is done using signed integer -* arithmetic. Various CPU architectures may impose restrictions with regards to -* the alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_sub -* ( -* atomic_t *target, /@ memory location to subtract from @/ -* atomic_val_t value /@ value to subtract @/ -* ) -* -*/ +/** + * + * atomic_sub - atomically subtract a value from a memory location + * + * This routine atomically subtracts from the contents of , + * placing the result in . The operation is done using signed integer + * arithmetic. Various CPU architectures may impose restrictions with regards to + * the alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_sub + * ( + * atomic_t *target, /@ memory location to subtract from @/ + * atomic_val_t value /@ value to subtract @/ + * ) + * + */ SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub) @@ -253,28 +253,28 @@ SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub) MOV r0, r2 /* return old value */ MOV pc, lr -/****************************************************************************** -* -* atomic_nand - atomically perform a bitwise NAND on a memory location -* -* This routine atomically performs a bitwise NAND operation of the contents of -* and , placing the result in . -* Various CPU architectures may impose restrictions with regards to the -* alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_nand -* ( -* atomic_t *target, /@ memory location to NAND @/ -* atomic_val_t value /@ NAND with this value @/ -* ) -* -*/ +/** + * + * atomic_nand - atomically perform a bitwise NAND on a memory location + * + * This routine atomically performs a bitwise NAND operation of the contents of + * and , placing the result in . + * Various CPU architectures may impose restrictions with regards to the + * alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. 
+ * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_nand + * ( + * atomic_t *target, /@ memory location to NAND @/ + * atomic_val_t value /@ NAND with this value @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_nand) @@ -288,28 +288,28 @@ SECTION_FUNC(TEXT, atomic_nand) MOV r0, r2 /* return old value */ MOV pc, lr -/****************************************************************************** -* -* atomic_and - atomically perform a bitwise AND on a memory location -* -* This routine atomically performs a bitwise AND operation of the contents of -* and , placing the result in . -* Various CPU architectures may impose restrictions with regards to the -* alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_and -* ( -* atomic_t *target, /@ memory location to AND @/ -* atomic_val_t value /@ AND with this value @/ -* ) -* -*/ +/** + * + * atomic_and - atomically perform a bitwise AND on a memory location + * + * This routine atomically performs a bitwise AND operation of the contents of + * and , placing the result in . + * Various CPU architectures may impose restrictions with regards to the + * alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_and + * ( + * atomic_t *target, /@ memory location to AND @/ + * atomic_val_t value /@ AND with this value @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_and) @@ -322,28 +322,28 @@ SECTION_FUNC(TEXT, atomic_and) MOV r0, r2 /* return old value */ MOV pc, lr -/******************************************************************************* -* -* atomic_or - atomically perform a bitwise OR on memory location -* -* This routine atomically performs a bitwise OR operation of the contents of -* and , placing the result in . -* Various CPU architectures may impose restrictions with regards to the -* alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_or -* ( -* atomic_t *target, /@ memory location to OR @/ -* atomic_val_t value /@ OR with this value @/ -* ) -* -*/ +/** + * + * atomic_or - atomically perform a bitwise OR on memory location + * + * This routine atomically performs a bitwise OR operation of the contents of + * and , placing the result in . + * Various CPU architectures may impose restrictions with regards to the + * alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_or + * ( + * atomic_t *target, /@ memory location to OR @/ + * atomic_val_t value /@ OR with this value @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_or) @@ -356,28 +356,28 @@ SECTION_FUNC(TEXT, atomic_or) MOV r0, r2 /* return old value */ MOV pc, lr -/******************************************************************************* -* -* atomic_xor - atomically perform a bitwise XOR on a memory location -* -* This routine atomically performs a bitwise XOR operation of the contents of -* and , placing the result in . 
-* Various CPU architectures may impose restrictions with regards to the -* alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: Contents of before the atomic operation -* -* ERRNO: N/A -* -* atomic_val_t atomic_xor -* ( -* atomic_t *target, /@ memory location to XOR @/ -* atomic_val_t value /@ XOR with this value @/ -* ) -* -*/ +/** + * + * atomic_xor - atomically perform a bitwise XOR on a memory location + * + * This routine atomically performs a bitwise XOR operation of the contents of + * and , placing the result in . + * Various CPU architectures may impose restrictions with regards to the + * alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: Contents of before the atomic operation + * + * ERRNO: N/A + * + * atomic_val_t atomic_xor + * ( + * atomic_t *target, /@ memory location to XOR @/ + * atomic_val_t value /@ XOR with this value @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_xor) @@ -390,29 +390,29 @@ SECTION_FUNC(TEXT, atomic_xor) MOV r0, r2 /* return old value */ MOV pc, lr -/******************************************************************************* -* -* atomic_cas - atomically compare-and-swap the contents of a memory location -* -* This routine performs an atomic compare-and-swap. testing that the contents of -* contains , and if it does, setting the value of -* to . Various CPU architectures may impose restrictions with regards -* to the alignment and cache attributes of the atomic_t type. -* -* This routine can be used from both task and interrupt level. -* -* RETURNS: 1 if the swap is actually executed, 0 otherwise. -* -* ERRNO: N/A -* -* int atomic_cas -* ( -* atomic_t *target, /@ memory location to compare-and-swap @/ -* atomic_val_t oldValue, /@ compare to this value @/ -* atomic_val_t newValue, /@ swap with this value @/ -* ) -* -*/ +/** + * + * atomic_cas - atomically compare-and-swap the contents of a memory location + * + * This routine performs an atomic compare-and-swap. testing that the contents of + * contains , and if it does, setting the value of + * to . Various CPU architectures may impose restrictions with regards + * to the alignment and cache attributes of the atomic_t type. + * + * This routine can be used from both task and interrupt level. + * + * RETURNS: 1 if the swap is actually executed, 0 otherwise. + * + * ERRNO: N/A + * + * int atomic_cas + * ( + * atomic_t *target, /@ memory location to compare-and-swap @/ + * atomic_val_t oldValue, /@ compare to this value @/ + * atomic_val_t newValue, /@ swap with this value @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_cas) diff --git a/arch/arm/core/basepri.S b/arch/arm/core/basepri.S index 7ccec5f3cbd..7729d9b3ee6 100644 --- a/arch/arm/core/basepri.S +++ b/arch/arm/core/basepri.S @@ -46,7 +46,7 @@ unlocked. This achieves two purposes: 2. Zero Interrupt Latency (ZLI) is achievable via this by allowing certain interrupts to set their priority to 1, thus being allowed in when interrupts are locked for regular interrupts. -*/ + */ #define _ASMLANGUAGE @@ -59,18 +59,18 @@ _ASM_FILE_PROLOGUE GTEXT(irq_lock) GTEXT(irq_unlock) -/******************************************************************************* -* -* irq_lock - lock interrupts -* -* Prevent exceptions of priority lower than to the two highest priorities from -* interrupting the CPU. 
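The 1/0 return convention documented for atomic_cas() above lends itself to the usual retry loop. Below is a sketch of a bounded "take" operation built on it; try_take_slot() and the in_use counter are illustrative names only.

static atomic_t in_use = 0;

int try_take_slot(atomic_val_t limit)
{
	atomic_val_t old;

	do {
		old = atomic_get(&in_use);
		if (old >= limit) {
			return 0;                     /* all slots taken */
		}
		/* retry if another context changed in_use since the read */
	} while (atomic_cas(&in_use, old, old + 1) == 0);

	return 1;                             /* slot reserved */
}
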
-* -* This function can be called recursively: it will return a key to return the -* state of interrupt locking to the previous level. -* -* RETURNS: a key to return to the previous interrupt locking level -*/ +/** + * + * irq_lock - lock interrupts + * + * Prevent exceptions of priority lower than to the two highest priorities from + * interrupting the CPU. + * + * This function can be called recursively: it will return a key to return the + * state of interrupt locking to the previous level. + * + * RETURNS: a key to return to the previous interrupt locking level + */ SECTION_FUNC(TEXT,irq_lock) movs.n r1, #_EXC_IRQ_DEFAULT_PRIO @@ -78,15 +78,15 @@ SECTION_FUNC(TEXT,irq_lock) msr BASEPRI, r1 bx lr -/******************************************************************************* -* -* irq_unlock - unlock interrupts -* -* Return the state of interrupt locking to a previous level, passed in via the -* parameter, obtained from a previous call to irq_lock(). -* -* RETURNS: N/A -*/ +/** + * + * irq_unlock - unlock interrupts + * + * Return the state of interrupt locking to a previous level, passed in via the + * parameter, obtained from a previous call to irq_lock(). + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT,irq_unlock) msr BASEPRI, r0 diff --git a/arch/arm/core/context.c b/arch/arm/core/context.c index 3a86cd683a9..b796aede60a 100644 --- a/arch/arm/core/context.c +++ b/arch/arm/core/context.c @@ -34,7 +34,7 @@ DESCRIPTION Core nanokernel fiber related primitives for the ARM Cortex-M processor architecture. -*/ + */ #include #include @@ -53,14 +53,14 @@ tNANO _nanokernel = {0}; #endif #if defined(CONFIG_CONTEXT_MONITOR) -/******************************************************************************* -* -* _context_monitor_init - initialize context monitoring support -* -* Currently only inserts the new context in the list of active contexts. -* -* RETURNS: N/A -*/ +/** + * + * _context_monitor_init - initialize context monitoring support + * + * Currently only inserts the new context in the list of active contexts. + * + * RETURNS: N/A + */ static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */ ) @@ -81,26 +81,26 @@ static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */ } #endif /* CONFIG_CONTEXT_MONITOR */ -/******************************************************************************* -* -* _NewContext - intialize a new context (thread) from its stack space -* -* The control structure (CCS) is put at the lower address of the stack. An -* initial context, to be "restored" by __pendsv(), is put at the other end of -* the stack, and thus reusable by the stack when not needed anymore. -* -* The initial context is an exception stack frame (ESF) since exiting the -* PendSV exception will want to pop an ESF. Interestingly, even if the lsb of -* an instruction address to jump to must always be set since the CPU always -* runs in thumb mode, the ESF expects the real address of the instruction, -* with the lsb *not* set (instructions are always aligned on 16 bit halfwords). -* Since the compiler automatically sets the lsb of function addresses, we have -* to unset it manually before storing it in the 'pc' field of the ESF. -* -* is currently unused. -* -* RETURNS: N/A -*/ +/** + * + * _NewContext - intialize a new context (thread) from its stack space + * + * The control structure (CCS) is put at the lower address of the stack. 
An + * initial context, to be "restored" by __pendsv(), is put at the other end of + * the stack, and thus reusable by the stack when not needed anymore. + * + * The initial context is an exception stack frame (ESF) since exiting the + * PendSV exception will want to pop an ESF. Interestingly, even if the lsb of + * an instruction address to jump to must always be set since the CPU always + * runs in thumb mode, the ESF expects the real address of the instruction, + * with the lsb *not* set (instructions are always aligned on 16 bit halfwords). + * Since the compiler automatically sets the lsb of function addresses, we have + * to unset it manually before storing it in the 'pc' field of the ESF. + * + * is currently unused. + * + * RETURNS: N/A + */ void _NewContext( char *pStackMem, /* aligned stack memory */ diff --git a/arch/arm/core/cpu_idle.S b/arch/arm/core/cpu_idle.S index 0422ad61263..a135e76c3bc 100644 --- a/arch/arm/core/cpu_idle.S +++ b/arch/arm/core/cpu_idle.S @@ -32,7 +32,7 @@ /* DESCRIPTION -*/ + */ #define _ASMLANGUAGE @@ -56,19 +56,19 @@ GTEXT(nano_cpu_atomic_idle) #define _SCR_INIT_BITS _SCB_SCR_SEVONPEND -/******************************************************************************* -* -* _CpuIdleInit - initialization of CPU idle -* -* Only called by nanoArchInit(). Sets SEVONPEND bit once for the system's -* duration. -* -* RETURNS: N/A -* -* C function prototype: -* -* void _CpuIdleInit (void); -*/ +/** + * + * _CpuIdleInit - initialization of CPU idle + * + * Only called by nanoArchInit(). Sets SEVONPEND bit once for the system's + * duration. + * + * RETURNS: N/A + * + * C function prototype: + * + * void _CpuIdleInit (void); + */ SECTION_FUNC(TEXT, _CpuIdleInit) ldr r1, =_SCB_SCR @@ -78,36 +78,36 @@ SECTION_FUNC(TEXT, _CpuIdleInit) #ifdef CONFIG_ADVANCED_POWER_MANAGEMENT -/******************************************************************************* -* -* _NanoIdleValGet - get the kernel idle setting -* -* Returns the nanokernel idle setting, in ticks. Only called by __systick(). -* -* RETURNS: the requested number of ticks for the kernel to be idle -* -* C function prototype: -* -* int32_t _NanoIdleValGet (void); -*/ +/** + * + * _NanoIdleValGet - get the kernel idle setting + * + * Returns the nanokernel idle setting, in ticks. Only called by __systick(). + * + * RETURNS: the requested number of ticks for the kernel to be idle + * + * C function prototype: + * + * int32_t _NanoIdleValGet (void); + */ SECTION_FUNC(TEXT, _NanoIdleValGet) ldr r0, =_nanokernel ldr r0, [r0, #__tNANO_idle_OFFSET] bx lr -/******************************************************************************* -* -* _NanoIdleValClear - clear the kernel idle setting -* -* Sets the nanokernel idle setting to 0. Only called by __systick(). -* -* RETURNS: N/A -* -* C function prototype: -* -* void _NanoIdleValClear (void); -*/ +/** + * + * _NanoIdleValClear - clear the kernel idle setting + * + * Sets the nanokernel idle setting to 0. Only called by __systick(). 
+ * + * RETURNS: N/A + * + * C function prototype: + * + * void _NanoIdleValClear (void); + */ SECTION_FUNC(TEXT, _NanoIdleValClear) ldr r0, =_nanokernel @@ -117,21 +117,21 @@ SECTION_FUNC(TEXT, _NanoIdleValClear) #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */ -/******************************************************************************* -* -* nano_cpu_idle - power save idle routine for ARM Cortex-M -* -* This function will be called by the nanokernel idle loop or possibly within -* an implementation of _sys_power_save_idle in the microkernel when the -* '_sys_power_save_flag' variable is non-zero. The ARM 'wfi' instruction -* will be issued, causing a low-power consumption sleep mode. -* -* RETURNS: N/A -* -* C function prototype: -* -* void nano_cpu_idle (void); -*/ +/** + * + * nano_cpu_idle - power save idle routine for ARM Cortex-M + * + * This function will be called by the nanokernel idle loop or possibly within + * an implementation of _sys_power_save_idle in the microkernel when the + * '_sys_power_save_flag' variable is non-zero. The ARM 'wfi' instruction + * will be issued, causing a low-power consumption sleep mode. + * + * RETURNS: N/A + * + * C function prototype: + * + * void nano_cpu_idle (void); + */ SECTION_FUNC(TEXT, nano_cpu_idle) /* clear BASEPRI so wfi is awakened by incoming interrupts */ @@ -142,31 +142,31 @@ SECTION_FUNC(TEXT, nano_cpu_idle) bx lr -/******************************************************************************* -* -* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode -* -* This function is utilized by the nanokernel object "wait" APIs for task -* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(), -* and nano_task_fifo_get_wait(). -* -* INTERNAL -* The requirements for nano_cpu_atomic_idle() are as follows: -* 1) The enablement of interrupts and entering a low-power mode needs to be -* atomic, i.e. there should be no period of time where interrupts are -* enabled before the processor enters a low-power mode. See the comments -* in nano_task_lifo_get_wait(), for example, of the race condition that occurs -* if this requirement is not met. -* -* 2) After waking up from the low-power mode, the interrupt lockout state -* must be restored as indicated in the 'imask' input parameter. -* -* RETURNS: N/A -* -* C function prototype: -* -* void nano_cpu_atomic_idle (unsigned int imask); -*/ +/** + * + * nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode + * + * This function is utilized by the nanokernel object "wait" APIs for task + * contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(), + * and nano_task_fifo_get_wait(). + * + * INTERNAL + * The requirements for nano_cpu_atomic_idle() are as follows: + * 1) The enablement of interrupts and entering a low-power mode needs to be + * atomic, i.e. there should be no period of time where interrupts are + * enabled before the processor enters a low-power mode. See the comments + * in nano_task_lifo_get_wait(), for example, of the race condition that occurs + * if this requirement is not met. + * + * 2) After waking up from the low-power mode, the interrupt lockout state + * must be restored as indicated in the 'imask' input parameter. 
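The two requirements above translate into the wait pattern sketched below, mirroring what the nanokernel "wait" APIs named earlier do. data_available() is a hypothetical wake condition, and the key type is assumed to be unsigned int to match the imask prototype.

extern unsigned int irq_lock(void);
extern void irq_unlock(unsigned int key);
extern void nano_cpu_atomic_idle(unsigned int imask);
extern int data_available(void);  /* hypothetical wake condition */

void wait_for_data(void)          /* illustrative task-level wait loop */
{
	unsigned int imask = irq_lock();

	while (!data_available()) {
		/* re-enable interrupts and enter low-power mode atomically;
		 * on wakeup the lockout state given by imask is restored */
		nano_cpu_atomic_idle(imask);
		imask = irq_lock();   /* re-lock before re-testing the condition */
	}
	irq_unlock(imask);
}
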
+ * + * RETURNS: N/A + * + * C function prototype: + * + * void nano_cpu_atomic_idle (unsigned int imask); + */ SECTION_FUNC(TEXT, nano_cpu_atomic_idle) diff --git a/arch/arm/core/exc_exit.S b/arch/arm/core/exc_exit.S index 47083eaec42..8c38ae5bce2 100644 --- a/arch/arm/core/exc_exit.S +++ b/arch/arm/core/exc_exit.S @@ -36,7 +36,7 @@ DESCRIPTION Provides functions for performing kernel handling when exiting exceptions or interrupts that are installed directly in the vector table (i.e. that are not wrapped around by _isr_wrapper()). -*/ + */ #define _ASMLANGUAGE @@ -61,46 +61,46 @@ GDATA(_nanokernel) #endif #define _EXIT_EXC_IF_FIBER_NOT_READY _EXIT_EXC_IF_FIBER_PREEMPTED -/******************************************************************************* -* -* _IntExit - kernel housekeeping when exiting interrupt handler installed -* directly in vector table -* -* Kernel allows installing interrupt handlers (ISRs) directly into the vector -* table to get the lowest interrupt latency possible. This allows the ISR to be -* invoked directly without going through a software interrupt table. However, -* upon exiting the ISR, some kernel work must still be performed, namely -* possible context switching. While ISRs connected in the software interrupt -* table do this automatically via a wrapper, ISRs connected directly in the -* vector table must invoke _IntExit() as the *very last* action before -* returning. -* -* e.g. -* -* void myISR(void) -* { -* printk("in %s\n", __FUNCTION__); -* doStuff(); -* _IntExit(); -* } -* -* RETURNS: N/A -*/ +/** + * + * _IntExit - kernel housekeeping when exiting interrupt handler installed + * directly in vector table + * + * Kernel allows installing interrupt handlers (ISRs) directly into the vector + * table to get the lowest interrupt latency possible. This allows the ISR to be + * invoked directly without going through a software interrupt table. However, + * upon exiting the ISR, some kernel work must still be performed, namely + * possible context switching. While ISRs connected in the software interrupt + * table do this automatically via a wrapper, ISRs connected directly in the + * vector table must invoke _IntExit() as the *very last* action before + * returning. + * + * e.g. + * + * void myISR(void) + * { + * printk("in %s\n", __FUNCTION__); + * doStuff(); + * _IntExit(); + * } + * + * RETURNS: N/A + */ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit) /* _IntExit falls through to _ExcExit (they are aliases of each other) */ -/******************************************************************************* -* -* _ExcExit - kernel housekeeping when exiting exception handler installed -* directly in vector table -* -* See _IntExit(). -* -* RETURNS: N/A -*/ +/** + * + * _ExcExit - kernel housekeeping when exiting exception handler installed + * directly in vector table + * + * See _IntExit(). + * + * RETURNS: N/A + */ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit) diff --git a/arch/arm/core/fatal.c b/arch/arm/core/fatal.c index b6b4ebd4234..f648e7a3faf 100644 --- a/arch/arm/core/fatal.c +++ b/arch/arm/core/fatal.c @@ -33,7 +33,7 @@ /* DESCRIPTION This module provides the _NanoFatalErrorHandler() routine for ARM Cortex-M. 
-*/ + */ #include #include @@ -62,23 +62,23 @@ const NANO_ESF _default_esf = {0xdeaddead, /* a1 */ 0xdeaddead, /* xpsr */ }; -/******************************************************************************* -* -* _NanoFatalErrorHandler - nanokernel fatal error handler -* -* This routine is called when fatal error conditions are detected by software -* and is responsible only for reporting the error. Once reported, it then -* invokes the user provided routine _SysFatalErrorHandler() which is -* responsible for implementing the error handling policy. -* -* The caller is expected to always provide a usable ESF. In the event that the -* fatal error does not have a hardware generated ESF, the caller should either -* create its own or use a pointer to the global default ESF <_default_esf>. -* -* RETURNS: This function does not return. -* -* \NOMANUAL -*/ +/** + * + * _NanoFatalErrorHandler - nanokernel fatal error handler + * + * This routine is called when fatal error conditions are detected by software + * and is responsible only for reporting the error. Once reported, it then + * invokes the user provided routine _SysFatalErrorHandler() which is + * responsible for implementing the error handling policy. + * + * The caller is expected to always provide a usable ESF. In the event that the + * fatal error does not have a hardware generated ESF, the caller should either + * create its own or use a pointer to the global default ESF <_default_esf>. + * + * RETURNS: This function does not return. + * + * \NOMANUAL + */ FUNC_NORETURN void _NanoFatalErrorHandler( unsigned int reason, /* reason that handler was called */ diff --git a/arch/arm/core/fault.c b/arch/arm/core/fault.c index 43612062aac..6710e04b5c9 100644 --- a/arch/arm/core/fault.c +++ b/arch/arm/core/fault.c @@ -33,7 +33,7 @@ /* DESCRIPTION Common fault handler for ARM Cortex-M processors. -*/ + */ #include #include @@ -59,24 +59,24 @@ Common fault handler for ARM Cortex-M processors. #endif #if (CONFIG_FAULT_DUMP == 1) -/******************************************************************************* -* -* _FaultDump - dump information regarding fault (FAULT_DUMP == 1) -* -* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1 -* (short form). -* -* eg. (precise bus error escalated to hard fault): -* -* Fault! EXC #3, Thread: 0x200000dc, instr: 0x000011d3 -* HARD FAULT: Escalation (see below)! -* MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000 -* BFAR: 0xff001234 -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _FaultDump - dump information regarding fault (FAULT_DUMP == 1) + * + * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1 + * (short form). + * + * eg. (precise bus error escalated to hard fault): + * + * Fault! EXC #3, Thread: 0x200000dc, instr: 0x000011d3 + * HARD FAULT: Escalation (see below)! + * MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000 + * BFAR: 0xff001234 + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _FaultDump(const NANO_ESF *esf, int fault) { @@ -118,16 +118,16 @@ void _FaultDump(const NANO_ESF *esf, int fault) #endif #if (CONFIG_FAULT_DUMP == 2) -/******************************************************************************* -* -* _FaultContextShow - dump context information -* -* See _FaultDump() for example. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _FaultContextShow - dump context information + * + * See _FaultDump() for example. 
+ * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _FaultContextShow(const NANO_ESF *esf) { @@ -137,16 +137,16 @@ static void _FaultContextShow(const NANO_ESF *esf) esf->pc); } -/******************************************************************************* -* -* _MpuFault - dump MPU fault information -* -* See _FaultDump() for example. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _MpuFault - dump MPU fault information + * + * See _FaultDump() for example. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _MpuFault(const NANO_ESF *esf, int fromHardFault) @@ -172,16 +172,16 @@ static void _MpuFault(const NANO_ESF *esf, } } -/******************************************************************************* -* -* _BusFault - dump bus fault information -* -* See _FaultDump() for example. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _BusFault - dump bus fault information + * + * See _FaultDump() for example. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _BusFault(const NANO_ESF *esf, int fromHardFault) @@ -213,16 +213,16 @@ static void _BusFault(const NANO_ESF *esf, } } -/******************************************************************************* -* -* _UsageFault - dump usage fault information -* -* See _FaultDump() for example. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _UsageFault - dump usage fault information + * + * See _FaultDump() for example. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _UsageFault(const NANO_ESF *esf) { @@ -253,16 +253,16 @@ static void _UsageFault(const NANO_ESF *esf) _ScbUsageFaultAllFaultsReset(); } -/******************************************************************************* -* -* _HardFault - dump hard fault information -* -* See _FaultDump() for example. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _HardFault - dump hard fault information + * + * See _FaultDump() for example. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _HardFault(const NANO_ESF *esf) { @@ -281,32 +281,32 @@ static void _HardFault(const NANO_ESF *esf) } } -/******************************************************************************* -* -* _DebugMonitor - dump debug monitor exception information -* -* See _FaultDump() for example. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _DebugMonitor - dump debug monitor exception information + * + * See _FaultDump() for example. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _DebugMonitor(const NANO_ESF *esf) { PR_EXC("***** Debug monitor exception (not implemented) *****\n"); } -/******************************************************************************* -* -* _ReservedException - dump reserved exception information -* -* See _FaultDump() for example. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _ReservedException - dump reserved exception information + * + * See _FaultDump() for example. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _ReservedException(const NANO_ESF *esf, int fault) @@ -316,27 +316,27 @@ static void _ReservedException(const NANO_ESF *esf, fault - 16); } -/******************************************************************************* -* -* _FaultDump - dump information regarding fault (FAULT_DUMP == 2) -* -* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2 -* (long form). -* -* eg. 
(precise bus error escalated to hard fault): -* -* Executing context ID (thread): 0x200000dc -* Faulting instruction address: 0x000011d3 -* ***** HARD FAULT ***** -* Fault escalation (see below) -* ***** BUS FAULT ***** -* Precise data bus error -* Address: 0xff001234 -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _FaultDump - dump information regarding fault (FAULT_DUMP == 2) + * + * Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2 + * (long form). + * + * eg. (precise bus error escalated to hard fault): + * + * Executing context ID (thread): 0x200000dc + * Faulting instruction address: 0x000011d3 + * ***** HARD FAULT ***** + * Fault escalation (see below) + * ***** BUS FAULT ***** + * Precise data bus error + * Address: 0xff001234 + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _FaultDump(const NANO_ESF *esf, int fault) { @@ -363,23 +363,23 @@ static void _FaultDump(const NANO_ESF *esf, int fault) } #endif /* FAULT_DUMP == 2 */ -/******************************************************************************* -* -* _Fault - fault handler -* -* This routine is called when fatal error conditions are detected by hardware -* and is responsible only for reporting the error. Once reported, it then -* invokes the user provided routine _SysFatalErrorHandler() which is -* responsible for implementing the error handling policy. -* -* Since the ESF can be either on the MSP or PSP depending if an exception or -* interrupt was already being handled, it is passed a pointer to both and has -* to find out on which the ESP is present. -* -* RETURNS: This function does not return. -* -* \NOMANUAL -*/ +/** + * + * _Fault - fault handler + * + * This routine is called when fatal error conditions are detected by hardware + * and is responsible only for reporting the error. Once reported, it then + * invokes the user provided routine _SysFatalErrorHandler() which is + * responsible for implementing the error handling policy. + * + * Since the ESF can be either on the MSP or PSP depending if an exception or + * interrupt was already being handled, it is passed a pointer to both and has + * to find out on which the ESP is present. + * + * RETURNS: This function does not return. + * + * \NOMANUAL + */ void _Fault( const NANO_ESF *msp, /* pointer to potential ESF on MSP */ @@ -394,16 +394,16 @@ void _Fault( _SysFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf); } -/******************************************************************************* -* -* _FaultInit - initialization of fault handling -* -* Turns on the desired hardware faults. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _FaultInit - initialization of fault handling + * + * Turns on the desired hardware faults. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _FaultInit(void) { diff --git a/arch/arm/core/fault_s.S b/arch/arm/core/fault_s.S index ecc13fe859a..ab67aa507d9 100644 --- a/arch/arm/core/fault_s.S +++ b/arch/arm/core/fault_s.S @@ -33,7 +33,7 @@ /* DESCRIPTION Fault handlers for ARM Cortex-M processors. -*/ + */ #define _ASMLANGUAGE @@ -52,28 +52,28 @@ GTEXT(__usage_fault) GTEXT(__debug_monitor) GTEXT(__reserved) -/******************************************************************************* -* -* __fault - fault handler installed in the fault and reserved vectors -* -* Entry point for the hard fault, MPU fault, bus fault, usage fault, debug -* monitor and reserved exceptions. 
-* -* Save the values of the MSP and PSP in r0 and r1 respectively, so the first -* and second parameters to the _Fault() C function that will handle the rest. -* This has to be done because at this point we do not know if the fault -* happened while handling an exception or not, and thus the ESF could be on -* either stack. _Fault() will find out where the ESF resides. -* -* Provides these symbols: -* -* __hard_fault -* __mpu_fault -* __bus_fault -* __usage_fault -* __debug_monitor -* __reserved -*/ +/** + * + * __fault - fault handler installed in the fault and reserved vectors + * + * Entry point for the hard fault, MPU fault, bus fault, usage fault, debug + * monitor and reserved exceptions. + * + * Save the values of the MSP and PSP in r0 and r1 respectively, so the first + * and second parameters to the _Fault() C function that will handle the rest. + * This has to be done because at this point we do not know if the fault + * happened while handling an exception or not, and thus the ESF could be on + * either stack. _Fault() will find out where the ESF resides. + * + * Provides these symbols: + * + * __hard_fault + * __mpu_fault + * __bus_fault + * __usage_fault + * __debug_monitor + * __reserved + */ SECTION_SUBSEC_FUNC(TEXT,__fault,__hard_fault) SECTION_SUBSEC_FUNC(TEXT,__fault,__mpu_fault) diff --git a/arch/arm/core/ffs.S b/arch/arm/core/ffs.S index b68ad44101b..417a9494612 100644 --- a/arch/arm/core/ffs.S +++ b/arch/arm/core/ffs.S @@ -34,7 +34,7 @@ DESCRIPTION This library implements find_last_set() and find_first_set() which returns the most and least significant bit set respectively. -*/ + */ #define _ASMLANGUAGE @@ -48,17 +48,17 @@ _ASM_FILE_PROLOGUE GTEXT(find_last_set) GTEXT(find_first_set) -/******************************************************************************* -* -* find_last_set - find first set bit (searching from the most significant bit) -* -* This routine finds the first bit set in the argument passed it and -* returns the index of that bit. Bits are numbered starting -* at 1 from the least significant bit. A return value of zero indicates that -* the value passed is zero. -* -* RETURNS: most significant bit set -*/ +/** + * + * find_last_set - find first set bit (searching from the most significant bit) + * + * This routine finds the first bit set in the argument passed it and + * returns the index of that bit. Bits are numbered starting + * at 1 from the least significant bit. A return value of zero indicates that + * the value passed is zero. + * + * RETURNS: most significant bit set + */ SECTION_FUNC(TEXT, find_last_set) @@ -69,17 +69,17 @@ SECTION_FUNC(TEXT, find_last_set) mov pc, lr -/******************************************************************************* -* -* find_first_set - find first set bit (searching from the least significant bit) -* -* This routine finds the first bit set in the argument passed it and -* returns the index of that bit. Bits are numbered starting -* at 1 from the least significant bit. A return value of zero indicates that -* the value passed is zero. -* -* RETURNS: least significant bit set -*/ +/** + * + * find_first_set - find first set bit (searching from the least significant bit) + * + * This routine finds the first bit set in the argument passed it and + * returns the index of that bit. Bits are numbered starting + * at 1 from the least significant bit. A return value of zero indicates that + * the value passed is zero. 
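A quick numeric check of the 1-based bit numbering documented for find_first_set() and find_last_set(); the demo function, values and prototypes are illustrative.

extern int find_first_set(unsigned int op);  /* prototypes assumed */
extern int find_last_set(unsigned int op);

void ffs_demo(void)
{
	/* 0x90 = 1001 0000b: bits 5 and 8 are set, counting from 1 at the lsb */
	int lsb  = find_first_set(0x90);  /* -> 5 */
	int msb  = find_last_set(0x90);   /* -> 8 */
	int none = find_first_set(0);     /* -> 0: argument had no bit set */

	(void)lsb; (void)msb; (void)none;
}
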
+ * + * RETURNS: least significant bit set + */ SECTION_FUNC(TEXT, find_first_set) diff --git a/arch/arm/core/fiber_abort.c b/arch/arm/core/fiber_abort.c index 193c453fdfa..9be6351b940 100644 --- a/arch/arm/core/fiber_abort.c +++ b/arch/arm/core/fiber_abort.c @@ -38,7 +38,7 @@ point returns or when it aborts itself, the CPU is in thread mode and must call _Swap() (which triggers a service call), but when in handler mode, the CPU must exit handler mode to cause the context switch, and thus must queue the PendSV exception. -*/ + */ #ifdef CONFIG_MICROKERNEL #include @@ -51,18 +51,18 @@ the PendSV exception. #include #include -/******************************************************************************* -* -* fiber_abort - abort the currently executing fiber -* -* Possible reasons for a fiber aborting: -* -* - the fiber explicitly aborts itself by calling this routine -* - the fiber implicitly aborts by returning from its entry point -* - the fiber encounters a fatal exception -* -* RETURNS: N/A -*/ +/** + * + * fiber_abort - abort the currently executing fiber + * + * Possible reasons for a fiber aborting: + * + * - the fiber explicitly aborts itself by calling this routine + * - the fiber implicitly aborts by returning from its entry point + * - the fiber encounters a fatal exception + * + * RETURNS: N/A + */ void fiber_abort(void) { diff --git a/arch/arm/core/gdb_stub.S b/arch/arm/core/gdb_stub.S index 7800c5b5ab7..621671dc329 100644 --- a/arch/arm/core/gdb_stub.S +++ b/arch/arm/core/gdb_stub.S @@ -39,7 +39,7 @@ that we are running in an exception. Upon exception exit, it must be recorded that the task is not in an exception anymore. -*/ + */ #define _ASMLANGUAGE @@ -51,27 +51,27 @@ anymore. _ASM_FILE_PROLOGUE -/******************************************************************************* -* -* _GdbStubExcEntry - exception entry extra work when GDB_INFO is enabled -* -* During normal system operation, the callee-saved registers are saved lazily -* only when a context switch is required. To allow looking at the current -* threads registers while debugging an exception/interrupt, they must be saved -* upon entry since the handler could be using them: thus, looking at the CPU -* registers would show the current system state and not the current *thread*'s -* state. -* -* Also, record the fact that the thread is currently interrupted so that VQEMU -* looks into the CCS and not the CPU registers to obtain the current thread's -* register values. -* -* NOTE: -* - must be called with interrupts locked -* - cannot use r0 without saving it first -* -* RETURNS: N/A -*/ +/** + * + * _GdbStubExcEntry - exception entry extra work when GDB_INFO is enabled + * + * During normal system operation, the callee-saved registers are saved lazily + * only when a context switch is required. To allow looking at the current + * threads registers while debugging an exception/interrupt, they must be saved + * upon entry since the handler could be using them: thus, looking at the CPU + * registers would show the current system state and not the current *thread*'s + * state. + * + * Also, record the fact that the thread is currently interrupted so that VQEMU + * looks into the CCS and not the CPU registers to obtain the current thread's + * register values. 
+ * + * NOTE: + * - must be called with interrupts locked + * - cannot use r0 without saving it first + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _GdbStubExcEntry) @@ -95,20 +95,20 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry) bx lr -/******************************************************************************* -* -* _GdbStubExcExit - exception exit extra clean up when GDB_INFO is enabled -* -* Record the fact that the thread is not interrupted anymore so that VQEMU -* looks at the CPU registers and not into the CCS to obtain the current -* thread's register values. Only do this if this is not a nested exception. -* -* NOTE: -* - must be called with interrupts locked -* - cannot use r0 without saving it first -* -* RETURNS: N/A -*/ +/** + * + * _GdbStubExcExit - exception exit extra clean up when GDB_INFO is enabled + * + * Record the fact that the thread is not interrupted anymore so that VQEMU + * looks at the CPU registers and not into the CCS to obtain the current + * thread's register values. Only do this if this is not a nested exception. + * + * NOTE: + * - must be called with interrupts locked + * - cannot use r0 without saving it first + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _GdbStubExcExit) @@ -129,24 +129,24 @@ SECTION_FUNC(TEXT, _GdbStubExcExit) bx lr -/******************************************************************************* -* -* _irq_vector_table_entry_with_gdb_stub - stub for ISRs installed directly in -* vector table -* -* The kernel on Cortex-M3/4 can be configured so that ISRs -* are installed directly in the vector table for maximum efficiency. -* -* When OS-awareness is enabled, a stub must be inserted to invoke -* _GdbStubExcEntry() before the user ISR runs, to save the current task's -* registers. This stub thus gets inserted in the vector table instead of the -* user's ISR. The user's IRQ vector table gets pushed after the vector table -* automatically by the linker script: this is all transparent to the user. -* This stub must also act as a demuxer that find the running exception and -* invoke the user's real ISR. -* -* RETURNS: N/A -*/ +/** + * + * _irq_vector_table_entry_with_gdb_stub - stub for ISRs installed directly in + * vector table + * + * The kernel on Cortex-M3/4 can be configured so that ISRs + * are installed directly in the vector table for maximum efficiency. + * + * When OS-awareness is enabled, a stub must be inserted to invoke + * _GdbStubExcEntry() before the user ISR runs, to save the current task's + * registers. This stub thus gets inserted in the vector table instead of the + * user's ISR. The user's IRQ vector table gets pushed after the vector table + * automatically by the linker script: this is all transparent to the user. + * This stub must also act as a demuxer that find the running exception and + * invoke the user's real ISR. + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _irq_vector_table_entry_with_gdb_stub) diff --git a/arch/arm/core/gdb_stub_irq_vector_table.c b/arch/arm/core/gdb_stub_irq_vector_table.c index 1b3c78ba3a9..2d934e113ba 100644 --- a/arch/arm/core/gdb_stub_irq_vector_table.c +++ b/arch/arm/core/gdb_stub_irq_vector_table.c @@ -35,7 +35,7 @@ DESCRIPTION When GDB is enabled, the static IRQ vector table needs to install the _irq_vector_table_entry_with_gdb_stub stub to do some work before calling the user-installed ISRs. 
-*/ + */ #include #include diff --git a/arch/arm/core/irq_init.c b/arch/arm/core/irq_init.c index f72f26ce0e1..390950a3487 100644 --- a/arch/arm/core/irq_init.c +++ b/arch/arm/core/irq_init.c @@ -38,23 +38,23 @@ point returns or when it aborts itself, the CPU is in thread mode and must call _Swap() (which triggers a service call), but when in handler mode, the CPU must exit handler mode to cause the context switch, and thus must queue the PendSV exception. -*/ + */ #include #include #include #include -/******************************************************************************* -* -* _IntLibInit - initialize interrupts -* -* Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO and -* not 0, which they have it set to when coming out of reset. This ensures that -* interrupt locking via BASEPRI works as expected. -* -* RETURNS: N/A -*/ +/** + * + * _IntLibInit - initialize interrupts + * + * Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO and + * not 0, which they have it set to when coming out of reset. This ensures that + * interrupt locking via BASEPRI works as expected. + * + * RETURNS: N/A + */ void _IntLibInit(void) { diff --git a/arch/arm/core/irq_manage.c b/arch/arm/core/irq_manage.c index af582d07e3a..2d2155f65df 100644 --- a/arch/arm/core/irq_manage.c +++ b/arch/arm/core/irq_manage.c @@ -35,7 +35,7 @@ DESCRIPTION Interrupt management: enabling/disabling and dynamic ISR connecting/replacing. SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime. -*/ + */ #include #include @@ -46,18 +46,18 @@ SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime. extern void __reserved(void); -/******************************************************************************* -* -* irq_handler_set - replace an interrupt handler by another -* -* An interrupt's ISR can be replaced at runtime. Care must be taken that the -* interrupt is disabled before doing this. -* -* This routine will hang if is not found in the table and ASSERT_ON is -* enabled. -* -* RETURNS: N/A -*/ +/** + * + * irq_handler_set - replace an interrupt handler by another + * + * An interrupt's ISR can be replaced at runtime. Care must be taken that the + * interrupt is disabled before doing this. + * + * This routine will hang if is not found in the table and ASSERT_ON is + * enabled. + * + * RETURNS: N/A + */ void irq_handler_set(unsigned int irq, void (*old)(void *arg), @@ -76,16 +76,16 @@ void irq_handler_set(unsigned int irq, irq_unlock_inline(key); } -/******************************************************************************* -* -* irq_enable - enable an interrupt line -* -* Clear possible pending interrupts on the line, and enable the interrupt -* line. After this call, the CPU will receive interrupts for the specified -* . -* -* RETURNS: N/A -*/ +/** + * + * irq_enable - enable an interrupt line + * + * Clear possible pending interrupts on the line, and enable the interrupt + * line. After this call, the CPU will receive interrupts for the specified + * . + * + * RETURNS: N/A + */ void irq_enable(unsigned int irq) { @@ -94,35 +94,35 @@ void irq_enable(unsigned int irq) _NvicIrqEnable(irq); } -/******************************************************************************* -* -* irq_disable - disable an interrupt line -* -* Disable an interrupt line. After this call, the CPU will stop receiving -* interrupts for the specified . -* -* RETURNS: N/A -*/ +/** + * + * irq_disable - disable an interrupt line + * + * Disable an interrupt line. 
After this call, the CPU will stop receiving + * interrupts for the specified . + * + * RETURNS: N/A + */ void irq_disable(unsigned int irq) { _NvicIrqDisable(irq); } -/******************************************************************************* -* -* irq_priority_set - set an interrupt's priority -* -* Valid values are from 1 to 255. Interrupts of priority 1 are not masked when -* interrupts are locked system-wide, so care must be taken when using them. ISR -* installed with priority 1 interrupts cannot make kernel calls. -* -* Priority 0 is reserved for kernel usage and cannot be used. -* -* The priority is verified if ASSERT_ON is enabled. -* -* RETURNS: N/A -*/ +/** + * + * irq_priority_set - set an interrupt's priority + * + * Valid values are from 1 to 255. Interrupts of priority 1 are not masked when + * interrupts are locked system-wide, so care must be taken when using them. ISR + * installed with priority 1 interrupts cannot make kernel calls. + * + * Priority 0 is reserved for kernel usage and cannot be used. + * + * The priority is verified if ASSERT_ON is enabled. + * + * RETURNS: N/A + */ void irq_priority_set(unsigned int irq, unsigned int prio) @@ -131,17 +131,17 @@ void irq_priority_set(unsigned int irq, _NvicIrqPrioSet(irq, _EXC_PRIO(prio)); } -/******************************************************************************* -* -* _irq_spurious - spurious interrupt handler -* -* Installed in all dynamic interrupt slots at boot time. Throws an error if -* called. -* -* See __reserved(). -* -* RETURNS: N/A -*/ +/** + * + * _irq_spurious - spurious interrupt handler + * + * Installed in all dynamic interrupt slots at boot time. Throws an error if + * called. + * + * See __reserved(). + * + * RETURNS: N/A + */ void _irq_spurious(void *unused) { @@ -149,18 +149,18 @@ void _irq_spurious(void *unused) __reserved(); } -/******************************************************************************* -* -* irq_connect - connect an ISR to an interrupt line -* -* is connected to interrupt line (exception #+16). No prior -* ISR can have been connected on interrupt line since the system booted. -* -* This routine will hang if another ISR was connected for interrupt line -* and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently. -* -* RETURNS: the interrupt line number -*/ +/** + * + * irq_connect - connect an ISR to an interrupt line + * + * is connected to interrupt line (exception #+16). No prior + * ISR can have been connected on interrupt line since the system booted. + * + * This routine will hang if another ISR was connected for interrupt line + * and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently. + * + * RETURNS: the interrupt line number + */ int irq_connect(unsigned int irq, unsigned int prio, @@ -172,16 +172,16 @@ int irq_connect(unsigned int irq, return irq; } -/******************************************************************************* -* -* irq_disconnect - disconnect an ISR from an interrupt line -* -* Interrupt line (exception #+16) is disconnected from its ISR and -* the latter is replaced by _irq_spurious(). irq_disable() should have -* been called before invoking this routine. -* -* RETURNS: N/A -*/ +/** + * + * irq_disconnect - disconnect an ISR from an interrupt line + * + * Interrupt line (exception #+16) is disconnected from its ISR and + * the latter is replaced by _irq_spurious(). irq_disable() should have + * been called before invoking this routine. 
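The connect/disconnect life cycle described in this hunk (dynamic slots boot with a spurious handler installed, irq_connect() claims an empty slot and returns the line number, irq_disconnect() puts the spurious handler back) can be summarised with a small standalone table model. The names, the table size and the error convention below are illustrative choices for the sketch, not the kernel's.

#include <stddef.h>

#define NUM_IRQS_MODEL 32                        /* illustrative size */

typedef void (*isr_model_t)(void *arg);

/* Default handler: the real kernel reports a fatal error if this ever runs. */
static void spurious_isr_model(void *unused)
{
	(void)unused;
}

static struct {
	isr_model_t isr;
	void *arg;
} irq_table_model[NUM_IRQS_MODEL] = {
	[0 ... NUM_IRQS_MODEL - 1] = { spurious_isr_model, NULL },  /* GCC range initializer */
};

static int irq_connect_model(unsigned int irq, isr_model_t isr, void *arg)
{
	if (irq_table_model[irq].isr != spurious_isr_model) {
		return -1;       /* slot already taken; the kernel hangs on assert or fails silently */
	}
	irq_table_model[irq].isr = isr;
	irq_table_model[irq].arg = arg;
	return (int)irq;         /* documented to return the interrupt line number */
}

static void irq_disconnect_model(unsigned int irq)
{
	/* interrupts for <irq> are expected to be disabled before this point */
	irq_table_model[irq].isr = spurious_isr_model;
	irq_table_model[irq].arg = NULL;
}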
+ * + * RETURNS: N/A + */ void irq_disconnect(unsigned int irq) { diff --git a/arch/arm/core/isr_wrapper.S b/arch/arm/core/isr_wrapper.S index e6d90cbefe6..3ce5b05c00a 100644 --- a/arch/arm/core/isr_wrapper.S +++ b/arch/arm/core/isr_wrapper.S @@ -35,7 +35,7 @@ DESCRIPTION Wrapper installed in vector table for handling dynamic interrupts that accept a parameter. -*/ + */ #define _ASMLANGUAGE @@ -53,19 +53,19 @@ GDATA(_sw_isr_table) GTEXT(_isr_wrapper) GTEXT(_IntExit) -/******************************************************************************* -* -* _isr_wrapper - wrapper around ISRs when inserted in software ISR table -* -* When inserted in the vector table, _isr_wrapper() demuxes the ISR table using -* the running interrupt number as the index, and invokes the registered ISR -* with its correspoding argument. When returning from the ISR, it determines -* if a context switch needs to happen (see documentation for __pendsv()) and -* pends the PendSV exception if so: the latter will perform the context switch -* itself. -* -* RETURNS: N/A -*/ +/** + * + * _isr_wrapper - wrapper around ISRs when inserted in software ISR table + * + * When inserted in the vector table, _isr_wrapper() demuxes the ISR table using + * the running interrupt number as the index, and invokes the registered ISR + * with its correspoding argument. When returning from the ISR, it determines + * if a context switch needs to happen (see documentation for __pendsv()) and + * pends the PendSV exception if so: the latter will perform the context switch + * itself. + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _isr_wrapper) _GDB_STUB_EXC_ENTRY diff --git a/arch/arm/core/offsets/offsets.c b/arch/arm/core/offsets/offsets.c index 9a29f5fe3de..f5211c43200 100644 --- a/arch/arm/core/offsets/offsets.c +++ b/arch/arm/core/offsets/offsets.c @@ -46,7 +46,7 @@ Typically, only those members that are accessed by assembly language routines are defined; however, it doesn't hurt to define all fields for the sake of completeness. -*/ + */ #include #include diff --git a/arch/arm/core/swap.S b/arch/arm/core/swap.S index d4e985c9606..9413a49afe2 100644 --- a/arch/arm/core/swap.S +++ b/arch/arm/core/swap.S @@ -34,7 +34,7 @@ DESCRIPTION This module implements the routines necessary for thread context switching on ARM Cortex-M3/M4 CPUs. -*/ + */ #define _ASMLANGUAGE @@ -51,23 +51,23 @@ GTEXT(__pendsv) GDATA(_nanokernel) -/******************************************************************************* -* -* __pendsv - PendSV exception handler, handling context switches -* -* The PendSV exception is the only context in the system that can perform -* context switching. When an execution context finds out it has to switch -* contexts, it pends the PendSV exception. -* -* When PendSV is pended, the decision that a context switch must happen has -* already been taken. In other words, when __pendsv() runs, we *know* we have -* to swap *something*. -* -* The scheduling algorithm is simple: schedule the head of the runnable FIBER -* context list, which is represented by _nanokernel.fiber. If there are no -* runnable FIBER contexts, then schedule the TASK context represented by -* _nanokernel.task. The _nanokernel.task field will never be NULL. -*/ +/** + * + * __pendsv - PendSV exception handler, handling context switches + * + * The PendSV exception is the only context in the system that can perform + * context switching. When an execution context finds out it has to switch + * contexts, it pends the PendSV exception. 
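For context, pending the PendSV exception on a Cortex-M core is a single register write: set the PENDSVSET bit in the Interrupt Control and State Register (ICSR). The address and bit position below come from the ARMv7-M architecture definition rather than from this patch, and the helper name is invented for the sketch.

#include <stdint.h>

#define SCB_ICSR_MODEL            (*(volatile uint32_t *)0xE000ED04u)
#define SCB_ICSR_PENDSVSET_MODEL  (1u << 28)

/* Request a context switch: once no higher-priority exception is active,
 * the PendSV handler runs and performs the actual swap. */
static inline void pend_context_switch_model(void)
{
	SCB_ICSR_MODEL = SCB_ICSR_PENDSVSET_MODEL;
}

Because PendSV is configured with the lowest possible priority, the switch is naturally deferred until all other exception handling has completed.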
+ * + * When PendSV is pended, the decision that a context switch must happen has + * already been taken. In other words, when __pendsv() runs, we *know* we have + * to swap *something*. + * + * The scheduling algorithm is simple: schedule the head of the runnable FIBER + * context list, which is represented by _nanokernel.fiber. If there are no + * runnable FIBER contexts, then schedule the TASK context represented by + * _nanokernel.task. The _nanokernel.task field will never be NULL. + */ SECTION_FUNC(TEXT, __pendsv) @@ -146,15 +146,15 @@ SECTION_FUNC(TEXT, __pendsv) /* exc return */ bx lr -/******************************************************************************* -* -* __svc - service call handler -* -* The service call (svc) is only used in _Swap() to enter handler mode so we -* can go through the PendSV exception to perform a context switch. -* -* RETURNS: N/A -*/ +/** + * + * __svc - service call handler + * + * The service call (svc) is only used in _Swap() to enter handler mode so we + * can go through the PendSV exception to perform a context switch. + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, __svc) @@ -178,38 +178,38 @@ SECTION_FUNC(TEXT, __svc) /* handler mode exit, to PendSV */ bx lr -/******************************************************************************* -* -* _Swap - initiate a cooperative context switch -* -* The _Swap() routine is invoked by various nanokernel services to effect -* a cooperative context context switch. Prior to invoking _Swap(), the caller -* disables interrupts via irq_lock() and the return 'key' is passed as a -* parameter to _Swap(). The 'key' actually represents the BASEPRI register -* prior to disabling interrupts via the BASEPRI mechanism. -* -* _Swap() itself does not do much. -* -* It simply stores the intlock key (the BASEPRI value) parameter into -* current->basepri, and then triggers a service call exception (svc) to setup -* the PendSV exception, which does the heavy lifting of context switching. +/** + * + * _Swap - initiate a cooperative context switch + * + * The _Swap() routine is invoked by various nanokernel services to effect + * a cooperative context context switch. Prior to invoking _Swap(), the caller + * disables interrupts via irq_lock() and the return 'key' is passed as a + * parameter to _Swap(). The 'key' actually represents the BASEPRI register + * prior to disabling interrupts via the BASEPRI mechanism. + * + * _Swap() itself does not do much. + * + * It simply stores the intlock key (the BASEPRI value) parameter into + * current->basepri, and then triggers a service call exception (svc) to setup + * the PendSV exception, which does the heavy lifting of context switching. -* This is the only place we have to save BASEPRI since the other paths to -* __pendsv all come from handling an interrupt, which means we know the -* interrupts were not locked: in that case the BASEPRI value is 0. -* -* Given that _Swap() is called to effect a cooperative context context switch, -* only the caller-saved integer registers need to be saved in the tCCS of the -* outgoing context. This is all performed by the hardware, which stores it in -* its exception stack frame, created when handling the svc exception. 
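The interrupt-lock 'key' being saved here is the BASEPRI value. On Cortex-M, locking interrupts amounts to reading BASEPRI and then writing a more restrictive masking level, as in the sketch below; the lock_level parameter is a placeholder for whatever boundary the kernel chooses, and the helper names are invented for illustration.

#include <stdint.h>

/* Return the current BASEPRI (the lock 'key') and mask every exception whose
 * priority value is numerically >= lock_level (a BASEPRI of 0 masks nothing). */
static inline uint32_t basepri_lock_model(uint32_t lock_level)
{
	uint32_t key;

	__asm__ volatile ("mrs %0, basepri" : "=r"(key));
	__asm__ volatile ("msr basepri, %0" : : "r"(lock_level) : "memory");
	return key;
}

/* Restore the key returned by basepri_lock_model(). */
static inline void basepri_unlock_model(uint32_t key)
{
	__asm__ volatile ("msr basepri, %0" : : "r"(key) : "memory");
}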
-* -* RETURNS: may contain a return value setup by a call to fiberRtnValueSet() -* -* C function prototype: -* -* unsigned int _Swap (unsigned int basepri); -* -*/ + * This is the only place we have to save BASEPRI since the other paths to + * __pendsv all come from handling an interrupt, which means we know the + * interrupts were not locked: in that case the BASEPRI value is 0. + * + * Given that _Swap() is called to effect a cooperative context context switch, + * only the caller-saved integer registers need to be saved in the tCCS of the + * outgoing context. This is all performed by the hardware, which stores it in + * its exception stack frame, created when handling the svc exception. + * + * RETURNS: may contain a return value setup by a call to fiberRtnValueSet() + * + * C function prototype: + * + * unsigned int _Swap (unsigned int basepri); + * + */ SECTION_FUNC(TEXT, _Swap) diff --git a/arch/arm/core/task_abort.c b/arch/arm/core/task_abort.c index 8214e95ac3d..da2ac9b46fd 100644 --- a/arch/arm/core/task_abort.c +++ b/arch/arm/core/task_abort.c @@ -38,7 +38,7 @@ point returns or when it aborts itself, the CPU is in thread mode and must call the equivalent of task_abort(), but when in handler mode, the CPU must queue a packet to K_swapper(), then exit handler mode to queue the PendSV exception and cause the immediate context switch to K_swapper. -*/ + */ #ifdef CONFIG_MICROKERNEL @@ -52,20 +52,20 @@ PendSV exception and cause the immediate context switch to K_swapper. static struct k_args cmd_packet; -/******************************************************************************* -* -* _TaskAbort - abort the current task -* -* Possible reasons for a task aborting: -* -* - the task explicitly aborts itself by calling this routine -* - the task implicitly aborts by returning from its entry point -* - the task encounters a fatal exception -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _TaskAbort - abort the current task + * + * Possible reasons for a task aborting: + * + * - the task explicitly aborts itself by calling this routine + * - the task implicitly aborts by returning from its entry point + * - the task encounters a fatal exception + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _TaskAbort(void) { diff --git a/arch/arm/fsl_frdm_k64f/board.h b/arch/arm/fsl_frdm_k64f/board.h index 84579101b41..087e69e93de 100644 --- a/arch/arm/fsl_frdm_k64f/board.h +++ b/arch/arm/fsl_frdm_k64f/board.h @@ -34,7 +34,7 @@ DESCRIPTION This header file is used to specify and describe board-level aspects for the 'fsl_frdm_k64f' BSP. -*/ + */ #ifndef _BOARD__H_ #define _BOARD__H_ diff --git a/arch/arm/fsl_frdm_k64f/irq_vector_table.c b/arch/arm/fsl_frdm_k64f/irq_vector_table.c index 96c37bf26fe..f2da54bb0bd 100644 --- a/arch/arm/fsl_frdm_k64f/irq_vector_table.c +++ b/arch/arm/fsl_frdm_k64f/irq_vector_table.c @@ -40,7 +40,7 @@ a) When software-managed ISRs (SW_ISR_TABLE) is enabled, and in that case it b) When the BSP is written so that device ISRs are installed directly in the vector table, they are enumerated here. -*/ + */ #include #include diff --git a/arch/arm/fsl_frdm_k64f/linker.cmd b/arch/arm/fsl_frdm_k64f/linker.cmd index f084712ab61..53d4ddfb1bf 100644 --- a/arch/arm/fsl_frdm_k64f/linker.cmd +++ b/arch/arm/fsl_frdm_k64f/linker.cmd @@ -33,7 +33,7 @@ /* DESCRIPTION This is the linker script for both standard images and XIP images. 
-*/ + */ /* Flash base address and size */ #define FLASH_START 0x00000000 diff --git a/arch/arm/fsl_frdm_k64f/nmi_on_reset.S b/arch/arm/fsl_frdm_k64f/nmi_on_reset.S index ff531dca95e..de470b0c153 100644 --- a/arch/arm/fsl_frdm_k64f/nmi_on_reset.S +++ b/arch/arm/fsl_frdm_k64f/nmi_on_reset.S @@ -37,7 +37,7 @@ is to hard hang, sleeping. This might be preferable than rebooting to help debugging, or because rebooting might trigger the exact same problem over and over. -*/ + */ #define _ASMLANGUAGE diff --git a/arch/arm/fsl_frdm_k64f/system.c b/arch/arm/fsl_frdm_k64f/system.c index 686d57f52bc..2ac57cdaa73 100644 --- a/arch/arm/fsl_frdm_k64f/system.c +++ b/arch/arm/fsl_frdm_k64f/system.c @@ -34,7 +34,7 @@ DESCRIPTION This module provides routines to initialize and support board-level hardware for the fsl_frdm_k64f BSP. -*/ + */ #include #include @@ -94,21 +94,21 @@ uint8_t __security_frdm_k64f_section __security_frdm_k64f[] = { /* Reserved for FlexNVM feature (unsupported by this MCU) */ 0xFF, 0xFF}; -/******************************************************************************* -* -* clkInit - initialize the system clock -* -* This routine will configure the multipurpose clock generator (MCG) to -* set up the system clock. -* The MCG has nine possible modes, including Stop mode. This routine assumes -* that the current MCG mode is FLL Engaged Internal (FEI), as from reset. -* It transitions through the FLL Bypassed External (FBE) and -* PLL Bypassed External (PBE) modes to get to the desired -* PLL Engaged External (PEE) mode and generate the maximum 120 MHz system clock. -* -* RETURNS: N/A -* -*/ +/** + * + * clkInit - initialize the system clock + * + * This routine will configure the multipurpose clock generator (MCG) to + * set up the system clock. + * The MCG has nine possible modes, including Stop mode. This routine assumes + * that the current MCG mode is FLL Engaged Internal (FEI), as from reset. + * It transitions through the FLL Bypassed External (FBE) and + * PLL Bypassed External (PBE) modes to get to the desired + * PLL Engaged External (PEE) mode and generate the maximum 120 MHz system clock. + * + * RETURNS: N/A + * + */ static void clkInit(void) { @@ -247,15 +247,15 @@ static void clkInit(void) #if defined(DO_CONSOLE_INIT) -/******************************************************************************* -* -* consoleInit - initialize target-only console -* -* Only used for debugging. -* -* RETURNS: N/A -* -*/ +/** + * + * consoleInit - initialize target-only console + * + * Only used for debugging. + * + * RETURNS: N/A + * + */ #include @@ -298,16 +298,16 @@ static void consoleInit(void) } while ((0)) #endif /* DO_CONSOLE_INIT */ -/******************************************************************************* -* -* _InitHardware - perform basic hardware initialization -* -* Initialize the interrupt controller device drivers and the -* Kinetis UART device driver. -* Also initialize the timer device driver, if required. -* -* RETURNS: N/A -*/ +/** + * + * _InitHardware - perform basic hardware initialization + * + * Initialize the interrupt controller device drivers and the + * Kinetis UART device driver. + * Also initialize the timer device driver, if required. 
+ * + * RETURNS: N/A + */ void _InitHardware(void) { diff --git a/arch/arm/fsl_frdm_k64f/wdog.S b/arch/arm/fsl_frdm_k64f/wdog.S index b76b30711ab..7fd151ebac2 100644 --- a/arch/arm/fsl_frdm_k64f/wdog.S +++ b/arch/arm/fsl_frdm_k64f/wdog.S @@ -33,7 +33,7 @@ /* DESCRIPTION This module initializes the watchdog for the fsl_frdm_k64f BSP. -*/ + */ #define _ASMLANGUAGE @@ -53,7 +53,7 @@ GTEXT(_WdogInit) #define WDOG_UNLOCK_1_CMD 0xC520 #define WDOG_UNLOCK_2_CMD 0xD928 -/******************************************************************************* +/** * * _WdogInit - Watchdog timer disable routine * diff --git a/arch/arm/include/CortexM/asm_inline_gcc.h b/arch/arm/include/CortexM/asm_inline_gcc.h index df6bfeb442f..ad0ed246a92 100644 --- a/arch/arm/include/CortexM/asm_inline_gcc.h +++ b/arch/arm/include/CortexM/asm_inline_gcc.h @@ -40,16 +40,16 @@ #ifndef _ASMLANGUAGE -/******************************************************************************* -* -* _IpsrGet - obtain value of IPSR register -* -* Obtain and return current value of IPSR register. -* -* RETURNS: the contents of the IPSR register -* -* \NOMANUAL -*/ +/** + * + * _IpsrGet - obtain value of IPSR register + * + * Obtain and return current value of IPSR register. + * + * RETURNS: the contents of the IPSR register + * + * \NOMANUAL + */ static ALWAYS_INLINE uint32_t _IpsrGet(void) { @@ -59,16 +59,16 @@ static ALWAYS_INLINE uint32_t _IpsrGet(void) return vector; } -/******************************************************************************* -* -* _MspSet - set the value of the Main Stack Pointer register -* -* Store the value of in MSP register. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _MspSet - set the value of the Main Stack Pointer register + * + * Store the value of in MSP register. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void _MspSet(uint32_t msp /* value to store in MSP */ ) diff --git a/arch/arm/include/CortexM/exc.h b/arch/arm/include/CortexM/exc.h index e2644b8219f..d498d94e33e 100644 --- a/arch/arm/include/CortexM/exc.h +++ b/arch/arm/include/CortexM/exc.h @@ -33,7 +33,7 @@ /* DESCRIPTION Exception/interrupt context helpers. -*/ + */ #ifndef _ARM_CORTEXM_ISR__H_ #define _ARM_CORTEXM_ISR__H_ @@ -47,19 +47,19 @@ Exception/interrupt context helpers. #else -/******************************************************************************* -* -* _IsInIsr - find out if running in an ISR context -* -* The current executing vector is found in the IPSR register. We consider the -* IRQs (exception 16 and up), and the PendSV and SYSTICK exceptions, to be -* interrupts. Taking a fault within an exception is also considered in -* interrupt context. -* -* RETURNS: 1 if in ISR, 0 if not. -* -* \NOMANUAL -*/ +/** + * + * _IsInIsr - find out if running in an ISR context + * + * The current executing vector is found in the IPSR register. We consider the + * IRQs (exception 16 and up), and the PendSV and SYSTICK exceptions, to be + * interrupts. Taking a fault within an exception is also considered in + * interrupt context. + * + * RETURNS: 1 if in ISR, 0 if not. + * + * \NOMANUAL + */ static ALWAYS_INLINE int _IsInIsr(void) { uint32_t vector = _IpsrGet(); @@ -68,18 +68,18 @@ static ALWAYS_INLINE int _IsInIsr(void) return (vector > 13) || (vector && _ScbIsNestedExc()); } -/******************************************************************************* -* _ExcSetup - setup system exceptions -* -* Set exception priorities to conform with the BASEPRI locking mechanism. 
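The _IsInIsr() logic above reduces to reading IPSR and treating PendSV (exception 14), SysTick (15) and all external interrupts (16 and up) as interrupt context. A simplified C rendering of that check, using the standard MRS access to IPSR and leaving out the nested-fault case handled by _ScbIsNestedExc():

#include <stdint.h>

/* IPSR is 0 in thread mode, 1..15 for system exceptions, 16+ for IRQs. */
static inline uint32_t ipsr_get_model(void)
{
	uint32_t ipsr;

	__asm__ volatile ("mrs %0, ipsr" : "=r"(ipsr));
	return ipsr;
}

/* Simplified check: anything above exception 13 counts as interrupt context. */
static inline int in_isr_model(void)
{
	return ipsr_get_model() > 13;
}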
-* Set PendSV priority to lowest possible. -* -* Enable fault exceptions. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * _ExcSetup - setup system exceptions + * + * Set exception priorities to conform with the BASEPRI locking mechanism. + * Set PendSV priority to lowest possible. + * + * Enable fault exceptions. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void _ExcSetup(void) { diff --git a/arch/arm/include/CortexM/stack.h b/arch/arm/include/CortexM/stack.h index 261da4a9c9f..9984215defb 100644 --- a/arch/arm/include/CortexM/stack.h +++ b/arch/arm/include/CortexM/stack.h @@ -33,7 +33,7 @@ /* DESCRIPTION Stack helper functions. -*/ + */ #ifndef _ARM_CORTEXM_STACK__H_ #define _ARM_CORTEXM_STACK__H_ @@ -68,17 +68,17 @@ Stack helper functions. extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE]; -/******************************************************************************* -* -* _InterruptStackSetup - setup interrupt stack -* -* On Cortex-M, the interrupt stack is registered in the MSP (main stack -* pointer) register, and switched to automatically when taking an exception. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _InterruptStackSetup - setup interrupt stack + * + * On Cortex-M, the interrupt stack is registered in the MSP (main stack + * pointer) register, and switched to automatically when taking an exception. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void _InterruptStackSetup(void) { diff --git a/arch/arm/include/nano_private.h b/arch/arm/include/nano_private.h index 3345259a21e..775a99037a1 100644 --- a/arch/arm/include/nano_private.h +++ b/arch/arm/include/nano_private.h @@ -39,7 +39,7 @@ This file is also included by assembly language files which must #define _ASMLANGUAGE before including this header file. Note that nanokernel assembly source files obtains structure offset values via "absolute symbols" in the offsets.o module. -*/ + */ #ifndef _NANO_PRIVATE_H #define _NANO_PRIVATE_H @@ -184,18 +184,18 @@ static ALWAYS_INLINE void nanoArchInit(void) _CpuIdleInit(); } -/******************************************************************************* -* -* fiberRtnValueSet - set the return value for the specified fiber (inline) -* -* The register used to store the return value from a function call invocation -* to . It is assumed that the specified is pending, and thus -* the fiber's context is stored in its tCCS structure. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * fiberRtnValueSet - set the return value for the specified fiber (inline) + * + * The register used to store the return value from a function call invocation + * to . It is assumed that the specified is pending, and thus + * the fiber's context is stored in its tCCS structure. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void fiberRtnValueSet( tCCS *fiber, /* pointer to fiber */ diff --git a/arch/arm/include/start_task_arch.h b/arch/arm/include/start_task_arch.h index 1958a09f430..c8db4f27bcb 100644 --- a/arch/arm/include/start_task_arch.h +++ b/arch/arm/include/start_task_arch.h @@ -35,7 +35,7 @@ DESCRIPTION ARM-specific parts of start_task(). Currently empty, only here for abstraction. 
-*/ + */ #ifndef _START_TASK_ARCH__H_ #define _START_TASK_ARCH__H_ diff --git a/arch/arm/ti_lm3s6965/board.h b/arch/arm/ti_lm3s6965/board.h index b3f198f2471..d5e23183950 100644 --- a/arch/arm/ti_lm3s6965/board.h +++ b/arch/arm/ti_lm3s6965/board.h @@ -34,7 +34,7 @@ DESCRIPTION This header file is used to specify and describe board-level aspects for the 'ti_lm3s6965' BSP. -*/ + */ #ifndef _BOARD__H_ #define _BOARD__H_ diff --git a/arch/arm/ti_lm3s6965/irq_vector_table.c b/arch/arm/ti_lm3s6965/irq_vector_table.c index 96c37bf26fe..f2da54bb0bd 100644 --- a/arch/arm/ti_lm3s6965/irq_vector_table.c +++ b/arch/arm/ti_lm3s6965/irq_vector_table.c @@ -40,7 +40,7 @@ a) When software-managed ISRs (SW_ISR_TABLE) is enabled, and in that case it b) When the BSP is written so that device ISRs are installed directly in the vector table, they are enumerated here. -*/ + */ #include #include diff --git a/arch/arm/ti_lm3s6965/nmi_on_reset.S b/arch/arm/ti_lm3s6965/nmi_on_reset.S index ff531dca95e..de470b0c153 100644 --- a/arch/arm/ti_lm3s6965/nmi_on_reset.S +++ b/arch/arm/ti_lm3s6965/nmi_on_reset.S @@ -37,7 +37,7 @@ is to hard hang, sleeping. This might be preferable than rebooting to help debugging, or because rebooting might trigger the exact same problem over and over. -*/ + */ #define _ASMLANGUAGE diff --git a/arch/arm/ti_lm3s6965/scp.c b/arch/arm/ti_lm3s6965/scp.c index f9675665bc4..3d9fbb7a734 100644 --- a/arch/arm/ti_lm3s6965/scp.c +++ b/arch/arm/ti_lm3s6965/scp.c @@ -37,7 +37,7 @@ Library for controlling target-specific devices present in the 0x400fe000 peripherals memory region. Currently, only enabling the main OSC with default value is implemented. -*/ + */ #include #include @@ -49,12 +49,12 @@ Currently, only enabling the main OSC with default value is implemented. volatile struct __scp __scp_section __scp; -/******************************************************************************* -* -* _ScpMainOscEnable - enable main oscillator with default frequency of 6MHz -* -* RETURNS: N/A -*/ +/** + * + * _ScpMainOscEnable - enable main oscillator with default frequency of 6MHz + * + * RETURNS: N/A + */ void _ScpMainOscEnable(void) { union __rcc reg; diff --git a/arch/arm/ti_lm3s6965/scp.h b/arch/arm/ti_lm3s6965/scp.h index e1be6b9ccfc..b10e425d020 100644 --- a/arch/arm/ti_lm3s6965/scp.h +++ b/arch/arm/ti_lm3s6965/scp.h @@ -46,7 +46,7 @@ These modules are not defined: The registers and bit field names are taken from the 'Stellaris LM3S6965 Microcontroller DATA SHEET (DS-LM3S6965-12746.2515) revision H' document, section 5.4/5.5, pp .184-200. -*/ + */ #ifndef _SCP_H_ #define _SCP_H_ diff --git a/arch/arm/ti_lm3s6965/system.c b/arch/arm/ti_lm3s6965/system.c index 9b7fdcb1737..a0b2ae78562 100644 --- a/arch/arm/ti_lm3s6965/system.c +++ b/arch/arm/ti_lm3s6965/system.c @@ -34,7 +34,7 @@ DESCRIPTION This module provides routines to initialize and support board-level hardware for the ti_lm3s6965 BSP. 
-*/ + */ #include #include @@ -59,13 +59,13 @@ extern void _NmiInit(void); #if defined(DO_CONSOLE_INIT) -/******************************************************************************* -* -* uart_generic_info_init - initialize generic information for one UART -* -* RETURNS: N/A -* -*/ +/** + * + * uart_generic_info_init - initialize generic information for one UART + * + * RETURNS: N/A + * + */ inline void uart_generic_info_init(struct uart_init_info *pInfo) { @@ -79,15 +79,15 @@ inline void uart_generic_info_init(struct uart_init_info *pInfo) #if defined(DO_CONSOLE_INIT) -/******************************************************************************* -* -* consoleInit - initialize target-only console -* -* Only used for debugging. -* -* RETURNS: N/A -* -*/ +/** + * + * consoleInit - initialize target-only console + * + * Only used for debugging. + * + * RETURNS: N/A + * + */ #include @@ -132,16 +132,16 @@ static void bluetooth_init(void) } while ((0)) #endif /* CONFIG_BLUETOOTH */ -/******************************************************************************* -* -* _InitHardware - perform basic hardware initialization -* -* Initialize the interrupt controller device drivers and the -* integrated 16550-compatible UART device driver. -* Also initialize the timer device driver, if required. -* -* RETURNS: N/A -*/ +/** + * + * _InitHardware - perform basic hardware initialization + * + * Initialize the interrupt controller device drivers and the + * integrated 16550-compatible UART device driver. + * Also initialize the timer device driver, if required. + * + * RETURNS: N/A + */ void _InitHardware(void) { diff --git a/arch/x86/cache.c b/arch/x86/cache.c index 200d15b7b5f..16a4e080926 100644 --- a/arch/x86/cache.c +++ b/arch/x86/cache.c @@ -32,7 +32,7 @@ /* DESCRIPTION This module contains functions for manipulation caches. -*/ + */ #include #include @@ -44,19 +44,19 @@ This module contains functions for manipulation caches. #error Cannot use this implementation with a cache line size of 0 #endif -/******************************************************************************* -* -* _SysCacheFlush - flush a page to main memory -* -* No alignment is required for either or , but since -* _SysCacheFlush() iterates on the cache lines, a cache line alignment for both -* is optimal. -* -* The cache line size is specified via the CONFIG_CACHE_LINE_SIZE kconfig -* option. -* -* RETURNS: N/A -*/ +/** + * + * _SysCacheFlush - flush a page to main memory + * + * No alignment is required for either or , but since + * _SysCacheFlush() iterates on the cache lines, a cache line alignment for both + * is optimal. + * + * The cache line size is specified via the CONFIG_CACHE_LINE_SIZE kconfig + * option. + * + * RETURNS: N/A + */ void _SysCacheFlush(VIRT_ADDR virt, size_t size) { diff --git a/arch/x86/cache_s.S b/arch/x86/cache_s.S index 22f864173c1..373c3cb8142 100644 --- a/arch/x86/cache_s.S +++ b/arch/x86/cache_s.S @@ -32,7 +32,7 @@ /* DESCRIPTION This module contains functions for manipulating caches. -*/ + */ #ifndef CONFIG_CLFLUSH_INSTRUCTION_SUPPORTED @@ -42,20 +42,20 @@ This module contains functions for manipulating caches. /* externs (internal APIs) */ GTEXT(_SysCacheFlush) -/******************************************************************************* -* -* _SysCacheFlush - flush a page to main memory -* -* This implementation flushes the whole cache. -* -* C signature: -* -* void _SysCacheFlush (VIRT_ADDR virt, size_t size) -* -* Both parameters are ignored in this implementation. 
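The C variant in cache.c is documented to iterate over the affected cache lines (the assembly fallback here simply uses wbinvd when the clflush instruction is unavailable). The standalone sketch below shows that line-by-line loop using the SSE2 _mm_clflush()/_mm_mfence() intrinsics; the fixed 64-byte line size and the use of those intrinsics are assumptions for the example, whereas the kernel takes its line size from CONFIG_CACHE_LINE_SIZE.

#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>                   /* _mm_clflush(), _mm_mfence() */

#define CACHE_LINE_SIZE_MODEL 64u        /* illustrative; Kconfig-driven in the kernel */

/* Flush every cache line overlapping [addr, addr + size) to main memory. */
static void cache_flush_model(const void *addr, size_t size)
{
	uintptr_t line = (uintptr_t)addr & ~(uintptr_t)(CACHE_LINE_SIZE_MODEL - 1);
	uintptr_t end = (uintptr_t)addr + size;

	for (; line < end; line += CACHE_LINE_SIZE_MODEL) {
		_mm_clflush((const void *)line);
	}
	_mm_mfence();                    /* order the flushes against later accesses */
}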
-* -* RETURNS: N/A -*/ +/** + * + * _SysCacheFlush - flush a page to main memory + * + * This implementation flushes the whole cache. + * + * C signature: + * + * void _SysCacheFlush (VIRT_ADDR virt, size_t size) + * + * Both parameters are ignored in this implementation. + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _SysCacheFlush) wbinvd diff --git a/arch/x86/core/atomic.S b/arch/x86/core/atomic.S index 7bd91acf1c8..ecee894ba6f 100644 --- a/arch/x86/core/atomic.S +++ b/arch/x86/core/atomic.S @@ -41,7 +41,7 @@ service routines, and to operations performed by peer processors. INTERNAL These operators are currently unavailable to user space applications, as there is no requirement for this capability. -*/ + */ /* includes */ @@ -66,33 +66,33 @@ as there is no requirement for this capability. GTEXT(atomic_and) GTEXT(atomic_nand) -/******************************************************************************* -* -* atomic_cas - atomic compare-and-set primitive -* -* This routine provides the compare-and-set operator. If the original value at -* equals , then is stored at and the -* function returns 1. -* -* If the original value at does not equal , then the store -* is not done and the function returns 0. -* -* The reading of the original value at , the comparison, -* and the write of the new value (if it occurs) all happen atomically with -* respect to both interrupts and accesses of other processors to . -* -* RETURNS: Returns 1 if is written, 0 otherwise. -* -* int atomic_cas -* ( -* atomic_t * target, /@ address to be tested @/ -* atomic_val_t oldValue, /@ value to compare against @/ -* atomic_val_t newValue /@ value to compare against @/ -* ) -* -* INTERNAL -* The 'cmpxchg' instruction is NOT supported on processor prior to the 80486 -*/ +/** + * + * atomic_cas - atomic compare-and-set primitive + * + * This routine provides the compare-and-set operator. If the original value at + * equals , then is stored at and the + * function returns 1. + * + * If the original value at does not equal , then the store + * is not done and the function returns 0. + * + * The reading of the original value at , the comparison, + * and the write of the new value (if it occurs) all happen atomically with + * respect to both interrupts and accesses of other processors to . + * + * RETURNS: Returns 1 if is written, 0 otherwise. + * + * int atomic_cas + * ( + * atomic_t * target, /@ address to be tested @/ + * atomic_val_t oldValue, /@ value to compare against @/ + * atomic_val_t newValue /@ value to compare against @/ + * ) + * + * INTERNAL + * The 'cmpxchg' instruction is NOT supported on processor prior to the 80486 + */ SECTION_FUNC(TEXT, atomic_cas) @@ -113,25 +113,25 @@ BRANCH_LABEL(atomic_cas1) ret -/******************************************************************************* -* -* atomic_add - atomic add primitive -* -* This routine provides the atomic addition operator. The is -* atomically added to the value at , placing the result at , -* and the old value from is returned. -* -* RETURNS: The previous value from -* -* atomic_val_t atomic_add -* ( -* atomic_t * target, /@ memory location to add to @/ -* atomic_val_t value /@ value to add @/ -* ) -* -* INTERNAL -* The 'xadd' instruction is NOT supported on processor prior to the 80486 -*/ +/** + * + * atomic_add - atomic add primitive + * + * This routine provides the atomic addition operator. The is + * atomically added to the value at , placing the result at , + * and the old value from is returned. 
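The compare-and-set contract spelled out for atomic_cas() above (write the new value only if the current value still equals the expected one, return 1 on success and 0 otherwise, all as one atomic step) is exactly what C11's atomic_compare_exchange_strong() provides. The sketch below is a portable illustration of that contract, not the kernel's cmpxchg-based implementation, and the _model names are invented.

#include <stdatomic.h>

/* Return 1 if *target equalled old_value and new_value was stored, else 0. */
static int atomic_cas_model(atomic_int *target, int old_value, int new_value)
{
	/* on failure C11 writes the observed value back into old_value; the
	 * interface documented above does not report it, so it is discarded */
	return atomic_compare_exchange_strong(target, &old_value, new_value) ? 1 : 0;
}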
+ * + * RETURNS: The previous value from + * + * atomic_val_t atomic_add + * ( + * atomic_t * target, /@ memory location to add to @/ + * atomic_val_t value /@ value to add @/ + * ) + * + * INTERNAL + * The 'xadd' instruction is NOT supported on processor prior to the 80486 + */ SECTION_FUNC(TEXT, atomic_add) @@ -145,25 +145,25 @@ SECTION_FUNC(TEXT, atomic_add) ret -/******************************************************************************* -* -* atomic_sub - atomic subtraction primitive -* -* This routine provides the atomic subtraction operator. The is -* atomically subtracted from the value at , placing the result at -* , and the old value from is returned. -* -* RETURNS: The previous value from -* -* atomic_val_t atomic_sub -* ( -* atomic_t * target, /@ memory location to subtract from @/ -* atomic_val_t value /@ value to subtract @/ -* ) -* -* INTERNAL -* The 'xadd' instruction is NOT supported on processor prior to the 80486 -*/ +/** + * + * atomic_sub - atomic subtraction primitive + * + * This routine provides the atomic subtraction operator. The is + * atomically subtracted from the value at , placing the result at + * , and the old value from is returned. + * + * RETURNS: The previous value from + * + * atomic_val_t atomic_sub + * ( + * atomic_t * target, /@ memory location to subtract from @/ + * atomic_val_t value /@ value to subtract @/ + * ) + * + * INTERNAL + * The 'xadd' instruction is NOT supported on processor prior to the 80486 + */ SECTION_FUNC(TEXT, atomic_sub) @@ -178,23 +178,23 @@ SECTION_FUNC(TEXT, atomic_sub) ret -/******************************************************************************* -* -* atomic_inc - atomic increment primitive -* -* This routine provides the atomic increment operator. The value at -* is atomically incremented by 1, and the old value from is returned. -* -* RETURNS: The value from before the increment -* -* atomic_val_t atomic_inc -* ( -* atomic_t *target /@ memory location to increment @/ -* ) -* -* INTERNAL -* The 'xadd' instruction is NOT supported on processor prior to the 80486 -*/ +/** + * + * atomic_inc - atomic increment primitive + * + * This routine provides the atomic increment operator. The value at + * is atomically incremented by 1, and the old value from is returned. + * + * RETURNS: The value from before the increment + * + * atomic_val_t atomic_inc + * ( + * atomic_t *target /@ memory location to increment @/ + * ) + * + * INTERNAL + * The 'xadd' instruction is NOT supported on processor prior to the 80486 + */ SECTION_FUNC(TEXT, atomic_inc) @@ -210,23 +210,23 @@ SECTION_FUNC(TEXT, atomic_inc) ret -/******************************************************************************* -* -* atomic_dec - atomic decrement primitive -* -* This routine provides the atomic decrement operator. The value at -* is atomically decremented by 1, and the old value from is returned. -* -* RETURNS: The value from prior to the decrement -* -* atomic_val_t atomic_dec -* ( -* atomic_t *target /@ memory location to decrement @/ -* ) -* -* INTERNAL -* The 'xadd' instruction is NOT supported on processor prior to the 80486 -*/ +/** + * + * atomic_dec - atomic decrement primitive + * + * This routine provides the atomic decrement operator. The value at + * is atomically decremented by 1, and the old value from is returned. 
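atomic_add(), atomic_sub(), atomic_inc() and atomic_dec() are all documented to return the value the target held before the operation, which is the fetch-and-modify behaviour of C11's atomic_fetch_add()/atomic_fetch_sub(). A host-side model of those semantics (again, not the xadd-based assembly above):

#include <stdatomic.h>

static int atomic_add_model(atomic_int *target, int value)
{
	return atomic_fetch_add(target, value);  /* previous value, as documented */
}

static int atomic_inc_model(atomic_int *target)
{
	return atomic_fetch_add(target, 1);
}

static int atomic_dec_model(atomic_int *target)
{
	return atomic_fetch_sub(target, 1);
}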
+ * + * RETURNS: The value from prior to the decrement + * + * atomic_val_t atomic_dec + * ( + * atomic_t *target /@ memory location to decrement @/ + * ) + * + * INTERNAL + * The 'xadd' instruction is NOT supported on processor prior to the 80486 + */ SECTION_FUNC(TEXT, atomic_dec) @@ -240,22 +240,22 @@ SECTION_FUNC(TEXT, atomic_dec) ret -/******************************************************************************* -* -* atomic_get - atomic get primitive -* -* This routine provides the atomic get primitive to atomically read -* a value from . It simply does an ordinary load. Note that -* is expected to be aligned to a 4-byte boundary. -* -* RETURNS: The value read from -* -* atomic_t atomic_get -* ( -* atomic_t *target /@ memory location to read from @/ -* ) -* -*/ +/** + * + * atomic_get - atomic get primitive + * + * This routine provides the atomic get primitive to atomically read + * a value from . It simply does an ordinary load. Note that + * is expected to be aligned to a 4-byte boundary. + * + * RETURNS: The value read from + * + * atomic_t atomic_get + * ( + * atomic_t *target /@ memory location to read from @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_get) @@ -264,25 +264,25 @@ SECTION_FUNC(TEXT, atomic_get) ret -/******************************************************************************* -* -* atomic_set - atomic get-and-set primitive -* -* This routine provides the atomic set operator. The is atomically -* written at and the previous value at is returned. -* -* RETURNS: The previous value from -* -* atomic_val_t atomic_set -* ( -* atomic_t *target, /@ memory location to write to @/ -* atomic_val_t value /@ value to set @/ -* ) -* -* INTERNAL -* The XCHG instruction is executed on the specified address to -* swap in value. The value swapped out is returned by this function. -*/ +/** + * + * atomic_set - atomic get-and-set primitive + * + * This routine provides the atomic set operator. The is atomically + * written at and the previous value at is returned. + * + * RETURNS: The previous value from + * + * atomic_val_t atomic_set + * ( + * atomic_t *target, /@ memory location to write to @/ + * atomic_val_t value /@ value to set @/ + * ) + * + * INTERNAL + * The XCHG instruction is executed on the specified address to + * swap in value. The value swapped out is returned by this function. + */ SECTION_FUNC(TEXT, atomic_set) @@ -304,22 +304,22 @@ SECTION_FUNC(TEXT, atomic_set) ret -/******************************************************************************* -* -* atomic_clear - atomic clear primitive -* -* This routine provides the atomic clear operator. The value of 0 is atomically -* written at and the previous value at is returned. (Hence, -* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).) -* -* RETURNS: The previous value from -* -* atomic_val_t atomic_clear -* ( -* atomic_t *target /@ memory location to write to @/ -* ) -* -*/ +/** + * + * atomic_clear - atomic clear primitive + * + * This routine provides the atomic clear operator. The value of 0 is atomically + * written at and the previous value at is returned. (Hence, + * atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).) 
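atomic_set() is a get-and-set and atomic_clear() is documented as equivalent to atomic_set(target, 0); both map onto C11's atomic_exchange(). A minimal model of the two, with invented _model names:

#include <stdatomic.h>

static int atomic_set_model(atomic_int *target, int value)
{
	return atomic_exchange(target, value);   /* returns the previous value */
}

static int atomic_clear_model(atomic_int *target)
{
	return atomic_set_model(target, 0);      /* mirrors the documented equivalence */
}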
+ * + * RETURNS: The previous value from + * + * atomic_val_t atomic_clear + * ( + * atomic_t *target /@ memory location to write to @/ + * ) + * + */ SECTION_FUNC(TEXT, atomic_clear) @@ -341,25 +341,25 @@ SECTION_FUNC(TEXT, atomic_clear) ret -/******************************************************************************* -* -* atomic_or - atomic bitwise inclusive OR primitive -* -* This routine provides the atomic bitwise inclusive OR operator. The -* is atomically bitwise OR'ed with the value at , placing the result -* at , and the previous value at is returned. -* -* RETURNS: The previous value from -* -* atomic_val_t atomic_or -* ( -* atomic_t *target, /@ memory location to be modified @/ -* atomic_val_t value /@ value to OR @/ -* ) -* -* INTERNAL -* The 'cmpxchg' instruction is NOT supported on processor prior to the 80486 -*/ +/** + * + * atomic_or - atomic bitwise inclusive OR primitive + * + * This routine provides the atomic bitwise inclusive OR operator. The + * is atomically bitwise OR'ed with the value at , placing the result + * at , and the previous value at is returned. + * + * RETURNS: The previous value from + * + * atomic_val_t atomic_or + * ( + * atomic_t *target, /@ memory location to be modified @/ + * atomic_val_t value /@ value to OR @/ + * ) + * + * INTERNAL + * The 'cmpxchg' instruction is NOT supported on processor prior to the 80486 + */ SECTION_FUNC(TEXT, atomic_or) @@ -379,25 +379,25 @@ BRANCH_LABEL(atomic_or_retry) ret -/******************************************************************************* -* -* atomic_xor - atomic bitwise exclusive OR (XOR) primitive -* -* This routine provides the atomic bitwise exclusive OR operator. The -* is atomically bitwise XOR'ed with the value at , placing the result -* at , and the previous value at is returned. -* -* RETURNS: The previous value from -* -* atomic_val_t atomic_xor -* ( -* atomic_t *target, /@ memory location to be modified @/ -* atomic_t value /@ value to XOR @/ -* ) -* -* INTERNAL -* The 'cmpxchg' instruction is NOT supported on processor prior to the 80486 -*/ +/** + * + * atomic_xor - atomic bitwise exclusive OR (XOR) primitive + * + * This routine provides the atomic bitwise exclusive OR operator. The + * is atomically bitwise XOR'ed with the value at , placing the result + * at , and the previous value at is returned. + * + * RETURNS: The previous value from + * + * atomic_val_t atomic_xor + * ( + * atomic_t *target, /@ memory location to be modified @/ + * atomic_t value /@ value to XOR @/ + * ) + * + * INTERNAL + * The 'cmpxchg' instruction is NOT supported on processor prior to the 80486 + */ SECTION_FUNC(TEXT, atomic_xor) @@ -417,25 +417,25 @@ BRANCH_LABEL(atomic_xor_retry) ret -/******************************************************************************* -* -* atomic_and - atomic bitwise AND primitive -* -* This routine provides the atomic bitwise AND operator. The is -* atomically bitwise AND'ed with the value at , placing the result -* at , and the previous value at is returned. -* -* RETURNS: The previous value from -* -* atomic_val_t atomic_and -* ( -* atomic_t *target, /@ memory location to be modified @/ -* atomic_val_t value /@ value to AND @/ -* ) -* -* INTERNAL -* The 'cmpxchg' instruction is NOT supported on processor prior to the 80486 -*/ +/** + * + * atomic_and - atomic bitwise AND primitive + * + * This routine provides the atomic bitwise AND operator. The is + * atomically bitwise AND'ed with the value at , placing the result + * at , and the previous value at is returned. 
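The bitwise operators in this file are documented as cmpxchg-based: read the current value, compute the new one, and retry the compare-and-swap until it sticks (the atomic_or_retry and atomic_xor_retry labels above are exactly that loop). The same pattern in portable C11, shown for OR; C11 also offers atomic_fetch_or() directly, so the explicit loop is purely illustrative.

#include <stdatomic.h>

/* OR value into *target and return the previous contents, retrying until the
 * compare-and-swap succeeds. */
static int atomic_or_model(atomic_int *target, int value)
{
	int old = atomic_load(target);

	while (!atomic_compare_exchange_weak(target, &old, old | value)) {
		/* old now holds the freshly observed value; recompute and retry */
	}
	return old;
}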
+ * + * RETURNS: The previous value from + * + * atomic_val_t atomic_and + * ( + * atomic_t *target, /@ memory location to be modified @/ + * atomic_val_t value /@ value to AND @/ + * ) + * + * INTERNAL + * The 'cmpxchg' instruction is NOT supported on processor prior to the 80486 + */ SECTION_FUNC(TEXT, atomic_and) @@ -455,25 +455,25 @@ BRANCH_LABEL(atomic_and_retry) ret -/******************************************************************************* -* -* atomic_nand - atomic bitwise NAND primitive -* -* This routine provides the atomic bitwise NAND operator. The is -* atomically bitwise NAND'ed with the value at , placing the result -* at , and the previous value at is returned. -* -* RETURNS: The previous value from -* -* atomic_val_t atomic_nand -* ( -* atomic_t * target, /@ memory location to be modified @/ -* atomic_val_t value /@ value to NAND @/ -* ) -* -* INTERNAL -* The 'cmpxchg' instruction is NOT supported on processor prior to the 80486 -*/ +/** + * + * atomic_nand - atomic bitwise NAND primitive + * + * This routine provides the atomic bitwise NAND operator. The is + * atomically bitwise NAND'ed with the value at , placing the result + * at , and the previous value at is returned. + * + * RETURNS: The previous value from + * + * atomic_val_t atomic_nand + * ( + * atomic_t * target, /@ memory location to be modified @/ + * atomic_val_t value /@ value to NAND @/ + * ) + * + * INTERNAL + * The 'cmpxchg' instruction is NOT supported on processor prior to the 80486 + */ SECTION_FUNC(TEXT, atomic_nand) diff --git a/arch/x86/core/atomic_nolock.c b/arch/x86/core/atomic_nolock.c index 79d7b1fa188..2ac8b238271 100644 --- a/arch/x86/core/atomic_nolock.c +++ b/arch/x86/core/atomic_nolock.c @@ -43,30 +43,30 @@ operators that do utilize the LOCK prefix instruction. INTERNAL These operators are currently unavailable to user space applications as there is no requirement for this capability. -*/ + */ #if defined(CONFIG_LOCK_INSTRUCTION_UNSUPPORTED) #include #include -/******************************************************************************* -* -* atomic_cas - atomic compare-and-set primitive -* -* This routine provides the compare-and-set operator. If the original value at -* equals , then is stored at and the -* function returns 1. -* -* If the original value at does not equal , then the store -* is not done and the function returns 0. -* -* The reading of the original value at , the comparison, -* and the write of the new value (if it occurs) all happen atomically with -* respect to both interrupts and accesses of other processors to . -* -* RETURNS: Returns 1 if is written, 0 otherwise. -*/ +/** + * + * atomic_cas - atomic compare-and-set primitive + * + * This routine provides the compare-and-set operator. If the original value at + * equals , then is stored at and the + * function returns 1. + * + * If the original value at does not equal , then the store + * is not done and the function returns 0. + * + * The reading of the original value at , the comparison, + * and the write of the new value (if it occurs) all happen atomically with + * respect to both interrupts and accesses of other processors to . + * + * RETURNS: Returns 1 if is written, 0 otherwise. + */ int atomic_cas( atomic_t *target, /* address to be tested */ @@ -88,16 +88,16 @@ int atomic_cas( return 1; } -/******************************************************************************* -* -* atomic_add - atomic addition primitive -* -* This routine provides the atomic addition operator. 
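On processors where the LOCK prefix (and therefore cmpxchg/xadd-style atomics) cannot be used, a common way to keep these operators atomic with respect to interrupt service routines is to mask interrupts around a plain read-modify-write. The sketch below illustrates that general pattern only; the irq lock helpers are stand-ins defined here for the example, not the kernel's primitives, and masking interrupts covers a single processor only (it does not protect against accesses from other processors).

/* Stand-in interrupt-lock helpers: a real kernel would save the interrupt
 * state and disable interrupts here, then restore the saved state. */
static unsigned int irq_lock_model(void)
{
	return 0;
}

static void irq_unlock_model(unsigned int key)
{
	(void)key;
}

/* Atomic-with-respect-to-interrupts addition returning the previous value. */
static int atomic_add_nolock_model(int *target, int value)
{
	unsigned int key = irq_lock_model();
	int old = *target;

	*target = old + value;
	irq_unlock_model(key);

	return old;
}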
The is -* atomically added to the value at , placing the result at , -* and the old value from is returned. -* -* RETURNS: The previous value from -*/ +/** + * + * atomic_add - atomic addition primitive + * + * This routine provides the atomic addition operator. The is + * atomically added to the value at , placing the result at , + * and the old value from is returned. + * + * RETURNS: The previous value from + */ atomic_val_t atomic_add( atomic_t *target, /* memory location to add to */ @@ -114,16 +114,16 @@ atomic_val_t atomic_add( return ovalue; } -/******************************************************************************* -* -* atomic_sub - atomic subtraction primitive -* -* This routine provides the atomic subtraction operator. The is -* atomically subtracted from the value at , placing the result at -* , and the old value from is returned. -* -* RETURNS: The previous value from -*/ +/** + * + * atomic_sub - atomic subtraction primitive + * + * This routine provides the atomic subtraction operator. The is + * atomically subtracted from the value at , placing the result at + * , and the old value from is returned. + * + * RETURNS: The previous value from + */ atomic_val_t atomic_sub( atomic_t *target, /* memory location to subtract from */ @@ -140,15 +140,15 @@ atomic_val_t atomic_sub( return ovalue; } -/******************************************************************************* -* -* atomic_inc - atomic increment primitive -* -* This routine provides the atomic increment operator. The value at -* is atomically incremented by 1, and the old value from is returned. -* -* RETURNS: The value from before the increment -*/ +/** + * + * atomic_inc - atomic increment primitive + * + * This routine provides the atomic increment operator. The value at + * is atomically incremented by 1, and the old value from is returned. + * + * RETURNS: The value from before the increment + */ atomic_val_t atomic_inc( atomic_t *target /* memory location to increment */ @@ -164,15 +164,15 @@ atomic_val_t atomic_inc( return ovalue; } -/******************************************************************************* -* -* atomic_dec - atomic decrement primitive -* -* This routine provides the atomic decrement operator. The value at -* is atomically decremented by 1, and the old value from is returned. -* -* RETURNS: The value from prior to the decrement -*/ +/** + * + * atomic_dec - atomic decrement primitive + * + * This routine provides the atomic decrement operator. The value at + * is atomically decremented by 1, and the old value from is returned. + * + * RETURNS: The value from prior to the decrement + */ atomic_val_t atomic_dec( atomic_t *target /* memory location to decrement */ @@ -188,16 +188,16 @@ atomic_val_t atomic_dec( return ovalue; } -/******************************************************************************* -* -* atomic_get - atomic get primitive -* -* This routine provides the atomic get primitive to atomically read -* a value from . It simply does an ordinary load. Note that -* is expected to be aligned to a 4-byte boundary. -* -* RETURNS: The value read from -*/ +/** + * + * atomic_get - atomic get primitive + * + * This routine provides the atomic get primitive to atomically read + * a value from . It simply does an ordinary load. Note that + * is expected to be aligned to a 4-byte boundary. 
+ * + * RETURNS: The value read from + */ atomic_val_t atomic_get(const atomic_t *target /* memory location to read from */ ) @@ -205,15 +205,15 @@ atomic_val_t atomic_get(const atomic_t *target /* memory location to read from * return *target; } -/******************************************************************************* -* -* atomic_set - atomic get-and-set primitive -* -* This routine provides the atomic set operator. The is atomically -* written at and the previous value at is returned. -* -* RETURNS: The previous value from -*/ +/** + * + * atomic_set - atomic get-and-set primitive + * + * This routine provides the atomic set operator. The is atomically + * written at and the previous value at is returned. + * + * RETURNS: The previous value from + */ atomic_val_t atomic_set( atomic_t *target, /* memory location to write to */ @@ -230,16 +230,16 @@ atomic_val_t atomic_set( return ovalue; } -/******************************************************************************* -* -* atomic_clear - atomic clear primitive -* -* This routine provides the atomic clear operator. The value of 0 is atomically -* written at and the previous value at is returned. (Hence, -* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).) -* -* RETURNS: The previous value from -*/ +/** + * + * atomic_clear - atomic clear primitive + * + * This routine provides the atomic clear operator. The value of 0 is atomically + * written at and the previous value at is returned. (Hence, + * atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).) + * + * RETURNS: The previous value from + */ atomic_val_t atomic_clear( atomic_t *target /* memory location to write to */ @@ -255,16 +255,16 @@ atomic_val_t atomic_clear( return ovalue; } -/******************************************************************************* -* -* atomic_or - atomic bitwise inclusive OR primitive -* -* This routine provides the atomic bitwise inclusive OR operator. The -* is atomically bitwise OR'ed with the value at , placing the result -* at , and the previous value at is returned. -* -* RETURNS: The previous value from -*/ +/** + * + * atomic_or - atomic bitwise inclusive OR primitive + * + * This routine provides the atomic bitwise inclusive OR operator. The + * is atomically bitwise OR'ed with the value at , placing the result + * at , and the previous value at is returned. + * + * RETURNS: The previous value from + */ atomic_val_t atomic_or( atomic_t *target, /* memory location to be modified */ @@ -281,16 +281,16 @@ atomic_val_t atomic_or( return ovalue; } -/******************************************************************************* -* -* atomic_xor - atomic bitwise exclusive OR (XOR) primitive -* -* This routine provides the atomic bitwise exclusive OR operator. The -* is atomically bitwise XOR'ed with the value at , placing the result -* at , and the previous value at is returned. -* -* RETURNS: The previous value from -*/ +/** + * + * atomic_xor - atomic bitwise exclusive OR (XOR) primitive + * + * This routine provides the atomic bitwise exclusive OR operator. The + * is atomically bitwise XOR'ed with the value at , placing the result + * at , and the previous value at is returned. 
+ * + * RETURNS: The previous value from + */ atomic_val_t atomic_xor( atomic_t *target, /* memory location to be modified */ @@ -307,16 +307,16 @@ atomic_val_t atomic_xor( return ovalue; } -/******************************************************************************* -* -* atomic_and - atomic bitwise AND primitive -* -* This routine provides the atomic bitwise AND operator. The is -* atomically bitwise AND'ed with the value at , placing the result -* at , and the previous value at is returned. -* -* RETURNS: The previous value from -*/ +/** + * + * atomic_and - atomic bitwise AND primitive + * + * This routine provides the atomic bitwise AND operator. The is + * atomically bitwise AND'ed with the value at , placing the result + * at , and the previous value at is returned. + * + * RETURNS: The previous value from + */ atomic_val_t atomic_and( atomic_t *target, /* memory location to be modified */ @@ -333,16 +333,16 @@ atomic_val_t atomic_and( return ovalue; } -/******************************************************************************* -* -* atomic_nand - atomic bitwise NAND primitive -* -* This routine provides the atomic bitwise NAND operator. The is -* atomically bitwise NAND'ed with the value at , placing the result -* at , and the previous value at is returned. -* -* RETURNS: The previous value from -*/ +/** + * + * atomic_nand - atomic bitwise NAND primitive + * + * This routine provides the atomic bitwise NAND operator. The is + * atomically bitwise NAND'ed with the value at , placing the result + * at , and the previous value at is returned. + * + * RETURNS: The previous value from + */ atomic_val_t atomic_nand( atomic_t *target, /* memory location to be modified */ diff --git a/arch/x86/core/context.c b/arch/x86/core/context.c index dc3c92214e2..0787fcd23e0 100644 --- a/arch/x86/core/context.c +++ b/arch/x86/core/context.c @@ -34,7 +34,7 @@ DESCRIPTION This module provides core nanokernel fiber related primitives for the IA-32 processor architecture. -*/ + */ #ifdef CONFIG_MICROKERNEL #include @@ -57,20 +57,20 @@ tNANO _nanokernel = {0}; void _ContextEntryWrapper(_ContextEntry, _ContextArg, _ContextArg, _ContextArg); #endif /* CONFIG_GDB_INFO */ -/******************************************************************************* -* -* _NewContextInternal - initialize a new execution context -* -* This function is utilized to initialize all execution contexts (both fiber -* and task). The 'priority' parameter will be set to -1 for the creation of -* task context. -* -* This function is called by _NewContext() to initialize task contexts. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _NewContextInternal - initialize a new execution context + * + * This function is utilized to initialize all execution contexts (both fiber + * and task). The 'priority' parameter will be set to -1 for the creation of + * task context. + * + * This function is called by _NewContext() to initialize task contexts. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _NewContextInternal( char *pStackMem, /* pointer to context stack memory */ @@ -206,64 +206,64 @@ static void _NewContextInternal( } #ifdef CONFIG_GDB_INFO -/******************************************************************************* -* -* _ContextEntryWrapper - adjust stack before invoking _context_entry -* -* This function adjusts the initial stack frame created by _NewContext() -* such that the GDB stack frame unwinders recognize it as the outermost frame -* in the context's stack. 
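Looking back at the bitwise primitives earlier in this hunk: atomic_or() sets bits, atomic_and() with an inverted mask clears them, and atomic_nand() leaves ~(old & value) at the target; all three return the previous value. A small sketch with invented flag names, illustration only.

#define FLAG_READY  (1 << 0)
#define FLAG_ERROR  (1 << 1)

static atomic_t dev_flags = 0;

void flag_error(void)
{
	(void)atomic_or(&dev_flags, FLAG_ERROR);    /* set one bit */
}

void clear_error(void)
{
	(void)atomic_and(&dev_flags, ~FLAG_ERROR);  /* clear one bit, keep the rest */
}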
The function then jumps to _context_entry(). -* -* GDB normally stops unwinding a stack when it detects that it has -* reached a function called main(). Kernel tasks, however, do not have -* a main() function, and there does not appear to be a simple way of stopping -* the unwinding of the stack. -* -* Given the initial context created by _NewContext(), GDB expects to find a -* return address on the stack immediately above the context entry routine -* _context_entry, in the location occupied by the initial EFLAGS. -* GDB attempts to examine the memory at this return address, which typically -* results in an invalid access to page 0 of memory. -* -* This function overwrites the initial EFLAGS with zero. When GDB subsequently -* attempts to examine memory at address zero, the PeekPoke driver detects -* an invalid access to address zero and returns an error, which causes the -* GDB stack unwinder to stop somewhat gracefully. -* -* __________________ -* | param3 | <------ Top of the stack -* |__________________| -* | param2 | Stack Grows Down -* |__________________| | -* | param1 | V -* |__________________| -* | pEntry | -* |__________________| -* | initial EFLAGS | <---- ESP when invoked by _Swap() -* |__________________| (Zeroed by this routine) -* | entryRtn | <----- Context Entry Routine invoked by _Swap() -* |__________________| (This routine if GDB_INFO) -* | | \ -* |__________________| | -* | | | -* |__________________| | -* | | |---- Initial registers restored by _Swap() -* |__________________| | -* | | | -* |__________________| | -* | | / -* |__________________| -* -* -* The initial EFLAGS cannot be overwritten until after _Swap() has swapped in -* the new context for the first time. This routine is called by _Swap() the -* first time that the new context is swapped in, and it jumps to -* _context_entry after it has done its work. -* -* RETURNS: this routine does NOT return. -* -* \NOMANUAL -*/ +/** + * + * _ContextEntryWrapper - adjust stack before invoking _context_entry + * + * This function adjusts the initial stack frame created by _NewContext() + * such that the GDB stack frame unwinders recognize it as the outermost frame + * in the context's stack. The function then jumps to _context_entry(). + * + * GDB normally stops unwinding a stack when it detects that it has + * reached a function called main(). Kernel tasks, however, do not have + * a main() function, and there does not appear to be a simple way of stopping + * the unwinding of the stack. + * + * Given the initial context created by _NewContext(), GDB expects to find a + * return address on the stack immediately above the context entry routine + * _context_entry, in the location occupied by the initial EFLAGS. + * GDB attempts to examine the memory at this return address, which typically + * results in an invalid access to page 0 of memory. + * + * This function overwrites the initial EFLAGS with zero. When GDB subsequently + * attempts to examine memory at address zero, the PeekPoke driver detects + * an invalid access to address zero and returns an error, which causes the + * GDB stack unwinder to stop somewhat gracefully. 
+ * + * __________________ + * | param3 | <------ Top of the stack + * |__________________| + * | param2 | Stack Grows Down + * |__________________| | + * | param1 | V + * |__________________| + * | pEntry | + * |__________________| + * | initial EFLAGS | <---- ESP when invoked by _Swap() + * |__________________| (Zeroed by this routine) + * | entryRtn | <----- Context Entry Routine invoked by _Swap() + * |__________________| (This routine if GDB_INFO) + * | | \ + * |__________________| | + * | | | + * |__________________| | + * | | |---- Initial registers restored by _Swap() + * |__________________| | + * | | | + * |__________________| | + * | | / + * |__________________| + * + * + * The initial EFLAGS cannot be overwritten until after _Swap() has swapped in + * the new context for the first time. This routine is called by _Swap() the + * first time that the new context is swapped in, and it jumps to + * _context_entry after it has done its work. + * + * RETURNS: this routine does NOT return. + * + * \NOMANUAL + */ __asm__("\t.globl _context_entry\n" "\t.section .text\n" @@ -273,20 +273,20 @@ __asm__("\t.globl _context_entry\n" "\tjmp _context_entry\n"); #endif /* CONFIG_GDB_INFO */ -/******************************************************************************* -* -* _NewContext - create a new kernel execution context -* -* This function is utilized to create execution contexts for both fiber -* contexts and kernel task contexts. -* -* The "context control block" (CCS) is carved from the "end" of the specified -* context stack memory. -* -* RETURNS: opaque pointer to initialized CCS structure -* -* \NOMANUAL -*/ +/** + * + * _NewContext - create a new kernel execution context + * + * This function is utilized to create execution contexts for both fiber + * contexts and kernel task contexts. + * + * The "context control block" (CCS) is carved from the "end" of the specified + * context stack memory. + * + * RETURNS: opaque pointer to initialized CCS structure + * + * \NOMANUAL + */ void _NewContext( char *pStackMem, /* pointer to aligned stack memory */ diff --git a/arch/x86/core/cpuhalt.S b/arch/x86/core/cpuhalt.S index e642430ff85..deabf725d1e 100644 --- a/arch/x86/core/cpuhalt.S +++ b/arch/x86/core/cpuhalt.S @@ -47,7 +47,7 @@ supports the execution of the 'hlt' instruction from a guest (results in a VM exit), and more importantly, the Hypervisor will respect the single instruction delay slot after the 'sti' instruction as required by nano_cpu_atomic_idle(). -*/ + */ #define _ASMLANGUAGE @@ -64,21 +64,21 @@ by nano_cpu_atomic_idle(). #ifndef CONFIG_NO_ISRS -/******************************************************************************* -* -* nano_cpu_idle - power save idle routine for IA-32 -* -* This function will be called by the nanokernel idle loop or possibly within -* an implementation of _sys_power_save_idle in the microkernel when the -* '_sys_power_save_flag' variable is non-zero. The IA-32 'hlt' instruction -* will be issued causing a low-power consumption sleep mode. -* -* RETURNS: N/A -* -* C function prototype: -* -* void nano_cpu_idle (void); -*/ +/** + * + * nano_cpu_idle - power save idle routine for IA-32 + * + * This function will be called by the nanokernel idle loop or possibly within + * an implementation of _sys_power_save_idle in the microkernel when the + * '_sys_power_save_flag' variable is non-zero. The IA-32 'hlt' instruction + * will be issued causing a low-power consumption sleep mode. 
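The loop shape nano_cpu_idle() is written for, per the comment above; the real caller is the nanokernel idle loop or _sys_power_save_idle(), so this is illustration only.

void idle_loop_model(void)
{
	for (;;) {
		/* issues 'hlt'; control returns once an interrupt has been
		 * taken and serviced
		 */
		nano_cpu_idle();
	}
}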
+ * + * RETURNS: N/A + * + * C function prototype: + * + * void nano_cpu_idle (void); + */ SECTION_FUNC(TEXT, nano_cpu_idle) #ifdef CONFIG_INT_LATENCY_BENCHMARK @@ -94,31 +94,31 @@ SECTION_FUNC(TEXT, nano_cpu_idle) ret /* return after processing ISR */ -/******************************************************************************* -* -* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode -* -* This function is utilized by the nanokernel object "wait" APIs for task -* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(), -* and nano_task_fifo_get_wait(). -* -* INTERNAL -* The requirements for nano_cpu_atomic_idle() are as follows: -* 1) The enablement of interrupts and entering a low-power mode needs to be -* atomic, i.e. there should be no period of time where interrupts are -* enabled before the processor enters a low-power mode. See the comments -* in nano_task_lifo_get_wait(), for example, of the race condition that occurs -* if this requirement is not met. -* -* 2) After waking up from the low-power mode, the interrupt lockout state -* must be restored as indicated in the 'imask' input parameter. -* -* RETURNS: N/A -* -* C function prototype: -* -* void nano_cpu_atomic_idle (unsigned int imask); -*/ +/** + * + * nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode + * + * This function is utilized by the nanokernel object "wait" APIs for task + * contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(), + * and nano_task_fifo_get_wait(). + * + * INTERNAL + * The requirements for nano_cpu_atomic_idle() are as follows: + * 1) The enablement of interrupts and entering a low-power mode needs to be + * atomic, i.e. there should be no period of time where interrupts are + * enabled before the processor enters a low-power mode. See the comments + * in nano_task_lifo_get_wait(), for example, of the race condition that occurs + * if this requirement is not met. + * + * 2) After waking up from the low-power mode, the interrupt lockout state + * must be restored as indicated in the 'imask' input parameter. + * + * RETURNS: N/A + * + * C function prototype: + * + * void nano_cpu_atomic_idle (unsigned int imask); + */ SECTION_FUNC(TEXT, nano_cpu_atomic_idle) #ifdef CONFIG_INT_LATENCY_BENCHMARK diff --git a/arch/x86/core/excconnect.c b/arch/x86/core/excconnect.c index 7f72a27c894..4babb07934e 100644 --- a/arch/x86/core/excconnect.c +++ b/arch/x86/core/excconnect.c @@ -74,7 +74,7 @@ an error code is present on the stack or not. NOTE: Be sure to update the arch specific definition of the _EXC_STUB_SIZE macro to reflect the size of the full exception stub (as shown above). The _EXC_STUB_SIZE macro is defined in arch/x86/include/nano_private.h. -*/ + */ #include @@ -86,37 +86,37 @@ void _NanoCpuExcConnectAtDpl(unsigned int vector, NANO_EXC_STUB pExcStubMem, unsigned int dpl); -/******************************************************************************* -* -* nanoCpuExcConnect - connect a C routine to an exception -* -* This routine connects an exception handler coded in C to the specified -* interrupt vector. An exception is defined as a synchronous interrupt, i.e. -* an interrupt asserted as a direct result of program execution as opposed -* to a hardware device asserting an interrupt. -* -* When the exception specified by is asserted, the current context -* is saved on the current stack, i.e. 
a switch to some other stack is not -* performed, followed by executing which has the following signature: -* -* void (*routine) (NANO_ESF *pEsf) -* -* The argument points to memory that the system can use to -* synthesize the exception stub that calls . The memory need not be -* initialized, but must be persistent (i.e. it cannot be on the caller's stack). -* Declaring a global or static variable of type NANO_EXC_STUB will provide a -* suitable area of the proper size. -* -* The handler is connected via an interrupt-gate descriptor having a -* descriptor privilege level (DPL) equal to zero. -* -* RETURNS: N/A -* -* INTERNAL -* The function prototype for nanoCpuExcConnect() only exists in nano_private.h, -* in other words, it's still considered private since the definitions for -* the NANO_ESF structures have not been completed. -*/ +/** + * + * nanoCpuExcConnect - connect a C routine to an exception + * + * This routine connects an exception handler coded in C to the specified + * interrupt vector. An exception is defined as a synchronous interrupt, i.e. + * an interrupt asserted as a direct result of program execution as opposed + * to a hardware device asserting an interrupt. + * + * When the exception specified by is asserted, the current context + * is saved on the current stack, i.e. a switch to some other stack is not + * performed, followed by executing which has the following signature: + * + * void (*routine) (NANO_ESF *pEsf) + * + * The argument points to memory that the system can use to + * synthesize the exception stub that calls . The memory need not be + * initialized, but must be persistent (i.e. it cannot be on the caller's stack). + * Declaring a global or static variable of type NANO_EXC_STUB will provide a + * suitable area of the proper size. + * + * The handler is connected via an interrupt-gate descriptor having a + * descriptor privilege level (DPL) equal to zero. + * + * RETURNS: N/A + * + * INTERNAL + * The function prototype for nanoCpuExcConnect() only exists in nano_private.h, + * in other words, it's still considered private since the definitions for + * the NANO_ESF structures have not been completed. + */ void nanoCpuExcConnect(unsigned int vector, /* interrupt vector: 0 to 255 on IA-32 */ @@ -126,37 +126,37 @@ void nanoCpuExcConnect(unsigned int vector, /* interrupt vector: 0 to 255 on _NanoCpuExcConnectAtDpl(vector, routine, pExcStubMem, 0); } -/******************************************************************************* -* -* _NanoCpuExcConnectAtDpl - connect a C routine to an exception -* -* This routine connects an exception handler coded in C to the specified -* interrupt vector. An exception is defined as a synchronous interrupt, i.e. -* an interrupt asserted as a direct result of program execution as opposed -* to a hardware device asserting an interrupt. -* -* When the exception specified by is asserted, the current context -* is saved on the current stack, i.e. a switch to some other stack is not -* performed, followed by executing which has the following signature: -* -* void (*routine) (NANO_ESF *pEsf) -* -* The argument points to memory that the system can use to -* synthesize the exception stub that calls . The memory need not be -* initialized, but must be persistent (i.e. it cannot be on the caller's stack). -* Declaring a global or static variable of type NANO_EXC_STUB will provide a -* suitable area of the proper size. 
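A hypothetical connection of a C exception handler using nanoCpuExcConnect() as documented above. Vector 0 is the IA-32 divide-error exception; the handler and stub names are invented, and the stub is static so it meets the persistence requirement spelled out in the comment.

static NANO_EXC_STUB divide_error_stub;         /* persistent stub memory */

static void divide_error_handler(NANO_ESF *pEsf)
{
	(void)pEsf;
	/* application-specific fault handling would go here */
}

void connect_divide_error(void)
{
	nanoCpuExcConnect(0 /* #DE */, divide_error_handler, divide_error_stub);
}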
-* -* The handler is connected via an interrupt-gate descriptor having the supplied -* descriptor privilege level (DPL). -* -* RETURNS: N/A -* -* INTERNAL -* The function prototype for nanoCpuExcConnect() only exists in nano_private.h, -* in other words, it's still considered private since the definitions for -* the NANO_ESF structures have not been completed. -*/ +/** + * + * _NanoCpuExcConnectAtDpl - connect a C routine to an exception + * + * This routine connects an exception handler coded in C to the specified + * interrupt vector. An exception is defined as a synchronous interrupt, i.e. + * an interrupt asserted as a direct result of program execution as opposed + * to a hardware device asserting an interrupt. + * + * When the exception specified by is asserted, the current context + * is saved on the current stack, i.e. a switch to some other stack is not + * performed, followed by executing which has the following signature: + * + * void (*routine) (NANO_ESF *pEsf) + * + * The argument points to memory that the system can use to + * synthesize the exception stub that calls . The memory need not be + * initialized, but must be persistent (i.e. it cannot be on the caller's stack). + * Declaring a global or static variable of type NANO_EXC_STUB will provide a + * suitable area of the proper size. + * + * The handler is connected via an interrupt-gate descriptor having the supplied + * descriptor privilege level (DPL). + * + * RETURNS: N/A + * + * INTERNAL + * The function prototype for nanoCpuExcConnect() only exists in nano_private.h, + * in other words, it's still considered private since the definitions for + * the NANO_ESF structures have not been completed. + */ void _NanoCpuExcConnectAtDpl( unsigned int vector, /* interrupt vector: 0 to 255 on IA-32 */ diff --git a/arch/x86/core/excstub.S b/arch/x86/core/excstub.S index 7f91deca7bb..8a7aaf5830f 100644 --- a/arch/x86/core/excstub.S +++ b/arch/x86/core/excstub.S @@ -36,7 +36,7 @@ This module implements assembly routines to manage exceptions (synchronous interrupts) on the Intel IA-32 architecture. More specifically, exceptions are implemented in this module. The stubs are invoked when entering and exiting a C exception handler. -*/ + */ #define _ASMLANGUAGE @@ -57,31 +57,31 @@ and exiting a C exception handler. -/******************************************************************************* -* -* _ExcEnt - inform the kernel of an exception -* -* This function is called from the exception stub created by nanoCpuExcConnect() -* to inform the kernel of an exception. This routine currently does -* _not_ increment a context/interrupt specific exception count. Also, -* execution of the exception handler occurs on the current stack, i.e. -* _ExcEnt() does not switch to another stack. The volatile integer -* registers are saved on the stack, and control is returned back to the -* exception stub. -* -* WARNINGS -* -* Host-based tools and the target-based GDB agent depend on the stack frame -* created by this routine to determine the locations of volatile registers. -* These tools must be updated to reflect any changes to the stack frame. -* -* RETURNS: N/A -* -* C function prototype: -* -* void _ExcEnt (void); -* -*/ +/** + * + * _ExcEnt - inform the kernel of an exception + * + * This function is called from the exception stub created by nanoCpuExcConnect() + * to inform the kernel of an exception. This routine currently does + * _not_ increment a context/interrupt specific exception count. 
Also, + * execution of the exception handler occurs on the current stack, i.e. + * _ExcEnt() does not switch to another stack. The volatile integer + * registers are saved on the stack, and control is returned back to the + * exception stub. + * + * WARNINGS + * + * Host-based tools and the target-based GDB agent depend on the stack frame + * created by this routine to determine the locations of volatile registers. + * These tools must be updated to reflect any changes to the stack frame. + * + * RETURNS: N/A + * + * C function prototype: + * + * void _ExcEnt (void); + * + */ SECTION_FUNC(TEXT, _ExcEnt) @@ -215,22 +215,22 @@ BRANCH_LABEL(allDone) jmp *%eax /* "return" back to stub */ -/******************************************************************************* -* -* _ExcExit - inform the kernel of an exception exit -* -* This function is called from the exception stub created by nanoCpuExcConnect() -* to inform the kernel that the processing of an exception has -* completed. This routine restores the volatile integer registers and -* then control is returned back to the interrupted context or ISR. -* -* RETURNS: N/A -* -* C function prototype: -* -* void _ExcExit (void); -* -*/ +/** + * + * _ExcExit - inform the kernel of an exception exit + * + * This function is called from the exception stub created by nanoCpuExcConnect() + * to inform the kernel that the processing of an exception has + * completed. This routine restores the volatile integer registers and + * then control is returned back to the interrupted context or ISR. + * + * RETURNS: N/A + * + * C function prototype: + * + * void _ExcExit (void); + * + */ SECTION_FUNC(TEXT, _ExcExit) diff --git a/arch/x86/core/fatal.c b/arch/x86/core/fatal.c index 6066fa422cf..16ed6d61ebc 100644 --- a/arch/x86/core/fatal.c +++ b/arch/x86/core/fatal.c @@ -33,7 +33,7 @@ /* DESCRIPTION This module provides the _NanoFatalErrorHandler() routine. -*/ + */ #include #include @@ -65,21 +65,21 @@ const NANO_ESF _default_esf = { 0xdeaddead /* SS */ }; -/******************************************************************************* -* -* _NanoFatalErrorHandler - nanokernel fatal error handler -* -* This routine is called when a fatal error condition is detected by either -* hardware or software. -* -* The caller is expected to always provide a usable ESF. In the event that the -* fatal error does not have a hardware generated ESF, the caller should either -* create its own or use a pointer to the global default ESF <_default_esf>. -* -* RETURNS: This function does not return. -* -* \NOMANUAL -*/ +/** + * + * _NanoFatalErrorHandler - nanokernel fatal error handler + * + * This routine is called when a fatal error condition is detected by either + * hardware or software. + * + * The caller is expected to always provide a usable ESF. In the event that the + * fatal error does not have a hardware generated ESF, the caller should either + * create its own or use a pointer to the global default ESF <_default_esf>. + * + * RETURNS: This function does not return. + * + * \NOMANUAL + */ FUNC_NORETURN void _NanoFatalErrorHandler( unsigned int reason, /* reason that handler was called */ diff --git a/arch/x86/core/ffs.S b/arch/x86/core/ffs.S index 00230b5df30..cd8b570fcac 100644 --- a/arch/x86/core/ffs.S +++ b/arch/x86/core/ffs.S @@ -38,7 +38,7 @@ architecture. INTERNAL Inline versions of these APIs, find_last_set_inline() and find_first_set_inline(), are defined in arch.h. -*/ + */ #define _ASMLANGUAGE @@ -51,24 +51,24 @@ are defined in arch.h. 
GTEXT(find_last_set) GTEXT(find_first_set) -/******************************************************************************* -* -* find_first_set - find first set bit searching from the LSB -* -* This routine finds the first bit set in the passed argument and -* returns the index of that bit. Bits are numbered starting -* at 1 from the least significant bit to 32 for the most significant bit. -* A return value of zero indicates that the value passed is zero. -* -* RETURNS: bit position from 1 to 32, or 0 if the argument is zero. -* -* INTERNAL -* For Intel64 (x86_64) architectures, the 'cmovz' can be removed -* and leverage the fact that the 'bsrl' doesn't modify the destination operand -* when the source operand is zero. The "bitpos" variable can be preloaded -* into the destination register, and given the unconditional ++bitpos that -* is performed after the 'cmovz', the correct results are yielded. -*/ +/** + * + * find_first_set - find first set bit searching from the LSB + * + * This routine finds the first bit set in the passed argument and + * returns the index of that bit. Bits are numbered starting + * at 1 from the least significant bit to 32 for the most significant bit. + * A return value of zero indicates that the value passed is zero. + * + * RETURNS: bit position from 1 to 32, or 0 if the argument is zero. + * + * INTERNAL + * For Intel64 (x86_64) architectures, the 'cmovz' can be removed + * and leverage the fact that the 'bsrl' doesn't modify the destination operand + * when the source operand is zero. The "bitpos" variable can be preloaded + * into the destination register, and given the unconditional ++bitpos that + * is performed after the 'cmovz', the correct results are yielded. + */ SECTION_FUNC(TEXT, find_first_set) @@ -94,24 +94,24 @@ BRANCH_LABEL(ffsLsb_argNotZero) /* this label serves find_first_set() & find_las #endif /* !CONFIG_CMOV_UNSUPPORTED */ -/******************************************************************************* -* -* find_last_set - find first set bit searching from the MSB -* -* This routine finds the first bit set in the passed argument and -* returns the index of that bit. Bits are numbered starting -* at 1 from the least significant bit to 32 for the most significant bit. -* A return value of zero indicates that the value passed is zero. -* -* RETURNS: bit position from 1 to 32, or 0 if the argument is zero. -* -* INTERNAL -* For Intel64 (x86_64) architectures, the 'cmovz' can be removed -* and leverage the fact that the 'bsfl' doesn't modify the destination operand -* when the source operand is zero. The "bitpos" variable can be preloaded -* into the destination register, and given the unconditional ++bitpos that -* is performed after the 'cmovz', the correct results are yielded. -*/ +/** + * + * find_last_set - find first set bit searching from the MSB + * + * This routine finds the first bit set in the passed argument and + * returns the index of that bit. Bits are numbered starting + * at 1 from the least significant bit to 32 for the most significant bit. + * A return value of zero indicates that the value passed is zero. + * + * RETURNS: bit position from 1 to 32, or 0 if the argument is zero. + * + * INTERNAL + * For Intel64 (x86_64) architectures, the 'cmovz' can be removed + * and leverage the fact that the 'bsfl' doesn't modify the destination operand + * when the source operand is zero. 
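A portable C model of the find_first_set() contract documented above, for illustration only; the shipped implementations are the assembly routines in this file and the inline variants in arch.h.

static inline unsigned int find_first_set_model(unsigned int op)
{
	unsigned int bit = 1;           /* bit 1 is the LSB, bit 32 the MSB */

	if (op == 0) {
		return 0;               /* documented: zero argument yields 0 */
	}
	while ((op & 1) == 0) {
		op >>= 1;
		bit++;
	}
	return bit;
}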
The "bitpos" variable can be preloaded + * into the destination register, and given the unconditional ++bitpos that + * is performed after the 'cmovz', the correct results are yielded. + */ SECTION_FUNC(TEXT, find_last_set) diff --git a/arch/x86/core/float.c b/arch/x86/core/float.c index a6817330426..73df54ca2c6 100644 --- a/arch/x86/core/float.c +++ b/arch/x86/core/float.c @@ -93,7 +93,7 @@ FP operations. All other tasks and fibers have CR0[TS] = 1 so that an attempt to perform an FP operation will cause an exception, allowing the system to enable FP resource sharing on its behalf. -*/ + */ #ifdef CONFIG_MICROKERNEL #include @@ -112,74 +112,74 @@ enable FP resource sharing on its behalf. extern uint32_t _sse_mxcsr_default_value; /* SSE control/status register default value */ #endif /* CONFIG_SSE */ -/******************************************************************************* -* -* _FpCtxSave - save non-integer context information -* -* This routine saves the system's "live" non-integer context into the -* specified CCS. If the specified task or fiber supports SSE then -* x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved. -* -* RETURNS: N/A -*/ +/** + * + * _FpCtxSave - save non-integer context information + * + * This routine saves the system's "live" non-integer context into the + * specified CCS. If the specified task or fiber supports SSE then + * x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved. + * + * RETURNS: N/A + */ static void _FpCtxSave(tCCS *ccs) { _do_fp_ctx_save(ccs->flags & USE_SSE, &ccs->preempFloatReg); } -/******************************************************************************* -* -* _FpCtxInit - initialize non-integer context information -* -* This routine initializes the system's "live" non-integer context. -* -* RETURNS: N/A -*/ +/** + * + * _FpCtxInit - initialize non-integer context information + * + * This routine initializes the system's "live" non-integer context. + * + * RETURNS: N/A + */ static inline void _FpCtxInit(tCCS *ccs) { _do_fp_ctx_init(ccs->flags & USE_SSE); } -/******************************************************************************* -* -* _FpEnable - enable preservation of non-integer context information -* -* This routine allows the specified task/fiber (which may be the active -* task/fiber) to safely share the system's floating point registers with -* other tasks/fibers. The parameter indicates which floating point -* register sets will be used by the specified task/fiber: -* -* a) USE_FP indicates x87 FPU and MMX registers only -* b) USE_SSE indicates x87 FPU and MMX and SSEx registers -* -* Invoking this routine creates a floating point context for the task/fiber -* that corresponds to an FPU that has been reset. The system will thereafter -* protect the task/fiber's FP context so that it is not altered during -* a pre-emptive context switch. -* -* WARNING -* This routine should only be used to enable floating point support for a -* task/fiber that does not currently have such support enabled already. 
-* -* RETURNS: N/A -* -* INTERNAL -* Since the transition from "non-FP supporting" to "FP supporting" must be done -* atomically to avoid confusing the floating point logic used by _Swap(), -* this routine locks interrupts to ensure that a context switch does not occur, -* The locking isn't really needed when the routine is called by a fiber -* (since context switching can't occur), but it is harmless and allows a single -* routine to be called by both tasks and fibers (thus saving code space). -* -* If necessary, the interrupt latency impact of calling this routine from a -* fiber could be lessened by re-designing things so that only task-type callers -* locked interrupts (i.e. move the locking to task_float_enable()). However, -* all calls to fiber_float_enable() would need to be reviewed to ensure they -* are only used from a fiber, rather than from "generic" code used by both -* tasks and fibers. -*/ +/** + * + * _FpEnable - enable preservation of non-integer context information + * + * This routine allows the specified task/fiber (which may be the active + * task/fiber) to safely share the system's floating point registers with + * other tasks/fibers. The parameter indicates which floating point + * register sets will be used by the specified task/fiber: + * + * a) USE_FP indicates x87 FPU and MMX registers only + * b) USE_SSE indicates x87 FPU and MMX and SSEx registers + * + * Invoking this routine creates a floating point context for the task/fiber + * that corresponds to an FPU that has been reset. The system will thereafter + * protect the task/fiber's FP context so that it is not altered during + * a pre-emptive context switch. + * + * WARNING + * This routine should only be used to enable floating point support for a + * task/fiber that does not currently have such support enabled already. + * + * RETURNS: N/A + * + * INTERNAL + * Since the transition from "non-FP supporting" to "FP supporting" must be done + * atomically to avoid confusing the floating point logic used by _Swap(), + * this routine locks interrupts to ensure that a context switch does not occur, + * The locking isn't really needed when the routine is called by a fiber + * (since context switching can't occur), but it is harmless and allows a single + * routine to be called by both tasks and fibers (thus saving code space). + * + * If necessary, the interrupt latency impact of calling this routine from a + * fiber could be lessened by re-designing things so that only task-type callers + * locked interrupts (i.e. move the locking to task_float_enable()). However, + * all calls to fiber_float_enable() would need to be reviewed to ensure they + * are only used from a fiber, rather than from "generic" code used by both + * tasks and fibers. + */ void _FpEnable(tCCS *ccs, unsigned int options /* USE_FP or USE_SSE */ @@ -287,63 +287,63 @@ void _FpEnable(tCCS *ccs, irq_unlock_inline(imask); } -/******************************************************************************* -* -* fiber_float_enable - enable preservation of non-integer context information -* -* This routine allows a fiber to permit a task/fiber (including itself) to -* safely share the system's floating point registers with other tasks/fibers. -* -* See the description of _FpEnable() for further details. 
-* -* RETURNS: N/A -*/ +/** + * + * fiber_float_enable - enable preservation of non-integer context information + * + * This routine allows a fiber to permit a task/fiber (including itself) to + * safely share the system's floating point registers with other tasks/fibers. + * + * See the description of _FpEnable() for further details. + * + * RETURNS: N/A + */ FUNC_ALIAS(_FpEnable, fiber_float_enable, void); -/******************************************************************************* -* -* task_float_enable - enable preservation of non-integer context information -* -* This routine allows a task to permit a task/fiber (including itself) to -* safely share the system's floating point registers with other tasks/fibers. -* -* See the description of _FpEnable() for further details. -* -* RETURNS: N/A -*/ +/** + * + * task_float_enable - enable preservation of non-integer context information + * + * This routine allows a task to permit a task/fiber (including itself) to + * safely share the system's floating point registers with other tasks/fibers. + * + * See the description of _FpEnable() for further details. + * + * RETURNS: N/A + */ FUNC_ALIAS(_FpEnable, task_float_enable, void); -/******************************************************************************* -* -* _FpDisable - disable preservation of non-integer context information -* -* This routine prevents the specified task/fiber (which may be the active -* task/fiber) from safely sharing any of the system's floating point registers -* with other tasks/fibers. -* -* WARNING -* This routine should only be used to disable floating point support for -* a task/fiber that currently has such support enabled. -* -* RETURNS: N/A -* -* INTERNAL -* Since the transition from "FP supporting" to "non-FP supporting" must be done -* atomically to avoid confusing the floating point logic used by _Swap(), -* this routine locks interrupts to ensure that a context switch does not occur, -* The locking isn't really needed when the routine is called by a fiber -* (since context switching can't occur), but it is harmless and allows a single -* routine to be called by both tasks and fibers (thus saving code space). -* -* If necessary, the interrupt latency impact of calling this routine from a -* fiber could be lessened by re-designing things so that only task-type callers -* locked interrupts (i.e. move the locking to task_float_disable()). However, -* all calls to fiber_float_disable() would need to be reviewed to ensure they -* are only used from a fiber, rather than from "generic" code used by both -* tasks and fibers. -*/ +/** + * + * _FpDisable - disable preservation of non-integer context information + * + * This routine prevents the specified task/fiber (which may be the active + * task/fiber) from safely sharing any of the system's floating point registers + * with other tasks/fibers. + * + * WARNING + * This routine should only be used to disable floating point support for + * a task/fiber that currently has such support enabled. + * + * RETURNS: N/A + * + * INTERNAL + * Since the transition from "FP supporting" to "non-FP supporting" must be done + * atomically to avoid confusing the floating point logic used by _Swap(), + * this routine locks interrupts to ensure that a context switch does not occur, + * The locking isn't really needed when the routine is called by a fiber + * (since context switching can't occur), but it is harmless and allows a single + * routine to be called by both tasks and fibers (thus saving code space). 
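A sketch of a task opting in and out of FP register preservation through the aliases documented above. USE_FP and USE_SSE come from the description; context_self_get() is assumed here as the way to name the current context and may not match the tree this patch targets.

void enable_fp_for_current_task(void)
{
	/* preserve x87/MMX context across preemption for this task */
	task_float_enable(context_self_get(), USE_FP);
}

void disable_fp_for_current_task(void)
{
	task_float_disable(context_self_get());
}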
+ * + * If necessary, the interrupt latency impact of calling this routine from a + * fiber could be lessened by re-designing things so that only task-type callers + * locked interrupts (i.e. move the locking to task_float_disable()). However, + * all calls to fiber_float_disable() would need to be reviewed to ensure they + * are only used from a fiber, rather than from "generic" code used by both + * tasks and fibers. + */ void _FpDisable(tCCS *ccs) { @@ -376,58 +376,58 @@ void _FpDisable(tCCS *ccs) irq_unlock_inline(imask); } -/******************************************************************************* -* -* fiber_float_disable - disable preservation of non-integer context -*information -* -* This routine allows a fiber to disallow a task/fiber (including itself) from -* safely sharing any of the system's floating point registers with other -* tasks/fibers. -* -* WARNING -* This routine should only be used to disable floating point support for -* a task/fiber that currently has such support enabled. -* -* RETURNS: N/A -*/ +/** + * + * fiber_float_disable - disable preservation of non-integer context + *information + * + * This routine allows a fiber to disallow a task/fiber (including itself) from + * safely sharing any of the system's floating point registers with other + * tasks/fibers. + * + * WARNING + * This routine should only be used to disable floating point support for + * a task/fiber that currently has such support enabled. + * + * RETURNS: N/A + */ FUNC_ALIAS(_FpDisable, fiber_float_disable, void); -/******************************************************************************* -* -* task_float_disable - disable preservation of non-integer context information -* -* This routine allows a task to disallow a task/fiber (including itself) from -* safely sharing any of the system's floating point registers with other -* tasks/fibers. -* -* WARNING -* This routine should only be used to disable floating point support for -* a task/fiber that currently has such support enabled. -* -* RETURNS: N/A -*/ +/** + * + * task_float_disable - disable preservation of non-integer context information + * + * This routine allows a task to disallow a task/fiber (including itself) from + * safely sharing any of the system's floating point registers with other + * tasks/fibers. + * + * WARNING + * This routine should only be used to disable floating point support for + * a task/fiber that currently has such support enabled. + * + * RETURNS: N/A + */ FUNC_ALIAS(_FpDisable, task_float_disable, void); #ifdef CONFIG_AUTOMATIC_FP_ENABLING -/******************************************************************************* -* -* _FpNotAvailableExcHandler - handler for "device not available" exception -* -* This routine is registered to handle the "device not available" exception -* (vector = 7) when the AUTOMATIC_FP_ENABLING configuration option has been -* been selected. -* -* The processor will generate this exception if any x87 FPU, MMX, or SSEx -* instruction is executed while CR0[TS]=1. The handler then enables the -* current task or fiber with the USE_FP option (or the USE_SSE option if the -* SSE configuration option has been enabled). -* -* RETURNS: N/A -*/ +/** + * + * _FpNotAvailableExcHandler - handler for "device not available" exception + * + * This routine is registered to handle the "device not available" exception + * (vector = 7) when the AUTOMATIC_FP_ENABLING configuration option has been + * been selected. 
+ * + * The processor will generate this exception if any x87 FPU, MMX, or SSEx + * instruction is executed while CR0[TS]=1. The handler then enables the + * current task or fiber with the USE_FP option (or the USE_SSE option if the + * SSE configuration option has been enabled). + * + * RETURNS: N/A + */ void _FpNotAvailableExcHandler(NANO_ESF * pEsf /* not used */ ) diff --git a/arch/x86/core/gdt.c b/arch/x86/core/gdt.c index 6096296c80c..50748e643f5 100644 --- a/arch/x86/core/gdt.c +++ b/arch/x86/core/gdt.c @@ -34,7 +34,7 @@ DESCRIPTION This module contains routines for updating the global descriptor table (GDT) for the IA-32 architecture. -*/ + */ #include #include diff --git a/arch/x86/core/intboiexit.S b/arch/x86/core/intboiexit.S index c1073b94021..5fd85d37f48 100644 --- a/arch/x86/core/intboiexit.S +++ b/arch/x86/core/intboiexit.S @@ -49,7 +49,7 @@ The _IntBoiExit() routine is provided in a separate module so that it gets included in the final image only if an interrupt controller driver utilizing _IntBoiExit() is present. -*/ + */ #define _ASMLANGUAGE #include @@ -64,18 +64,18 @@ _IntBoiExit() is present. GTEXT(_IntExit) -/******************************************************************************* -* -* _IntBoiExit - exit interrupt handler stub without invoking ISR -* -* This routine exits an interrupt handler stub without invoking the associated -* ISR handler (or the EOI handler, if present). It should only be jumped to -* by an interrupt controller driver's BOI routine, and only if the BOI routine -* is passed a single parameter by the interrupt stub. -* -* \INTERNAL -* A BOI routine that has no parameters can jump directly to _IntExit(). -*/ +/** + * + * _IntBoiExit - exit interrupt handler stub without invoking ISR + * + * This routine exits an interrupt handler stub without invoking the associated + * ISR handler (or the EOI handler, if present). It should only be jumped to + * by an interrupt controller driver's BOI routine, and only if the BOI routine + * is passed a single parameter by the interrupt stub. + * + * \INTERNAL + * A BOI routine that has no parameters can jump directly to _IntExit(). + */ SECTION_FUNC(TEXT, _IntBoiExit) addl $4, %esp /* pop off the $BoiParameter */ diff --git a/arch/x86/core/intconnect.c b/arch/x86/core/intconnect.c index 5c8cc91f25f..d77344f4585 100644 --- a/arch/x86/core/intconnect.c +++ b/arch/x86/core/intconnect.c @@ -85,7 +85,7 @@ NOTE: Be sure to update the arch specific definition of the _INT_STUB_SIZE macro to reflect the maximum potential size of the interrupt stub (as shown above). The _INT_STUB_SIZE macro is defined in include/nanokernel/x86/arch.h. -*/ + */ #ifndef CONFIG_NO_ISRS @@ -159,7 +159,7 @@ static NANO_INT_STUB dynamic_stubs[ALL_DYNAMIC_STUBS] = { [0 ... (ALL_DYNAMIC_STUBS - 1)] = { _STUB_AVAIL, } }; -/******************************************************************************* +/** * _int_stub_alloc - allocate dynamic interrupt stub * * RETURNS: index of the first available element of the STUB array or -1 @@ -179,28 +179,28 @@ static int _int_stub_alloc(void) } #endif /* ALL_DYNAMIC_STUBS > 0 */ -/******************************************************************************* -* -* _IntVecSet - connect a routine to an interrupt vector -* -* This routine "connects" the specified to the specified interrupt -* . On the IA-32 architecture, an interrupt vector is a value from -* 0 to 255. 
This routine merely fills in the appropriate interrupt -* descriptor table (IDT) with an interrupt-gate descriptor such that -* is invoked when interrupt is asserted. The argument specifies -* the privilege level for the interrupt-gate descriptor; (hardware) interrupts -* and exceptions should specify a level of 0, whereas handlers for user-mode -* software generated interrupts should specify 3. -* -* RETURNS: N/A -* -* INTERNAL -* Unlike nanoCpuExcConnect() and irq_connect(), the _IntVecSet() routine -* is a very basic API that simply updates the appropriate entry in Interrupt -* Descriptor Table (IDT) such that the specified routine is invoked when the -* specified interrupt vector is asserted. -* -*/ +/** + * + * _IntVecSet - connect a routine to an interrupt vector + * + * This routine "connects" the specified to the specified interrupt + * . On the IA-32 architecture, an interrupt vector is a value from + * 0 to 255. This routine merely fills in the appropriate interrupt + * descriptor table (IDT) with an interrupt-gate descriptor such that + * is invoked when interrupt is asserted. The argument specifies + * the privilege level for the interrupt-gate descriptor; (hardware) interrupts + * and exceptions should specify a level of 0, whereas handlers for user-mode + * software generated interrupts should specify 3. + * + * RETURNS: N/A + * + * INTERNAL + * Unlike nanoCpuExcConnect() and irq_connect(), the _IntVecSet() routine + * is a very basic API that simply updates the appropriate entry in Interrupt + * Descriptor Table (IDT) such that the specified routine is invoked when the + * specified interrupt vector is asserted. + * + */ void _IntVecSet( unsigned int vector, /* interrupt vector: 0 to 255 on IA-32 */ @@ -233,53 +233,53 @@ void _IntVecSet( * generates an error */ #if ALL_DYNAMIC_STUBS > 0 -/******************************************************************************* -* -* irq_connect - connect a C routine to a hardware interrupt -* -* This routine connects an interrupt service routine (ISR) coded in C to -* the specified hardware . An interrupt vector will be allocated to -* satisfy the specified . If the interrupt service routine is being -* connected to a software generated interrupt, then must be set to -* NANO_SOFT_IRQ. -* -* The specified represents a virtualized IRQ, i.e. it does not -* necessarily represent a specific IRQ line on a given interrupt controller -* device. The BSP presents a virtualized set of IRQs from 0 to N, where N -* is the total number of IRQs supported by all the interrupt controller devices -* on the board. See the BSP's documentation for the mapping of virtualized -* IRQ to physical IRQ. -* -* When the device asserts an interrupt on the specified , a switch to -* the interrupt stack is performed (if not already executing on the interrupt -* stack), followed by saving the integer (i.e. non-floating point) context of -* the currently executing task, fiber, or ISR. The ISR specified by -* will then be invoked with the single . When the ISR returns, a -* context switch may occur. -* -* The routine searches for the first available element in the synamic_stubs -* array and uses it for the stub. -* -* RETURNS: the allocated interrupt vector -* -* WARNINGS -* Some boards utilize interrupt controllers where the interrupt vector -* cannot be programmed on an IRQ basis; as a result, the vector assigned -* to the during interrupt controller initialization will be returned. 
-* In these cases, the requested is not honoured since the interrupt -* prioritization is fixed by the interrupt controller (e.g. IRQ0 will always -* be the highest priority interrupt regardless of what interrupt vector -* was assigned to IRQ0). -* -* This routine does not perform range checking on the requested -* and thus, depending on the underlying interrupt controller, may result -* in the assignment of an interrupt vector located in the reserved range of -* the processor. -* -* INTERNAL -* For debug kernels, this routine shall return -1 when there are no -* vectors remaining in the specified level. -*/ +/** + * + * irq_connect - connect a C routine to a hardware interrupt + * + * This routine connects an interrupt service routine (ISR) coded in C to + * the specified hardware . An interrupt vector will be allocated to + * satisfy the specified . If the interrupt service routine is being + * connected to a software generated interrupt, then must be set to + * NANO_SOFT_IRQ. + * + * The specified represents a virtualized IRQ, i.e. it does not + * necessarily represent a specific IRQ line on a given interrupt controller + * device. The BSP presents a virtualized set of IRQs from 0 to N, where N + * is the total number of IRQs supported by all the interrupt controller devices + * on the board. See the BSP's documentation for the mapping of virtualized + * IRQ to physical IRQ. + * + * When the device asserts an interrupt on the specified , a switch to + * the interrupt stack is performed (if not already executing on the interrupt + * stack), followed by saving the integer (i.e. non-floating point) context of + * the currently executing task, fiber, or ISR. The ISR specified by + * will then be invoked with the single . When the ISR returns, a + * context switch may occur. + * + * The routine searches for the first available element in the synamic_stubs + * array and uses it for the stub. + * + * RETURNS: the allocated interrupt vector + * + * WARNINGS + * Some boards utilize interrupt controllers where the interrupt vector + * cannot be programmed on an IRQ basis; as a result, the vector assigned + * to the during interrupt controller initialization will be returned. + * In these cases, the requested is not honoured since the interrupt + * prioritization is fixed by the interrupt controller (e.g. IRQ0 will always + * be the highest priority interrupt regardless of what interrupt vector + * was assigned to IRQ0). + * + * This routine does not perform range checking on the requested + * and thus, depending on the underlying interrupt controller, may result + * in the assignment of an interrupt vector located in the reserved range of + * the processor. + * + * INTERNAL + * For debug kernels, this routine shall return -1 when there are no + * vectors remaining in the specified level. + */ int irq_connect( unsigned int irq, /* virtualized IRQ to connect to */ @@ -478,36 +478,36 @@ int irq_connect( } #endif /* ALL_DYNAMIC_STUBS > 0 */ -/******************************************************************************* -* -* _IntVecAlloc - allocate a free interrupt vector given -* -* This routine scans the interrupt_vectors_allocated[] array for a free vector that -* satisfies the specified . It is a utility function for use only -* by a BSP's _SysIntVecAlloc() routine. 
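A hypothetical irq_connect() call matching the description above. Only the 'irq' parameter is visible in this hunk; the priority, handler, and handler argument are inferred from the text, and MY_DEV_IRQ, MY_DEV_IRQ_PRI, and the ISR are invented names.

#define MY_DEV_IRQ      11      /* invented virtualized IRQ number */
#define MY_DEV_IRQ_PRI  3       /* invented priority level */

static void my_dev_isr(void *arg)
{
	(void)arg;                      /* the single parameter passed below */
	/* acknowledge and service the device here */
}

void my_dev_irq_init(void)
{
	int vector = irq_connect(MY_DEV_IRQ, MY_DEV_IRQ_PRI, my_dev_isr, NULL);

	if (vector < 0) {
		/* debug kernels: no vector left at the requested priority */
	}
}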
-* -* This routine assumes that the relationship between interrupt priority and -* interrupt vector is : -* -* priority = vector / 16; -* -* Since vectors 0 to 31 are reserved by the IA-32 architecture, the priorities -* of user defined interrupts range from 2 to 15. Each interrupt priority level -* contains 16 vectors, and the prioritization of interrupts within a priority -* level is determined by the vector number; the higher the vector number, the -* higher the priority within that priority level. -* -* It is also assumed that the interrupt controllers are capable of managing -* interrupt requests on a per-vector level as opposed to a per-priority level. -* For example, the local APIC on Pentium4 and later processors, the in-service -* register (ISR) and the interrupt request register (IRR) are 256 bits wide. -* -* RETURNS: allocated interrupt vector -* -* INTERNAL -* For debug kernels, this routine shall return -1 when there are no -* vectors remaining in the specified level. -*/ +/** + * + * _IntVecAlloc - allocate a free interrupt vector given + * + * This routine scans the interrupt_vectors_allocated[] array for a free vector that + * satisfies the specified . It is a utility function for use only + * by a BSP's _SysIntVecAlloc() routine. + * + * This routine assumes that the relationship between interrupt priority and + * interrupt vector is : + * + * priority = vector / 16; + * + * Since vectors 0 to 31 are reserved by the IA-32 architecture, the priorities + * of user defined interrupts range from 2 to 15. Each interrupt priority level + * contains 16 vectors, and the prioritization of interrupts within a priority + * level is determined by the vector number; the higher the vector number, the + * higher the priority within that priority level. + * + * It is also assumed that the interrupt controllers are capable of managing + * interrupt requests on a per-vector level as opposed to a per-priority level. + * For example, the local APIC on Pentium4 and later processors, the in-service + * register (ISR) and the interrupt request register (IRR) are 256 bits wide. + * + * RETURNS: allocated interrupt vector + * + * INTERNAL + * For debug kernels, this routine shall return -1 when there are no + * vectors remaining in the specified level. + */ int _IntVecAlloc(unsigned int priority) { @@ -601,18 +601,18 @@ int _IntVecAlloc(unsigned int priority) return vector; } -/******************************************************************************* -* -* _IntVecMarkAllocated - mark interrupt vector as allocated -* -* This routine is used to "reserve" an interrupt vector that is allocated -* or assigned by any means other than _IntVecAllocate(). This marks the vector -* as allocated so that any future invocations of _IntVecAllocate() will not -* return that vector. -* -* RETURNS: N/A -* -*/ +/** + * + * _IntVecMarkAllocated - mark interrupt vector as allocated + * + * This routine is used to "reserve" an interrupt vector that is allocated + * or assigned by any means other than _IntVecAllocate(). This marks the vector + * as allocated so that any future invocations of _IntVecAllocate() will not + * return that vector. 
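The vector-to-priority mapping stated above, written out as trivial helpers, illustration only.

#define VECTORS_PER_PRIORITY 16

/* priority = vector / 16, per the comment above */
static inline unsigned int priority_of_vector(unsigned int vector)
{
	return vector / VECTORS_PER_PRIORITY;
}

/* e.g. priority 2 owns vectors 32 through 47; within a level, a higher
 * vector number means a higher priority
 */
static inline unsigned int first_vector_of_priority(unsigned int priority)
{
	return priority * VECTORS_PER_PRIORITY;
}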
+ * + * RETURNS: N/A + * + */ void _IntVecMarkAllocated(unsigned int vector) { @@ -625,15 +625,15 @@ void _IntVecMarkAllocated(unsigned int vector) irq_unlock(imask); } -/******************************************************************************* -* -* _IntVecMarkFree - mark interrupt vector as free -* -* This routine is used to "free" an interrupt vector that is allocated -* or assigned using _IntVecAllocate() or _IntVecMarkAllocated(). This marks the -* vector as available so that any future allocations can return that vector. -* -*/ +/** + * + * _IntVecMarkFree - mark interrupt vector as free + * + * This routine is used to "free" an interrupt vector that is allocated + * or assigned using _IntVecAllocate() or _IntVecMarkAllocated(). This marks the + * vector as available so that any future allocations can return that vector. + * + */ void _IntVecMarkFree(unsigned int vector) { diff --git a/arch/x86/core/inthndlset.c b/arch/x86/core/inthndlset.c index 0e691baa6b3..99cb9ebdafb 100644 --- a/arch/x86/core/inthndlset.c +++ b/arch/x86/core/inthndlset.c @@ -31,15 +31,15 @@ */ /* -* DESCRIPTION -* This module contains the irq_handler_set() API. This routine is closely -* associated with irq_connect(), and any changes to the layout of the -* constructed interrupt stub must be reflected in both places. -* -* INTERNAL -* This routine is defined here, rather than in intconnect.c, so that it can be -* omitted from a system image if it isn't required. -*/ + * DESCRIPTION + * This module contains the irq_handler_set() API. This routine is closely + * associated with irq_connect(), and any changes to the layout of the + * constructed interrupt stub must be reflected in both places. + * + * INTERNAL + * This routine is defined here, rather than in intconnect.c, so that it can be + * omitted from a system image if it isn't required. + */ #include @@ -57,28 +57,28 @@ extern unsigned char _idt_base_address[]; #define FIRST_OPT_OPCODE_OFF 5 -/******************************************************************************* -* -* irq_handler_set - set the handler in an already connected stub -* -* This routine is used to modify an already fully constructed interrupt stub -* to specify a new and/or . -* -* WARNINGS: -* -* A fully constructed interrupt stub is generated via irq_connect(), i.e. -* the irq_handler_set() function must only be called after invoking -* irq_connect(). -* -* The caller must ensure that the associated interrupt does not occur while -* this routine is executing, otherwise race conditions may arise that could -* cause the interrupt stub to invoke the handler using an incorrect routine -* and/or parameter. If possible, silence the source of the associated interrupt -* only, rather than locking out all interrupts. -* -* RETURNS: N/A -* -*/ +/** + * + * irq_handler_set - set the handler in an already connected stub + * + * This routine is used to modify an already fully constructed interrupt stub + * to specify a new and/or . + * + * WARNINGS: + * + * A fully constructed interrupt stub is generated via irq_connect(), i.e. + * the irq_handler_set() function must only be called after invoking + * irq_connect(). + * + * The caller must ensure that the associated interrupt does not occur while + * this routine is executing, otherwise race conditions may arise that could + * cause the interrupt stub to invoke the handler using an incorrect routine + * and/or parameter. If possible, silence the source of the associated interrupt + * only, rather than locking out all interrupts. 
+ * + * RETURNS: N/A + * + */ void irq_handler_set(unsigned int vector, void (*oldRoutine)(void *parameter), diff --git a/arch/x86/core/intstub.S b/arch/x86/core/intstub.S index 86c47a9ce8a..731346f60f3 100644 --- a/arch/x86/core/intstub.S +++ b/arch/x86/core/intstub.S @@ -36,7 +36,7 @@ This module implements assembly routines to manage interrupts on the Intel IA-32 architecture. More specifically, the interrupt (asynchronous exception) stubs are implemented in this module. The stubs are invoked when entering and exiting a C interrupt handler. -*/ + */ #define _ASMLANGUAGE @@ -74,41 +74,41 @@ entering and exiting a C interrupt handler. GTEXT(_int_latency_start) GTEXT(_int_latency_stop) #endif -/******************************************************************************* -* -* _IntEnt - inform the kernel of an interrupt -* -* This function is called from the interrupt stub created by irq_connect() -* to inform the kernel of an interrupt. This routine increments -* _nanokernel.nested (to support interrupt nesting), switches to the -* base of the interrupt stack, if not already on the interrupt stack, and then -* saves the volatile integer registers onto the stack. Finally, control is -* returned back to the interrupt stub code (which will then invoke the -* "application" interrupt service routine). -* -* Only the volatile integer registers are saved since ISRs are assumed not to -* utilize floating point (or SSE) instructions. If an ISR requires the usage -* of floating point (or SSE) instructions, it must first invoke nanoCpuFpSave() -* (or nanoCpuSseSave()) at the beginning of the ISR. A subsequent -* nanoCpuFpRestore() (or nanoCpuSseRestore()) is needed just prior to returning -* from the ISR. Note that the nanoCpuFpSave(), nanoCpuSseSave(), -* nanoCpuFpRestore(), and nanoCpuSseRestore() APIs have not been -* implemented yet. -* -* WARNINGS -* -* Host-based tools and the target-based GDB agent depend on the stack frame -* created by this routine to determine the locations of volatile registers. -* These tools must be updated to reflect any changes to the stack frame. -* -* RETURNS: N/A -* -* C function prototype: -* -* void _IntEnt (void); -* -* NOMANUAL -*/ +/** + * + * _IntEnt - inform the kernel of an interrupt + * + * This function is called from the interrupt stub created by irq_connect() + * to inform the kernel of an interrupt. This routine increments + * _nanokernel.nested (to support interrupt nesting), switches to the + * base of the interrupt stack, if not already on the interrupt stack, and then + * saves the volatile integer registers onto the stack. Finally, control is + * returned back to the interrupt stub code (which will then invoke the + * "application" interrupt service routine). + * + * Only the volatile integer registers are saved since ISRs are assumed not to + * utilize floating point (or SSE) instructions. If an ISR requires the usage + * of floating point (or SSE) instructions, it must first invoke nanoCpuFpSave() + * (or nanoCpuSseSave()) at the beginning of the ISR. A subsequent + * nanoCpuFpRestore() (or nanoCpuSseRestore()) is needed just prior to returning + * from the ISR. Note that the nanoCpuFpSave(), nanoCpuSseSave(), + * nanoCpuFpRestore(), and nanoCpuSseRestore() APIs have not been + * implemented yet. + * + * WARNINGS + * + * Host-based tools and the target-based GDB agent depend on the stack frame + * created by this routine to determine the locations of volatile registers. + * These tools must be updated to reflect any changes to the stack frame. 
+ * + * RETURNS: N/A + * + * C function prototype: + * + * void _IntEnt (void); + * + * NOMANUAL + */ SECTION_FUNC(TEXT, _IntEnt) @@ -240,29 +240,29 @@ BRANCH_LABEL(_HandleIdle) #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */ -/******************************************************************************* -* -* _IntExit - inform the kernel of an interrupt exit -* -* This function is called from the interrupt stub created by irq_connect() -* to inform the kernel that the processing of an interrupt has -* completed. This routine decrements _nanokernel.nested (to support interrupt -* nesting), restores the volatile integer registers, and then switches -* back to the interrupted context's stack, if this isn't a nested interrupt. -* -* Finally, control is returned back to the interrupted fiber context or ISR. -* A context switch _may_ occur if the interrupted context was a task context, -* in which case one or more other fiber and task contexts will execute before -* this routine resumes and control gets returned to the interrupted task. -* -* RETURNS: N/A -* -* C function prototype: -* -* void _IntExit (void); -* -* NOMANUAL -*/ +/** + * + * _IntExit - inform the kernel of an interrupt exit + * + * This function is called from the interrupt stub created by irq_connect() + * to inform the kernel that the processing of an interrupt has + * completed. This routine decrements _nanokernel.nested (to support interrupt + * nesting), restores the volatile integer registers, and then switches + * back to the interrupted context's stack, if this isn't a nested interrupt. + * + * Finally, control is returned back to the interrupted fiber context or ISR. + * A context switch _may_ occur if the interrupted context was a task context, + * in which case one or more other fiber and task contexts will execute before + * this routine resumes and control gets returned to the interrupted task. + * + * RETURNS: N/A + * + * C function prototype: + * + * void _IntExit (void); + * + * NOMANUAL + */ SECTION_FUNC(TEXT, _IntExit) @@ -388,38 +388,38 @@ BRANCH_LABEL(nestedInterrupt) iret -/******************************************************************************* -* -* _SpuriousIntHandler - -* _SpuriousIntNoErrCodeHandler - spurious interrupt handler stubs -* -* Interrupt-gate descriptors are statically created for all slots in the IDT -* that point to _SpuriousIntHandler() or _SpuriousIntNoErrCodeHandler(). The -* former stub is connected to exception vectors where the processor pushes an -* error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP -* records. -* -* A spurious interrupt is considered a fatal condition, thus this routine -* merely sets up the 'reason' and 'pEsf' parameters to the BSP provided -* routine: _SysFatalHwErrorHandler(). In other words, there is no provision -* to return to the interrupted context and thus the volatile registers -* are not saved. -* -* RETURNS: Never returns -* -* C function prototype: -* -* void _SpuriousIntHandler (void); -* -* INTERNAL -* The _IntVecSet() routine creates an interrupt-gate descriptor for all -* connections. The processor will automatically clear the IF bit -* in the EFLAGS register upon execution of the handler, -* thus _SpuriousIntNoErrCodeHandler()/_SpuriousIntHandler() shall be -* invoked with interrupts disabled. 
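The _IntEnt()/_IntExit() pair above is assembly, but the nesting bookkeeping it describes can be modelled in C. The structure and field names below are simplified stand-ins for _nanokernel; the real code additionally saves and restores the volatile integer registers and switches %esp to the interrupt stack.

#include <stdbool.h>

struct kernel_state {
	unsigned int nested;        /* interrupt nesting level               */
	char *irq_stack_base;       /* base of the dedicated interrupt stack */
};

static struct kernel_state knl;

static bool int_enter(void)
{
	bool first_level = (knl.nested == 0);

	knl.nested++;               /* support nested interrupts */

	/* On the first level the stub would now switch the stack pointer to
	 * knl.irq_stack_base before saving the volatile registers. */
	return first_level;
}

static bool int_exit(void)
{
	knl.nested--;

	/* Returning true means "not nested": the stub would switch back to
	 * the interrupted context's stack and may reschedule if it had
	 * interrupted a task context. */
	return knl.nested == 0;
}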
-* -* NOMANUAL -*/ +/** + * + * _SpuriousIntHandler - + * _SpuriousIntNoErrCodeHandler - spurious interrupt handler stubs + * + * Interrupt-gate descriptors are statically created for all slots in the IDT + * that point to _SpuriousIntHandler() or _SpuriousIntNoErrCodeHandler(). The + * former stub is connected to exception vectors where the processor pushes an + * error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP + * records. + * + * A spurious interrupt is considered a fatal condition, thus this routine + * merely sets up the 'reason' and 'pEsf' parameters to the BSP provided + * routine: _SysFatalHwErrorHandler(). In other words, there is no provision + * to return to the interrupted context and thus the volatile registers + * are not saved. + * + * RETURNS: Never returns + * + * C function prototype: + * + * void _SpuriousIntHandler (void); + * + * INTERNAL + * The _IntVecSet() routine creates an interrupt-gate descriptor for all + * connections. The processor will automatically clear the IF bit + * in the EFLAGS register upon execution of the handler, + * thus _SpuriousIntNoErrCodeHandler()/_SpuriousIntHandler() shall be + * invoked with interrupts disabled. + * + * NOMANUAL + */ SECTION_FUNC(TEXT, _SpuriousIntNoErrCodeHandler) @@ -462,34 +462,34 @@ BRANCH_LABEL(callFatalHandler) jmp callFatalHandler -/******************************************************************************* -* -* irq_lock - disable interrupts on the local CPU -* -* This routine disables interrupts. It can be called from either interrupt -* or context level. This routine returns an architecture-dependent -* lock-out key representing the "interrupt disable state" prior to the call; -* this key can be passed to fiber_enable_ints() to re-enable interrupts. -* -* The lock-out key should only be used as the argument to the -* fiber_enable_ints() API. It should never be used to manually re-enable -* interrupts or to inspect or manipulate the contents of the source register. -* -* WARNINGS -* Invoking a kernel routine with interrupts locked may result in -* interrupts being re-enabled for an unspecified period of time. If the -* called routine blocks, interrupts will be re-enabled while another -* context executes, or while the system is idle. -* -* The "interrupt disable state" is an attribute of a context, i.e. it's part -* of the context context. Thus, if a context disables interrupts and -* subsequently invokes a kernel routine that causes the calling context -* to block, the interrupt disable state will be restored when the context is -* later rescheduled for execution. -* -* RETURNS: An architecture-dependent lock-out key representing the -* "interrupt disable state" prior to the call. -*/ +/** + * + * irq_lock - disable interrupts on the local CPU + * + * This routine disables interrupts. It can be called from either interrupt + * or context level. This routine returns an architecture-dependent + * lock-out key representing the "interrupt disable state" prior to the call; + * this key can be passed to fiber_enable_ints() to re-enable interrupts. + * + * The lock-out key should only be used as the argument to the + * fiber_enable_ints() API. It should never be used to manually re-enable + * interrupts or to inspect or manipulate the contents of the source register. + * + * WARNINGS + * Invoking a kernel routine with interrupts locked may result in + * interrupts being re-enabled for an unspecified period of time. 
If the + * called routine blocks, interrupts will be re-enabled while another + * context executes, or while the system is idle. + * + * The "interrupt disable state" is an attribute of a context, i.e. it's part + * of the context context. Thus, if a context disables interrupts and + * subsequently invokes a kernel routine that causes the calling context + * to block, the interrupt disable state will be restored when the context is + * later rescheduled for execution. + * + * RETURNS: An architecture-dependent lock-out key representing the + * "interrupt disable state" prior to the call. + */ SECTION_FUNC(TEXT, irq_lock) pushfl @@ -501,16 +501,16 @@ SECTION_FUNC(TEXT, irq_lock) ret -/******************************************************************************* -* -* irq_unlock - enable interrupts on the local CPU -* -* This routine re-enables interrupts on the local CPU. The parameter -* is an architecture-dependent lock-out key that is returned by a previous -* invocation of irq_lock(). -* -* This routine can be called from either a context or ISR context. -*/ +/** + * + * irq_unlock - enable interrupts on the local CPU + * + * This routine re-enables interrupts on the local CPU. The parameter + * is an architecture-dependent lock-out key that is returned by a previous + * invocation of irq_lock(). + * + * This routine can be called from either a context or ISR context. + */ SECTION_FUNC(TEXT, irq_unlock) testl $0x200, SP_ARG1(%esp) diff --git a/arch/x86/core/msr.S b/arch/x86/core/msr.S index b25259e4f77..a5d0d822e00 100644 --- a/arch/x86/core/msr.S +++ b/arch/x86/core/msr.S @@ -34,7 +34,7 @@ DESCRIPTION This module provides the implementation of the _MsrWrite() and _MsrRead() utilities. -*/ + */ #define _ASMLANGUAGE @@ -45,28 +45,28 @@ utilities. GTEXT(_MsrWrite) GTEXT(_MsrRead) -/******************************************************************************* -* -* _MsrWrite - write to a model specific register (MSR) -* -* This function is used to write to an MSR. -* -* C function prototype: -* -* void _MsrWrite (unsigned int msr, uint64_t msrData); -* -* The definitions of the so-called "Architectural MSRs" are contained -* in nano_private.h and have the format: IA32_XXX_MSR -* -* INTERNAL -* 1) The 'wrmsr' instruction was introduced in the Pentium processor; executing -* this instruction on an earlier IA-32 processor will result in an invalid -* opcode exception. -* 2) The 'wrmsr' uses the ECX, EDX, and EAX registers which matches the set of -* volatile registers! -* -* RETURNS: N/A -*/ +/** + * + * _MsrWrite - write to a model specific register (MSR) + * + * This function is used to write to an MSR. + * + * C function prototype: + * + * void _MsrWrite (unsigned int msr, uint64_t msrData); + * + * The definitions of the so-called "Architectural MSRs" are contained + * in nano_private.h and have the format: IA32_XXX_MSR + * + * INTERNAL + * 1) The 'wrmsr' instruction was introduced in the Pentium processor; executing + * this instruction on an earlier IA-32 processor will result in an invalid + * opcode exception. + * 2) The 'wrmsr' uses the ECX, EDX, and EAX registers which matches the set of + * volatile registers! + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _MsrWrite) movl SP_ARG1(%esp), %ecx /* load ECX with */ @@ -76,28 +76,28 @@ SECTION_FUNC(TEXT, _MsrWrite) ret -/******************************************************************************* -* -* _MsrRead - read from a model specific register (MSR) -* -* This function is used to read from an MSR. 
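The irq_lock()/irq_unlock() comments above imply the usual critical-section shape: capture the key, do a short non-blocking sequence, then hand the key back. A minimal sketch, assuming an unsigned int key type and a made-up shared_counter resource:

extern unsigned int irq_lock(void);       /* assumed prototype, for illustration */
extern void irq_unlock(unsigned int key);

static volatile int shared_counter;       /* made-up shared resource */

static void shared_counter_bump(void)
{
	unsigned int key = irq_lock();    /* key captures the prior interrupt state */

	shared_counter++;                 /* no blocking kernel calls in here       */

	irq_unlock(key);                  /* restores the previous interrupt state  */
}

Keeping the locked region free of kernel calls avoids the re-enable window the WARNINGS paragraph describes.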
-* -* C function prototype: -* -* uint64_t _MsrRead (unsigned int msr); -* -* The definitions of the so-called "Architectural MSRs" are contained -* in nano_private.h and have the format: IA32_XXX_MSR -* -* INTERNAL -* 1) The 'rdmsr' instruction was introduced in the Pentium processor; executing -* this instruction on an earlier IA-32 processor will result in an invalid -* opcode exception. -* 2) The 'rdmsr' uses the ECX, EDX, and EAX registers which matches the set of -* volatile registers! -* -* RETURNS: N/A -*/ +/** + * + * _MsrRead - read from a model specific register (MSR) + * + * This function is used to read from an MSR. + * + * C function prototype: + * + * uint64_t _MsrRead (unsigned int msr); + * + * The definitions of the so-called "Architectural MSRs" are contained + * in nano_private.h and have the format: IA32_XXX_MSR + * + * INTERNAL + * 1) The 'rdmsr' instruction was introduced in the Pentium processor; executing + * this instruction on an earlier IA-32 processor will result in an invalid + * opcode exception. + * 2) The 'rdmsr' uses the ECX, EDX, and EAX registers which matches the set of + * volatile registers! + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _MsrRead) movl SP_ARG1(%esp), %ecx /* load ECX with */ diff --git a/arch/x86/core/offsets/offsets.c b/arch/x86/core/offsets/offsets.c index 2c9e6628210..e872afc47c5 100644 --- a/arch/x86/core/offsets/offsets.c +++ b/arch/x86/core/offsets/offsets.c @@ -45,7 +45,7 @@ Typically, only those members that are accessed by assembly language routines are defined; however, it doesn't hurt to define all fields for the sake of completeness. -*/ + */ #include /* located in kernel/arch/common/include */ diff --git a/arch/x86/core/swap.S b/arch/x86/core/swap.S index 3b4f70c8d24..cefcd96bd27 100644 --- a/arch/x86/core/swap.S +++ b/arch/x86/core/swap.S @@ -39,7 +39,7 @@ a representation of the save stack frame generated by _Swap() in order to generate offsets (in the form of absolute symbols) for consumption by host tools. Please update swapstk.h if changing the structure of the save frame on the stack. -*/ + */ #define _ASMLANGUAGE @@ -54,58 +54,58 @@ save frame on the stack. /* externs */ -/******************************************************************************* -* -* _Swap - initiate a cooperative context switch -* -* The _Swap() routine is invoked by various nanokernel services to effect -* a cooperative context context switch. Prior to invoking _Swap(), the -* caller disables interrupts (via irq_lock) and the return 'key' -* is passed as a parameter to _Swap(). The 'key' actually represents -* the EFLAGS register prior to disabling interrupts via a 'cli' instruction. -* -* Given that _Swap() is called to effect a cooperative context context switch, -* only the non-volatile integer registers need to be saved in the tCCS of the -* outgoing context. The restoration of the integer registers of the incoming -* context depends on whether that context was preemptively context switched -* out. The INT_ACTIVE and EXC_ACTIVE bits in the tCCS->flags field will signify -* that the context was preemptively context switched out, and thus both the -* volatile and non-volatile integer registers need to be restored. -* -* The non-volatile registers need to be scrubbed to ensure they contain no -* sensitive information that could compromise system security. This is to -* make sure that information will not be leaked from one application to -* another via these volatile registers. 
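Stepping back to the _MsrWrite()/_MsrRead() stubs above: the same register convention (MSR index in ECX, data in EDX:EAX) can be expressed with GCC inline assembly. This is an illustration, not the kernel's implementation.

#include <stdint.h>

static inline void msr_write(unsigned int msr, uint64_t data)
{
	uint32_t lo = (uint32_t)data;
	uint32_t hi = (uint32_t)(data >> 32);

	/* wrmsr: ECX selects the MSR, EDX:EAX holds the 64-bit value */
	__asm__ volatile ("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
}

static inline uint64_t msr_read(unsigned int msr)
{
	uint32_t lo, hi;

	/* rdmsr: ECX selects the MSR, result comes back in EDX:EAX */
	__asm__ volatile ("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));

	return ((uint64_t)hi << 32) | lo;
}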
-* -* Here, the integer registers (EAX, ECX, EDX) have been scrubbed. Any changes -* to this routine that alter the values of these registers MUST be reviewed -* for potential security impacts. -* -* Floating point registers are handled using a lazy save/restore -* mechanism since it's expected relatively few contexts will be created -* with the USE_FP or USE_SSE option bits. The nanokernel data structure -* maintains a 'current_fp' field to keep track of the context that "owns" -* the floating point registers. Floating point registers consist of -* ST0->ST7 (x87 FPU and MMX registers) and XMM0 -> XMM7. -* -* All floating point registers are considered 'volatile' thus they will -* only be saved/restored when a preemptive context context switch occurs. -* -* Floating point registers are currently NOT scrubbed, and are subject to -* potential security leaks. -* -* The scheduling algorithm is simple: schedule the head of the runnable -* FIBER context list, which is represented by _nanokernel.fiber. If there are -* no runnable FIBER contexts, then schedule the TASK context represented -* by _nanokernel.task. The _nanokernel.task field will never be NULL. -* -* RETURNS: may contain a return value setup by a call to fiberRtnValueSet() -* -* C function prototype: -* -* unsigned int _Swap (unsigned int eflags); -* -*/ +/** + * + * _Swap - initiate a cooperative context switch + * + * The _Swap() routine is invoked by various nanokernel services to effect + * a cooperative context context switch. Prior to invoking _Swap(), the + * caller disables interrupts (via irq_lock) and the return 'key' + * is passed as a parameter to _Swap(). The 'key' actually represents + * the EFLAGS register prior to disabling interrupts via a 'cli' instruction. + * + * Given that _Swap() is called to effect a cooperative context context switch, + * only the non-volatile integer registers need to be saved in the tCCS of the + * outgoing context. The restoration of the integer registers of the incoming + * context depends on whether that context was preemptively context switched + * out. The INT_ACTIVE and EXC_ACTIVE bits in the tCCS->flags field will signify + * that the context was preemptively context switched out, and thus both the + * volatile and non-volatile integer registers need to be restored. + * + * The non-volatile registers need to be scrubbed to ensure they contain no + * sensitive information that could compromise system security. This is to + * make sure that information will not be leaked from one application to + * another via these volatile registers. + * + * Here, the integer registers (EAX, ECX, EDX) have been scrubbed. Any changes + * to this routine that alter the values of these registers MUST be reviewed + * for potential security impacts. + * + * Floating point registers are handled using a lazy save/restore + * mechanism since it's expected relatively few contexts will be created + * with the USE_FP or USE_SSE option bits. The nanokernel data structure + * maintains a 'current_fp' field to keep track of the context that "owns" + * the floating point registers. Floating point registers consist of + * ST0->ST7 (x87 FPU and MMX registers) and XMM0 -> XMM7. + * + * All floating point registers are considered 'volatile' thus they will + * only be saved/restored when a preemptive context context switch occurs. + * + * Floating point registers are currently NOT scrubbed, and are subject to + * potential security leaks. 
+ * + * The scheduling algorithm is simple: schedule the head of the runnable + * FIBER context list, which is represented by _nanokernel.fiber. If there are + * no runnable FIBER contexts, then schedule the TASK context represented + * by _nanokernel.task. The _nanokernel.task field will never be NULL. + * + * RETURNS: may contain a return value setup by a call to fiberRtnValueSet() + * + * C function prototype: + * + * unsigned int _Swap (unsigned int eflags); + * + */ SECTION_FUNC(TEXT, _Swap) movl $_nanokernel, %eax diff --git a/arch/x86/core/unaligned.S b/arch/x86/core/unaligned.S index bf2276092ca..0e3498bce58 100644 --- a/arch/x86/core/unaligned.S +++ b/arch/x86/core/unaligned.S @@ -36,7 +36,7 @@ This module contains utilities to perform unaligned reads/writes from/to a 32-bit quantity. Some memory subsystems to not support the IA-32 byte enable lines, and thus accessing an unaligned 32-bit quantity is performed byte-by-byte. -*/ + */ #define _ASMLANGUAGE @@ -49,31 +49,31 @@ performed byte-by-byte. GTEXT(_Unaligned32Write) GTEXT(_Unaligned32Read) -/******************************************************************************* -* -* _Unaligned32Write - perform an unaligned 32-bit write operation -* -* This function is used during the interrupt and exception stub code -* synthesis step when writing out the 32-bit relative jmp/branch -* offsets. -* -* Generally, the 32-bit offsets are located at an odd memory address. For -* target hardware that don't fully (or properly) decode the byte enable -* lines from the IA-32 processor, this function shall be utilized to write out -* the data byte-by-byte. -* -* The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED -* shall be defined when the kernel is built for target hardware that cannot -* support unaligned double word (32-bit) write operations. -* -* C function prototype: -* -* void _Unaligned32Write -* ( -* unsigned int * ptr, -* unsigned int val -* ); -*/ +/** + * + * _Unaligned32Write - perform an unaligned 32-bit write operation + * + * This function is used during the interrupt and exception stub code + * synthesis step when writing out the 32-bit relative jmp/branch + * offsets. + * + * Generally, the 32-bit offsets are located at an odd memory address. For + * target hardware that don't fully (or properly) decode the byte enable + * lines from the IA-32 processor, this function shall be utilized to write out + * the data byte-by-byte. + * + * The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED + * shall be defined when the kernel is built for target hardware that cannot + * support unaligned double word (32-bit) write operations. + * + * C function prototype: + * + * void _Unaligned32Write + * ( + * unsigned int * ptr, + * unsigned int val + * ); + */ SECTION_FUNC(TEXT, _Unaligned32Write) movl 0x4(%esp), %edx /* fetch ptr argument */ @@ -86,30 +86,30 @@ SECTION_FUNC(TEXT, _Unaligned32Write) ret -/******************************************************************************* -* -* _Unaligned32Read - perform an unaligned 32-bit read operation -* -* This function is used during the interrupt and exception stub code -* synthesis step when reading the 32-bit relative jmp/branch -* offsets. -* -* Generally, the 32-bit offsets are located at an odd memory address. For -* target hardware that don't fully (or properly) decode the byte enable -* lines from the IA-32 processor, this function shall be utilized to read -* the data byte-by-byte. 
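The scheduling rule spelled out in the _Swap() comment reduces to a two-field decision. The sketch below mirrors that description with simplified types; the real tNANO/tCCS structures carry much more state.

#include <stddef.h>

struct ccs;                            /* execution context (fiber or task) */

struct nano {
	struct ccs *fiber;             /* head of runnable fiber list, or NULL */
	struct ccs *task;              /* background task, never NULL          */
};

static struct ccs *next_context(const struct nano *nk)
{
	/* Prefer the head of the runnable fiber list; otherwise fall back to
	 * the task context, which is always schedulable. */
	return (nk->fiber != NULL) ? nk->fiber : nk->task;
}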
-* -* The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED -* shall be defined when the kernel is built for target hardware that cannot -* support unaligned double word (32-bit) write operations. -* -* C function prototype: -* -* unsigned int _Unaligned32Read -* ( -* unsigned int * ptr -* ); -*/ +/** + * + * _Unaligned32Read - perform an unaligned 32-bit read operation + * + * This function is used during the interrupt and exception stub code + * synthesis step when reading the 32-bit relative jmp/branch + * offsets. + * + * Generally, the 32-bit offsets are located at an odd memory address. For + * target hardware that don't fully (or properly) decode the byte enable + * lines from the IA-32 processor, this function shall be utilized to read + * the data byte-by-byte. + * + * The BSP specific configuration option CONFIG_UNALIGNED_WRITE_UNSUPPORTED + * shall be defined when the kernel is built for target hardware that cannot + * support unaligned double word (32-bit) write operations. + * + * C function prototype: + * + * unsigned int _Unaligned32Read + * ( + * unsigned int * ptr + * ); + */ SECTION_FUNC(TEXT, _Unaligned32Read) movl 0x4(%esp), %edx /* fetch ptr argument */ diff --git a/arch/x86/crt0.S b/arch/x86/crt0.S index 2534d7f5607..7a52b9d8779 100644 --- a/arch/x86/crt0.S +++ b/arch/x86/crt0.S @@ -40,7 +40,7 @@ booting scenarios (e.g. via GRUB or any other multiboot compliant bootloader) now assume that the system is already in 32-bit protected mode and address line A20 is enabled. However, the code associated with CONFIG_PROT_MODE_SWITCH has been left in place should future booting scenarios arise which require its use. -*/ + */ #define _ASMLANGUAGE diff --git a/arch/x86/driver_static_irq_stubs.S b/arch/x86/driver_static_irq_stubs.S index aa390d9ecf7..077a759e25e 100644 --- a/arch/x86/driver_static_irq_stubs.S +++ b/arch/x86/driver_static_irq_stubs.S @@ -34,7 +34,7 @@ DESCRIPTION This module contains the static interrupt stubs for the various drivers employed by x86 BSPs. -*/ + */ #define _ASMLANGUAGE diff --git a/arch/x86/generic_pc/board.h b/arch/x86/generic_pc/board.h index c24648d435c..f667483fa99 100644 --- a/arch/x86/generic_pc/board.h +++ b/arch/x86/generic_pc/board.h @@ -34,7 +34,7 @@ DESCRIPTION This header file is used to specify and describe board-level aspects for the 'generic_pc' BSP. -*/ + */ #ifndef __INCboardh #define __INCboardh diff --git a/arch/x86/generic_pc/linker.cmd b/arch/x86/generic_pc/linker.cmd index d6f567899e9..d0362006eed 100644 --- a/arch/x86/generic_pc/linker.cmd +++ b/arch/x86/generic_pc/linker.cmd @@ -33,7 +33,7 @@ /* DESCRIPTION This is the linker script for both standard images and XIP images. -*/ + */ #include diff --git a/arch/x86/generic_pc/system.c b/arch/x86/generic_pc/system.c index 411e7f0e705..78383eb97f8 100644 --- a/arch/x86/generic_pc/system.c +++ b/arch/x86/generic_pc/system.c @@ -34,7 +34,7 @@ DESCRIPTION This module provides routines to initialize and support board-level hardware for the generic_pc BSP. 
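For boards that cannot drive the byte-enable lines, the unaligned access routines above do the access one byte at a time. A little-endian C equivalent, with names local to this sketch:

#include <stdint.h>

static void unaligned32_write(uint8_t *p, uint32_t val)
{
	p[0] = (uint8_t)(val);             /* least significant byte first */
	p[1] = (uint8_t)(val >> 8);
	p[2] = (uint8_t)(val >> 16);
	p[3] = (uint8_t)(val >> 24);
}

static uint32_t unaligned32_read(const uint8_t *p)
{
	return (uint32_t)p[0] |
	       ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) |
	       ((uint32_t)p[3] << 24);
}

int main(void)
{
	uint8_t buf[8] = {0};

	unaligned32_write(&buf[1], 0x11223344u);   /* deliberately odd address */
	return (unaligned32_read(&buf[1]) == 0x11223344u) ? 0 : 1;
}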
-*/ + */ #include #include "board.h" @@ -86,13 +86,13 @@ static inline void ioapicInit(void) #ifdef DO_CONSOLE_INIT -/******************************************************************************* -* -* uart_generic_info_init - initialize initialization information for one UART -* -* RETURNS: N/A -* -*/ +/** + * + * uart_generic_info_init - initialize initialization information for one UART + * + * RETURNS: N/A + * + */ void uart_generic_info_init(struct uart_init_info *p_info) { @@ -106,15 +106,15 @@ void uart_generic_info_init(struct uart_init_info *p_info) #if defined(DO_CONSOLE_INIT) -/******************************************************************************* -* -* consoleInit - initialize target-only console -* -* Only used for debugging. -* -* RETURNS: N/A -* -*/ +/** + * + * consoleInit - initialize target-only console + * + * Only used for debugging. + * + * RETURNS: N/A + * + */ #include @@ -149,16 +149,16 @@ static void bluetooth_init(void) } while ((0)) #endif /* CONFIG_BLUETOOTH */ -/******************************************************************************* -* -* _InitHardware - perform basic hardware initialization -* -* Initialize the Intel 8259A interrupt controller device driver and the -* Intel 8250 UART device driver. -* Also initialize the timer device driver, if required. -* -* RETURNS: N/A -*/ +/** + * + * _InitHardware - perform basic hardware initialization + * + * Initialize the Intel 8259A interrupt controller device driver and the + * Intel 8250 UART device driver. + * Also initialize the timer device driver, if required. + * + * RETURNS: N/A + */ void _InitHardware(void) { diff --git a/arch/x86/include/asmPrv.h b/arch/x86/include/asmPrv.h index c425a42b61d..da87631e6c8 100644 --- a/arch/x86/include/asmPrv.h +++ b/arch/x86/include/asmPrv.h @@ -63,33 +63,33 @@ extern "C" { call h; \ jmp _ExcExit; -/******************************************************************************* -* -* NANO_CPU_EXC_CONNECT - to generate and register an exception stub -* -* Generates an exception stub for the handler, . It is registered -* on the vector given by with the privilege level ; should always -* be 0. -* -* Use this version of the macro if the processor pushes an error code for the -* given exception. -*/ +/** + * + * NANO_CPU_EXC_CONNECT - to generate and register an exception stub + * + * Generates an exception stub for the handler, . It is registered + * on the vector given by with the privilege level ; should always + * be 0. + * + * Use this version of the macro if the processor pushes an error code for the + * given exception. + */ #define NANO_CPU_EXC_CONNECT(h, v, d) \ NANO_CPU_INT_REGISTER_ASM(h, v, d) GTEXT(MK_STUB_NAME(h)); \ SECTION_FUNC(TEXT, MK_STUB_NAME(h)) NANO_CPU_EXC_CONNECT_CODE(h) -/******************************************************************************* -* -* NANO_CPU_EXC_CONNECT_NO_ERR - to generate and register an exception stub -* -* Generates an exception stub for the handler, . It is registered -* on the vector given by with the privilege level ; should always -* be 0. -* -* Use this version of the macro if the processor doesn't push an error code for -* the given exception. The created stub pushes a dummy value of 0 to keep the -* exception stack frame the same. +/** + * + * NANO_CPU_EXC_CONNECT_NO_ERR - to generate and register an exception stub + * + * Generates an exception stub for the handler, . It is registered + * on the vector given by with the privilege level ; should always + * be 0. 
+ * + * Use this version of the macro if the processor doesn't push an error code for + * the given exception. The created stub pushes a dummy value of 0 to keep the + * exception stack frame the same. */ #define NANO_CPU_EXC_CONNECT_NO_ERR(h, v, d) \ NANO_CPU_INT_REGISTER_ASM(h, v, d) GTEXT(MK_STUB_NAME(h)); \ diff --git a/arch/x86/include/asm_inline_gcc.h b/arch/x86/include/asm_inline_gcc.h index bf41f3a6f10..94218736c4e 100644 --- a/arch/x86/include/asm_inline_gcc.h +++ b/arch/x86/include/asm_inline_gcc.h @@ -45,14 +45,14 @@ NANO_CPU_EXC_CONNECT_NO_ERR(handler,vector,0) #else /* !_ASMLANGUAGE */ -/******************************************************************************* -* -* EflagsGet - return the current value of the EFLAGS register -* -* RETURNS: the EFLAGS register. -* -* \NOMANUAL -*/ +/** + * + * EflagsGet - return the current value of the EFLAGS register + * + * RETURNS: the EFLAGS register. + * + * \NOMANUAL + */ static inline unsigned int EflagsGet(void) { @@ -70,15 +70,15 @@ static inline unsigned int EflagsGet(void) #ifdef CONFIG_FP_SHARING -/******************************************************************************* -* -* _FpAccessDisable - disallow use of floating point capabilities -* -* This routine sets CR0[TS] to 1, which disallows the use of FP instructions -* by the currently executing context. -* -* RETURNS: N/A -*/ +/** + * + * _FpAccessDisable - disallow use of floating point capabilities + * + * This routine sets CR0[TS] to 1, which disallows the use of FP instructions + * by the currently executing context. + * + * RETURNS: N/A + */ static inline void _FpAccessDisable(void) { @@ -94,17 +94,17 @@ static inline void _FpAccessDisable(void) } -/******************************************************************************* -* -* _do_fp_ctx_save - save non-integer context information -* -* This routine saves the system's "live" non-integer context into the -* specified area. If the specified task or fiber supports SSE then -* x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved. -* Function is invoked by _FpCtxSave(tCCS *ccs) -* -* RETURNS: N/A -*/ +/** + * + * _do_fp_ctx_save - save non-integer context information + * + * This routine saves the system's "live" non-integer context into the + * specified area. If the specified task or fiber supports SSE then + * x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved. + * Function is invoked by _FpCtxSave(tCCS *ccs) + * + * RETURNS: N/A + */ static inline void _do_fp_ctx_save(int flags, void *preemp_float_reg) { @@ -126,15 +126,15 @@ static inline void _do_fp_ctx_save(int flags, void *preemp_float_reg) } } -/******************************************************************************* -* -* _do_fp_ctx_init - initialize non-integer context information -* -* This routine initializes the system's "live" non-integer context. -* Function is invoked by _FpCtxInit(tCCS *ccs) -* -* RETURNS: N/A -*/ +/** + * + * _do_fp_ctx_init - initialize non-integer context information + * + * This routine initializes the system's "live" non-integer context. + * Function is invoked by _FpCtxInit(tCCS *ccs) + * + * RETURNS: N/A + */ static inline void _do_fp_ctx_init(int flags) { diff --git a/arch/x86/include/gdt.h b/arch/x86/include/gdt.h index c8081b4b030..928bfc446ab 100644 --- a/arch/x86/include/gdt.h +++ b/arch/x86/include/gdt.h @@ -34,7 +34,7 @@ DESCRIPTION This file provides definitions for the Global Descriptor Table (GDT) for the IA-32 architecture. 
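_do_fp_ctx_save() above chooses between the two x87 save instructions based on whether the context uses SSE. A hedged sketch of that branch, assuming GCC inline assembly, an illustrative USES_SSE flag bit, and a single fxsave-sized save area (512 bytes, 16-byte aligned, which also covers fnsave's 108 bytes):

#include <stdint.h>

#define USES_SSE 0x01                   /* illustrative flag bit */

struct fp_ctx {
	uint8_t area[512] __attribute__((aligned(16)));  /* fxsave alignment */
};

static void fp_ctx_save(int flags, struct fp_ctx *ctx)
{
	if (flags & USES_SSE) {
		/* x87/MMX/SSEx state */
		__asm__ volatile ("fxsave (%0)" : : "r" (ctx->area) : "memory");
	} else {
		/* x87/MMX state only */
		__asm__ volatile ("fnsave (%0)" : : "r" (ctx->area) : "memory");
	}
}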
-*/ + */ #ifndef _GDT_H #define _GDT_H diff --git a/arch/x86/include/nano_private.h b/arch/x86/include/nano_private.h index e8f07158dbb..28f43317510 100644 --- a/arch/x86/include/nano_private.h +++ b/arch/x86/include/nano_private.h @@ -42,7 +42,7 @@ This file is also included by assembly language files which must #define _ASMLANGUAGE before including this header file. Note that nanokernel assembly source files obtains structure offset values via "absolute symbols" in the offsets.o module. -*/ + */ #ifndef _NANO_PRIVATE_H #define _NANO_PRIVATE_H @@ -760,18 +760,18 @@ extern tNANO _nanokernel; /* inline function definitions */ -/******************************************************************************* -* -* nanoArchInit - performs architecture-specific initialization -* -* This routine performs architecture-specific initialization of the nanokernel. -* Trivial stuff is done inline; more complex initialization is done via -* function calls. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * nanoArchInit - performs architecture-specific initialization + * + * This routine performs architecture-specific initialization of the nanokernel. + * Trivial stuff is done inline; more complex initialization is done via + * function calls. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void nanoArchInit(void) { @@ -809,18 +809,18 @@ static inline void nanoArchInit(void) } -/******************************************************************************* -* -* fiberRtnValueSet - set the return value for the specified fiber (inline) -* -* The register used to store the return value from a function call invocation is -* set to . It is assumed that the specified is pending, and -* thus the fibers context is stored in its tCCS structure. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * fiberRtnValueSet - set the return value for the specified fiber (inline) + * + * The register used to store the return value from a function call invocation is + * set to . It is assumed that the specified is pending, and + * thus the fibers context is stored in its tCCS structure. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void fiberRtnValueSet( tCCS *fiber, /* pointer to fiber */ diff --git a/arch/x86/include/start_task_arch.h b/arch/x86/include/start_task_arch.h index d34891a9003..02d4967c934 100644 --- a/arch/x86/include/start_task_arch.h +++ b/arch/x86/include/start_task_arch.h @@ -33,7 +33,7 @@ /* DESCRIPTION Intel-specific parts of start_task(). Only FP functionality currently. -*/ + */ #ifndef _START_TASK_ARCH__H_ #define _START_TASK_ARCH__H_ diff --git a/arch/x86/include/swapstk.h b/arch/x86/include/swapstk.h index 4fa95109310..0574fb3f26b 100644 --- a/arch/x86/include/swapstk.h +++ b/arch/x86/include/swapstk.h @@ -39,7 +39,7 @@ NOTE: _Swap() does not use this file as it uses the push instruction to save a context. Changes to the file will not automatically be picked up by _Swap(). Conversely, changes to _Swap() should be mirrored here if the stack frame is modified. -*/ + */ #ifndef _SWAPSTK_H #define _SWAPSTK_H diff --git a/arch/x86/quark/board.h b/arch/x86/quark/board.h index 9589461a6ca..d1bd1a6538c 100644 --- a/arch/x86/quark/board.h +++ b/arch/x86/quark/board.h @@ -34,7 +34,7 @@ DESCRIPTION This header file is used to specify and describe board-level aspects for the 'Quark' BSP. -*/ + */ #ifndef __INCboardh #define __INCboardh @@ -152,31 +152,31 @@ the 'Quark' BSP. 
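Conceptually, fiberRtnValueSet() in nano_private.h above pokes the saved copy of the return-value register (EAX on IA-32) in the pended fiber's context, so the value surfaces as _Swap()'s return value when the fiber resumes. The layout below is invented for the sketch; the real field lives inside tCCS.

#include <stdint.h>

struct saved_regs {
	uint32_t eax;                  /* return-value register on IA-32 */
	/* ... remaining saved registers ... */
};

struct fiber_ccs {
	struct saved_regs coop;        /* filled in by _Swap() when the fiber pends */
};

static void fiber_rtn_value_set(struct fiber_ccs *fiber, unsigned int value)
{
	fiber->coop.eax = value;       /* becomes _Swap()'s return value */
}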
sys_out8(data, (unsigned int)address) #define PLB_BYTE_REG_READ(address) sys_in8((unsigned int)address) -/******************************************************************************* -* -* outByte - output byte to memory location -* -* RETURNS: N/A -* -* NOMANUAL -*/ +/** + * + * outByte - output byte to memory location + * + * RETURNS: N/A + * + * NOMANUAL + */ static inline void outByte(uint8_t data, uint32_t addr) { *(volatile uint8_t *)addr = data; } -/******************************************************************************* -* -* inByte - obtain byte value from memory location -* -* This function issues the 'move' instruction to read a byte from the specified -* memory address. -* -* RETURNS: the byte read from the specified memory address -* -* NOMANUAL -*/ +/** + * + * inByte - obtain byte value from memory location + * + * This function issues the 'move' instruction to read a byte from the specified + * memory address. + * + * RETURNS: the byte read from the specified memory address + * + * NOMANUAL + */ static inline uint8_t inByte(uint32_t addr) { @@ -194,31 +194,31 @@ static inline uint8_t inByte(uint32_t addr) sys_out16(data, (unsigned int)address) #define PLB_WORD_REG_READ(address) sys_in16((unsigned int)address) -/******************************************************************************* -* -* outWord - output word to memory location -* -* RETURNS: N/A -* -* NOMANUAL -*/ +/** + * + * outWord - output word to memory location + * + * RETURNS: N/A + * + * NOMANUAL + */ static inline void outWord(uint16_t data, uint32_t addr) { *(volatile uint16_t *)addr = data; } -/******************************************************************************* -* -* inWord - obtain word value from memory location -* -* This function issues the 'move' instruction to read a word from the specified -* memory address. -* -* RETURNS: the word read from the specified memory address -* -* NOMANUAL -*/ +/** + * + * inWord - obtain word value from memory location + * + * This function issues the 'move' instruction to read a word from the specified + * memory address. + * + * RETURNS: the word read from the specified memory address + * + * NOMANUAL + */ static inline uint16_t inWord(uint32_t addr) { @@ -236,31 +236,31 @@ static inline uint16_t inWord(uint32_t addr) sys_out32(data, (unsigned int)address) #define PLB_LONG_REG_READ(address) sys_in32((unsigned int)address) -/******************************************************************************* -* -* outLong - output long word to memory location -* -* RETURNS: N/A -* -* NOMANUAL -*/ +/** + * + * outLong - output long word to memory location + * + * RETURNS: N/A + * + * NOMANUAL + */ static inline void outLong(uint32_t data, uint32_t addr) { *(volatile uint32_t *)addr = data; } -/******************************************************************************* -* -* inLong - obtain long word value from memory location -* -* This function issues the 'move' instruction to read a word from the specified -* memory address. -* -* RETURNS: the long word read from the specified memory address -* -* NOMANUAL -*/ +/** + * + * inLong - obtain long word value from memory location + * + * This function issues the 'move' instruction to read a word from the specified + * memory address. 
+ * + * RETURNS: the long word read from the specified memory address + * + * NOMANUAL + */ static inline uint32_t inLong(uint32_t addr) { @@ -268,19 +268,19 @@ static inline uint32_t inLong(uint32_t addr) } #endif /* !_ASMLANGUAGE */ -/******************************************************************************* -* -* pci_pin2irq - convert PCI interrupt PIN to IRQ -* -* The routine uses "standard design consideration" and implies that -* INTA (pin 1) -> IRQ 16 -* INTB (pin 2) -> IRQ 17 -* INTC (pin 3) -> IRQ 18 -* INTD (pin 4) -> IRQ 19 -* -* RETURNS: IRQ number, -1 if the result is incorrect -* -*/ +/** + * + * pci_pin2irq - convert PCI interrupt PIN to IRQ + * + * The routine uses "standard design consideration" and implies that + * INTA (pin 1) -> IRQ 16 + * INTB (pin 2) -> IRQ 17 + * INTC (pin 3) -> IRQ 18 + * INTD (pin 4) -> IRQ 19 + * + * RETURNS: IRQ number, -1 if the result is incorrect + * + */ static inline int pci_pin2irq(int pin) { @@ -289,13 +289,13 @@ static inline int pci_pin2irq(int pin) return N_PIC_IRQS + pin - 1; } -/******************************************************************************* -* -* pci_irq2pin - convert IRQ to PCI interrupt pin -* -* RETURNS: pin number, -1 if the result is incorrect -* -*/ +/** + * + * pci_irq2pin - convert IRQ to PCI interrupt pin + * + * RETURNS: pin number, -1 if the result is incorrect + * + */ static inline int pci_irq2pin(int irq) { diff --git a/arch/x86/quark/linker.cmd b/arch/x86/quark/linker.cmd index d6f567899e9..d0362006eed 100644 --- a/arch/x86/quark/linker.cmd +++ b/arch/x86/quark/linker.cmd @@ -33,7 +33,7 @@ /* DESCRIPTION This is the linker script for both standard images and XIP images. -*/ + */ #include diff --git a/arch/x86/quark/system.c b/arch/x86/quark/system.c index f276e29fcee..82489e66181 100644 --- a/arch/x86/quark/system.c +++ b/arch/x86/quark/system.c @@ -37,7 +37,7 @@ for the Quark BSP. Implementation Remarks: Handlers for the secondary serial port have not been added. -*/ + */ #include #include @@ -55,13 +55,13 @@ Handlers for the secondary serial port have not been added. #if defined(DO_CONSOLE_INIT) -/******************************************************************************* -* -* uart_generic_info_init - initialize initialization information for one UART -* -* RETURNS: N/A -* -*/ +/** + * + * uart_generic_info_init - initialize initialization information for one UART + * + * RETURNS: N/A + * + */ void uart_generic_info_init(struct uart_init_info *p_info) { @@ -74,15 +74,15 @@ void uart_generic_info_init(struct uart_init_info *p_info) #if defined(DO_CONSOLE_INIT) -/******************************************************************************* -* -* consoleInit - initialize target-only console -* -* Only used for debugging. -* -* RETURNS: N/A -* -*/ +/** + * + * consoleInit - initialize target-only console + * + * Only used for debugging. + * + * RETURNS: N/A + * + */ #include @@ -101,16 +101,16 @@ static void consoleInit(void) } while ((0)) #endif /* DO_CONSOLE_INIT */ -/******************************************************************************* -* -* _InitHardware - perform basic hardware initialization -* -* Initialize the Intel LOAPIC and IOAPIC device driver and the -* Intel 8250 UART device driver. -* Also initialize the timer device driver, if required. -* -* RETURNS: N/A -*/ +/** + * + * _InitHardware - perform basic hardware initialization + * + * Initialize the Intel LOAPIC and IOAPIC device driver and the + * Intel 8250 UART device driver. 
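The pin/IRQ conversion above is pure arithmetic; with N_PIC_IRQS assumed to be 16, as the INTA..INTD -> IRQ16..IRQ19 table implies, it can be checked in isolation:

#include <assert.h>

#define N_PIC_IRQS 16                  /* assumed value for this sketch */

static int pin2irq(int pin)
{
	if (pin < 1 || pin > 4)
		return -1;             /* incorrect result */
	return N_PIC_IRQS + pin - 1;
}

int main(void)
{
	assert(pin2irq(1) == 16);      /* INTA */
	assert(pin2irq(4) == 19);      /* INTD */
	assert(pin2irq(5) == -1);      /* out of range */
	return 0;
}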
+ * Also initialize the timer device driver, if required. + * + * RETURNS: N/A + */ void _InitHardware(void) { diff --git a/arch/x86/sys_fatal_error_handler.c b/arch/x86/sys_fatal_error_handler.c index 73fcf776ff7..5211439494b 100644 --- a/arch/x86/sys_fatal_error_handler.c +++ b/arch/x86/sys_fatal_error_handler.c @@ -34,7 +34,7 @@ DESCRIPTION This module provides the _SysFatalErrorHandler() routine which is common to supported BSPs. -*/ + */ #include #include @@ -47,25 +47,25 @@ supported BSPs. #define PRINTK(...) #endif /* CONFIG_PRINTK */ -/******************************************************************************* -* -* _SysFatalErrorHandler - fatal error handler -* -* This routine implements the corrective action to be taken when the system -* detects a fatal error. -* -* This sample implementation attempts to abort the current context and allow -* the system to continue executing, which may permit the system to continue -* functioning with degraded capabilities. -* -* System designers may wish to enhance or substitute this sample -* implementation to take other actions, such as logging error (or debug) -* information to a persistent repository and/or rebooting the system. -* -* RETURNS: This function does not return. -* -* \NOMANUAL -*/ +/** + * + * _SysFatalErrorHandler - fatal error handler + * + * This routine implements the corrective action to be taken when the system + * detects a fatal error. + * + * This sample implementation attempts to abort the current context and allow + * the system to continue executing, which may permit the system to continue + * functioning with degraded capabilities. + * + * System designers may wish to enhance or substitute this sample + * implementation to take other actions, such as logging error (or debug) + * information to a persistent repository and/or rebooting the system. + * + * RETURNS: This function does not return. + * + * \NOMANUAL + */ FUNC_NORETURN void _SysFatalErrorHandler( unsigned int reason, /* fatal error reason */ diff --git a/arch/x86/task/strtask.c b/arch/x86/task/strtask.c index cfcfbd62f06..6da2eb26b9d 100644 --- a/arch/x86/task/strtask.c +++ b/arch/x86/task/strtask.c @@ -33,7 +33,7 @@ /* DESCRIPTION Intel-specific parts of start_task(). Only FP functionality currently. -*/ + */ #ifdef CONFIG_MICROKERNEL @@ -51,12 +51,12 @@ Intel-specific parts of start_task(). Only FP functionality currently. #define SSE_GROUP 0x10 -/******************************************************************************* -* -* _StartTaskArch - Intel-specifc parts of task initialization -* -* RETURNS: N/A -*/ +/** + * + * _StartTaskArch - Intel-specifc parts of task initialization + * + * RETURNS: N/A + */ void _StartTaskArch( struct k_proc *X, /* ptr to task control block */ diff --git a/drivers/console/uart_console.c b/drivers/console/uart_console.c index 0c5c0391d67..0b26ebb5f99 100644 --- a/drivers/console/uart_console.c +++ b/drivers/console/uart_console.c @@ -35,7 +35,7 @@ Serial console driver. Hooks into the printk and fputc (for printf) modules. Poll driven. 
-*/ + */ #include #include @@ -56,7 +56,7 @@ #endif #if 0 /* NOTUSED */ -/****************************************************************************** +/** * * consoleIn - get a character from UART * @@ -74,7 +74,7 @@ static int consoleIn(void) #endif #if defined(CONFIG_PRINTK) || defined(CONFIG_STDOUT_CONSOLE) -/****************************************************************************** +/** * * consoleOut - output one character to UART * @@ -209,7 +209,7 @@ void uart_register_input(struct nano_fifo *avail, struct nano_fifo *lines) } while ((0)) #endif -/****************************************************************************** +/** * * uart_console_init - initialize one UART as the console/debug port * diff --git a/drivers/interrupt_controller/i8259.c b/drivers/interrupt_controller/i8259.c index 5d9619a3f6d..7cc1539f1db 100644 --- a/drivers/interrupt_controller/i8259.c +++ b/drivers/interrupt_controller/i8259.c @@ -61,7 +61,7 @@ command is issued, the 8259A will automatically reset the highest IS bit of those that are set, since in the fully nested mode the highest IS level is the last level acknowledged and serviced. -*/ + */ /* * A board support package's board.h header must provide definitions for the @@ -116,15 +116,15 @@ FUNC_ALIAS(_i8259_irq_enable, irq_enable, void); FUNC_ALIAS(_i8259_irq_disable, irq_disable, void); #endif /* CONFIG_SHUTOFF_PIC */ -/******************************************************************************* -* -* _i8259_init - initialize the Intel 8259A PIC device driver -* -* This routine initializes the Intel 8259A PIC device driver and the device -* itself. -* -* RETURNS: N/A -*/ +/** + * + * _i8259_init - initialize the Intel 8259A PIC device driver + * + * This routine initializes the Intel 8259A PIC device driver and the device + * itself. + * + * RETURNS: N/A + */ void _i8259_init(void) { @@ -185,16 +185,16 @@ void _i8259_init(void) } #ifndef CONFIG_SHUTOFF_PIC -/******************************************************************************* -* -* _i8259_eoi_master - send EOI(end of interrupt) signal to the master PIC. -* -* This routine is called at the end of the interrupt handler. -* -* RETURNS: N/A -* -* ERRNO -*/ +/** + * + * _i8259_eoi_master - send EOI(end of interrupt) signal to the master PIC. + * + * This routine is called at the end of the interrupt handler. + * + * RETURNS: N/A + * + * ERRNO + */ void _i8259_eoi_master(unsigned int irq /* IRQ number to send EOI: unused */ @@ -207,17 +207,17 @@ void _i8259_eoi_master(unsigned int irq /* IRQ number to PLB_BYTE_REG_WRITE(I8259_EOI, PIC_IACK(PIC_MASTER_BASE_ADRS)); } -/******************************************************************************* -* -* _i8259_eoi_slave - send EOI(end of interrupt) signal to the slave PIC. -* -* This routine is called at the end of the interrupt handler in the Normal -* Fully Nested Mode. -* -* RETURNS: N/A -* -* ERRNO -*/ +/** + * + * _i8259_eoi_slave - send EOI(end of interrupt) signal to the slave PIC. + * + * This routine is called at the end of the interrupt handler in the Normal + * Fully Nested Mode. 
+ * + * RETURNS: N/A + * + * ERRNO + */ void _i8259_eoi_slave(unsigned int irq /* IRQ number to send EOI: unused */ @@ -239,22 +239,22 @@ void _i8259_eoi_slave(unsigned int irq /* IRQ number to __asm__ volatile("popfl;\n\t"); } -/******************************************************************************* -* -* __I8259IntEnable - enable/disable a specified PIC interrupt input line -* -* This routine enables or disables a specified PIC interrupt input line. To -* enable an interrupt input line, the parameter must be non-zero. -* -* The nanokernel exports the irq_enable() and irq_disable() -* APIs (mapped to _i8259_irq_enable() and _i8259_irq_disable(), respectively). -* This function is called by _i8259_irq_enable() and _i8259_irq_disable() to -* perform the actual enabling/disabling of an IRQ to minimize footprint. -* -* RETURNS: N/A -* -* see also: _i8259_irq_disable()/_i8259_irq_enable -*/ +/** + * + * __I8259IntEnable - enable/disable a specified PIC interrupt input line + * + * This routine enables or disables a specified PIC interrupt input line. To + * enable an interrupt input line, the parameter must be non-zero. + * + * The nanokernel exports the irq_enable() and irq_disable() + * APIs (mapped to _i8259_irq_enable() and _i8259_irq_disable(), respectively). + * This function is called by _i8259_irq_enable() and _i8259_irq_disable() to + * perform the actual enabling/disabling of an IRQ to minimize footprint. + * + * RETURNS: N/A + * + * see also: _i8259_irq_disable()/_i8259_irq_enable + */ static void __I8259IntEnable( unsigned int irq, /* IRQ number to enable */ @@ -290,16 +290,16 @@ static void __I8259IntEnable( } -/******************************************************************************* -* -* _i8259_irq_disable - disable a specified PIC interrupt input line -* -* This routine disables a specified PIC interrupt input line. -* -* RETURNS: N/A -* -* SEE ALSO: _i8259_irq_enable() -*/ +/** + * + * _i8259_irq_disable - disable a specified PIC interrupt input line + * + * This routine disables a specified PIC interrupt input line. + * + * RETURNS: N/A + * + * SEE ALSO: _i8259_irq_enable() + */ void _i8259_irq_disable(unsigned int irq /* IRQ number to disable */ ) @@ -307,16 +307,16 @@ void _i8259_irq_disable(unsigned int irq /* IRQ number to disable */ return __I8259IntEnable(irq, 0); } -/******************************************************************************* -* -* _i8259_irq_enable - enable a specified PIC interrupt input line -* -* This routine enables a specified PIC interrupt input line. -* -* RETURNS: N/A -* -* SEE ALSO: _i8259_irq_disable() -*/ +/** + * + * _i8259_irq_enable - enable a specified PIC interrupt input line + * + * This routine enables a specified PIC interrupt input line. + * + * RETURNS: N/A + * + * SEE ALSO: _i8259_irq_disable() + */ void _i8259_irq_enable(unsigned int irq /* IRQ number to enable */ ) diff --git a/drivers/interrupt_controller/i8259_boi.S b/drivers/interrupt_controller/i8259_boi.S index d4ddcbb3ae4..e5f4b04f7f3 100644 --- a/drivers/interrupt_controller/i8259_boi.S +++ b/drivers/interrupt_controller/i8259_boi.S @@ -42,7 +42,7 @@ The distinction between a spurious interrupt and a real one is detected by looking at the in service register (ISR). The bit (bit 7) will be 1 indicating a real IRQ has been inserted. -*/ + */ /* includes */ #define _ASMLANGUAGE @@ -59,20 +59,20 @@ a real IRQ has been inserted. 
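Enabling or disabling a PIC input line, as __I8259IntEnable() above does, comes down to clearing or setting the IRQ's bit in the 8259A interrupt mask register (OCW1). The sketch below uses the conventional PC port numbers and hypothetical io_in8()/io_out8() helpers rather than this driver's PLB_* macros:

#include <stdint.h>

#define PIC_MASTER_IMR 0x21            /* master 8259A mask register */
#define PIC_SLAVE_IMR  0xA1            /* slave 8259A mask register  */

extern uint8_t io_in8(uint16_t port);              /* hypothetical */
extern void io_out8(uint16_t port, uint8_t value); /* hypothetical */

static void pic_irq_set_enabled(unsigned int irq, int enable)
{
	uint16_t imr = (irq < 8) ? PIC_MASTER_IMR : PIC_SLAVE_IMR;
	uint8_t bit = 1u << (irq & 7);
	uint8_t mask = io_in8(imr);

	if (enable)
		mask &= (uint8_t)~bit;     /* clear mask bit: line enabled */
	else
		mask |= bit;               /* set mask bit: line disabled  */

	io_out8(imr, mask);
}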
GDATA(_i8259_spurious_interrupt_count) -/******************************************************************************* -* -* _i8259_boi_master - detect whether it is spurious interrupt or not -* -* This routine is called before the user's interrupt handler to detect the -* spurious interrupt on the master PIC. If a spurious interrupt condition is -* detected, a global variable is incremented and the execution of the interrupt -* stub is "short circuited", i.e. a return to the interrupted context -* occurs. -* -* void _i8259_boi_master (void) -* -* RETURNS: N/A -*/ +/** + * + * _i8259_boi_master - detect whether it is spurious interrupt or not + * + * This routine is called before the user's interrupt handler to detect the + * spurious interrupt on the master PIC. If a spurious interrupt condition is + * detected, a global variable is incremented and the execution of the interrupt + * stub is "short circuited", i.e. a return to the interrupted context + * occurs. + * + * void _i8259_boi_master (void) + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _i8259_boi_master) /* disable interrupts */ @@ -93,20 +93,20 @@ SECTION_FUNC(TEXT, _i8259_boi_master) ret -/******************************************************************************* -* -* _i8259_boi_slave - detect whether it is spurious interrupt or not -* -* This routine is called before the user's interrupt handler to detect the -* spurious interrupt on the slave PIC. If a spurious interrupt condition is -* detected, a global variable is incremented and the execution of the interrupt -* stub is "short circuited", i.e. a return to the interrupted context -* occurs. -* -* void _i8259_boi_slave (void) -* -* RETURNS: N/A -*/ +/** + * + * _i8259_boi_slave - detect whether it is spurious interrupt or not + * + * This routine is called before the user's interrupt handler to detect the + * spurious interrupt on the slave PIC. If a spurious interrupt condition is + * detected, a global variable is incremented and the execution of the interrupt + * stub is "short circuited", i.e. a return to the interrupted context + * occurs. + * + * void _i8259_boi_slave (void) + * + * RETURNS: N/A + */ SECTION_FUNC(TEXT, _i8259_boi_slave) /* disable interrupts */ diff --git a/drivers/interrupt_controller/ioapic_intr.c b/drivers/interrupt_controller/ioapic_intr.c index 035ac97afb4..0bc97a69119 100644 --- a/drivers/interrupt_controller/ioapic_intr.c +++ b/drivers/interrupt_controller/ioapic_intr.c @@ -73,7 +73,7 @@ This implementation doesn't support multiple IO APICs. INCLUDE FILES: ioapic.h loapic.h SEE ALSO: loApicIntr.c -*/ + */ #include #include @@ -209,14 +209,14 @@ static void _IoApicRedUpdateLo(unsigned int irq, uint32_t value, * IRQ virtualization imposed by the BSP. */ -/******************************************************************************* -* -* _ioapic_init - initialize the IO APIC or xAPIC -* -* This routine initializes the IO APIC or xAPIC. -* -* RETURNS: N/A -*/ +/** + * + * _ioapic_init - initialize the IO APIC or xAPIC + * + * This routine initializes the IO APIC or xAPIC. + * + * RETURNS: N/A + */ void _ioapic_init(void) { @@ -261,14 +261,14 @@ void _ioapic_init(void) } } -/******************************************************************************* -* -* _ioapic_eoi - send EOI (End Of Interrupt) signal to IO APIC -* -* This routine sends an EOI signal to the IO APIC's interrupting source. 
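The spurious-interrupt test that _i8259_boi_master describes can be written out in C: ask the master PIC for its in-service register and check bit 7. Port numbers are the conventional PC values and io_in8()/io_out8() are hypothetical helpers; the real stub is assembly and also short-circuits the interrupt stub on the spurious path.

#include <stdint.h>

#define PIC_MASTER_CMD 0x20            /* master 8259A command/status port */
#define OCW3_READ_ISR  0x0B            /* "next read returns the ISR"      */

extern uint8_t io_in8(uint16_t port);              /* hypothetical */
extern void io_out8(uint16_t port, uint8_t value); /* hypothetical */

static unsigned int spurious_count;

/* Returns non-zero if the pending IRQ7 is real, zero if it was spurious. */
static int irq7_is_real(void)
{
	io_out8(PIC_MASTER_CMD, OCW3_READ_ISR);

	if (io_in8(PIC_MASTER_CMD) & 0x80)
		return 1;                  /* ISR bit 7 set: genuine interrupt */

	spurious_count++;              /* counterpart of _i8259_spurious_interrupt_count */
	return 0;                      /* caller should skip the handler */
}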
-* -* RETURNS: N/A -*/ +/** + * + * _ioapic_eoi - send EOI (End Of Interrupt) signal to IO APIC + * + * This routine sends an EOI signal to the IO APIC's interrupting source. + * + * RETURNS: N/A + */ void _ioapic_eoi(unsigned int irq /* INT number to send EOI */ ) @@ -277,16 +277,16 @@ void _ioapic_eoi(unsigned int irq /* INT number to send EOI */ *(volatile unsigned int *)(LOAPIC_BASE_ADRS + LOAPIC_EOI) = 0; } -/******************************************************************************* -* -* _ioapic_eoi_get - get EOI (End Of Interrupt) information -* -* This routine returns EOI signalling information for a specific IRQ. -* -* RETURNS: address of routine to be called to signal EOI; -* as a side effect, also passes back indication if routine requires -* an interrupt vector argument and what the argument value should be -*/ +/** + * + * _ioapic_eoi_get - get EOI (End Of Interrupt) information + * + * This routine returns EOI signalling information for a specific IRQ. + * + * RETURNS: address of routine to be called to signal EOI; + * as a side effect, also passes back indication if routine requires + * an interrupt vector argument and what the argument value should be + */ void *_ioapic_eoi_get(unsigned int irq, /* INTIN number of interest */ char *argRequired, /* ptr to "argument required" result @@ -317,14 +317,14 @@ void *_ioapic_eoi_get(unsigned int irq, /* INTIN number of interest */ return _ioapic_eoi; } -/******************************************************************************* -* -* _ioapic_irq_enable - enable a specified APIC interrupt input line -* -* This routine enables a specified APIC interrupt input line. -* -* RETURNS: N/A -*/ +/** + * + * _ioapic_irq_enable - enable a specified APIC interrupt input line + * + * This routine enables a specified APIC interrupt input line. + * + * RETURNS: N/A + */ void _ioapic_irq_enable(unsigned int irq /* INTIN number to enable */ ) @@ -332,14 +332,14 @@ void _ioapic_irq_enable(unsigned int irq /* INTIN number to enable */ _IoApicRedUpdateLo(irq, 0, IOAPIC_INT_MASK); } -/******************************************************************************* -* -* _ioapic_irq_disable - disable a specified APIC interrupt input line -* -* This routine disables a specified APIC interrupt input line. -* -* RETURNS: N/A -*/ +/** + * + * _ioapic_irq_disable - disable a specified APIC interrupt input line + * + * This routine disables a specified APIC interrupt input line. 
+ * + * RETURNS: N/A + */ void _ioapic_irq_disable(unsigned int irq /* INTIN number to disable */ ) @@ -347,14 +347,14 @@ void _ioapic_irq_disable(unsigned int irq /* INTIN number to disable */ _IoApicRedUpdateLo(irq, IOAPIC_INT_MASK, IOAPIC_INT_MASK); } -/******************************************************************************* -* -* _ioapic_irq_set - programs the interrupt redirection table -* -* This routine sets up the redirection table entry for the specified IRQ -* -* RETURNS: N/A -*/ +/** + * + * _ioapic_irq_set - programs the interrupt redirection table + * + * This routine sets up the redirection table entry for the specified IRQ + * + * RETURNS: N/A + */ void _ioapic_irq_set(unsigned int irq, /* virtualized IRQ */ unsigned int vector, /* vector number */ uint32_t flags /* interrupt flags */ @@ -368,15 +368,15 @@ void _ioapic_irq_set(unsigned int irq, /* virtualized IRQ */ ioApicRedSetLo(irq, rteValue); } -/******************************************************************************* -* -* _ioapic_int_vec_set - program interrupt vector for specified irq -* -* The routine writes the interrupt vector in the Interrupt Redirection -* Table for specified irq number -* -* RETURNS: N/A -*/ +/** + * + * _ioapic_int_vec_set - program interrupt vector for specified irq + * + * The routine writes the interrupt vector in the Interrupt Redirection + * Table for specified irq number + * + * RETURNS: N/A + */ void _ioapic_int_vec_set(unsigned int irq, /* INT number */ unsigned int vector /* vector number */ ) @@ -386,14 +386,14 @@ void _ioapic_int_vec_set(unsigned int irq, /* INT number */ #ifndef XIOAPIC_DIRECT_ADDRESSING -/******************************************************************************* -* -* __IoApicGet - read a 32 bit IO APIC register -* -* This routine reads the specified IO APIC register using indirect addressing. -* -* RETURNS: register value -*/ +/** + * + * __IoApicGet - read a 32 bit IO APIC register + * + * This routine reads the specified IO APIC register using indirect addressing. + * + * RETURNS: register value + */ static uint32_t __IoApicGet( int32_t offset /* register offset (8 bits) */ @@ -414,14 +414,14 @@ static uint32_t __IoApicGet( return value; } -/******************************************************************************* -* -* __IoApicSet - write a 32 bit IO APIC register -* -* This routine writes the specified IO APIC register using indirect addressing. -* -* RETURNS: N/A -*/ +/** + * + * __IoApicSet - write a 32 bit IO APIC register + * + * This routine writes the specified IO APIC register using indirect addressing. + * + * RETURNS: N/A + */ static void __IoApicSet( int32_t offset, /* register offset (8 bits) */ @@ -442,14 +442,14 @@ static void __IoApicSet( #endif -/******************************************************************************* -* -* ioApicRedGetLo - get low 32 bits of Redirection Table entry -* -* This routine reads the low-order 32 bits of a Redirection Table entry. -* -* RETURNS: 32 low-order bits -*/ +/** + * + * ioApicRedGetLo - get low 32 bits of Redirection Table entry + * + * This routine reads the low-order 32 bits of a Redirection Table entry. 
+ * + * RETURNS: 32 low-order bits + */ static uint32_t ioApicRedGetLo(unsigned int irq /* INTIN number */ ) @@ -468,14 +468,14 @@ static uint32_t ioApicRedGetLo(unsigned int irq /* INTIN number */ #endif } -/******************************************************************************* -* -* ioApicRedSetLo - set low 32 bits of Redirection Table entry -* -* This routine writes the low-order 32 bits of a Redirection Table entry. -* -* RETURNS: N/A -*/ +/** + * + * ioApicRedSetLo - set low 32 bits of Redirection Table entry + * + * This routine writes the low-order 32 bits of a Redirection Table entry. + * + * RETURNS: N/A + */ static void ioApicRedSetLo(unsigned int irq, /* INTIN number */ uint32_t lower32 /* value to be written */ @@ -495,14 +495,14 @@ static void ioApicRedSetLo(unsigned int irq, /* INTIN number */ #endif } -/******************************************************************************* -* -* ioApicRedSetHi - set high 32 bits of Redirection Table entry -* -* This routine writes the high-order 32 bits of a Redirection Table entry. -* -* RETURNS: N/A -*/ +/** + * + * ioApicRedSetHi - set high 32 bits of Redirection Table entry + * + * This routine writes the high-order 32 bits of a Redirection Table entry. + * + * RETURNS: N/A + */ static void ioApicRedSetHi(unsigned int irq, /* INTIN number */ uint32_t upper32 /* value to be written */ @@ -522,15 +522,15 @@ static void ioApicRedSetHi(unsigned int irq, /* INTIN number */ #endif } -/******************************************************************************* -* -* _IoApicRedUpdateLo - modify low 32 bits of Redirection Table entry -* -* This routine modifies selected portions of the low-order 32 bits of a -* Redirection Table entry, as indicated by the associate bit mask. -* -* RETURNS: N/A -*/ +/** + * + * _IoApicRedUpdateLo - modify low 32 bits of Redirection Table entry + * + * This routine modifies selected portions of the low-order 32 bits of a + * Redirection Table entry, as indicated by the associate bit mask. + * + * RETURNS: N/A + */ static void _IoApicRedUpdateLo( unsigned int irq, /* INTIN number */ @@ -548,15 +548,15 @@ static void _IoApicRedUpdateLo( * macro if the I/O APIC supports the MSI redirect capability. */ -/******************************************************************************* -* -* _IoApicRteConfigSet - write to the RTE config register for specified IRQ -* -* This routine writes the specified 32-bit into the RTE configuration -* register for the specified (0 to (IOAPIC_NUM_RTES - 1)) -* -* RETURNS: void -*/ +/** + * + * _IoApicRteConfigSet - write to the RTE config register for specified IRQ + * + * This routine writes the specified 32-bit into the RTE configuration + * register for the specified (0 to (IOAPIC_NUM_RTES - 1)) + * + * RETURNS: void + */ static void _IoApicRteConfigSet(unsigned int irq, /* INTIN number */ uint32_t value /* value to be written */ @@ -576,15 +576,15 @@ static void _IoApicRteConfigSet(unsigned int irq, /* INTIN number */ *((volatile uint32_t *)(IOAPIC_BASE_ADRS + offset)) = value; } -/******************************************************************************* -* -* _IoApicRedirRegSet - write to the specified MSI redirection register -* -* This routine writes the 32-bit into the redirection register -* specified by . -* -* RETURNS: void -*/ +/** + * + * _IoApicRedirRegSet - write to the specified MSI redirection register + * + * This routine writes the 32-bit into the redirection register + * specified by . 
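The IO APIC accessors converted above rely on the indirect register scheme mentioned in the comments (a select register plus a data window) and on read-modify-write updates of the low Redirection Table word. A minimal sketch of that pattern, assuming the standard 82093AA layout (IOREGSEL at offset 0x00, IOWIN at offset 0x10, RTE n at register indexes 0x10 + 2n), a placeholder MMIO base address, and the usual mask bit (bit 16); it is not the driver's own code path.

#include <stdint.h>

#define IOAPIC_BASE        0xFEC00000u /* placeholder MMIO base */
#define IOAPIC_IOREGSEL    (*(volatile uint32_t *)(IOAPIC_BASE + 0x00))
#define IOAPIC_IOWIN       (*(volatile uint32_t *)(IOAPIC_BASE + 0x10))
#define IOAPIC_RTE_LO(irq) (0x10 + 2 * (irq))
#define IOAPIC_INT_MASK    0x00010000u /* mask bit in the low RTE word */

static uint32_t ioapic_read(uint8_t reg)
{
	IOAPIC_IOREGSEL = reg; /* select the register ... */
	return IOAPIC_IOWIN;   /* ... then read it through the window */
}

static void ioapic_write(uint8_t reg, uint32_t value)
{
	IOAPIC_IOREGSEL = reg;
	IOAPIC_IOWIN = value;
}

/* Read-modify-write of selected bits in the low RTE word, mirroring what
 * _IoApicRedUpdateLo() is described as doing. */
static void ioapic_rte_update_lo(unsigned int irq, uint32_t value, uint32_t mask)
{
	uint32_t lo = ioapic_read(IOAPIC_RTE_LO(irq));

	ioapic_write(IOAPIC_RTE_LO(irq), (lo & ~mask) | (value & mask));
}

/* Same call pattern as _ioapic_irq_enable()/_ioapic_irq_disable() above. */
static void ioapic_irq_set_masked(unsigned int irq, int masked)
{
	ioapic_rte_update_lo(irq, masked ? IOAPIC_INT_MASK : 0, IOAPIC_INT_MASK);
}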
+ * + * RETURNS: void + */ static void _IoApicRedirRegSet(unsigned int reg, uint32_t value) { diff --git a/drivers/interrupt_controller/loapic_intr.c b/drivers/interrupt_controller/loapic_intr.c index a6fc4e9ca0c..ae724fdc44b 100644 --- a/drivers/interrupt_controller/loapic_intr.c +++ b/drivers/interrupt_controller/loapic_intr.c @@ -101,7 +101,7 @@ expands this support of all acceptance of two interrupts per vector rather than per priority level. INCLUDE FILES: loapic.h -*/ + */ #include #include @@ -198,15 +198,15 @@ INCLUDE FILES: loapic.h #define IMCR_IOAPIC_OFF 0x00 /* IMCR IOAPIC route disable */ -/******************************************************************************* -* -* _loapic_init - initialize the Local APIC or xAPIC -* -* This routine initializes Local APIC or xAPIC. -* -* RETURNS: N/A -* -*/ +/** + * + * _loapic_init - initialize the Local APIC or xAPIC + * + * This routine initializes Local APIC or xAPIC. + * + * RETURNS: N/A + * + */ void _loapic_init(void) { @@ -260,14 +260,14 @@ void _loapic_init(void) *(volatile int *)(LOAPIC_BASE_ADRS + LOAPIC_EOI) = 0; } -/******************************************************************************* -* -* _loapic_enable - enable the Local xAPIC -* -* This routine enables the Local xAPIC. -* -* RETURNS: N/A -*/ +/** + * + * _loapic_enable - enable the Local xAPIC + * + * This routine enables the Local xAPIC. + * + * RETURNS: N/A + */ void _loapic_enable(void) { @@ -278,14 +278,14 @@ void _loapic_enable(void) irq_unlock(oldLevel); /* UNLOCK INTERRUPTS */ } -/******************************************************************************* -* -* _loapic_disable - disable the Local xAPIC -* -* This routine disables the Local xAPIC. -* -* RETURNS: N/A -*/ +/** + * + * _loapic_disable - disable the Local xAPIC + * + * This routine disables the Local xAPIC. + * + * RETURNS: N/A + */ void _loapic_disable(void) { @@ -296,14 +296,14 @@ void _loapic_disable(void) irq_unlock(oldLevel); /* UNLOCK INTERRUPTS */ } -/******************************************************************************* -* -* _loapic_eoi - send EOI (End Of Interrupt) signal to Local APIC -* -* This routine sends an EOI signal to the Local APIC's interrupting source. -* -* RETURNS: N/A -*/ +/** + * + * _loapic_eoi - send EOI (End Of Interrupt) signal to Local APIC + * + * This routine sends an EOI signal to the Local APIC's interrupting source. + * + * RETURNS: N/A + */ void _loapic_eoi(unsigned int irq) { @@ -311,17 +311,17 @@ void _loapic_eoi(unsigned int irq) *(volatile int *)(LOAPIC_BASE_ADRS + LOAPIC_EOI) = 0; } -/******************************************************************************* -* -* _loapic_int_vec_set - set the vector field in the specified RTE -* -* This routine is utilized by the BSP provided routined _SysIntVecAllocate() -* which in turn is provided to support the irq_connect() API. Once -* a vector has been allocated, this routine is invoked to update the LVT -* entry associated with with the vector. -* -* RETURNS: N/A -*/ +/** + * + * _loapic_int_vec_set - set the vector field in the specified RTE + * + * This routine is utilized by the BSP provided routined _SysIntVecAllocate() + * which in turn is provided to support the irq_connect() API. Once + * a vector has been allocated, this routine is invoked to update the LVT + * entry associated with with the vector. 
+ * + * RETURNS: N/A + */ void _loapic_int_vec_set(unsigned int irq, /* IRQ number of the interrupt */ @@ -354,14 +354,14 @@ void _loapic_int_vec_set(unsigned int irq, /* IRQ number of the irq_unlock(oldLevel); } -/******************************************************************************* -* -* _loapic_irq_enable - enable an individual LOAPIC interrupt (IRQ) -* -* This routine clears the interrupt mask bit in the LVT for the specified IRQ -* -* RETURNS: N/A -*/ +/** + * + * _loapic_irq_enable - enable an individual LOAPIC interrupt (IRQ) + * + * This routine clears the interrupt mask bit in the LVT for the specified IRQ + * + * RETURNS: N/A + */ void _loapic_irq_enable(unsigned int irq /* IRQ number of the interrupt */ @@ -384,14 +384,14 @@ void _loapic_irq_enable(unsigned int irq /* IRQ number of irq_unlock(oldLevel); } -/******************************************************************************* -* -* _loapic_irq_disable - disable an individual LOAPIC interrupt (IRQ) -* -* This routine clears the interrupt mask bit in the LVT for the specified IRQ -* -* RETURNS: N/A -*/ +/** + * + * _loapic_irq_disable - disable an individual LOAPIC interrupt (IRQ) + * + * This routine clears the interrupt mask bit in the LVT for the specified IRQ + * + * RETURNS: N/A + */ void _loapic_irq_disable(unsigned int irq /* IRQ number of the interrupt */ diff --git a/drivers/interrupt_controller/system_apic.c b/drivers/interrupt_controller/system_apic.c index e42bcef89e8..aab8ec8bbb0 100644 --- a/drivers/interrupt_controller/system_apic.c +++ b/drivers/interrupt_controller/system_apic.c @@ -34,7 +34,7 @@ DESCRIPTION This module provides routines to initialize and support board-level hardware for the atom_n28xx variant of generic_pc BSP. -*/ + */ #include #include "board.h" @@ -43,51 +43,51 @@ for the atom_n28xx variant of generic_pc BSP. #include #include -/******************************************************************************* -* -* _SysIntVecAlloc - allocate interrupt vector -* -* This BSP provided routine supports the irq_connect() API. This -* routine is required to perform the following 3 functions: -* -* a) Allocate a vector satisfying the requested priority. The utility routine -* _IntVecAlloc() provided by the nanokernel will be used to perform the -* the allocation since the local APIC prioritizes interrupts as assumed -* by _IntVecAlloc(). -* b) Return End of Interrupt (EOI) and Beginning of Interrupt (BOI) related -* information to be used when generating the interrupt stub code, and -* c) If an interrupt vector can be allocated, and the argument is not -* equal to NANO_SOFT_IRQ, the IOAPIC redirection table (RED) or the -* LOAPIC local vector table (LVT) will be updated with the allocated -* interrupt vector. -* -* The board virtualizes IRQs as follows: -* -* - The first IOAPIC_NUM_RTES IRQs are provided by the IOAPIC -* - The remaining IRQs are provided by the LOAPIC. -* -* Thus, for example, if the IOAPIC supports 24 IRQs: -* -* - IRQ0 to IRQ23 map to IOAPIC IRQ0 to IRQ23 -* - IRQ24 to IRQ29 map to LOAPIC LVT entries as follows: -* -* IRQ24 -> LOAPIC_TIMER -* IRQ25 -> LOAPIC_THERMAL -* IRQ26 -> LOAPIC_PMC -* IRQ27 -> LOAPIC_LINT0 -* IRQ28 -> LOAPIC_LINT1 -* IRQ29 -> LOAPIC_ERROR -* -* The IOAPIC_NUM_RTES macro is provided by board.h, and it specifies the number -* of IRQs supported by the on-board I/O APIC device. 
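The _loapic_irq_enable()/_loapic_irq_disable() comments above describe setting and clearing the mask bit of an LVT entry. A rough sketch, assuming the architectural default local APIC base (0xFEE00000) and the standard LVT mask bit (bit 16); the driver's per-IRQ lookup of the right LVT entry is simplified here to a direct register-offset parameter.

#include <stdint.h>

#define LOAPIC_BASE       0xFEE00000u /* architectural default base */
#define LOAPIC_LVT_TIMER  0x320       /* example LVT register offset */
#define LOAPIC_LVT_MASKED (1u << 16)  /* standard LVT mask bit */

static volatile uint32_t *loapic_lvt(unsigned int offset)
{
	return (volatile uint32_t *)(LOAPIC_BASE + offset);
}

/* Set or clear the mask bit of one LVT entry, leaving the rest intact. */
static void loapic_lvt_set_masked(unsigned int offset, int masked)
{
	uint32_t lvt = *loapic_lvt(offset);

	*loapic_lvt(offset) = masked ? (lvt | LOAPIC_LVT_MASKED)
				     : (lvt & ~LOAPIC_LVT_MASKED);
}

/* e.g. loapic_lvt_set_masked(LOAPIC_LVT_TIMER, 0); unmasks the timer LVT */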
-* -* RETURNS: the allocated interrupt vector -* -* INTERNAL -* For debug kernels, this routine will return -1 if there are no vectors -* remaining in the specified level, or if the or -* parameters are invalid. -*/ +/** + * + * _SysIntVecAlloc - allocate interrupt vector + * + * This BSP provided routine supports the irq_connect() API. This + * routine is required to perform the following 3 functions: + * + * a) Allocate a vector satisfying the requested priority. The utility routine + * _IntVecAlloc() provided by the nanokernel will be used to perform the + * the allocation since the local APIC prioritizes interrupts as assumed + * by _IntVecAlloc(). + * b) Return End of Interrupt (EOI) and Beginning of Interrupt (BOI) related + * information to be used when generating the interrupt stub code, and + * c) If an interrupt vector can be allocated, and the argument is not + * equal to NANO_SOFT_IRQ, the IOAPIC redirection table (RED) or the + * LOAPIC local vector table (LVT) will be updated with the allocated + * interrupt vector. + * + * The board virtualizes IRQs as follows: + * + * - The first IOAPIC_NUM_RTES IRQs are provided by the IOAPIC + * - The remaining IRQs are provided by the LOAPIC. + * + * Thus, for example, if the IOAPIC supports 24 IRQs: + * + * - IRQ0 to IRQ23 map to IOAPIC IRQ0 to IRQ23 + * - IRQ24 to IRQ29 map to LOAPIC LVT entries as follows: + * + * IRQ24 -> LOAPIC_TIMER + * IRQ25 -> LOAPIC_THERMAL + * IRQ26 -> LOAPIC_PMC + * IRQ27 -> LOAPIC_LINT0 + * IRQ28 -> LOAPIC_LINT1 + * IRQ29 -> LOAPIC_ERROR + * + * The IOAPIC_NUM_RTES macro is provided by board.h, and it specifies the number + * of IRQs supported by the on-board I/O APIC device. + * + * RETURNS: the allocated interrupt vector + * + * INTERNAL + * For debug kernels, this routine will return -1 if there are no vectors + * remaining in the specified level, or if the or + * parameters are invalid. + */ int _SysIntVecAlloc( unsigned int irq, /* virtualized IRQ */ @@ -170,27 +170,27 @@ int _SysIntVecAlloc( return vector; } -/******************************************************************************* -* -* _SysIntVecProgram - program interrupt controller -* -* This BSP provided routine programs the appropriate interrupt controller -* with the given vector based on the given IRQ parameter. -* -* Drivers call this routine instead of irq_connect() when interrupts are -* configured statically. -* -* The Clanton board virtualizes IRQs as follows: -* -* - The first IOAPIC_NUM_RTES IRQs are provided by the IOAPIC so the IOAPIC -* is programmed for these IRQs -* - The remaining IRQs are provided by the LOAPIC and hence the LOAPIC is -* programmed. -* -* The IOAPIC_NUM_RTES macro is provided by board.h, and it specifies the number -* of IRQs supported by the on-board I/O APIC device. -* -*/ +/** + * + * _SysIntVecProgram - program interrupt controller + * + * This BSP provided routine programs the appropriate interrupt controller + * with the given vector based on the given IRQ parameter. + * + * Drivers call this routine instead of irq_connect() when interrupts are + * configured statically. + * + * The Clanton board virtualizes IRQs as follows: + * + * - The first IOAPIC_NUM_RTES IRQs are provided by the IOAPIC so the IOAPIC + * is programmed for these IRQs + * - The remaining IRQs are provided by the LOAPIC and hence the LOAPIC is + * programmed. + * + * The IOAPIC_NUM_RTES macro is provided by board.h, and it specifies the number + * of IRQs supported by the on-board I/O APIC device. 
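The IRQ virtualization described in the _SysIntVecAlloc() comment splits the virtual IRQ space between the IO APIC and the local APIC. One plausible shape for that routing decision, using the _ioapic_int_vec_set()/_loapic_int_vec_set() declarations visible in this patch; the IOAPIC_NUM_RTES value (24 is just the example figure used above, the real value comes from board.h) and the subtraction for LOAPIC-relative numbering are assumptions for illustration, not the driver's actual body.

/* Declarations as they appear in this patch. */
extern void _ioapic_int_vec_set(unsigned int irq, unsigned int vector);
extern void _loapic_int_vec_set(unsigned int irq, unsigned int vector);

#define IOAPIC_NUM_RTES 24 /* placeholder; provided by board.h on real boards */

/* Route the vector to whichever controller provides this virtualized IRQ. */
static void example_int_vec_program(unsigned int vector, unsigned int irq)
{
	if (irq < IOAPIC_NUM_RTES) {
		_ioapic_int_vec_set(irq, vector);                   /* IOAPIC RTE */
	} else {
		_loapic_int_vec_set(irq - IOAPIC_NUM_RTES, vector); /* LOAPIC LVT */
	}
}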
+ * + */ void _SysIntVecProgram(unsigned int vector, /* vector number */ unsigned int irq /* virtualized IRQ */ @@ -205,22 +205,22 @@ void _SysIntVecProgram(unsigned int vector, /* vector number */ } -/******************************************************************************* -* -* irq_enable - enable an individual interrupt (IRQ) -* -* The public interface for enabling/disabling a specific IRQ for the IA-32 -* architecture is defined as follows in include/nanokernel/x86/arch.h -* -* extern void irq_enable (unsigned int irq); -* extern void irq_disable (unsigned int irq); -* -* The irq_enable() routine is provided by the BSP due to the -* IRQ virtualization that is performed by this BSP. See the comments -* in _SysIntVecAlloc() for more information regarding IRQ virtualization. -* -* RETURNS: N/A -*/ +/** + * + * irq_enable - enable an individual interrupt (IRQ) + * + * The public interface for enabling/disabling a specific IRQ for the IA-32 + * architecture is defined as follows in include/nanokernel/x86/arch.h + * + * extern void irq_enable (unsigned int irq); + * extern void irq_disable (unsigned int irq); + * + * The irq_enable() routine is provided by the BSP due to the + * IRQ virtualization that is performed by this BSP. See the comments + * in _SysIntVecAlloc() for more information regarding IRQ virtualization. + * + * RETURNS: N/A + */ void irq_enable(unsigned int irq) { @@ -231,16 +231,16 @@ void irq_enable(unsigned int irq) } } -/******************************************************************************* -* -* irq_disable - disable an individual interrupt (IRQ) -* -* The irq_disable() routine is provided by the BSP due to the -* IRQ virtualization that is performed by this BSP. See the comments -* in _SysIntVecAlloc() for more information regarding IRQ virtualization. -* -* RETURNS: N/A -*/ +/** + * + * irq_disable - disable an individual interrupt (IRQ) + * + * The irq_disable() routine is provided by the BSP due to the + * IRQ virtualization that is performed by this BSP. See the comments + * in _SysIntVecAlloc() for more information regarding IRQ virtualization. + * + * RETURNS: N/A + */ void irq_disable(unsigned int irq) { diff --git a/drivers/interrupt_controller/system_pic.c b/drivers/interrupt_controller/system_pic.c index 534355f2957..b7cba127794 100644 --- a/drivers/interrupt_controller/system_pic.c +++ b/drivers/interrupt_controller/system_pic.c @@ -34,7 +34,7 @@ DESCRIPTION This module provides routines to initialize and support board-level hardware for the pentium4 and minuteia variants of the generic_pc BSP. -*/ + */ #include "board.h" #include @@ -45,34 +45,34 @@ for the pentium4 and minuteia variants of the generic_pc BSP. IRQ_CONNECT_STATIC(pic_master, PIC_MASTER_STRAY_INT_LVL, 0, _i8259_boi_master, 0); IRQ_CONNECT_STATIC(pic_slave, PIC_SLAVE_STRAY_INT_LVL, 0, _i8259_boi_slave, 0); -/******************************************************************************* -* -* _SysIntVecAlloc - allocate interrupt vector -* -* This BSP provided routine supports the irq_connect() API. This -* routine performs the following functions: -* -* a) Allocates a vector satisfying the requested priority, where possible. -* When the argument is not equal to NANO_SOFT_IRQ, the vector assigned -* to the during interrupt controller initialization is returned, -* which may or may not have the desired prioritization. 
(Prioritization of -* such vectors is fixed by the 8259 interrupt controllers, and cannot be -* programmed on an IRQ basis; for example, IRQ0 is always the highest -* priority interrupt no matter which interrupt vector was assigned to IRQ0.) -* b) Provides End of Interrupt (EOI) and Beginning of Interrupt (BOI) related -* information to be used when generating the interrupt stub code. -* -* The pcPentium4 board virtualizes IRQs as follows: -* -* - IRQ0 to IRQ7 are provided by the master i8259 PIC -* - IRQ8 to IRQ15 are provided by the slave i8259 PIC -* -* RETURNS: the allocated interrupt vector -* -* INTERNAL -* For debug kernels, this routine will return -1 for invalid or -* parameter values. -*/ +/** + * + * _SysIntVecAlloc - allocate interrupt vector + * + * This BSP provided routine supports the irq_connect() API. This + * routine performs the following functions: + * + * a) Allocates a vector satisfying the requested priority, where possible. + * When the argument is not equal to NANO_SOFT_IRQ, the vector assigned + * to the during interrupt controller initialization is returned, + * which may or may not have the desired prioritization. (Prioritization of + * such vectors is fixed by the 8259 interrupt controllers, and cannot be + * programmed on an IRQ basis; for example, IRQ0 is always the highest + * priority interrupt no matter which interrupt vector was assigned to IRQ0.) + * b) Provides End of Interrupt (EOI) and Beginning of Interrupt (BOI) related + * information to be used when generating the interrupt stub code. + * + * The pcPentium4 board virtualizes IRQs as follows: + * + * - IRQ0 to IRQ7 are provided by the master i8259 PIC + * - IRQ8 to IRQ15 are provided by the slave i8259 PIC + * + * RETURNS: the allocated interrupt vector + * + * INTERNAL + * For debug kernels, this routine will return -1 for invalid or + * parameter values. + */ int _SysIntVecAlloc( unsigned int irq, /* virtualized IRQ */ @@ -147,20 +147,20 @@ int _SysIntVecAlloc( return vector; } -/******************************************************************************* -* -* _SysIntVecProgram - program interrupt controller -* -* This BSP provided routine programs the appropriate interrupt controller -* with the given vector based on the given IRQ parameter. -* -* Drivers call this routine instead of irq_connect() when interrupts are -* configured statically. -* -* For PIC-equipped boards this routine does nothing, as PIC does not need -* any additional setup -* -*/ +/** + * + * _SysIntVecProgram - program interrupt controller + * + * This BSP provided routine programs the appropriate interrupt controller + * with the given vector based on the given IRQ parameter. + * + * Drivers call this routine instead of irq_connect() when interrupts are + * configured statically. + * + * For PIC-equipped boards this routine does nothing, as PIC does not need + * any additional setup + * + */ void _SysIntVecProgram(unsigned int vector, /* vector number */ unsigned int irq /* virtualized IRQ */ diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index d86e7e3c210..5e763052199 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -113,7 +113,7 @@ where to start a loop on these classes. Thus, a subsequent pci scan looking for class y will directly start at the relevant bus and device instead of restarting from 0. 
-*/ + */ #include #include @@ -167,12 +167,12 @@ struct lookup_data { static struct bus_dev class_bd[PCI_CLASS_MAX] = {}; static struct lookup_data __noinit lookup; -/****************************************************************************** -* -* pci_get_bar_config - return the configuration for the specified BAR -* -* RETURNS: 0 if BAR is implemented, -1 if not. -*/ +/** + * + * pci_get_bar_config - return the configuration for the specified BAR + * + * RETURNS: 0 if BAR is implemented, -1 if not. + */ static inline int pci_bar_config_get(union pci_addr_reg pci_ctrl_addr, uint32_t *config) @@ -212,7 +212,7 @@ static inline int pci_bar_config_get(union pci_addr_reg pci_ctrl_addr, return -1; } -/****************************************************************************** +/** * * pci_bar_params_get - retrieve the I/O address and IRQ of the specified BAR * @@ -263,7 +263,7 @@ static inline int pci_bar_params_get(union pci_addr_reg pci_ctrl_addr, return 0; } -/****************************************************************************** +/** * * pci_dev_scan - scan the specified PCI device for all sub functions * @@ -374,7 +374,7 @@ void pci_bus_scan_init(void) lookup.bar = 0; } -/****************************************************************************** +/** * * pci_bus_scan - scans PCI bus for devices * @@ -428,7 +428,7 @@ int pci_bus_scan(struct pci_dev_info *dev_info) } #ifdef CONFIG_PCI_DEBUG -/****************************************************************************** +/** * * pci_show - Show PCI device * diff --git a/drivers/pci/pci_config.c b/drivers/pci/pci_config.c index 0f59bf18276..2d46dcc7b3d 100644 --- a/drivers/pci/pci_config.c +++ b/drivers/pci/pci_config.c @@ -35,7 +35,7 @@ DESCRIPTION This module implements the PCI config space access functions -*/ + */ #include #include @@ -52,18 +52,18 @@ This module implements the PCI config space access functions } while (0) #endif -/******************************************************************************* -* -* pci_config_out_long - write a 32bit data to pci reg in offset -* -* @param bus_no Bus number. -* @param device_no Device number -* @param func_no Function number -* @param offset Offset into the configuration space. -* @param data Data written to the offset. -* -* RETURNS: N/A -*/ +/** + * + * pci_config_out_long - write a 32bit data to pci reg in offset + * + * @param bus_no Bus number. + * @param device_no Device number + * @param func_no Function number + * @param offset Offset into the configuration space. + * @param data Data written to the offset. + * + * RETURNS: N/A + */ void pci_config_out_long(uint32_t bus_no, uint32_t device_no, uint32_t func_no, uint32_t offset, uint32_t data) { @@ -82,18 +82,18 @@ void pci_config_out_long(uint32_t bus_no, uint32_t device_no, uint32_t func_no, pci_write(DEFAULT_PCI_CONTROLLER, pci_addr, sizeof(uint32_t), data); } -/******************************************************************************* -* -* pci_config_out_word - write a 16bit data to pci reg in offset -* -* @param bus_no Bus number. -* @param device_no Device number. -* @param func_no Function number. -* @param offset Offset into the configuration space. -* @param data Data written to the offset. -* -* RETURNS: N/A -*/ +/** + * + * pci_config_out_word - write a 16bit data to pci reg in offset + * + * @param bus_no Bus number. + * @param device_no Device number. + * @param func_no Function number. + * @param offset Offset into the configuration space. + * @param data Data written to the offset. 
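The pci_config_out_*() comments above take a bus/device/function/offset tuple. On a classic PC that tuple maps onto the type-1 configuration mechanism (address port 0xCF8, data port 0xCFC); the sketch below shows that standard encoding only and is not the driver's own pci_write() path. The outl() helper is a placeholder.

#include <stdint.h>

static inline void outl(uint32_t data, uint16_t port)
{
	__asm__ volatile("outl %0, %1" : : "a"(data), "Nd"(port));
}

#define PCI_CONFIG_ADDRESS 0xCF8 /* type-1 configuration address port */
#define PCI_CONFIG_DATA    0xCFC /* type-1 configuration data port */

/* Build the standard type-1 configuration address, then write 32 bits. */
static void example_pci_config_out_long(uint32_t bus, uint32_t dev,
					uint32_t func, uint32_t offset,
					uint32_t data)
{
	uint32_t addr = (1u << 31)       /* enable bit */
		      | (bus << 16)
		      | (dev << 11)
		      | (func << 8)
		      | (offset & 0xFC); /* dword-aligned register */

	outl(addr, PCI_CONFIG_ADDRESS);
	outl(data, PCI_CONFIG_DATA);
}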
+ * + * RETURNS: N/A + */ void pci_config_out_word(uint32_t bus_no, uint32_t device_no, uint32_t func_no, uint32_t offset, uint16_t data) { @@ -112,18 +112,18 @@ void pci_config_out_word(uint32_t bus_no, uint32_t device_no, uint32_t func_no, pci_write(DEFAULT_PCI_CONTROLLER, pci_addr, sizeof(uint16_t), data); } -/******************************************************************************* -* -* pci_config_out_byte - write a 8bit data to pci reg in offset -* -* @param bus_no Bus number. -* @param device_no Device number. -* @param func_no Function number. -* @param offset Offset into the configuration space. -* @param data Data written to the offset. -* -* RETURNS: N/A -*/ +/** + * + * pci_config_out_byte - write a 8bit data to pci reg in offset + * + * @param bus_no Bus number. + * @param device_no Device number. + * @param func_no Function number. + * @param offset Offset into the configuration space. + * @param data Data written to the offset. + * + * RETURNS: N/A + */ void pci_config_out_byte(uint32_t bus_no, uint32_t device_no, uint32_t func_no, uint32_t offset, uint8_t data) { @@ -142,19 +142,19 @@ void pci_config_out_byte(uint32_t bus_no, uint32_t device_no, uint32_t func_no, pci_write(DEFAULT_PCI_CONTROLLER, pci_addr, sizeof(uint8_t), data); } -/******************************************************************************* -* -* pci_config_in_long - read a 32bit data from pci reg in offset -* -* @param bus_no Bus number. -* @param device_no Device number. -* @param func_no Function number. -* @param offset Offset into the configuration space. -* @param data Data read from the offset. -* -* RETURNS: N/A -* -*/ +/** + * + * pci_config_in_long - read a 32bit data from pci reg in offset + * + * @param bus_no Bus number. + * @param device_no Device number. + * @param func_no Function number. + * @param offset Offset into the configuration space. + * @param data Data read from the offset. + * + * RETURNS: N/A + * + */ void pci_config_in_long(uint32_t bus_no, uint32_t device_no, uint32_t func_no, uint32_t offset, uint32_t *data) { @@ -173,19 +173,19 @@ void pci_config_in_long(uint32_t bus_no, uint32_t device_no, uint32_t func_no, pci_read(DEFAULT_PCI_CONTROLLER, pci_addr, sizeof(uint32_t), data); } -/******************************************************************************* -* -* pci_config_in_word - read in a 16bit data from a pci reg in offset -* -* @param bus_no Bus number. -* @param device_no Device number. -* @param func_no Function number. -* @param offset Offset into the configuration space. -* @param data Data read from the offset. -* -* RETURNS: N/A -* -*/ +/** + * + * pci_config_in_word - read in a 16bit data from a pci reg in offset + * + * @param bus_no Bus number. + * @param device_no Device number. + * @param func_no Function number. + * @param offset Offset into the configuration space. + * @param data Data read from the offset. + * + * RETURNS: N/A + * + */ void pci_config_in_word(uint32_t bus_no, uint32_t device_no, uint32_t func_no, uint32_t offset, uint16_t *data) @@ -210,19 +210,19 @@ void pci_config_in_word(uint32_t bus_no, uint32_t device_no, uint32_t func_no, *data = (uint16_t)pci_data; } -/******************************************************************************* -* -* pci_config_in_byte - read in a 8bit data from a pci reg in offset -* -* @param bus_no Bus number. -* @param device_no Device number. -* @param func_no Function number. -* @param offset Offset into the configuration space. -* @param data Data read from the offset. 
-* -* RETURNS: N/A -* -*/ +/** + * + * pci_config_in_byte - read in a 8bit data from a pci reg in offset + * + * @param bus_no Bus number. + * @param device_no Device number. + * @param func_no Function number. + * @param offset Offset into the configuration space. + * @param data Data read from the offset. + * + * RETURNS: N/A + * + */ void pci_config_in_byte(uint32_t bus_no, uint32_t device_no, uint32_t func_no, uint32_t offset, uint8_t *data) @@ -247,23 +247,23 @@ void pci_config_in_byte(uint32_t bus_no, uint32_t device_no, uint32_t func_no, *data = (uint8_t)pci_data; } -/******************************************************************************* -* -* pci_config_ext_cap_ptr_find - find extended capability in ECP linked list -* -* This routine searches for an extended capability in the linked list of -* capabilities in config space. If found, the offset of the first byte -* of the capability of interest in config space is returned via pOffset. -* -* @param ext_cap_find_id Extended capabilities ID to search for. -* @param bus PCI bus number. -* @param device PCI device number. -* @param function PCI function number. -* @param p_offset Returned config space offset. -* -* RETURNS: 0 if Extended Capability found, -1 otherwise -* -*/ +/** + * + * pci_config_ext_cap_ptr_find - find extended capability in ECP linked list + * + * This routine searches for an extended capability in the linked list of + * capabilities in config space. If found, the offset of the first byte + * of the capability of interest in config space is returned via pOffset. + * + * @param ext_cap_find_id Extended capabilities ID to search for. + * @param bus PCI bus number. + * @param device PCI device number. + * @param function PCI function number. + * @param p_offset Returned config space offset. + * + * RETURNS: 0 if Extended Capability found, -1 otherwise + * + */ int pci_config_ext_cap_ptr_find(uint8_t ext_cap_find_id, uint32_t bus, uint32_t device, uint32_t function, diff --git a/drivers/pci/pci_interface.c b/drivers/pci/pci_interface.c index 0b2ee1f638d..97430de6e3a 100644 --- a/drivers/pci/pci_interface.c +++ b/drivers/pci/pci_interface.c @@ -35,7 +35,7 @@ DESCRIPTION This module implements the PCI H/W access functions. -*/ + */ #include #include @@ -52,16 +52,16 @@ This module implements the PCI H/W access functions. #error "PCI_CTRL_DATA_REG cannot be zero" #endif -/****************************************************************************** -* -* pci_ctrl_read - read a PCI controller register -* -* This routine reads the specified register from the PCI controller and -* places the data into the provided buffer. -* -* RETURNS: N/A -* -*/ +/** + * + * pci_ctrl_read - read a PCI controller register + * + * This routine reads the specified register from the PCI controller and + * places the data into the provided buffer. + * + * RETURNS: N/A + * + */ static void pci_ctrl_read(uint32_t reg, /* PCI register to read */ uint32_t *data, /* where to put the data */ @@ -86,16 +86,16 @@ static void pci_ctrl_read(uint32_t reg, /* PCI register to read */ } } -/****************************************************************************** -* -* pci_ctrl_write - write a PCI controller register -* -* This routine writes the provided data to the specified register in the PCI -* controller. -* -* RETURNS: N/A -* -*/ +/** + * + * pci_ctrl_write - write a PCI controller register + * + * This routine writes the provided data to the specified register in the PCI + * controller. 
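pci_config_ext_cap_ptr_find(), converted a few hunks above, is described as walking a linked list of capabilities in configuration space and returning the offset of the match. The classic capability-list walk looks like the sketch below, reusing the pci_config_in_byte() declaration from this patch; the 0x34 capabilities-pointer offset and the (ID, next) byte layout are the standard PCI convention, and the function name here is hypothetical rather than the driver's implementation.

#include <stdint.h>

/* Declaration as it appears in this patch. */
extern void pci_config_in_byte(uint32_t bus_no, uint32_t device_no,
			       uint32_t func_no, uint32_t offset,
			       uint8_t *data);

#define PCI_CAP_PTR_OFFSET 0x34 /* standard capabilities pointer register */

/* Walk the capability list; return 0 and the offset if the ID is found,
 * -1 if the list terminates without a match. */
static int example_cap_find(uint8_t cap_id, uint32_t bus, uint32_t dev,
			    uint32_t func, uint32_t *p_offset)
{
	uint8_t ptr;
	uint8_t id;

	pci_config_in_byte(bus, dev, func, PCI_CAP_PTR_OFFSET, &ptr);

	while (ptr != 0) {
		pci_config_in_byte(bus, dev, func, ptr, &id);      /* capability ID */
		if (id == cap_id) {
			*p_offset = ptr;
			return 0;
		}
		pci_config_in_byte(bus, dev, func, ptr + 1, &ptr); /* next pointer */
	}
	return -1;
}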
+ * + * RETURNS: N/A + * + */ static void pci_ctrl_write(uint32_t reg, /* PCI register to write */ uint32_t data, /* data to write */ @@ -121,15 +121,15 @@ static void pci_ctrl_write(uint32_t reg, /* PCI register to write */ } } -/****************************************************************************** -* -* pci_ctrl_data_read - read the PCI controller data register -* -* This routine reads the data register of the specified PCI controller. -* -* RETURNS: 0 or -1 -* -*/ +/** + * + * pci_ctrl_data_read - read the PCI controller data register + * + * This routine reads the data register of the specified PCI controller. + * + * RETURNS: 0 or -1 + * + */ static int pci_ctrl_data_read(uint32_t controller, /* controller number */ uint32_t offset, /* offset within data region */ @@ -147,16 +147,16 @@ static int pci_ctrl_data_read(uint32_t controller, /* controller number */ return 0; } -/****************************************************************************** -* -* pci_ctrl_data_write - write the PCI controller data register -* -* This routine writes the provided data to the data register of the -* specified PCI controller. -* -* RETURNS: 0 or -1 -* -*/ +/** + * + * pci_ctrl_data_write - write the PCI controller data register + * + * This routine writes the provided data to the data register of the + * specified PCI controller. + * + * RETURNS: 0 or -1 + * + */ static int pci_ctrl_data_write(uint32_t controller, /* controller number */ uint32_t offset, /* offset within address register */ @@ -174,16 +174,16 @@ static int pci_ctrl_data_write(uint32_t controller, /* controller number */ return 0; } -/****************************************************************************** -* -* pci_ctrl_addr_write - write the PCI controller address register -* -* This routine writes the provided data to the address register of the -* specified PCI controller. -* -* RETURNS: 0 or -1 -* -*/ +/** + * + * pci_ctrl_addr_write - write the PCI controller address register + * + * This routine writes the provided data to the address register of the + * specified PCI controller. + * + * RETURNS: 0 or -1 + * + */ static int pci_ctrl_addr_write(uint32_t controller, /* controller number */ uint32_t offset, /* offset within address register */ @@ -200,52 +200,52 @@ static int pci_ctrl_addr_write(uint32_t controller, /* controller number */ return 0; } -/******************************************************************************* -* -* pci_read - read a PCI register from a device -* -* This routine reads data from a PCI device's configuration space. The -* device and register to read is specified by the address parameter ("addr") -* and must be set appropriately by the caller. The address is defined by -* the structure type pci_addr_t and contains the following members: -* -* bus: PCI bus number (0-255) -* device: PCI device number (0-31) -* func: device function number (0-7) -* reg: device 32-bit register number to read (0-63) -* offset: offset within 32-bit register to read (0-3) -* -* The size parameter specifies the number of bytes to read from the PCI -* configuration space, valid values are 1, 2, and 4 bytes. A 32-bit value -* is always returned but it will contain only the number of bytes specified -* by the size parameter. -* -* If multiple PCI controllers are present in the system, the controller id -* can be specified in the "controller" parameter. If only one controller -* is present, the id DEFAULT_PCI_CONTROLLER can be used to denote this -* controller. 
-* -* Example: -* -* union pci_addr_reg addr; -* uint32_t status; -* -* addr.field.bus = 0; /@ PCI bus zero @/ -* addr.field.device = 1; /@ PCI device one @/ -* addr.field.func = 0; /@ PCI function zero @/ -* addr.field.reg = 4; /@ PCI register 4 @/ -* addr.field.offset = 0; /@ PCI register offset @/ -* -* pci_read (DEFAULT_PCI_CONTROLLER, addr, sizeof(uint16_t), &status); -* -* -* NOTE: -* Reading of PCI data must be performed as an atomic operation. It is up to -* the caller to enforce this. -* -* RETURNS: N/A -* -*/ +/** + * + * pci_read - read a PCI register from a device + * + * This routine reads data from a PCI device's configuration space. The + * device and register to read is specified by the address parameter ("addr") + * and must be set appropriately by the caller. The address is defined by + * the structure type pci_addr_t and contains the following members: + * + * bus: PCI bus number (0-255) + * device: PCI device number (0-31) + * func: device function number (0-7) + * reg: device 32-bit register number to read (0-63) + * offset: offset within 32-bit register to read (0-3) + * + * The size parameter specifies the number of bytes to read from the PCI + * configuration space, valid values are 1, 2, and 4 bytes. A 32-bit value + * is always returned but it will contain only the number of bytes specified + * by the size parameter. + * + * If multiple PCI controllers are present in the system, the controller id + * can be specified in the "controller" parameter. If only one controller + * is present, the id DEFAULT_PCI_CONTROLLER can be used to denote this + * controller. + * + * Example: + * + * union pci_addr_reg addr; + * uint32_t status; + * + * addr.field.bus = 0; /@ PCI bus zero @/ + * addr.field.device = 1; /@ PCI device one @/ + * addr.field.func = 0; /@ PCI function zero @/ + * addr.field.reg = 4; /@ PCI register 4 @/ + * addr.field.offset = 0; /@ PCI register offset @/ + * + * pci_read (DEFAULT_PCI_CONTROLLER, addr, sizeof(uint16_t), &status); + * + * + * NOTE: + * Reading of PCI data must be performed as an atomic operation. It is up to + * the caller to enforce this. + * + * RETURNS: N/A + * + */ void pci_read(uint32_t controller, /* PCI controller to use */ union pci_addr_reg addr, /* PCI address to read */ @@ -290,52 +290,52 @@ void pci_read(uint32_t controller, /* PCI controller to use */ pci_ctrl_data_read(controller, access_offset, data, access_size); } -/******************************************************************************* -* -* pci_write - write a to a PCI register -* -* This routine writes data to a PCI device's configuration space. The -* device and register to write is specified by the address parameter ("addr") -* and must be set appropriately by the caller. The address is defined by -* the structure type pci_addr_t and contains the following members: -* -* bus: PCI bus number (0-255) -* device: PCI device number (0-31) -* func: device function number (0-7) -* reg: device register number to read (0-63) -* offset: offset within 32-bit register to write (0-3) -* -* The size parameter specifies the number of bytes to write to the PCI -* configuration space, valid values are 1, 2, and 4 bytes. A 32-bit value -* is always provided but only the number of bytes specified by the size -* parameter will be written to the device. -* -* If multiple PCI controllers are present in the system, the controller id -* can be specified in the "controller" parameter. 
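The pci_read() comment above notes that the controller's data register always yields 32 bits, with 1- and 2-byte accesses returning only the requested bytes. A minimal illustration of that sub-dword extraction (not the driver's actual masking code):

#include <stdint.h>

/* Keep only the bytes selected by the offset within the dword and the
 * requested access size (1, 2 or 4 bytes). */
static uint32_t example_extract(uint32_t dword, uint32_t offset, uint32_t size)
{
	uint32_t shift = (offset & 0x3) * 8;
	uint32_t mask = (size == 4) ? 0xFFFFFFFFu
		      : (size == 2) ? 0xFFFFu
		      : 0xFFu;

	return (dword >> shift) & mask;
}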
If only one controller -* is present, the id DEFAULT_PCI_CONTROLLER can be used to denote this -* controller. -* -* Example: -* -* pci_addr_t addr; -* uint32_t bar0 = 0xE0000000; -* -* addr.field.bus = 0; /@ PCI bus zero @/ -* addr.field.device = 1; /@ PCI device one @/ -* addr.field.func = 0; /@ PCI function zero @/ -* addr.field.reg = 16; /@ PCI register 16 @/ -* addr.field.offset = 0; /@ PCI register offset @/ -* -* pci_write (DEFAULT_PCI_CONTROLLER, addr, sizeof(uint32_t), bar0); -* -* NOTE: -* Writing of PCI data must be performed as an atomic operation. It is up to -* the caller to enforce this. -* -* -* RETURNS: N/A -* -*/ +/** + * + * pci_write - write a to a PCI register + * + * This routine writes data to a PCI device's configuration space. The + * device and register to write is specified by the address parameter ("addr") + * and must be set appropriately by the caller. The address is defined by + * the structure type pci_addr_t and contains the following members: + * + * bus: PCI bus number (0-255) + * device: PCI device number (0-31) + * func: device function number (0-7) + * reg: device register number to read (0-63) + * offset: offset within 32-bit register to write (0-3) + * + * The size parameter specifies the number of bytes to write to the PCI + * configuration space, valid values are 1, 2, and 4 bytes. A 32-bit value + * is always provided but only the number of bytes specified by the size + * parameter will be written to the device. + * + * If multiple PCI controllers are present in the system, the controller id + * can be specified in the "controller" parameter. If only one controller + * is present, the id DEFAULT_PCI_CONTROLLER can be used to denote this + * controller. + * + * Example: + * + * pci_addr_t addr; + * uint32_t bar0 = 0xE0000000; + * + * addr.field.bus = 0; /@ PCI bus zero @/ + * addr.field.device = 1; /@ PCI device one @/ + * addr.field.func = 0; /@ PCI function zero @/ + * addr.field.reg = 16; /@ PCI register 16 @/ + * addr.field.offset = 0; /@ PCI register offset @/ + * + * pci_write (DEFAULT_PCI_CONTROLLER, addr, sizeof(uint32_t), bar0); + * + * NOTE: + * Writing of PCI data must be performed as an atomic operation. It is up to + * the caller to enforce this. + * + * + * RETURNS: N/A + * + */ void pci_write(uint32_t controller, /* controller to use */ union pci_addr_reg addr, /* PCI address to read */ @@ -379,15 +379,15 @@ void pci_write(uint32_t controller, /* controller to use */ pci_ctrl_data_write(controller, access_offset, data, access_size); } -/******************************************************************************* -* -* pci_header_get - get the PCI header for a device -* -* This routine reads the PCI header for the specified device and puts the -* result in the supplied header structure. -* -* RETURNS: N/A -*/ +/** + * + * pci_header_get - get the PCI header for a device + * + * This routine reads the PCI header for the specified device and puts the + * result in the supplied header structure. + * + * RETURNS: N/A + */ void pci_header_get(uint32_t controller, union pci_addr_reg pci_ctrl_addr, diff --git a/drivers/random/rand32-timer.c b/drivers/random/rand32-timer.c index e80770343df..f0adc5e5db3 100644 --- a/drivers/random/rand32-timer.c +++ b/drivers/random/rand32-timer.c @@ -36,7 +36,7 @@ This module provides a non-random implementation of sys_rand32_get(), which is not meant to be used in a final product as a truly random number generator. 
It was provided to allow testing on a BSP that does not (yet) provide a random number generator. -*/ + */ #include #include @@ -53,7 +53,7 @@ static atomic_val_t _rand32_counter = 0; #define _RAND32_INC 1000000013 -/******************************************************************************* +/** * * sys_rand32_init - initialize the random number generator * @@ -69,7 +69,7 @@ void sys_rand32_init(void) { } -/******************************************************************************* +/** * * sys_rand32_get - get a 32 bit random number * diff --git a/drivers/random/rand32-timestamp.c b/drivers/random/rand32-timestamp.c index e6eaa5c6cf0..fdaa381a688 100644 --- a/drivers/random/rand32-timestamp.c +++ b/drivers/random/rand32-timestamp.c @@ -36,13 +36,13 @@ This module provides a non-random implementation of sys_rand32_get(), which is not meant to be used in a final product as a truly random number generator. It was provided to allow testing on a BSP that does not (yet) provide a random number generator. -*/ + */ #include #include #include -/******************************************************************************* +/** * * sys_rand32_init - initialize the random number generator * @@ -56,7 +56,7 @@ void sys_rand32_init(void) { } -/******************************************************************************* +/** * * sys_rand32_get - get a 32 bit random number * diff --git a/drivers/serial/k20UartDrv.c b/drivers/serial/k20UartDrv.c index aeb91ad6949..e7af9b9711b 100644 --- a/drivers/serial/k20UartDrv.c +++ b/drivers/serial/k20UartDrv.c @@ -38,7 +38,7 @@ The BSP's _InitHardware() routine initializes all the values in the uart_init_info structure before calling uart_init(). INCLUDE FILES: drivers/serial/k20_uart.h -*/ + */ #include #include @@ -59,15 +59,15 @@ typedef struct { UART_PORTS_CONFIGURE(_k20Uart_t, uart); -/******************************************************************************* -* -* uart_init - initialize UART channel -* -* This routine is called to reset the chip in a quiescent state. -* It is assumed that this function is called only once per UART. -* -* RETURNS: N/A -*/ +/** + * + * uart_init - initialize UART channel + * + * This routine is called to reset the chip in a quiescent state. + * It is assumed that this function is called only once per UART. + * + * RETURNS: N/A + */ void uart_init(int port, /* UART channel to initialize */ const struct uart_init_info * const init_info @@ -107,12 +107,12 @@ void uart_init(int port, /* UART channel to initialize */ irq_unlock(oldLevel); } -/******************************************************************************* -* -* uart_poll_in - poll the device for input. -* -* RETURNS: 0 if a character arrived, -1 if the input buffer if empty. -*/ +/** + * + * uart_poll_in - poll the device for input. + * + * RETURNS: 0 if a character arrived, -1 if the input buffer if empty. + */ int uart_poll_in(int port, /* UART channel to select for input */ unsigned char *pChar /* pointer to char */ @@ -129,18 +129,18 @@ int uart_poll_in(int port, /* UART channel to select for input */ return 0; } -/******************************************************************************* -* -* uart_poll_out - output a character in polled mode. -* -* Checks if the transmitter is empty. If empty, a character is written to -* the data register. -* -* If the hardware flow control is enabled then the handshake signal CTS has to -* be asserted in order to send a character. 
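The rand32-timer driver converted just above pairs a timer-derived value with a counter bumped by a large increment (its _RAND32_INC define appears in the hunk context). A heavily simplified, non-atomic sketch of that idea, with a placeholder tick source; the real driver keeps the counter in an atomic_val_t and uses its own timer API.

#include <stdint.h>

#define RAND32_INC 1000000013u /* large increment, as in the driver's _RAND32_INC */

static uint32_t rand32_counter; /* the real driver uses an atomic counter */

extern uint32_t example_timer_ticks(void); /* placeholder tick source */

/* Non-random "random" number: timer reading plus an ever-growing counter. */
static uint32_t example_rand32_get(void)
{
	rand32_counter += RAND32_INC;
	return example_timer_ticks() + rand32_counter;
}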
-* -* RETURNS: sent character -*/ +/** + * + * uart_poll_out - output a character in polled mode. + * + * Checks if the transmitter is empty. If empty, a character is written to + * the data register. + * + * If the hardware flow control is enabled then the handshake signal CTS has to + * be asserted in order to send a character. + * + * RETURNS: sent character + */ unsigned char uart_poll_out( int port, /* UART channel to select for output */ unsigned char outChar /* char to send */ @@ -159,12 +159,12 @@ unsigned char uart_poll_out( #if CONFIG_UART_INTERRUPT_DRIVEN -/******************************************************************************* -* -* uart_fifo_fill - fill FIFO with data +/** + * + * uart_fifo_fill - fill FIFO with data -* RETURNS: number of bytes sent -*/ + * RETURNS: number of bytes sent + */ int uart_fifo_fill(int port, /* UART on port to send */ const uint8_t *txData, /* data to transmit */ @@ -181,12 +181,12 @@ int uart_fifo_fill(int port, /* UART on port to send */ return numTx; } -/******************************************************************************* -* -* uart_fifo_read - read data from FIFO -* -* RETURNS: number of bytes read -*/ +/** + * + * uart_fifo_read - read data from FIFO + * + * RETURNS: number of bytes read + */ int uart_fifo_read(int port, /* UART to receive from */ uint8_t *rxData, /* data container */ @@ -203,12 +203,12 @@ int uart_fifo_read(int port, /* UART to receive from */ return numRx; } -/******************************************************************************* -* -* uart_irq_tx_enable - enable TX interrupt -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_tx_enable - enable TX interrupt + * + * RETURNS: N/A + */ void uart_irq_tx_enable(int port /* UART to enable Tx interrupt */ @@ -219,12 +219,12 @@ void uart_irq_tx_enable(int port /* UART to enable Tx uart_p->c2.field.txInt_DmaTx_en = 1; } -/******************************************************************************* -* -* uart_irq_tx_disable - disable TX interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_tx_disable - disable TX interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_tx_disable( int port /* UART to disable Tx interrupt */ @@ -235,12 +235,12 @@ void uart_irq_tx_disable( uart_p->c2.field.txInt_DmaTx_en = 0; } -/******************************************************************************* -* -* uart_irq_tx_ready - check if Tx IRQ has been raised -* -* RETURNS: 1 if an IRQ is ready, 0 otherwise -*/ +/** + * + * uart_irq_tx_ready - check if Tx IRQ has been raised + * + * RETURNS: 1 if an IRQ is ready, 0 otherwise + */ int uart_irq_tx_ready(int port /* UART to check */ ) @@ -250,12 +250,12 @@ int uart_irq_tx_ready(int port /* UART to check */ return uart_p->s1.field.txDataEmpty; } -/******************************************************************************* -* -* uart_irq_rx_enable - enable RX interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_rx_enable - enable RX interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_rx_enable(int port /* UART to enable Rx interrupt */ @@ -266,12 +266,12 @@ void uart_irq_rx_enable(int port /* UART to enable Rx uart_p->c2.field.rxFullInt_dmaTx_en = 1; } -/******************************************************************************* -* -* uart_irq_rx_disable - disable RX interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_rx_disable - disable RX interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_rx_disable( int port /* UART to disable Rx interrupt */ @@ -282,12 +282,12 @@ void 
uart_irq_rx_disable( uart_p->c2.field.rxFullInt_dmaTx_en = 0; } -/******************************************************************************* -* -* uart_irq_rx_ready - check if Rx IRQ has been raised -* -* RETURNS: 1 if an IRQ is ready, 0 otherwise -*/ +/** + * + * uart_irq_rx_ready - check if Rx IRQ has been raised + * + * RETURNS: 1 if an IRQ is ready, 0 otherwise + */ int uart_irq_rx_ready(int port /* UART to check */ ) @@ -297,12 +297,12 @@ int uart_irq_rx_ready(int port /* UART to check */ return uart_p->s1.field.rxDataFull; } -/******************************************************************************* -* -* uart_irq_err_enable - enable error interrupt -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_err_enable - enable error interrupt + * + * RETURNS: N/A + */ void uart_irq_err_enable(int port) { @@ -316,12 +316,12 @@ void uart_irq_err_enable(int port) uart_p->c3 = c3; } -/******************************************************************************* -* -* uart_irq_err_disable - disable error interrupt -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_err_disable - disable error interrupt + * + * RETURNS: N/A + */ void uart_irq_err_disable(int port /* UART to disable Rx interrupt */ ) @@ -336,12 +336,12 @@ void uart_irq_err_disable(int port /* UART to disable Rx interrupt */ uart_p->c3 = c3; } -/******************************************************************************* -* -* uart_irq_is_pending - check if Tx or Rx IRQ is pending -* -* RETURNS: 1 if a Tx or Rx IRQ is pending, 0 otherwise -*/ +/** + * + * uart_irq_is_pending - check if Tx or Rx IRQ is pending + * + * RETURNS: 1 if a Tx or Rx IRQ is pending, 0 otherwise + */ int uart_irq_is_pending(int port /* UART to check */ ) @@ -355,26 +355,26 @@ int uart_irq_is_pending(int port /* UART to check */ : 0); } -/******************************************************************************* -* -* uart_irq_update - update IRQ status -* -* RETURNS: always 1 -*/ +/** + * + * uart_irq_update - update IRQ status + * + * RETURNS: always 1 + */ int uart_irq_update(int port) { return 1; } -/******************************************************************************* -* -* uart_irq_get - returns UART interrupt number -* -* Returns the IRQ number used by the specified UART port -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_get - returns UART interrupt number + * + * Returns the IRQ number used by the specified UART port + * + * RETURNS: N/A + */ unsigned int uart_irq_get(int port /* UART port */ ) diff --git a/drivers/serial/ns16550.c b/drivers/serial/ns16550.c index 4a1dcd8306b..cfdad555994 100644 --- a/drivers/serial/ns16550.c +++ b/drivers/serial/ns16550.c @@ -54,7 +54,7 @@ A board support package's board.h header must provide definitions for: INCLUDE FILES: drivers/uart.h -*/ + */ #include #include @@ -257,14 +257,14 @@ UART_PORTS_CONFIGURE(struct ns16550, uart); #endif /* UART_PORTS_CONFIGURE */ -/******************************************************************************* -* -* uart_init - initialize the chip -* -* This routine is called to reset the chip in a quiescent state. -* -* RETURNS: N/A -*/ +/** + * + * uart_init - initialize the chip + * + * This routine is called to reset the chip in a quiescent state. 
+ * + * RETURNS: N/A + */ void uart_init(int port, /* UART channel to initialize */ const struct uart_init_info * const init_info @@ -310,12 +310,12 @@ void uart_init(int port, /* UART channel to initialize */ irq_unlock(oldLevel); } -/******************************************************************************* -* -* uart_poll_in - poll the device for input. -* -* RETURNS: 0 if a character arrived, -1 if the input buffer if empty. -*/ +/** + * + * uart_poll_in - poll the device for input. + * + * RETURNS: 0 if a character arrived, -1 if the input buffer if empty. + */ int uart_poll_in(int port, /* UART channel to select for input */ unsigned char *pChar /* pointer to char */ @@ -330,18 +330,18 @@ int uart_poll_in(int port, /* UART channel to select for input */ return 0; } -/******************************************************************************* -* -* uart_poll_out - output a character in polled mode. -* -* Checks if the transmitter is empty. If empty, a character is written to -* the data register. -* -* If the hardware flow control is enabled then the handshake signal CTS has to -* be asserted in order to send a character. -* -* RETURNS: sent character -*/ +/** + * + * uart_poll_out - output a character in polled mode. + * + * Checks if the transmitter is empty. If empty, a character is written to + * the data register. + * + * If the hardware flow control is enabled then the handshake signal CTS has to + * be asserted in order to send a character. + * + * RETURNS: sent character + */ unsigned char uart_poll_out( int port, /* UART channel to select for output */ unsigned char outChar /* char to send */ @@ -357,12 +357,12 @@ unsigned char uart_poll_out( } #if CONFIG_UART_INTERRUPT_DRIVEN -/******************************************************************************* -* -* uart_fifo_fill - fill FIFO with data -* -* RETURNS: number of bytes sent -*/ +/** + * + * uart_fifo_fill - fill FIFO with data + * + * RETURNS: number of bytes sent + */ int uart_fifo_fill(int port, /* UART on port to send */ const uint8_t *txData, /* data to transmit */ @@ -377,12 +377,12 @@ int uart_fifo_fill(int port, /* UART on port to send */ return i; } -/******************************************************************************* -* -* uart_fifo_read - read data from FIFO -* -* RETURNS: number of bytes read -*/ +/** + * + * uart_fifo_read - read data from FIFO + * + * RETURNS: number of bytes read + */ int uart_fifo_read(int port, /* UART to receive from */ uint8_t *rxData, /* data container */ @@ -398,12 +398,12 @@ int uart_fifo_read(int port, /* UART to receive from */ return i; } -/******************************************************************************* -* -* uart_irq_tx_enable - enable TX interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_tx_enable - enable TX interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_tx_enable(int port /* UART to enable Tx interrupt */ @@ -412,12 +412,12 @@ void uart_irq_tx_enable(int port /* UART to enable Tx OUTBYTE(IER(port), INBYTE(IER(port)) | IER_TBE); } -/******************************************************************************* -* -* uart_irq_tx_disable - disable TX interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_tx_disable - disable TX interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_tx_disable(int port /* UART to disable Tx interrupt */ ) @@ -425,12 +425,12 @@ void uart_irq_tx_disable(int port /* UART to disable Tx interrupt */ OUTBYTE(IER(port), INBYTE(IER(port)) & (~IER_TBE)); } 
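A minimal usage sketch (not taken from this patch) can make the polled-mode comments above concrete. It relies only on the uart_poll_in()/uart_poll_out() signatures shown in this diff; the port index and the uart_echo() wrapper are assumptions for illustration.

/* Illustrative sketch only: echo characters on an already-initialized UART
 * using the polled-mode calls documented above. The uart_echo() name and the
 * assumption that uart_init() has run are not part of this patch.
 */
extern int uart_poll_in(int port, unsigned char *pChar);
extern unsigned char uart_poll_out(int port, unsigned char outChar);

static void uart_echo(int port /* assumed valid, initialized UART */)
{
	unsigned char c;

	for (;;) {
		/* uart_poll_in() returns 0 when a character arrived, -1 otherwise */
		if (uart_poll_in(port, &c) == 0) {
			(void)uart_poll_out(port, c); /* returns the character sent */
		}
	}
}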
-/******************************************************************************* -* -* uart_irq_tx_ready - check if Tx IRQ has been raised -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_tx_ready - check if Tx IRQ has been raised + * + * RETURNS: 1 if an IRQ is ready, 0 otherwise + */ int uart_irq_tx_ready(int port /* UART to check */ ) @@ -438,12 +438,12 @@ int uart_irq_tx_ready(int port /* UART to check */ return ((IIRC(port) & IIR_ID) == IIR_THRE); } -/******************************************************************************* -* -* _uart_irq_rx_enable - enable RX interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_rx_enable - enable RX interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_rx_enable(int port /* UART to enable Rx interrupt */ @@ -452,12 +452,12 @@ void uart_irq_rx_enable(int port /* UART to enable Rx OUTBYTE(IER(port), INBYTE(IER(port)) | IER_RXRDY); } -/******************************************************************************* -* -* uart_irq_rx_disable - disable RX interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_rx_disable - disable RX interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_rx_disable(int port /* UART to disable Rx interrupt */ ) @@ -465,12 +465,12 @@ void uart_irq_rx_disable(int port /* UART to disable Rx interrupt */ OUTBYTE(IER(port), INBYTE(IER(port)) & (~IER_RXRDY)); } -/******************************************************************************* -* -* uart_irq_rx_ready - check if Rx IRQ has been raised -* -* RETURNS: 1 if an IRQ is ready, 0 otherwise -*/ +/** + * + * uart_irq_rx_ready - check if Rx IRQ has been raised + * + * RETURNS: 1 if an IRQ is ready, 0 otherwise + */ int uart_irq_rx_ready(int port /* UART to check */ ) @@ -478,12 +478,12 @@ int uart_irq_rx_ready(int port /* UART to check */ return ((IIRC(port) & IIR_ID) == IIR_RBRF); } -/******************************************************************************* -* -* uart_irq_err_enable - enable error interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_err_enable - enable error interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_err_enable(int port /* UART to enable Rx interrupt */ ) @@ -491,12 +491,12 @@ void uart_irq_err_enable(int port /* UART to enable Rx interrupt */ OUTBYTE(IER(port), INBYTE(IER(port)) | IER_LSR); } -/******************************************************************************* -* -* uart_irq_err_disable - disable error interrupt in IER -* -* RETURNS: 1 if an IRQ is ready, 0 otherwise -*/ +/** + * + * uart_irq_err_disable - disable error interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_err_disable(int port /* UART to disable Rx interrupt */ ) @@ -504,12 +504,12 @@ void uart_irq_err_disable(int port /* UART to disable Rx interrupt */ OUTBYTE(IER(port), INBYTE(IER(port)) & (~IER_LSR)); } -/******************************************************************************* -* -* uart_irq_is_pending - check if any IRQ is pending -* -* RETURNS: 1 if an IRQ is pending, 0 otherwise -*/ +/** + * + * uart_irq_is_pending - check if any IRQ is pending + * + * RETURNS: 1 if an IRQ is pending, 0 otherwise + */ int uart_irq_is_pending(int port /* UART to check */ ) @@ -517,12 +517,12 @@ int uart_irq_is_pending(int port /* UART to check */ return (!(IIRC(port) & IIR_IP)); } -/******************************************************************************* -* -* uart_irq_update - update cached contents of IIR -* -* RETURNS: always 1 -*/ +/** + * + * uart_irq_update - update cached contents of IIR + * + * RETURNS: always 1
+ */ int uart_irq_update(int port /* UART to update */ ) @@ -532,14 +532,14 @@ int uart_irq_update(int port /* UART to update */ return 1; } -/******************************************************************************* -* -* uart_irq_get - returns UART interrupt number -* -* Returns the IRQ number used by the specified UART port -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_get - returns UART interrupt number + * + * Returns the IRQ number used by the specified UART port + * + * RETURNS: N/A + */ unsigned int uart_irq_get(int port /* UART port */ ) diff --git a/drivers/serial/stellarisUartDrv.c b/drivers/serial/stellarisUartDrv.c index cefce548eb3..9dfe206cd4b 100644 --- a/drivers/serial/stellarisUartDrv.c +++ b/drivers/serial/stellarisUartDrv.c @@ -37,7 +37,7 @@ an 16550 in functionality, but is not register-compatible. There is only support for poll-mode, so it can only be used with the printk and STDOUT_CONSOLE APIs. -*/ + */ #include #include @@ -153,14 +153,14 @@ struct _StellarisUartPort { UART_PORTS_CONFIGURE(struct _StellarisUartPort, ports); -/******************************************************************************* -* -* baudrateSet - set the baud rate -* -* This routine set the given baud rate for the UART. -* -* RETURNS: N/A -*/ +/** + * + * baudrateSet - set the baud rate + * + * This routine set the given baud rate for the UART. + * + * RETURNS: N/A + */ static void baudrateSet(int port, uint32_t baudrate, uint32_t sysClkFreqInHz) { @@ -184,14 +184,14 @@ static void baudrateSet(int port, uint32_t baudrate, uint32_t sysClkFreqInHz) pUart->fbrd = (uint8_t)(brdf & 0x3f); /* 6 bits */ } -/******************************************************************************* -* -* enable - enable the UART -* -* This routine enables the given UART. -* -* RETURNS: N/A -*/ +/** + * + * enable - enable the UART + * + * This routine enables the given UART. + * + * RETURNS: N/A + */ static inline void enable(int port) { @@ -200,14 +200,14 @@ static inline void enable(int port) pUart->ctl |= UARTCTL_UARTEN; } -/******************************************************************************* -* -* disable - disable the UART -* -* This routine disables the given UART. -* -* RETURNS: N/A -*/ +/** + * + * disable - disable the UART + * + * This routine disables the given UART. + * + * RETURNS: N/A + */ static inline void disable(int port) { @@ -233,14 +233,14 @@ static inline void disable(int port) */ #define LINE_CONTROL_DEFAULTS UARTLCRH_WLEN -/******************************************************************************* -* -* lineControlDefaultsSet - set the default UART line controls -* -* This routine sets the given UART's line controls to their default settings. -* -* RETURNS: N/A -*/ +/** + * + * lineControlDefaultsSet - set the default UART line controls + * + * This routine sets the given UART's line controls to their default settings. + * + * RETURNS: N/A + */ static inline void lineControlDefaultsSet(int port) { @@ -249,15 +249,15 @@ static inline void lineControlDefaultsSet(int port) pUart->lcrh = LINE_CONTROL_DEFAULTS; } -/******************************************************************************* -* -* uart_init - initialize UART channel -* -* This routine is called to reset the chip in a quiescent state. -* It is assumed that this function is called only once per UART. -* -* RETURNS: N/A -*/ +/** + * + * uart_init - initialize UART channel + * + * This routine is called to reset the chip in a quiescent state. 
+ * It is assumed that this function is called only once per UART. + * + * RETURNS: N/A + */ void uart_init(int port, /* UART channel to initialize */ const struct uart_init_info * const init_info ) @@ -270,14 +270,14 @@ void uart_init(int port, /* UART channel to initialize */ enable(port); } -/******************************************************************************* -* -* pollTxReady - get the UART transmit ready status -* -* This routine returns the given UART's transmit ready status. -* -* RETURNS: 0 if ready to transmit, 1 otherwise -*/ +/** + * + * pollTxReady - get the UART transmit ready status + * + * This routine returns the given UART's transmit ready status. + * + * RETURNS: 0 if ready to transmit, 1 otherwise + */ static int pollTxReady(int port) { @@ -286,12 +286,12 @@ static int pollTxReady(int port) return (pUart->fr & UARTFR_TXFE); } -/******************************************************************************* -* -* uart_poll_in - poll the device for input. -* -* RETURNS: 0 if a character arrived, -1 if the input buffer if empty. -*/ +/** + * + * uart_poll_in - poll the device for input. + * + * RETURNS: 0 if a character arrived, -1 if the input buffer if empty. + */ int uart_poll_in(int port, /* UART channel to select for input */ unsigned char *pChar /* pointer to char */ @@ -308,15 +308,15 @@ int uart_poll_in(int port, /* UART channel to select for input */ return 0; } -/******************************************************************************* -* -* uart_poll_out - output a character in polled mode. -* -* Checks if the transmitter is empty. If empty, a character is written to -* the data register. -* -* RETURNS: sent character -*/ +/** + * + * uart_poll_out - output a character in polled mode. + * + * Checks if the transmitter is empty. If empty, a character is written to + * the data register. 
+ * + * RETURNS: sent character + */ unsigned char uart_poll_out(int port, unsigned char c) { volatile struct _Uart *pUart = ports[port].base; @@ -331,12 +331,12 @@ unsigned char uart_poll_out(int port, unsigned char c) #if CONFIG_UART_INTERRUPT_DRIVEN -/******************************************************************************* -* -* uart_fifo_fill - fill FIFO with data -* -* RETURNS: number of bytes sent -*/ +/** + * + * uart_fifo_fill - fill FIFO with data + * + * RETURNS: number of bytes sent + */ int uart_fifo_fill(int port, /* UART on which to send */ const uint8_t *txData, /* data to transmit */ @@ -353,12 +353,12 @@ int uart_fifo_fill(int port, /* UART on which to send */ return (int)numTx; } -/******************************************************************************* -* -* uart_fifo_read - read data from FIFO -* -* RETURNS: number of bytes read -*/ +/** + * + * uart_fifo_read - read data from FIFO + * + * RETURNS: number of bytes read + */ int uart_fifo_read(int port, /* UART to receive from */ uint8_t *rxData, /* data container */ @@ -375,12 +375,12 @@ int uart_fifo_read(int port, /* UART to receive from */ return numRx; } -/******************************************************************************* -* -* uart_irq_tx_enable - enable TX interrupt -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_tx_enable - enable TX interrupt + * + * RETURNS: N/A + */ void uart_irq_tx_enable(int port /* UART to enable Tx interrupt */ ) @@ -430,12 +430,12 @@ void uart_irq_tx_enable(int port /* UART to enable Tx interrupt */ pUart->im |= UARTTIM_TXIM; } -/******************************************************************************* -* -* uart_irq_tx_disable - disable TX interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_tx_disable - disable TX interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_tx_disable(int port /* UART to disable Tx interrupt */ ) @@ -445,12 +445,12 @@ void uart_irq_tx_disable(int port /* UART to disable Tx interrupt */ pUart->im &= ~UARTTIM_TXIM; } -/******************************************************************************* -* -* uart_irq_tx_ready - check if Tx IRQ has been raised -* -* RETURNS: 1 if a Tx IRQ is pending, 0 otherwise -*/ +/** + * + * uart_irq_tx_ready - check if Tx IRQ has been raised + * + * RETURNS: 1 if a Tx IRQ is pending, 0 otherwise + */ int uart_irq_tx_ready(int port /* UART to check */ ) @@ -460,12 +460,12 @@ int uart_irq_tx_ready(int port /* UART to check */ return ((pUart->mis & UARTMIS_TXMIS) == UARTMIS_TXMIS); } -/******************************************************************************* -* -* uart_irq_rx_enable - enable RX interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_rx_enable - enable RX interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_rx_enable(int port /* UART to enable Rx interrupt */ ) @@ -475,12 +475,12 @@ void uart_irq_rx_enable(int port /* UART to enable Rx interrupt */ pUart->im |= UARTTIM_RXIM; } -/******************************************************************************* -* -* uart_irq_rx_disable - disable RX interrupt in IER -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_rx_disable - disable RX interrupt in IER + * + * RETURNS: N/A + */ void uart_irq_rx_disable(int port /* UART to disable Rx interrupt */ ) @@ -490,12 +490,12 @@ void uart_irq_rx_disable(int port /* UART to disable Rx interrupt */ pUart->im &= ~UARTTIM_RXIM; } -/******************************************************************************* -* -* uart_irq_rx_ready - check if Rx IRQ has been raised -* -* 
RETURNS: 1 if an IRQ is ready, 0 otherwise -*/ +/** + * + * uart_irq_rx_ready - check if Rx IRQ has been raised + * + * RETURNS: 1 if an IRQ is ready, 0 otherwise + */ int uart_irq_rx_ready(int port /* UART to check */ ) @@ -505,12 +505,12 @@ int uart_irq_rx_ready(int port /* UART to check */ return ((pUart->mis & UARTMIS_RXMIS) == UARTMIS_RXMIS); } -/******************************************************************************* -* -* uart_irq_err_enable - enable error interrupts -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_err_enable - enable error interrupts + * + * RETURNS: N/A + */ void uart_irq_err_enable(int port /* UART to enable interrupts for */ ) @@ -521,12 +521,12 @@ void uart_irq_err_enable(int port /* UART to enable interrupts for */ UARTTIM_BEIM | UARTTIM_OEIM); } -/******************************************************************************* -* -* uart_irq_err_disable - disable error interrupts -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_err_disable - disable error interrupts + * + * RETURNS: N/A + */ void uart_irq_err_disable(int port /* UART to disable interrupts for */ ) @@ -537,12 +537,12 @@ void uart_irq_err_disable(int port /* UART to disable interrupts for */ UARTTIM_BEIM | UARTTIM_OEIM); } -/******************************************************************************* -* -* uart_irq_is_pending - check if Tx or Rx IRQ is pending -* -* RETURNS: 1 if a Tx or Rx IRQ is pending, 0 otherwise -*/ +/** + * + * uart_irq_is_pending - check if Tx or Rx IRQ is pending + * + * RETURNS: 1 if a Tx or Rx IRQ is pending, 0 otherwise + */ int uart_irq_is_pending(int port /* UART to check */ ) @@ -553,26 +553,26 @@ int uart_irq_is_pending(int port /* UART to check */ return ((pUart->mis & (UARTMIS_RXMIS | UARTMIS_TXMIS)) ? 1 : 0); } -/******************************************************************************* -* -* uart_irq_update - update IRQ status -* -* RETURNS: always 1 -*/ +/** + * + * uart_irq_update - update IRQ status + * + * RETURNS: always 1 + */ int uart_irq_update(int port) { return 1; } -/******************************************************************************* -* -* uart_irq_get - returns UART interrupt number -* -* Returns the IRQ number used by the specified UART port -* -* RETURNS: N/A -*/ +/** + * + * uart_irq_get - returns UART interrupt number + * + * Returns the IRQ number used by the specified UART port + * + * RETURNS: N/A + */ unsigned int uart_irq_get(int port /* UART port */ ) diff --git a/drivers/timer/arcv2_timer0.c b/drivers/timer/arcv2_timer0.c index 3b9bbb7abee..8752c0ac2cd 100644 --- a/drivers/timer/arcv2_timer0.c +++ b/drivers/timer/arcv2_timer0.c @@ -37,7 +37,7 @@ and provides the standard "system clock driver" interfaces. \INTERNAL IMPLEMENTATION DETAILS The ARCv2 processor timer provides a 32-bit incrementing, wrap-to-zero counter. -*/ + */ #include #include @@ -68,19 +68,19 @@ The ARCv2 processor timer provides a 32-bit incrementing, wrap-to-zero counter. static uint32_t clock_accumulated_count = 0; -/******************************************************************************* -* -* enable - enable the timer with the given limit/countup value -* -* This routine sets up the timer for operation by: -* - setting value to which the timer will count up to; -* - setting the timer's start value to zero; and -* - enabling interrupt generation. 
-* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * enable - enable the timer with the given limit/countup value + * + * This routine sets up the timer for operation by: + * - setting value to which the timer will count up to; + * - setting the timer's start value to zero; and + * - enabling interrupt generation. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void enable( uint32_t count /* interrupt triggers when up-counter reaches this value */ @@ -95,49 +95,49 @@ static ALWAYS_INLINE void enable( _arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, 0); /* write the start value */ } -/******************************************************************************* -* -* count_get - get the current counter value -* -* This routine gets the value from the timer's count register. This -* value is the 'time' elapsed from the starting count (assumed to be 0). -* -* RETURNS: the current counter value -* -* \NOMANUAL -*/ +/** + * + * count_get - get the current counter value + * + * This routine gets the value from the timer's count register. This + * value is the 'time' elapsed from the starting count (assumed to be 0). + * + * RETURNS: the current counter value + * + * \NOMANUAL + */ static ALWAYS_INLINE uint32_t count_get(void) { return _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT); } -/******************************************************************************* -* -* limit_get - get the limit/countup value -* -* This routine gets the value from the timer's limit register, which is the -* value to which the timer will count up to. -* -* RETURNS: the limit value -* -* \NOMANUAL -*/ +/** + * + * limit_get - get the limit/countup value + * + * This routine gets the value from the timer's limit register, which is the + * value to which the timer will count up to. + * + * RETURNS: the limit value + * + * \NOMANUAL + */ static ALWAYS_INLINE uint32_t limit_get(void) { return _arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT); } -/******************************************************************************* -* -* _timer_int_handler - system clock periodic tick handler -* -* This routine handles the system clock periodic tick interrupt. A TICK_EVENT -* event is pushed onto the microkernel stack. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _timer_int_handler - system clock periodic tick handler + * + * This routine handles the system clock periodic tick interrupt. A TICK_EVENT + * event is pushed onto the microkernel stack. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _timer_int_handler(void *unused) { @@ -153,15 +153,15 @@ void _timer_int_handler(void *unused) _sys_clock_tick_announce(); } -/******************************************************************************* -* -* timer_driver - initialize and enable the system clock -* -* This routine is used to program the ARCv2 timer to deliver interrupts at the -* rate specified via the 'sys_clock_us_per_tick' global variable. -* -* RETURNS: N/A -*/ +/** + * + * timer_driver - initialize and enable the system clock + * + * This routine is used to program the ARCv2 timer to deliver interrupts at the + * rate specified via the 'sys_clock_us_per_tick' global variable. 
+ * + * RETURNS: N/A + */ void timer_driver( int priority /* priority parameter ignored by this driver */ @@ -189,14 +189,14 @@ void timer_driver( irq_enable(CONFIG_ARCV2_TIMER0_INT_LVL); } -/******************************************************************************* -* -* timer_read - read the BSP timer hardware -* -* This routine returns the current time in terms of timer hardware clock cycles. -* -* RETURNS: up counter of elapsed clock cycles -*/ +/** + * + * timer_read - read the BSP timer hardware + * + * This routine returns the current time in terms of timer hardware clock cycles. + * + * RETURNS: up counter of elapsed clock cycles + */ uint32_t timer_read(void) { @@ -204,15 +204,15 @@ uint32_t timer_read(void) } #if defined(CONFIG_SYSTEM_TIMER_DISABLE) -/******************************************************************************* -* -* timer_disable - stop announcing ticks into the kernel -* -* This routine disables timer interrupt generation and delivery. -* Note that the timer's counting cannot be stopped by software. -* -* RETURNS: N/A -*/ +/** + * + * timer_disable - stop announcing ticks into the kernel + * + * This routine disables timer interrupt generation and delivery. + * Note that the timer's counting cannot be stopped by software. + * + * RETURNS: N/A + */ void timer_disable(void) { diff --git a/drivers/timer/cortex_m_timer.c b/drivers/timer/cortex_m_timer.c index c03eebfd76e..be7ffef4abf 100644 --- a/drivers/timer/cortex_m_timer.c +++ b/drivers/timer/cortex_m_timer.c @@ -49,7 +49,7 @@ of a split kernel. The device driver is also part of a nanokernel-only system, but omits more complex capabilities (such as tickless idle support) that are only used in conjunction with a microkernel. -*/ + */ #include #include @@ -127,16 +127,16 @@ static unsigned char idle_mode = IDLE_NOT_TICKLESS; #if defined(CONFIG_TICKLESS_IDLE) || \ defined(CONFIG_SYSTEM_TIMER_DISABLE) -/******************************************************************************* -* -* sysTickStop - stop the timer -* -* This routine disables the systick counter. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * sysTickStop - stop the timer + * + * This routine disables the systick counter. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void sysTickStop(void) { @@ -156,16 +156,16 @@ static ALWAYS_INLINE void sysTickStop(void) #ifdef CONFIG_TICKLESS_IDLE -/******************************************************************************* -* -* sysTickStart - start the timer -* -* This routine enables the systick counter. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * sysTickStart - start the timer + * + * This routine enables the systick counter. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void sysTickStart(void) { @@ -183,33 +183,33 @@ static ALWAYS_INLINE void sysTickStart(void) __scs.systick.stcsr.val = reg.val; } -/******************************************************************************* -* -* sysTickCurrentGet - get the current counter value -* -* This routine gets the value from the timer's current value register. This -* value is the 'time' remaining to decrement before the timer triggers an -* interrupt. -* -* RETURNS: the current counter value -* -* \NOMANUAL -*/ +/** + * + * sysTickCurrentGet - get the current counter value + * + * This routine gets the value from the timer's current value register. This + * value is the 'time' remaining to decrement before the timer triggers an + * interrupt. 
+ * + * RETURNS: the current counter value + * + * \NOMANUAL + */ static ALWAYS_INLINE uint32_t sysTickCurrentGet(void) { return __scs.systick.stcvr; } -/******************************************************************************* -* -* sysTickReloadGet - get the reload/countdown value -* -* This routine returns the value from the reload value register. -* -* RETURNS: the counter's initial count/wraparound value -* -* \NOMANUAL -*/ +/** + * + * sysTickReloadGet - get the reload/countdown value + * + * This routine returns the value from the reload value register. + * + * RETURNS: the counter's initial count/wraparound value + * + * \NOMANUAL + */ static ALWAYS_INLINE uint32_t sysTickReloadGet(void) { return __scs.systick.strvr; @@ -217,18 +217,18 @@ static ALWAYS_INLINE uint32_t sysTickReloadGet(void) #endif /* CONFIG_TICKLESS_IDLE */ -/******************************************************************************* -* -* sysTickReloadSet - set the reload/countdown value -* -* This routine sets value from which the timer will count down and also -* sets the timer's current value register to zero. -* Note that the value given is assumed to be valid (i.e., count < (1<<24)). -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * sysTickReloadSet - set the reload/countdown value + * + * This routine sets value from which the timer will count down and also + * sets the timer's current value register to zero. + * Note that the value given is assumed to be valid (i.e., count < (1<<24)). + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void sysTickReloadSet( uint32_t count /* count from which timer is to count down */ @@ -244,20 +244,20 @@ static ALWAYS_INLINE void sysTickReloadSet( __scs.systick.stcvr = 0; /* also clears the countflag */ } -/******************************************************************************* -* -* _TIMER_INT_HANDLER - system clock tick handler -* -* This routine handles the system clock tick interrupt. A TICK_EVENT event -* is pushed onto the microkernel stack. -* -* The symbol for this routine is either _timer_int_handler (for normal -* system operation) or _real_timer_int_handler (when GDB_INFO is enabled). -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _TIMER_INT_HANDLER - system clock tick handler + * + * This routine handles the system clock tick interrupt. A TICK_EVENT event + * is pushed onto the microkernel stack. + * + * The symbol for this routine is either _timer_int_handler (for normal + * system operation) or _real_timer_int_handler (when GDB_INFO is enabled). + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _TIMER_INT_HANDLER(void *unused) { @@ -377,23 +377,23 @@ void _TIMER_INT_HANDLER(void *unused) #ifdef CONFIG_TICKLESS_IDLE -/******************************************************************************* -* -* sysTickTicklessIdleInit - initialize the tickless idle feature -* -* This routine initializes the tickless idle feature by calculating the -* necessary hardware-specific parameters. -* -* Note that the maximum number of ticks that can elapse during a "tickless idle" -* is limited by . The larger the value (the lower the -* tick frequency), the fewer elapsed ticks during a "tickless idle". -* Conversely, the smaller the value (the higher the tick frequency), the -* more elapsed ticks during a "tickless idle". 
-* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * sysTickTicklessIdleInit - initialize the tickless idle feature + * + * This routine initializes the tickless idle feature by calculating the + * necessary hardware-specific parameters. + * + * Note that the maximum number of ticks that can elapse during a "tickless idle" + * is limited by . The larger the value (the lower the + * tick frequency), the fewer elapsed ticks during a "tickless idle". + * Conversely, the smaller the value (the higher the tick frequency), the + * more elapsed ticks during a "tickless idle". + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void sysTickTicklessIdleInit(void) { @@ -458,17 +458,17 @@ static void sysTickTicklessIdleInit(void) sysTickReloadSet(default_load_value); } -/******************************************************************************* -* -* _timer_idle_enter - Place the system timer into idle state -* -* Re-program the timer to enter into the idle state for the given number of -* ticks. It is set to a "one shot" mode where it will fire in the number of -* ticks supplied or the maximum number of ticks that can be programmed into -* hardware. A value of -1 will result in the maximum number of ticks. -* -* RETURNS: N/A -*/ +/** + * + * _timer_idle_enter - Place the system timer into idle state + * + * Re-program the timer to enter into the idle state for the given number of + * ticks. It is set to a "one shot" mode where it will fire in the number of + * ticks supplied or the maximum number of ticks that can be programmed into + * hardware. A value of -1 will result in the maximum number of ticks. + * + * RETURNS: N/A + */ void _timer_idle_enter(int32_t ticks /* system ticks */ ) @@ -514,20 +514,20 @@ void _timer_idle_enter(int32_t ticks /* system ticks */ sysTickStart(); } -/******************************************************************************* -* -* _timer_idle_exit - handling of tickless idle when interrupted -* -* The routine, called by _sys_power_save_idle_exit, is responsible for taking -* the timer out of idle mode and generating an interrupt at the next -* tick interval. It is expected that interrupts have been disabled. -* -* Note that in this routine, _sys_idle_elapsed_ticks must be zero because the -* ticker has done its work and consumed all the ticks. This has to be true -* otherwise idle mode wouldn't have been entered in the first place. -* -* RETURNS: N/A -*/ +/** + * + * _timer_idle_exit - handling of tickless idle when interrupted + * + * The routine, called by _sys_power_save_idle_exit, is responsible for taking + * the timer out of idle mode and generating an interrupt at the next + * tick interval. It is expected that interrupts have been disabled. + * + * Note that in this routine, _sys_idle_elapsed_ticks must be zero because the + * ticker has done its work and consumed all the ticks. This has to be true + * otherwise idle mode wouldn't have been entered in the first place. + * + * RETURNS: N/A + */ void _timer_idle_exit(void) { @@ -606,15 +606,15 @@ void _timer_idle_exit(void) #endif /* CONFIG_TICKLESS_IDLE */ -/******************************************************************************* -* -* timer_driver - initialize and enable the system clock -* -* This routine is used to program the systick to deliver interrupts at the -* rate specified via the 'sys_clock_us_per_tick' global variable. 
-* -* RETURNS: N/A -*/ +/** + * + * timer_driver - initialize and enable the system clock + * + * This routine is used to program the systick to deliver interrupts at the + * rate specified via the 'sys_clock_us_per_tick' global variable. + * + * RETURNS: N/A + */ void timer_driver(int priority /* priority parameter is ignored by this driver */ ) @@ -646,20 +646,20 @@ void timer_driver(int priority /* priority parameter is ignored by this driver __scs.systick.stcsr.val = stcsr.val; } -/******************************************************************************* -* -* timer_read - read the BSP timer hardware -* -* This routine returns the current time in terms of timer hardware clock cycles. -* Some kernel facilities (e.g. benchmarking code) directly call timer_read() -* instead of utilizing the 'timer_read_fptr' function pointer. -* -* RETURNS: up counter of elapsed clock cycles -* -* \INTERNAL WARNING -* systick counter is a 24-bit down counter which is reset to "reload" value -* once it reaches 0. -*/ +/** + * + * timer_read - read the BSP timer hardware + * + * This routine returns the current time in terms of timer hardware clock cycles. + * Some kernel facilities (e.g. benchmarking code) directly call timer_read() + * instead of utilizing the 'timer_read_fptr' function pointer. + * + * RETURNS: up counter of elapsed clock cycles + * + * \INTERNAL WARNING + * systick counter is a 24-bit down counter which is reset to "reload" value + * once it reaches 0. + */ uint32_t timer_read(void) { @@ -668,15 +668,15 @@ uint32_t timer_read(void) #ifdef CONFIG_SYSTEM_TIMER_DISABLE -/******************************************************************************* -* -* timer_disable - stop announcing ticks into the kernel -* -* This routine disables the systick so that timer interrupts are no -* longer delivered. -* -* RETURNS: N/A -*/ +/** + * + * timer_disable - stop announcing ticks into the kernel + * + * This routine disables the systick so that timer interrupts are no + * longer delivered. + * + * RETURNS: N/A + */ void timer_disable(void) { diff --git a/drivers/timer/cortex_m_timer_gdb.S b/drivers/timer/cortex_m_timer_gdb.S index 76f3716a023..5714c5ff2d1 100644 --- a/drivers/timer/cortex_m_timer_gdb.S +++ b/drivers/timer/cortex_m_timer_gdb.S @@ -34,7 +34,7 @@ DESCRIPTION GDB stub needed before the real systick handler runs to be able to display the correct state of the thread that was interrupted. -*/ + */ #define _ASMLANGUAGE diff --git a/drivers/timer/hpet.c b/drivers/timer/hpet.c index f4c4da933b6..dc5d74ab170 100644 --- a/drivers/timer/hpet.c +++ b/drivers/timer/hpet.c @@ -62,7 +62,7 @@ and announces the number of elapsed ticks (if any) to the microkernel. In a nanokernel-only system this device driver omits more complex capabilities (such as tickless idle support) that are only used with a microkernel. -*/ + */ #include #include @@ -219,19 +219,19 @@ static int32_t programmed_ticks = static int stale_irq_check = 0; /* is stale interrupt possible? */ -/******************************************************************************* -* -* _hpetMainCounterAtomic - safely read the main HPET up counter -* -* This routine simulates an atomic read of the 64-bit system clock on CPUs -* that only support 32-bit memory accesses. The most significant word -* of the counter is read twice to ensure it doesn't change while the least -* significant word is being retrieved (as per HPET documentation). 
-* -* RETURNS: current 64-bit counter value -* -* \NOMANUAL -*/ +/** + * + * _hpetMainCounterAtomic - safely read the main HPET up counter + * + * This routine simulates an atomic read of the 64-bit system clock on CPUs + * that only support 32-bit memory accesses. The most significant word + * of the counter is read twice to ensure it doesn't change while the least + * significant word is being retrieved (as per HPET documentation). + * + * RETURNS: current 64-bit counter value + * + * \NOMANUAL + */ static uint64_t _hpetMainCounterAtomic(void) { @@ -248,17 +248,17 @@ static uint64_t _hpetMainCounterAtomic(void) #endif /* TIMER_SUPPORTS_TICKLESS */ -/******************************************************************************* -* -* _timer_int_handler - system clock tick handler -* -* This routine handles the system clock tick interrupt. A TICK_EVENT event -* is pushed onto the microkernel stack. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _timer_int_handler - system clock tick handler + * + * This routine handles the system clock tick interrupt. A TICK_EVENT event + * is pushed onto the microkernel stack. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _timer_int_handler(void *unused) { @@ -349,18 +349,18 @@ void _timer_int_handler(void *unused) #error Tickless idle threshold is too small (must be at least 2) #endif -/******************************************************************************* -* -* _timer_idle_enter - Place system timer into idle state -* -* Re-program the timer to enter into the idle state for the given number of -* ticks (-1 means infinite number of ticks). -* -* RETURNS: N/A -* -* \INTERNAL IMPLEMENTATION DETAILS -* Called while interrupts are locked. -*/ +/** + * + * _timer_idle_enter - Place system timer into idle state + * + * Re-program the timer to enter into the idle state for the given number of + * ticks (-1 means infinite number of ticks). + * + * RETURNS: N/A + * + * \INTERNAL IMPLEMENTATION DETAILS + * Called while interrupts are locked. + */ void _timer_idle_enter(int32_t ticks /* system ticks */ ) @@ -378,22 +378,22 @@ void _timer_idle_enter(int32_t ticks /* system ticks */ programmed_ticks = ticks; } -/******************************************************************************* -* -* _timer_idle_exit - Take system timer out of idle state -* -* Determine how long timer has been idling and reprogram it to interrupt at the -* next tick. -* -* Note that in this routine, _SysTimerElapsedTicks must be zero because the -* ticker has done its work and consumed all the ticks. This has to be true -* otherwise idle mode wouldn't have been entered in the first place. -* -* RETURNS: N/A -* -* \INTERNAL IMPLEMENTATION DETAILS -* Called by _IntEnt() while interrupts are locked. -*/ +/** + * + * _timer_idle_exit - Take system timer out of idle state + * + * Determine how long timer has been idling and reprogram it to interrupt at the + * next tick. + * + * Note that in this routine, _SysTimerElapsedTicks must be zero because the + * ticker has done its work and consumed all the ticks. This has to be true + * otherwise idle mode wouldn't have been entered in the first place. + * + * RETURNS: N/A + * + * \INTERNAL IMPLEMENTATION DETAILS + * Called by _IntEnt() while interrupts are locked. 
+ */ void _timer_idle_exit(void) { @@ -487,15 +487,15 @@ void _timer_idle_exit(void) #endif /* TIMER_SUPPORTS_TICKLESS */ -/******************************************************************************* -* -* timer_driver - initialize and enable the system clock -* -* This routine is used to program the HPET to deliver interrupts at the -* rate specified via the 'sys_clock_us_per_tick' global variable. -* -* RETURNS: N/A -*/ +/** + * + * timer_driver - initialize and enable the system clock + * + * This routine is used to program the HPET to deliver interrupts at the + * rate specified via the 'sys_clock_us_per_tick' global variable. + * + * RETURNS: N/A + */ void timer_driver(int priority /* priority parameter is ignored by this driver */ @@ -620,18 +620,18 @@ void timer_driver(int priority /* priority parameter is ignored by this driver *_HPET_TIMER0_CONFIG_CAPS |= HPET_Tn_INT_ENB_CNF; } -/******************************************************************************* -* -* timer_read - read the BSP timer hardware -* -* This routine returns the current time in terms of timer hardware clock cycles. -* -* RETURNS: up counter of elapsed clock cycles -* -* \INTERNAL WARNING -* If this routine is ever enhanced to return all 64 bits of the counter -* it will need to call _hpetMainCounterAtomic(). -*/ +/** + * + * timer_read - read the BSP timer hardware + * + * This routine returns the current time in terms of timer hardware clock cycles. + * + * RETURNS: up counter of elapsed clock cycles + * + * \INTERNAL WARNING + * If this routine is ever enhanced to return all 64 bits of the counter + * it will need to call _hpetMainCounterAtomic(). + */ uint32_t timer_read(void) { @@ -640,15 +640,15 @@ uint32_t timer_read(void) #ifdef CONFIG_SYSTEM_TIMER_DISABLE -/******************************************************************************* -* -* timer_disable - stop announcing ticks into the kernel -* -* This routine disables the HPET so that timer interrupts are no -* longer delivered. -* -* RETURNS: N/A -*/ +/** + * + * timer_disable - stop announcing ticks into the kernel + * + * This routine disables the HPET so that timer interrupts are no + * longer delivered. + * + * RETURNS: N/A + */ void timer_disable(void) { diff --git a/drivers/timer/i8253.c b/drivers/timer/i8253.c index 2f3b9c6124f..0a4052ab020 100644 --- a/drivers/timer/i8253.c +++ b/drivers/timer/i8253.c @@ -47,7 +47,7 @@ ticks into the guest operating system. However, this driver has been modified to access the PIT in scenarios where the PIT registers are mapped into a guest. An interrupt controller driver will not be utilized, so this driver will directly invoke the VIOAPIC APIs to configure/unmask the IRQ. -*/ + */ #include #include @@ -142,16 +142,16 @@ static uint32_t old_accumulated_count = 0; /* previous accumulated value value * extern struct nano_stack _k_command_stack; #endif /* CONFIG_MICROKERNEL */ -/******************************************************************************* -* -* _i8253CounterRead - read the i8253 counter register's value -* -* This routine reads the 16 bit value from the i8253 counter register. -* -* RETURNS: counter register's 16 bit value -* -* \NOMANUAL -*/ +/** + * + * _i8253CounterRead - read the i8253 counter register's value + * + * This routine reads the 16 bit value from the i8253 counter register. 
+ * + * RETURNS: counter register's 16 bit value + * + * \NOMANUAL + */ static inline uint16_t _i8253CounterRead(void) { @@ -169,17 +169,17 @@ static inline uint16_t _i8253CounterRead(void) return count; } -/******************************************************************************* -* -* _i8253CounterSet - set the i8253 counter register's value -* -* This routine sets the 16 bit value from which the i8253 timer will -* decrement and sets that counter register to its value. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _i8253CounterSet - set the i8253 counter register's value + * + * This routine sets the 16 bit value from which the i8253 timer will + * decrement and sets that counter register to its value. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void _i8253CounterSet( uint16_t count /* value from which the counter will decrement */ @@ -192,16 +192,16 @@ static inline void _i8253CounterSet( _currentLoadVal = count; } -/******************************************************************************* -* -* _i8253CounterPeriodic - set the i8253 timer to fire periodically -* -* This routine sets the i8253 to fire on a periodic basis. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _i8253CounterPeriodic - set the i8253 timer to fire periodically + * + * This routine sets the i8253 to fire on a periodic basis. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void _i8253CounterPeriodic( uint16_t count /* value from which the counter will decrement */ @@ -212,16 +212,16 @@ static inline void _i8253CounterPeriodic( } #if defined(TIMER_SUPPORTS_TICKLESS) -/******************************************************************************* -* -* _i8253CounterOneShot - set the i8253 timer to fire once only -* -* This routine sets the i8253 to fire once only. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _i8253CounterOneShot - set the i8253 timer to fire once only + * + * This routine sets the i8253 to fire once only. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void _i8253CounterOneShot( uint16_t count /* value from which the counter will decrement */ @@ -232,17 +232,17 @@ static inline void _i8253CounterOneShot( } #endif /* !TIMER_SUPPORTS_TICKLESS */ -/******************************************************************************* -* -* _timer_int_handler - system clock periodic tick handler -* -* This routine handles the system clock periodic tick interrupt. A TICK_EVENT -* event is pushed onto the microkernel stack. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _timer_int_handler - system clock periodic tick handler + * + * This routine handles the system clock periodic tick interrupt. A TICK_EVENT + * event is pushed onto the microkernel stack. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _timer_int_handler(void *unusedArg /* not used */ ) @@ -305,21 +305,21 @@ void _timer_int_handler(void *unusedArg /* not used */ } #if defined(TIMER_SUPPORTS_TICKLESS) -/******************************************************************************* -* -* _i8253TicklessIdleInit - initialize the tickless idle feature -* -* This routine initializes the tickless idle feature. Note that maximum -* number of ticks that can elapse during a "tickless idle" is limited by -* . The larger the value (the lower the tick frequency), the -* fewer elapsed ticks during a "tickless idle". Conversely, the smaller the -* value (the higher the tick frequency), the more elapsed ticks during a -* "tickless idle". 
-* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _i8253TicklessIdleInit - initialize the tickless idle feature + * + * This routine initializes the tickless idle feature. Note that maximum + * number of ticks that can elapse during a "tickless idle" is limited by + * . The larger the value (the lower the tick frequency), the + * fewer elapsed ticks during a "tickless idle". Conversely, the smaller the + * value (the higher the tick frequency), the more elapsed ticks during a + * "tickless idle". + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _i8253TicklessIdleInit(void) { @@ -328,14 +328,14 @@ static void _i8253TicklessIdleInit(void) max_load_value = max_system_ticks * counterLoadVal; } -/******************************************************************************* -* -* _i8253TicklessIdleSkew - -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _i8253TicklessIdleSkew - + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _i8253TicklessIdleSkew(void) { @@ -343,15 +343,15 @@ static void _i8253TicklessIdleSkew(void) timer_idle_skew = 0; } -/******************************************************************************* -* -* _timer_idle_enter - Place system timer into idle state -* -* Re-program the timer to enter into the idle state for the given number of -* ticks. It is placed into one shot mode where it will fire in the number of -* ticks supplied or the maximum number of ticks that can be programmed into -* hardware. A value of -1 means inifinite number of ticks. -*/ +/** + * + * _timer_idle_enter - Place system timer into idle state + * + * Re-program the timer to enter into the idle state for the given number of + * ticks. It is placed into one shot mode where it will fire in the number of + * ticks supplied or the maximum number of ticks that can be programmed into + * hardware. A value of -1 means inifinite number of ticks. + */ void _timer_idle_enter(int32_t ticks /* system ticks */ ) @@ -400,21 +400,21 @@ void _timer_idle_enter(int32_t ticks /* system ticks */ irq_enable(PIT_INT_LVL); } -/******************************************************************************* -* -* _timer_idle_exit - handling of tickless idle when interrupted -* -* The routine is responsible for taking the timer out of idle mode and -* generating an interrupt at the next tick interval. -* -* Note that in this routine, _SysTimerElapsedTicks must be zero because the -* ticker has done its work and consumed all the ticks. This has to be true -* otherwise idle mode wouldn't have been entered in the first place. -* -* Called in _IntEnt() -* -* RETURNS: N/A -*/ +/** + * + * _timer_idle_exit - handling of tickless idle when interrupted + * + * The routine is responsible for taking the timer out of idle mode and + * generating an interrupt at the next tick interval. + * + * Note that in this routine, _SysTimerElapsedTicks must be zero because the + * ticker has done its work and consumed all the ticks. This has to be true + * otherwise idle mode wouldn't have been entered in the first place. + * + * Called in _IntEnt() + * + * RETURNS: N/A + */ void _timer_idle_exit(void) { @@ -466,15 +466,15 @@ void _timer_idle_exit(void) } #endif /* !TIMER_SUPPORTS_TICKLESS */ -/******************************************************************************* -* -* timer_driver - initialize and enable the system clock -* -* This routine is used to program the PIT to deliver interrupts at the -* rate specified via the 'sys_clock_us_per_tick' global variable. 
-* -* RETURNS: N/A -*/ +/** + * + * timer_driver - initialize and enable the system clock + * + * This routine is used to program the PIT to deliver interrupts at the + * rate specified via the 'sys_clock_us_per_tick' global variable. + * + * RETURNS: N/A + */ void timer_driver(int priority /* priority parameter ignored by this driver */ ) @@ -501,14 +501,14 @@ void timer_driver(int priority /* priority parameter ignored by this driver */ irq_enable(PIT_INT_LVL); } -/******************************************************************************* -* -* timer_read - read the BSP timer hardware -* -* This routine returns the current time in terms of timer hardware clock cycles. -* -* RETURNS: up counter of elapsed clock cycles -*/ +/** + * + * timer_read - read the BSP timer hardware + * + * This routine returns the current time in terms of timer hardware clock cycles. + * + * RETURNS: up counter of elapsed clock cycles + */ uint32_t timer_read(void) { @@ -556,15 +556,15 @@ uint32_t timer_read(void) } #if defined(CONFIG_SYSTEM_TIMER_DISABLE) -/******************************************************************************* -* -* timer_disable - stop announcing ticks into the kernel -* -* This routine simply disables the PIT counter such that interrupts are no -* longer delivered. -* -* RETURNS: N/A -*/ +/** + * + * timer_disable - stop announcing ticks into the kernel + * + * This routine simply disables the PIT counter such that interrupts are no + * longer delivered. + * + * RETURNS: N/A + */ void timer_disable(void) { diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c index 6892c7a2ba8..58dc39bb288 100644 --- a/drivers/timer/loapic_timer.c +++ b/drivers/timer/loapic_timer.c @@ -41,7 +41,7 @@ The local APIC contains a 32-bit programmable timer for use by the local processor. The time base is derived from the processor's bus clock, divided by a value specified in the divide configuration register. After reset, the timer is initialized to zero. -*/ + */ #include #include @@ -132,16 +132,16 @@ static unsigned char timer_mode = TIMER_MODE_PERIODIC; extern struct nano_stack _k_command_stack; #endif /* CONFIG_MICROKERNEL */ -/******************************************************************************* -* -* _loApicTimerPeriodic - set the timer for periodic mode -* -* This routine sets the timer for periodic mode. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _loApicTimerPeriodic - set the timer for periodic mode + * + * This routine sets the timer for periodic mode. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void _loApicTimerPeriodic(void) { @@ -151,16 +151,16 @@ static inline void _loApicTimerPeriodic(void) #if defined(TIMER_SUPPORTS_TICKLESS) || \ defined(LOAPIC_TIMER_PERIODIC_WORKAROUND) || \ defined(CONFIG_SYSTEM_TIMER_DISABLE) -/******************************************************************************* -* -* _loApicTimerStop - stop the timer -* -* This routine stops the timer. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _loApicTimerStop - stop the timer + * + * This routine stops the timer. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void _loApicTimerStop(void) { @@ -170,16 +170,16 @@ static inline void _loApicTimerStop(void) #if defined(TIMER_SUPPORTS_TICKLESS) || \ defined(LOAPIC_TIMER_PERIODIC_WORKAROUND) -/******************************************************************************* -* -* _loApicTimerStart - start the timer -* -* This routine starts the timer. 
-* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _loApicTimerStart - start the timer + * + * This routine starts the timer. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void _loApicTimerStart(void) { @@ -187,16 +187,16 @@ static inline void _loApicTimerStart(void) } #endif -/******************************************************************************* -* -* _loApicTimerSetCount - set countdown value -* -* This routine sets value from which the timer will count down. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _loApicTimerSetCount - set countdown value + * + * This routine sets value from which the timer will count down. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void _loApicTimerSetCount( uint32_t count /* count from which timer is to count down */ @@ -206,16 +206,16 @@ static inline void _loApicTimerSetCount( } #if defined(TIMER_SUPPORTS_TICKLESS) -/******************************************************************************* -* -* _loApicTimerOneShot - set the timer for one shot mode -* -* This routine sets the timer for one shot mode. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _loApicTimerOneShot - set the timer for one shot mode + * + * This routine sets the timer for one shot mode. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void _loApicTimerOneShot(void) { @@ -223,66 +223,66 @@ static inline void _loApicTimerOneShot(void) } #endif /* TIMER_SUPPORTS_TICKLESS */ -/******************************************************************************* -* -* _loApicTimerSetDivider - set the rate at which the timer is decremented -* -* This routine sets rate at which the timer is decremented to match the -* external bus frequency. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _loApicTimerSetDivider - set the rate at which the timer is decremented + * + * This routine sets rate at which the timer is decremented to match the + * external bus frequency. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void _loApicTimerSetDivider(void) { *_REG_TIMER_CFG = (*_REG_TIMER_CFG & ~0xf) | LOAPIC_TIMER_DIVBY_1; } -/******************************************************************************* -* -* _loApicTimerGetRemaining - get the value from the current count register -* -* This routine gets the value from the timer's current count register. This -* value is the 'time' remaining to decrement before the timer triggers an -* interrupt. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _loApicTimerGetRemaining - get the value from the current count register + * + * This routine gets the value from the timer's current count register. This + * value is the 'time' remaining to decrement before the timer triggers an + * interrupt. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline uint32_t _loApicTimerGetRemaining(void) { return *_REG_TIMER_CCR; } #if defined(TIMER_SUPPORTS_TICKLESS) -/******************************************************************************* -* -* _loApicTimerGetCount - get the value from the initial count register -* -* This routine gets the value from the initial count register. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _loApicTimerGetCount - get the value from the initial count register + * + * This routine gets the value from the initial count register. 
+ * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline uint32_t _loApicTimerGetCount(void) { return *_REG_TIMER_ICR; } #endif /* TIMER_SUPPORTS_TICKLESS */ -/******************************************************************************* -* -* _timer_int_handler - system clock tick handler -* -* This routine handles the system clock tick interrupt. A TICK_EVENT event -* is pushed onto the microkernel stack. -* -* RETURNS: N/A -*/ +/** + * + * _timer_int_handler - system clock tick handler + * + * This routine handles the system clock tick interrupt. A TICK_EVENT event + * is pushed onto the microkernel stack. + * + * RETURNS: N/A + */ void _timer_int_handler(void *unused /* parameter is not used */ ) @@ -350,21 +350,21 @@ void _timer_int_handler(void *unused /* parameter is not used */ } #if defined(TIMER_SUPPORTS_TICKLESS) -/******************************************************************************* -* -* _loApicTimerTicklessIdleInit - initialize the tickless idle feature -* -* This routine initializes the tickless idle feature. Note that the maximum -* number of ticks that can elapse during a "tickless idle" is limited by -* . The larger the value (the lower the tick frequency), the -* fewer elapsed ticks during a "tickless idle". Conversely, the smaller the -* value (the higher the tick frequency), the more elapsed ticks during a -* "tickless idle". -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _loApicTimerTicklessIdleInit - initialize the tickless idle feature + * + * This routine initializes the tickless idle feature. Note that the maximum + * number of ticks that can elapse during a "tickless idle" is limited by + * . The larger the value (the lower the tick frequency), the + * fewer elapsed ticks during a "tickless idle". Conversely, the smaller the + * value (the higher the tick frequency), the more elapsed ticks during a + * "tickless idle". + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _loApicTimerTicklessIdleInit(void) { @@ -373,21 +373,21 @@ static void _loApicTimerTicklessIdleInit(void) max_load_value = max_system_ticks * counterLoadVal; } -/******************************************************************************* -* -* _i8253TicklessIdleSkew - calculate the skew from idle mode switching -* -* This routine calculates the skew from switching the timer in and out of idle -* mode. The typical sequence is: -* 1. Stop timer. -* 2. Load new counter value. -* 3. Set timer mode to periodic/one-shot -* 4. Start timer. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _i8253TicklessIdleSkew - calculate the skew from idle mode switching + * + * This routine calculates the skew from switching the timer in and out of idle + * mode. The typical sequence is: + * 1. Stop timer. + * 2. Load new counter value. + * 3. Set timer mode to periodic/one-shot + * 4. Start timer. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void _loApicTimerTicklessIdleSkew(void) { @@ -407,17 +407,17 @@ static void _loApicTimerTicklessIdleSkew(void) timer_idle_skew -= _loApicTimerGetRemaining(); } -/******************************************************************************* -* -* _timer_idle_enter - Place system timer into idle state -* -* Re-program the timer to enter into the idle state for the given number of -* ticks. It is placed into one shot mode where it will fire in the number of -* ticks supplied or the maximum number of ticks that can be programmed into -* hardware. A value of -1 means inifinite number of ticks. 
-* -* RETURNS: N/A -*/ +/** + * + * _timer_idle_enter - Place system timer into idle state + * + * Re-program the timer to enter into the idle state for the given number of + * ticks. It is placed into one shot mode where it will fire in the number of + * ticks supplied or the maximum number of ticks that can be programmed into + * hardware. A value of -1 means inifinite number of ticks. + * + * RETURNS: N/A + */ void _timer_idle_enter(int32_t ticks /* system ticks */ ) @@ -460,21 +460,21 @@ void _timer_idle_enter(int32_t ticks /* system ticks */ _loApicTimerStart(); } -/******************************************************************************* -* -* _timer_idle_exit - handling of tickless idle when interrupted -* -* The routine is responsible for taking the timer out of idle mode and -* generating an interrupt at the next tick interval. -* -* Note that in this routine, _SysTimerElapsedTicks must be zero because the -* ticker has done its work and consumed all the ticks. This has to be true -* otherwise idle mode wouldn't have been entered in the first place. -* -* Called in _IntEnt() -* -* RETURNS: N/A -*/ +/** + * + * _timer_idle_exit - handling of tickless idle when interrupted + * + * The routine is responsible for taking the timer out of idle mode and + * generating an interrupt at the next tick interval. + * + * Note that in this routine, _SysTimerElapsedTicks must be zero because the + * ticker has done its work and consumed all the ticks. This has to be true + * otherwise idle mode wouldn't have been entered in the first place. + * + * Called in _IntEnt() + * + * RETURNS: N/A + */ void _timer_idle_exit(void) { @@ -530,15 +530,15 @@ void _timer_idle_exit(void) } #endif /* TIMER_SUPPORTS_TICKLESS */ -/******************************************************************************* -* -* timer_driver - initialize and enable the system clock -* -* This routine is used to program the PIT to deliver interrupts at the -* rate specified via the 'sys_clock_us_per_tick' global variable. -* -* RETURNS: N/A -*/ +/** + * + * timer_driver - initialize and enable the system clock + * + * This routine is used to program the PIT to deliver interrupts at the + * rate specified via the 'sys_clock_us_per_tick' global variable. + * + * RETURNS: N/A + */ void timer_driver(int priority /* priority parameter ignored by this driver */ ) @@ -570,14 +570,14 @@ void timer_driver(int priority /* priority parameter ignored by this driver */ irq_enable(LOAPIC_TIMER_IRQ); } -/******************************************************************************* -* -* timer_read - read the BSP timer hardware -* -* This routine returns the current time in terms of timer hardware clock cycles. -* -* RETURNS: up counter of elapsed clock cycles -*/ +/** + * + * timer_read - read the BSP timer hardware + * + * This routine returns the current time in terms of timer hardware clock cycles. + * + * RETURNS: up counter of elapsed clock cycles + */ uint32_t timer_read(void) { @@ -599,15 +599,15 @@ uint32_t timer_read(void) } #if defined(CONFIG_SYSTEM_TIMER_DISABLE) -/******************************************************************************* -* -* timer_disable - stop announcing ticks into the kernel -* -* This routine simply disables the LOAPIC counter such that interrupts are no -* longer delivered. -* -* RETURNS: N/A -*/ +/** + * + * timer_disable - stop announcing ticks into the kernel + * + * This routine simply disables the LOAPIC counter such that interrupts are no + * longer delivered. 
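/*
 * A sketch of measuring a short interval with timer_read() as documented
 * above: the routine reports an up counter of hardware clock cycles, so the
 * cost of an operation is simply the difference of two reads. The helper
 * name and the do_work callback are illustrative.
 */
static uint32_t cyclesSpent(void (*do_work)(void))
{
	uint32_t start = timer_read();

	do_work();
	return timer_read() - start; /* elapsed hardware clock cycles */
}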
+ * + * RETURNS: N/A + */ void timer_disable(void) { diff --git a/include/arch/arc/arch.h b/include/arch/arc/arch.h index c17eb8d0ab7..522f8ecebfd 100644 --- a/include/arch/arc/arch.h +++ b/include/arch/arc/arch.h @@ -35,7 +35,7 @@ DESCRIPTION This header contains the ARC specific nanokernel interface. It is included by the nanokernel interface architecture-abstraction header (nanokernel/cpu.h) -*/ + */ #ifndef _ARC_ARCH__H_ #define _ARC_ARCH__H_ diff --git a/include/arch/arc/v2/aux_regs.h b/include/arch/arc/v2/aux_regs.h index dda5d8d520c..4cae3509615 100644 --- a/include/arch/arc/v2/aux_regs.h +++ b/include/arch/arc/v2/aux_regs.h @@ -34,7 +34,7 @@ DESCRIPTION Definitions for auxiliary registers. -*/ + */ #ifndef _ARC_V2_AUX_REGS__H_ #define _ARC_V2_AUX_REGS__H_ diff --git a/include/arch/arc/v2/error.h b/include/arch/arc/v2/error.h index 7ea8a2c0bf1..d0c6269b45e 100644 --- a/include/arch/arc/v2/error.h +++ b/include/arch/arc/v2/error.h @@ -33,7 +33,7 @@ /* DESCRIPTION ARC-specific nanokernel error handling interface. Included by ARC/arch.h. -*/ + */ #ifndef _ARCH_ARC_V2_ERROR_H_ #define _ARCH_ARC_V2_ERROR_H_ diff --git a/include/arch/arc/v2/exc.h b/include/arch/arc/v2/exc.h index 2cd04cf5a1a..362b6fc4687 100644 --- a/include/arch/arc/v2/exc.h +++ b/include/arch/arc/v2/exc.h @@ -33,7 +33,7 @@ /* DESCRIPTION ARC-specific nanokernel exception handling interface. Included by ARC/arch.h. -*/ + */ #ifndef _ARCH_ARC_V2_EXC_H_ #define _ARCH_ARC_V2_EXC_H_ diff --git a/include/arch/arc/v2/ffs.h b/include/arch/arc/v2/ffs.h index de83f89bab7..848a4837fa9 100644 --- a/include/arch/arc/v2/ffs.h +++ b/include/arch/arc/v2/ffs.h @@ -33,7 +33,7 @@ /* DESCRIPTION ARC-specific nanokernel ffs interface. Included by ARC/arch.h. -*/ + */ #ifndef _ARCH_ARC_V2_FFS_H_ #define _ARCH_ARC_V2_FFS_H_ @@ -45,17 +45,17 @@ GTEXT(nanoFfsMsb); extern unsigned nanoFfsLsb(unsigned int); extern unsigned nanoFfsMsb(unsigned int); -/******************************************************************************* -* -* nanoFfsMsb_inline - Find First Set bit (searching from most significant bit) -* -* This routine finds the first bit set in the argument passed it and returns -* the index of that bit. Bits are numbered starting at 1 from the least -* significant bit. A return value of zero indicates that the value passed -* is zero. -* -* RETURNS: most significant bit set -*/ +/** + * + * nanoFfsMsb_inline - Find First Set bit (searching from most significant bit) + * + * This routine finds the first bit set in the argument passed it and returns + * the index of that bit. Bits are numbered starting at 1 from the least + * significant bit. A return value of zero indicates that the value passed + * is zero. + * + * RETURNS: most significant bit set + */ #if defined(__GNUC__) static ALWAYS_INLINE unsigned int nanoFfsMsb_inline(unsigned int op) @@ -74,17 +74,17 @@ static ALWAYS_INLINE unsigned int nanoFfsMsb_inline(unsigned int op) } #endif -/******************************************************************************* -* -* nanoFfsLsb - find first set bit (searching from the least significant bit) -* -* This routine finds the first bit set in the argument passed it and -* returns the index of that bit. Bits are numbered starting -* at 1 from the least significant bit. A return value of zero indicates that -* the value passed is zero. 
-* -* RETURNS: least significant bit set -*/ +/** + * + * nanoFfsLsb - find first set bit (searching from the least significant bit) + * + * This routine finds the first bit set in the argument passed it and + * returns the index of that bit. Bits are numbered starting + * at 1 from the least significant bit. A return value of zero indicates that + * the value passed is zero. + * + * RETURNS: least significant bit set + */ #if defined(__GNUC__) static ALWAYS_INLINE unsigned int nanoFfsLsb_inline(unsigned int op) diff --git a/include/arch/arc/v2/irq.h b/include/arch/arc/v2/irq.h index f2ecb156e1f..82945d3e48b 100644 --- a/include/arch/arc/v2/irq.h +++ b/include/arch/arc/v2/irq.h @@ -71,17 +71,17 @@ extern void irq_priority_set(unsigned int irq, unsigned int prio); extern void _irq_exit(void); -/******************************************************************************* -* -* irq_lock_inline - disable all interrupts on the CPU (inline) -* -* See irq_lock() for full description -* -* RETURNS: An architecture-dependent lock-out key representing the -* "interrupt disable state" prior to the call. -* -* \NOMANUAL -*/ +/** + * + * irq_lock_inline - disable all interrupts on the CPU (inline) + * + * See irq_lock() for full description + * + * RETURNS: An architecture-dependent lock-out key representing the + * "interrupt disable state" prior to the call. + * + * \NOMANUAL + */ static ALWAYS_INLINE unsigned int irq_lock_inline(void) { @@ -91,16 +91,16 @@ static ALWAYS_INLINE unsigned int irq_lock_inline(void) return key; } -/******************************************************************************* -* -* irq_unlock_inline - enable all interrupts on the CPU (inline) -* -* See irq_unlock() for full description -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * irq_unlock_inline - enable all interrupts on the CPU (inline) + * + * See irq_unlock() for full description + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void irq_unlock_inline(unsigned int key) { diff --git a/include/arch/arc/v2/misc.h b/include/arch/arc/v2/misc.h index 132e7b20a43..947df266662 100644 --- a/include/arch/arc/v2/misc.h +++ b/include/arch/arc/v2/misc.h @@ -33,7 +33,7 @@ /* DESCRIPTION ARC-specific nanokernel miscellaneous interface. Included by arc/arch.h. -*/ + */ #ifndef _ARCH_ARC_V2_MISC_H_ #define _ARCH_ARC_V2_MISC_H_ diff --git a/include/arch/arm/CortexM/asm_inline_gcc.h b/include/arch/arm/CortexM/asm_inline_gcc.h index 727b1732f82..20330dd2afc 100644 --- a/include/arch/arm/CortexM/asm_inline_gcc.h +++ b/include/arch/arm/CortexM/asm_inline_gcc.h @@ -52,17 +52,17 @@ #include #include -/******************************************************************************* -* -* find_last_set_inline - find first set bit (searching from most significant bit) -* -* This routine finds the first bit set in the argument passed it and returns -* the index of that bit. Bits are numbered starting at 1 from the least -* significant bit. A return value of zero indicates that the value passed -* is zero. -* -* RETURNS: most significant bit set -*/ +/** + * + * find_last_set_inline - find first set bit (searching from most significant bit) + * + * This routine finds the first bit set in the argument passed it and returns + * the index of that bit. Bits are numbered starting at 1 from the least + * significant bit. A return value of zero indicates that the value passed + * is zero. 
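/*
 * A minimal critical-section sketch built on the inline locking pair
 * documented above: the key returned by irq_lock_inline() is only meaningful
 * as the argument to irq_unlock_inline(). The protected counter and helper
 * name are illustrative.
 */
static volatile unsigned int protected_count;

static inline void protectedCountBump(void)
{
	unsigned int key = irq_lock_inline(); /* disable interrupts, save prior state */

	protected_count++;                    /* update done with interrupts locked */
	irq_unlock_inline(key);               /* restore the previous interrupt state */
}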
+ * + * RETURNS: most significant bit set + */ static ALWAYS_INLINE unsigned int find_last_set_inline(unsigned int op) { @@ -80,17 +80,17 @@ static ALWAYS_INLINE unsigned int find_last_set_inline(unsigned int op) } -/******************************************************************************* -* -* find_first_set_inline - find first set bit (from the least significant bit) -* -* This routine finds the first bit set in the argument passed it and -* returns the index of that bit. Bits are numbered starting -* at 1 from the least significant bit. A return value of zero indicates that -* the value passed is zero. -* -* RETURNS: least significant bit set -*/ +/** + * + * find_first_set_inline - find first set bit (from the least significant bit) + * + * This routine finds the first bit set in the argument passed it and + * returns the index of that bit. Bits are numbered starting + * at 1 from the least significant bit. A return value of zero indicates that + * the value passed is zero. + * + * RETURNS: least significant bit set + */ static ALWAYS_INLINE unsigned int find_first_set_inline(unsigned int op) { @@ -109,36 +109,36 @@ static ALWAYS_INLINE unsigned int find_first_set_inline(unsigned int op) } -/******************************************************************************* -* -* irq_lock_inline - disable all interrupts on the CPU (inline) -* -* This routine disables interrupts. It can be called from either interrupt, -* task or fiber level. This routine returns an architecture-dependent -* lock-out key representing the "interrupt disable state" prior to the call; -* this key can be passed to irq_unlock_inline() to re-enable interrupts. -* -* The lock-out key should only be used as the argument to the -* irq_unlock_inline() API. It should never be used to manually re-enable -* interrupts or to inspect or manipulate the contents of the source register. -* -* WARNINGS -* Invoking a kernel routine with interrupts locked may result in -* interrupts being re-enabled for an unspecified period of time. If the -* called routine blocks, interrupts will be re-enabled while another -* context executes, or while the system is idle. -* -* The "interrupt disable state" is an attribute of a context. Thus, if a -* fiber or task disables interrupts and subsequently invokes a kernel -* routine that causes the calling context to block, the interrupt -* disable state will be restored when the context is later rescheduled -* for execution. -* -* RETURNS: An architecture-dependent lock-out key representing the -* "interrupt disable state" prior to the call. -* -* \NOMANUAL -*/ +/** + * + * irq_lock_inline - disable all interrupts on the CPU (inline) + * + * This routine disables interrupts. It can be called from either interrupt, + * task or fiber level. This routine returns an architecture-dependent + * lock-out key representing the "interrupt disable state" prior to the call; + * this key can be passed to irq_unlock_inline() to re-enable interrupts. + * + * The lock-out key should only be used as the argument to the + * irq_unlock_inline() API. It should never be used to manually re-enable + * interrupts or to inspect or manipulate the contents of the source register. + * + * WARNINGS + * Invoking a kernel routine with interrupts locked may result in + * interrupts being re-enabled for an unspecified period of time. If the + * called routine blocks, interrupts will be re-enabled while another + * context executes, or while the system is idle. 
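/*
 * A sketch of the bit-numbering convention documented above: positions are
 * counted from 1 starting at the least significant bit, and a return value
 * of 0 means no bit was set. The helper name is illustrative.
 */
static inline void findSetBitExample(void)
{
	unsigned int lsb  = find_first_set_inline(0x90); /* 5: bit 4 is the lowest set bit */
	unsigned int msb  = find_last_set_inline(0x90);  /* 8: bit 7 is the highest set bit */
	unsigned int none = find_first_set_inline(0);    /* 0: no bit set in the argument */

	(void)lsb;
	(void)msb;
	(void)none;
}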
+ * + * The "interrupt disable state" is an attribute of a context. Thus, if a + * fiber or task disables interrupts and subsequently invokes a kernel + * routine that causes the calling context to block, the interrupt + * disable state will be restored when the context is later rescheduled + * for execution. + * + * RETURNS: An architecture-dependent lock-out key representing the + * "interrupt disable state" prior to the call. + * + * \NOMANUAL + */ static ALWAYS_INLINE unsigned int irq_lock_inline(void) { @@ -156,20 +156,20 @@ static ALWAYS_INLINE unsigned int irq_lock_inline(void) } -/******************************************************************************* -* -* irq_unlock_inline - enable all interrupts on the CPU (inline) -* -* This routine re-enables interrupts on the CPU. The parameter -* is an architecture-dependent lock-out key that is returned by a previous -* invocation of irq_lock_inline(). -* -* This routine can be called from either interrupt, task or fiber level. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * irq_unlock_inline - enable all interrupts on the CPU (inline) + * + * This routine re-enables interrupts on the CPU. The parameter + * is an architecture-dependent lock-out key that is returned by a previous + * invocation of irq_lock_inline(). + * + * This routine can be called from either interrupt, task or fiber level. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static ALWAYS_INLINE void irq_unlock_inline(unsigned int key) { diff --git a/include/arch/arm/CortexM/error.h b/include/arch/arm/CortexM/error.h index 81684e112c9..4725b321a98 100644 --- a/include/arch/arm/CortexM/error.h +++ b/include/arch/arm/CortexM/error.h @@ -33,7 +33,7 @@ /* DESCRIPTION ARM-specific nanokernel error handling interface. Included by ARM/arch.h. -*/ + */ #ifndef _ARCH_ARM_CORTEXM_ERROR_H_ #define _ARCH_ARM_CORTEXM_ERROR_H_ diff --git a/include/arch/arm/CortexM/exc.h b/include/arch/arm/CortexM/exc.h index 3aadba3cd78..ea58ab54a9e 100644 --- a/include/arch/arm/CortexM/exc.h +++ b/include/arch/arm/CortexM/exc.h @@ -33,7 +33,7 @@ /* DESCRIPTION ARM-specific nanokernel exception handling interface. Included by ARM/arch.h. -*/ + */ #ifndef _ARCH_ARM_CORTEXM_EXC_H_ #define _ARCH_ARM_CORTEXM_EXC_H_ diff --git a/include/arch/arm/CortexM/ffs.h b/include/arch/arm/CortexM/ffs.h index 45faa28ef1a..d797d789f38 100644 --- a/include/arch/arm/CortexM/ffs.h +++ b/include/arch/arm/CortexM/ffs.h @@ -33,7 +33,7 @@ /* DESCRIPTION ARM-specific nanokernel ffs interface. Included by ARM/arch.h. -*/ + */ #ifndef _ARCH_ARM_CORTEXM_FFS_H_ #define _ARCH_ARM_CORTEXM_FFS_H_ diff --git a/include/arch/arm/CortexM/gdb_stub.h b/include/arch/arm/CortexM/gdb_stub.h index bdac56d0534..71c508f32bf 100644 --- a/include/arch/arm/CortexM/gdb_stub.h +++ b/include/arch/arm/CortexM/gdb_stub.h @@ -36,7 +36,7 @@ Prep work done when entering exceptions consists of saving the callee-saved registers before they get used by exception handlers, and recording the fact that we are running in an exception. -*/ + */ #ifndef _GDB_STUB__H_ #define _GDB_STUB__H_ diff --git a/include/arch/arm/CortexM/irq.h b/include/arch/arm/CortexM/irq.h index 1931e53cfd6..31a14818b0d 100644 --- a/include/arch/arm/CortexM/irq.h +++ b/include/arch/arm/CortexM/irq.h @@ -33,7 +33,7 @@ /* DESCRIPTION ARM-specific nanokernel interrupt handling interface. Included by ARM/arch.h. 
-*/ + */ #ifndef _ARCH_ARM_CORTEXM_IRQ_H_ #define _ARCH_ARM_CORTEXM_IRQ_H_ @@ -80,7 +80,7 @@ extern void _IntExit(void); #define DO_CONCAT(x, y) x ## y #define CONCAT(x, y) DO_CONCAT(x, y) -/******************************************************************************* +/** * * IRQ_CONNECT_STATIC - connect a routine to interrupt number * @@ -97,7 +97,7 @@ extern void _IntExit(void); __attribute__ ((section (TOSTR(CONCAT(.gnu.linkonce.isr_irq, irq))))) = \ {parameter, isr} -/******************************************************************************* +/** * * IRQ_CONFIG - configure interrupt for the device * diff --git a/include/arch/arm/CortexM/memory_map-m3-m4.h b/include/arch/arm/CortexM/memory_map-m3-m4.h index c707ef278fb..f0460a13b0f 100644 --- a/include/arch/arm/CortexM/memory_map-m3-m4.h +++ b/include/arch/arm/CortexM/memory_map-m3-m4.h @@ -35,7 +35,7 @@ DESCRIPTION This module contains definitions for the memory map parts specific to the CORTEX-M3/M4 series of processors. It is included by nanokernel/ARM/memory_map.h -*/ + */ #ifndef _MEMORY_MAP_M3_M4__H_ #define _MEMORY_MAP_M3_M4__H_ diff --git a/include/arch/arm/CortexM/memory_map.h b/include/arch/arm/CortexM/memory_map.h index e115c0bfefa..6a372aa3d4a 100644 --- a/include/arch/arm/CortexM/memory_map.h +++ b/include/arch/arm/CortexM/memory_map.h @@ -35,7 +35,7 @@ DESCRIPTION This module contains definitions for the memory map of the CORTEX-M series of processors. -*/ + */ #ifndef _CORTEXM_MEMORY_MAP__H_ #define _CORTEXM_MEMORY_MAP__H_ diff --git a/include/arch/arm/CortexM/misc.h b/include/arch/arm/CortexM/misc.h index 2017b6b0481..54472d5283b 100644 --- a/include/arch/arm/CortexM/misc.h +++ b/include/arch/arm/CortexM/misc.h @@ -33,7 +33,7 @@ /* DESCRIPTION ARM-specific nanokernel miscellaneous interface. Included by ARM/arch.h. -*/ + */ #ifndef _ARCH_ARM_CORTEXM_MISC_H_ #define _ARCH_ARM_CORTEXM_MISC_H_ diff --git a/include/arch/arm/CortexM/nvic.h b/include/arch/arm/CortexM/nvic.h index 819799d48a4..91f7936e168 100644 --- a/include/arch/arm/CortexM/nvic.h +++ b/include/arch/arm/CortexM/nvic.h @@ -46,7 +46,7 @@ especially for registers with multiple instances to account for potentially the way to implement it. Supports up to 240 IRQs and 256 priority levels. -*/ + */ #ifndef _NVIC_H_ #define _NVIC_H_ @@ -85,14 +85,14 @@ Supports up to 240 IRQs and 256 priority levels. #include #include -/******************************************************************************* -* -* _NvicIrqEnable - enable an IRQ -* -* Enable IRQ #, which is equivalent to exception #+16 -* -* RETURNS: N/A -*/ +/** + * + * _NvicIrqEnable - enable an IRQ + * + * Enable IRQ #, which is equivalent to exception #+16 + * + * RETURNS: N/A + */ static inline void _NvicIrqEnable(unsigned int irq /* IRQ number */ ) @@ -100,14 +100,14 @@ static inline void _NvicIrqEnable(unsigned int irq /* IRQ number */ __scs.nvic.iser[REG_FROM_IRQ(irq)] = 1 << BIT_FROM_IRQ(irq); } -/******************************************************************************* -* -* _NvicIsIrqEnabled - find out if an IRQ is enabled -* -* Find out if IRQ # is enabled. -* -* RETURNS: 1 if IRQ is enabled, 0 otherwise -*/ +/** + * + * _NvicIsIrqEnabled - find out if an IRQ is enabled + * + * Find out if IRQ # is enabled. 
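/*
 * A sketch of unmasking one external interrupt line with the NVIC helpers
 * documented above. MY_DEVICE_IRQ is a placeholder interrupt number; the
 * ISR itself is assumed to have been installed elsewhere (for instance with
 * IRQ_CONNECT_STATIC).
 */
#define MY_DEVICE_IRQ 5 /* placeholder IRQ number */

static inline void myDeviceIrqUnmask(void)
{
	_NvicIrqEnable(MY_DEVICE_IRQ);

	if (!_NvicIsIrqEnabled(MY_DEVICE_IRQ)) {
		/* unexpected: the ISER bit for this line should now read back as set */
	}
}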
+ * + * RETURNS: 1 if IRQ is enabled, 0 otherwise + */ static inline int _NvicIsIrqEnabled(unsigned int irq /* IRQ number */ ) @@ -115,14 +115,14 @@ static inline int _NvicIsIrqEnabled(unsigned int irq /* IRQ number */ return __scs.nvic.iser[REG_FROM_IRQ(irq)] & (1 << BIT_FROM_IRQ(irq)); } -/******************************************************************************* -* -* _NvicIrqEnable - disable an IRQ -* -* Disable IRQ #, which is equivalent to exception #+16 -* -* RETURNS: N/A -*/ +/** + * + * _NvicIrqEnable - disable an IRQ + * + * Disable IRQ #, which is equivalent to exception #+16 + * + * RETURNS: N/A + */ static inline void _NvicIrqDisable(unsigned int irq /* IRQ number */ ) @@ -130,16 +130,16 @@ static inline void _NvicIrqDisable(unsigned int irq /* IRQ number */ __scs.nvic.icer[REG_FROM_IRQ(irq)] = 1 << BIT_FROM_IRQ(irq); } -/******************************************************************************* -* -* _NvicIrqPend - pend an IRQ -* -* Pend IRQ #, which is equivalent to exception #+16. CPU will handle -* the IRQ when interrupts are enabled and/or returning from a higher priority -* interrupt. -* -* RETURNS: N/A -*/ +/** + * + * _NvicIrqPend - pend an IRQ + * + * Pend IRQ #, which is equivalent to exception #+16. CPU will handle + * the IRQ when interrupts are enabled and/or returning from a higher priority + * interrupt. + * + * RETURNS: N/A + */ static inline void _NvicIrqPend(unsigned int irq /* IRQ number */ ) @@ -147,14 +147,14 @@ static inline void _NvicIrqPend(unsigned int irq /* IRQ number */ __scs.nvic.ispr[REG_FROM_IRQ(irq)] = 1 << BIT_FROM_IRQ(irq); } -/******************************************************************************* -* -* _NvicIsIrqPending - find out if an IRQ is pending -* -* Find out if IRQ # is pending -* -* RETURNS: 1 if IRQ is pending, 0 otherwise -*/ +/** + * + * _NvicIsIrqPending - find out if an IRQ is pending + * + * Find out if IRQ # is pending + * + * RETURNS: 1 if IRQ is pending, 0 otherwise + */ static inline int _NvicIsIrqPending(unsigned int irq /* IRQ number */ ) @@ -162,16 +162,16 @@ static inline int _NvicIsIrqPending(unsigned int irq /* IRQ number */ return __scs.nvic.ispr[REG_FROM_IRQ(irq)] & (1 << BIT_FROM_IRQ(irq)); } -/******************************************************************************* -* -* _NvicIrqUnpend - unpend an IRQ -* -* Unpend IRQ #, which is equivalent to exception #+16. The previously -* pending interrupt will be ignored when either unlocking interrupts or -* returning from a higher priority exception. -* -* RETURNS: N/A -*/ +/** + * + * _NvicIrqUnpend - unpend an IRQ + * + * Unpend IRQ #, which is equivalent to exception #+16. The previously + * pending interrupt will be ignored when either unlocking interrupts or + * returning from a higher priority exception. + * + * RETURNS: N/A + */ static inline void _NvicIrqUnpend(unsigned int irq /* IRQ number */ ) @@ -179,14 +179,14 @@ static inline void _NvicIrqUnpend(unsigned int irq /* IRQ number */ __scs.nvic.icpr[REG_FROM_IRQ(irq)] = 1 << BIT_FROM_IRQ(irq); } -/******************************************************************************* -* -* _NvicIrqPrioSet - set priority of an IRQ -* -* Set priority of IRQ # to . There are 256 priority levels. -* -* RETURNS: N/A -*/ +/** + * + * _NvicIrqPrioSet - set priority of an IRQ + * + * Set priority of IRQ # to . There are 256 priority levels. 
+ * + * RETURNS: N/A + */ static inline void _NvicIrqPrioSet(unsigned int irq, /* IRQ number */ unsigned int prio /* priority */ @@ -196,14 +196,14 @@ static inline void _NvicIrqPrioSet(unsigned int irq, /* IRQ number */ __scs.nvic.ipr[irq] = prio; } -/******************************************************************************* -* -* _NvicIrqPrioGet - get priority of an IRQ -* -* Get priority of IRQ #. -* -* RETURNS: the priority level of the IRQ -*/ +/** + * + * _NvicIrqPrioGet - get priority of an IRQ + * + * Get priority of IRQ #. + * + * RETURNS: the priority level of the IRQ + */ static inline uint32_t _NvicIrqPrioGet(unsigned int irq /* IRQ number */ ) @@ -211,15 +211,15 @@ static inline uint32_t _NvicIrqPrioGet(unsigned int irq /* IRQ number */ return __scs.nvic.ipr[irq]; } -/******************************************************************************* -* -* _NvicSwInterruptTrigger - trigger an interrupt via software -* -* Trigger interrupt #. The CPU will handle the IRQ when interrupts are -* enabled and/or returning from a higher priority interrupt. -* -* RETURNS: N/A -*/ +/** + * + * _NvicSwInterruptTrigger - trigger an interrupt via software + * + * Trigger interrupt #. The CPU will handle the IRQ when interrupts are + * enabled and/or returning from a higher priority interrupt. + * + * RETURNS: N/A + */ static inline void _NvicSwInterruptTrigger(unsigned int irq /* IRQ number */ ) diff --git a/include/arch/arm/CortexM/scb.h b/include/arch/arm/CortexM/scb.h index 2c94c3309b9..5298af1ac25 100644 --- a/include/arch/arm/CortexM/scb.h +++ b/include/arch/arm/CortexM/scb.h @@ -47,7 +47,7 @@ especially for registers with multiple instances to account for 16 exceptions. If access to a missing functionality is needed, directly writing to the registers is the way to implement it. -*/ + */ #ifndef _SCB__H_ #define _SCB__H_ @@ -73,141 +73,141 @@ registers is the way to implement it. extern void _ScbSystemReset(void); extern void _ScbNumPriGroupSet(unsigned int n); -/******************************************************************************* -* -* _ScbIsNmiPending - find out if the NMI exception is pending -* -* RETURNS: 1 if it is pending, 0 otherwise -*/ +/** + * + * _ScbIsNmiPending - find out if the NMI exception is pending + * + * RETURNS: 1 if it is pending, 0 otherwise + */ static inline int _ScbIsNmiPending(void) { return !!__scs.scb.icsr.bit.nmipendset; } -/******************************************************************************* -* -* _ScbNmiPend - pend the NMI exception -* -* Pend the NMI exception: it should fire immediately. -* -* RETURNS: N/A -*/ +/** + * + * _ScbNmiPend - pend the NMI exception + * + * Pend the NMI exception: it should fire immediately. + * + * RETURNS: N/A + */ static inline void _ScbNmiPend(void) { __scs.scb.icsr.bit.nmipendset = 1; } -/******************************************************************************* -* -* _ScbIsPendsvPending - find out if the PendSV exception is pending -* -* RETURNS: 1 if it is pending, 0 otherwise -*/ +/** + * + * _ScbIsPendsvPending - find out if the PendSV exception is pending + * + * RETURNS: 1 if it is pending, 0 otherwise + */ static inline int _ScbIsPendsvPending(void) { return __scs.scb.icsr.bit.pendsvset; } -/******************************************************************************* -* -* _ScbPendsvSet - set the PendSV exception -* -* Set the PendSV exception: it will be handled when the last nested exception -* returns, or immediately if running in thread mode. 
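/*
 * A sketch of requesting the PendSV exception documented above: once pended,
 * it is taken when the last nested exception returns, or immediately from
 * thread mode, which makes it a convenient hook for deferred work at low
 * exception priority. The helper name is illustrative.
 */
static inline void pendsvRequest(void)
{
	if (!_ScbIsPendsvPending()) {
		_ScbPendsvSet(); /* runs once no higher-priority exception is active */
	}
}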
-* -* RETURNS: N/A -*/ +/** + * + * _ScbPendsvSet - set the PendSV exception + * + * Set the PendSV exception: it will be handled when the last nested exception + * returns, or immediately if running in thread mode. + * + * RETURNS: N/A + */ static inline void _ScbPendsvSet(void) { __scs.scb.icsr.bit.pendsvset = 1; } -/******************************************************************************* -* -* _ScbPendsvClear - clear the PendSV exception -* -* This routine clears the PendSV exception. -* -* RETURNS: N/A -*/ +/** + * + * _ScbPendsvClear - clear the PendSV exception + * + * This routine clears the PendSV exception. + * + * RETURNS: N/A + */ static inline void _ScbPendsvClear(void) { __scs.scb.icsr.bit.pendsvclr = 1; } -/******************************************************************************* -* -* _ScbIsSystickPending - find out if the SYSTICK exception is pending -* -* This routine determines if the SYSTICK exception is pending. -* -* RETURNS: 1 if it is pending, 0 otherwise -*/ +/** + * + * _ScbIsSystickPending - find out if the SYSTICK exception is pending + * + * This routine determines if the SYSTICK exception is pending. + * + * RETURNS: 1 if it is pending, 0 otherwise + */ static inline int _ScbIsSystickPending(void) { return __scs.scb.icsr.bit.pendstset; } -/******************************************************************************* -* -* _ScbSystickPend - pend the SYSTICK exception -* -* Pend the SYSTICK exception: it will be handled when returning from a higher -* priority exception or immediately if in thread mode or handling a lower -* priority exception. -* -* RETURNS: N/A -*/ +/** + * + * _ScbSystickPend - pend the SYSTICK exception + * + * Pend the SYSTICK exception: it will be handled when returning from a higher + * priority exception or immediately if in thread mode or handling a lower + * priority exception. + * + * RETURNS: N/A + */ static inline void _ScbSystickPendSet(void) { __scs.scb.icsr.bit.pendstset = 1; } -/******************************************************************************* -* -* _ScbSystickClear - clear the SYSTICK exception -* -* This routine clears the SYSTICK exception. -* -* RETURNS: N/A -*/ +/** + * + * _ScbSystickClear - clear the SYSTICK exception + * + * This routine clears the SYSTICK exception. + * + * RETURNS: N/A + */ static inline void _ScbSystickPendClear(void) { __scs.scb.icsr.bit.pendstclr = 1; } -/******************************************************************************* -* -* _ScbIsIrqPending - find out if an external interrupt is pending -* -* Find out if an external interrupt, generated by the NVIC, is pending. -* -* RETURNS: 1 if one or more interrupt is pending, 0 otherwise -*/ +/** + * + * _ScbIsIrqPending - find out if an external interrupt is pending + * + * Find out if an external interrupt, generated by the NVIC, is pending. + * + * RETURNS: 1 if one or more interrupt is pending, 0 otherwise + */ static inline int _ScbIsIrqPending(void) { return __scs.scb.icsr.bit.isrpending; } -/******************************************************************************* -* -* _ScbHiPriVectorPendingGet - find out the exception number of highest-priority -* pending exception (including interrupts) -* -* If one or more exceptions are pending, return the exception number of the -* highest-priority one; otherwise, return 0. 
-* -* RETURNS: the exception number if one is pending, 0 otherwise -*/ +/** + * + * _ScbHiPriVectorPendingGet - find out the exception number of highest-priority + * pending exception (including interrupts) + * + * If one or more exceptions are pending, return the exception number of the + * highest-priority one; otherwise, return 0. + * + * RETURNS: the exception number if one is pending, 0 otherwise + */ static inline int _ScbHiPriVectorPendingGet(void) { @@ -217,14 +217,14 @@ static inline int _ScbHiPriVectorPendingGet(void) return reg.bit.vectpending; } -/******************************************************************************* -* -* _ScbIsNested - find out if the currently executing exception is nested -* -* This routine determines if the currently executing exception is nested. -* -* RETURNS: 1 if nested, 0 otherwise -*/ +/** + * + * _ScbIsNested - find out if the currently executing exception is nested + * + * This routine determines if the currently executing exception is nested. + * + * RETURNS: 1 if nested, 0 otherwise + */ static inline int _ScbIsNestedExc(void) { @@ -232,14 +232,14 @@ static inline int _ScbIsNestedExc(void) return !__scs.scb.icsr.bit.rettobase; } -/******************************************************************************* -* -* _ScbIsInThreadMode - find out if running in thread mode -* -* This routine determines if the current mode is thread mode. -* -* RETURNS: 1 if in thread mode, 0 otherwise -*/ +/** + * + * _ScbIsInThreadMode - find out if running in thread mode + * + * This routine determines if the current mode is thread mode. + * + * RETURNS: 1 if in thread mode, 0 otherwise + */ static inline int _ScbIsInThreadMode(void) { @@ -247,71 +247,71 @@ static inline int _ScbIsInThreadMode(void) return !__scs.scb.icsr.bit.vectactive; } -/******************************************************************************* -* -* _ScbIsInHandlerMode - find out if running in handler mode -* -* This routine determines if the current mode is handler mode. -* -* RETURNS: 1 if in handler mode, 0 otherwise -*/ +/** + * + * _ScbIsInHandlerMode - find out if running in handler mode + * + * This routine determines if the current mode is handler mode. + * + * RETURNS: 1 if in handler mode, 0 otherwise + */ static inline int _ScbIsInHandlerMode(void) { return !_ScbIsInThreadMode(); } -/******************************************************************************* -* -* _ScbIsInExc - find out if handling an exception -* -* This routine determines if an exception is being handled (handler mode). -* -* RETURNS: 1 if handling an exception, 0 otherwise -*/ +/** + * + * _ScbIsInExc - find out if handling an exception + * + * This routine determines if an exception is being handled (handler mode). + * + * RETURNS: 1 if handling an exception, 0 otherwise + */ static inline int _ScbIsInExc(void) { return _ScbIsInHandlerMode(); } -/******************************************************************************* -* -* _ScbActiveVectorGet - obtain the currently executing vector -* -* If currently handling an exception/interrupt, return the exceuting vector -* number. If not, return 0. -* -* RETURNS: the currently excecuting vector number, 0 if in thread mode. -*/ +/** + * + * _ScbActiveVectorGet - obtain the currently executing vector + * + * If currently handling an exception/interrupt, return the exceuting vector + * number. If not, return 0. + * + * RETURNS: the currently excecuting vector number, 0 if in thread mode. 
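/*
 * A sketch combining the execution-context queries documented above: report
 * which exception vector is currently running, or 0 when the caller is in
 * thread mode. The helper name is illustrative.
 */
static inline uint32_t activeVectorOrZero(void)
{
	if (!_ScbIsInHandlerMode()) {
		return 0; /* thread mode: no vector is active */
	}
	return _ScbActiveVectorGet(); /* number of the exception being handled */
}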
+ */ static inline uint32_t _ScbActiveVectorGet(void) { return __scs.scb.icsr.bit.vectactive; } -/******************************************************************************* -* -* _ScbIsVtableInSram - find out if vector table is in SRAM or ROM -* -* This routine determines if the currently executing exception is nested. -* -* RETURNS: 1 if in SRAM, 0 if in ROM -*/ +/** + * + * _ScbIsVtableInSram - find out if vector table is in SRAM or ROM + * + * This routine determines if the currently executing exception is nested. + * + * RETURNS: 1 if in SRAM, 0 if in ROM + */ static inline uint32_t _ScbIsVtableInSram(void) { return !!__scs.scb.vtor.bit.tblbase; } -/******************************************************************************* -* -* _ScbVtableLocationSet - move vector table from SRAM to ROM and vice-versa -* -* This routine moves the vector table to the given memory region. -* -* RETURNS: 1 if in SRAM, 0 if in ROM -*/ +/** + * + * _ScbVtableLocationSet - move vector table from SRAM to ROM and vice-versa + * + * This routine moves the vector table to the given memory region. + * + * RETURNS: 1 if in SRAM, 0 if in ROM + */ static inline void _ScbVtableLocationSet( int sram /* 1 to move vector to SRAM, 0 to move it to ROM */ @@ -321,37 +321,37 @@ static inline void _ScbVtableLocationSet( __scs.scb.vtor.bit.tblbase = sram; } -/******************************************************************************* -* -* _ScbVtableAddrGet - obtain base address of vector table -* -* This routine returs the vector table's base address. -* -* RETURNS: the base address of the vector table -*/ +/** + * + * _ScbVtableAddrGet - obtain base address of vector table + * + * This routine returs the vector table's base address. + * + * RETURNS: the base address of the vector table + */ static inline uint32_t _ScbVtableAddrGet(void) { return __scs.scb.vtor.bit.tbloff; } -/******************************************************************************* -* -* _ScbVtableAddrSet - set base address of vector table -* -* must align to the number of exception entries in vector table: -* -* numException = 16 + num_interrupts where each entry is 4 Bytes -* -* As a minimum, must be a multiple of 128: -* -* 0 <= num_interrupts < 16: multiple 0x080 -* 16 <= num_interrupts < 48: multiple 0x100 -* 48 <= num_interrupts < 112: multiple 0x200 -* .... -* -* RETURNS: N/A -*/ +/** + * + * _ScbVtableAddrSet - set base address of vector table + * + * must align to the number of exception entries in vector table: + * + * numException = 16 + num_interrupts where each entry is 4 Bytes + * + * As a minimum, must be a multiple of 128: + * + * 0 <= num_interrupts < 16: multiple 0x080 + * 16 <= num_interrupts < 48: multiple 0x100 + * 48 <= num_interrupts < 112: multiple 0x200 + * .... + * + * RETURNS: N/A + */ static inline void _ScbVtableAddrSet(uint32_t addr /* base address, aligned on 128 minimum */ @@ -361,202 +361,202 @@ static inline void _ScbVtableAddrSet(uint32_t addr /* base address, aligned on __scs.scb.vtor.bit.tbloff = addr; } -/******************************************************************************* -* -* _ScbIsDataLittleEndian - find out if data regions are little endian -* -* Data regions on Cortex-M devices can be either little or big endian. Code -* regions are always little endian. -* -* RETURNS: 1 if little endian, 0 if big endian -*/ +/** + * + * _ScbIsDataLittleEndian - find out if data regions are little endian + * + * Data regions on Cortex-M devices can be either little or big endian. 
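/*
 * A sketch of relocating the vector table with the helpers documented above,
 * assuming the table contents have already been copied to the new location.
 * The SRAM address is a placeholder and must respect the alignment rule given
 * for _ScbVtableAddrSet() (a multiple of at least 128, growing with the
 * number of interrupts).
 */
#define VECTOR_TABLE_SRAM_ADDR 0x20000000 /* placeholder, suitably aligned */

static inline void vectorTableMoveToSram(void)
{
	_ScbVtableAddrSet(VECTOR_TABLE_SRAM_ADDR);
	_ScbVtableLocationSet(1); /* 1 selects SRAM, 0 selects ROM */
}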
Code + * regions are always little endian. + * + * RETURNS: 1 if little endian, 0 if big endian + */ static inline int _ScbIsDataLittleEndian(void) { return !(__scs.scb.aircr.bit.endianness); } -/******************************************************************************* -* -* _ScbNumPriGroupGet - get the programmed number of priority groups -* -* Exception priorities can be sub-divided into groups, with sub-priorities. -* Within these groups, exceptions do not preempt each other. The sub-priorities -* are only used to decide which exception will run when several are pending. -* -* RETURNS: the number of priority groups -*/ +/** + * + * _ScbNumPriGroupGet - get the programmed number of priority groups + * + * Exception priorities can be sub-divided into groups, with sub-priorities. + * Within these groups, exceptions do not preempt each other. The sub-priorities + * are only used to decide which exception will run when several are pending. + * + * RETURNS: the number of priority groups + */ static inline int _ScbNumPriGroupGet(void) { return 1 << (7 - __scs.scb.aircr.bit.prigroup); } -/******************************************************************************* -* -* _ScbSleepOnExitSet - CPU goes to sleep after exiting an ISR -* -* CPU never runs in thread mode until this is cancelled. -* -* This enables the feature until it is cancelled. -* -* RETURNS: N/A -*/ +/** + * + * _ScbSleepOnExitSet - CPU goes to sleep after exiting an ISR + * + * CPU never runs in thread mode until this is cancelled. + * + * This enables the feature until it is cancelled. + * + * RETURNS: N/A + */ static inline void _ScbSleepOnExitSet(void) { __scs.scb.scr.bit.sleeponexit = 1; } -/******************************************************************************* -* -* _ScbSleepOnExitClear - CPU does not go to sleep after exiting an ISR -* -* This routine prevents CPU sleep mode upon exiting an ISR. -* This is the normal operating mode. -* -* RETURNS: N/A -*/ +/** + * + * _ScbSleepOnExitClear - CPU does not go to sleep after exiting an ISR + * + * This routine prevents CPU sleep mode upon exiting an ISR. + * This is the normal operating mode. + * + * RETURNS: N/A + */ static inline void _ScbSleepOnExitClear(void) { __scs.scb.scr.bit.sleeponexit = 0; } -/******************************************************************************* -* -* _ScbSevOnPendSet - do not put CPU to sleep if pending exception are present -* when invoking wfe instruction -* -* By default, when invoking wfi or wfe instructions, if PRIMASK is masking -* interrupts and if an interrupt is pending, the CPU will go to sleep, and -* another interrupt is needed to wake it up. By coupling the use of the -* SEVONPEND feature and the wfe instruction (NOT wfi), pending exception will -* prevent the CPU from sleeping. -* -* This enables the feature until it is cancelled. -* -* RETURNS: N/A -*/ +/** + * + * _ScbSevOnPendSet - do not put CPU to sleep if pending exception are present + * when invoking wfe instruction + * + * By default, when invoking wfi or wfe instructions, if PRIMASK is masking + * interrupts and if an interrupt is pending, the CPU will go to sleep, and + * another interrupt is needed to wake it up. By coupling the use of the + * SEVONPEND feature and the wfe instruction (NOT wfi), pending exception will + * prevent the CPU from sleeping. + * + * This enables the feature until it is cancelled. 
+ * + * RETURNS: N/A + */ static inline void _ScbSevOnPendSet(void) { __scs.scb.scr.bit.sevonpend = 1; } -/******************************************************************************* -* -* _ScbSevOnPendClear - clear SEVONPEND bit -* -* See _ScbSevOnPendSet(). -* -* RETURNS: N/A -*/ +/** + * + * _ScbSevOnPendClear - clear SEVONPEND bit + * + * See _ScbSevOnPendSet(). + * + * RETURNS: N/A + */ static inline void _ScbSevOnPendClear(void) { __scs.scb.scr.bit.sevonpend = 0; } -/******************************************************************************* -* -* _ScbSleepDeepSet - when putting the CPU to sleep, put it in deep sleep -* -* When wfi/wfe is invoked, the CPU will go into a "deep sleep" mode, using less -* power than regular sleep mode, but with some possible side-effect. -* -* Behaviour is processor-specific. -* -* RETURNS: N/A -*/ +/** + * + * _ScbSleepDeepSet - when putting the CPU to sleep, put it in deep sleep + * + * When wfi/wfe is invoked, the CPU will go into a "deep sleep" mode, using less + * power than regular sleep mode, but with some possible side-effect. + * + * Behaviour is processor-specific. + * + * RETURNS: N/A + */ static inline void _ScbSleepDeepSet(void) { __scs.scb.scr.bit.sleepdeep = 1; } -/******************************************************************************* -* -* _ScbSleepDeepSet - when putting the CPU to sleep, do not put it in deep sleep -* -* This routine prevents CPU deep sleep mode. -* -* RETURNS: N/A -*/ +/** + * + * _ScbSleepDeepSet - when putting the CPU to sleep, do not put it in deep sleep + * + * This routine prevents CPU deep sleep mode. + * + * RETURNS: N/A + */ static inline void _ScbSleepDeepClear(void) { __scs.scb.scr.bit.sleepdeep = 0; } -/******************************************************************************* -* -* _ScbDivByZeroFaultEnable - enable faulting on division by zero -* -* This routine enables the divide by zero fault. -* By default, the CPU ignores the error. -* -* RETURNS: N/A -*/ +/** + * + * _ScbDivByZeroFaultEnable - enable faulting on division by zero + * + * This routine enables the divide by zero fault. + * By default, the CPU ignores the error. + * + * RETURNS: N/A + */ static inline void _ScbDivByZeroFaultEnable(void) { __scs.scb.ccr.bit.div_0_trp = 1; } -/******************************************************************************* -* -* _ScbDivByZeroFaultDisable - ignore division by zero errors -* -* This routine disables the divide by zero fault. -* This is the default behaviour. -* -* RETURNS: N/A -*/ +/** + * + * _ScbDivByZeroFaultDisable - ignore division by zero errors + * + * This routine disables the divide by zero fault. + * This is the default behaviour. + * + * RETURNS: N/A + */ static inline void _ScbDivByZeroFaultDisable(void) { __scs.scb.ccr.bit.div_0_trp = 0; } -/******************************************************************************* -* -* _ScbUnalignedFaultEnable - enable faulting on unaligned access -* -* This routine enables the unaligned access fault. -* By default, the CPU ignores the error. -* -* RETURNS: N/A -*/ +/** + * + * _ScbUnalignedFaultEnable - enable faulting on unaligned access + * + * This routine enables the unaligned access fault. + * By default, the CPU ignores the error. 
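/*
 * A sketch of enabling the optional traps documented above during early
 * initialization: by default the CPU silently tolerates division by zero
 * and unaligned accesses, so trapping them makes such bugs visible. The
 * helper name is illustrative.
 */
static inline void strictFaultTrapsEnable(void)
{
	_ScbDivByZeroFaultEnable(); /* fault instead of ignoring a divide by zero */
	_ScbUnalignedFaultEnable(); /* fault instead of ignoring unaligned accesses */
}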
+ * + * RETURNS: N/A + */ static inline void _ScbUnalignedFaultEnable(void) { __scs.scb.ccr.bit.unalign_trp = 1; } -/******************************************************************************* -* -* _ScbUnalignedFaultDisable - ignore unaligned access errors -* -* This routine disables the divide by zero fault. -* This is the default behaviour. -* -* RETURNS: N/A -*/ +/** + * + * _ScbUnalignedFaultDisable - ignore unaligned access errors + * + * This routine disables the divide by zero fault. + * This is the default behaviour. + * + * RETURNS: N/A + */ static inline void _ScbUnalignedFaultDisable(void) { __scs.scb.ccr.bit.unalign_trp = 0; } -/******************************************************************************* -* -* _ScbCcrSet - write the CCR all at once -* -* This routine writes the given value to the Configuration Control Register. -* -* RETURNS: N/A -*/ +/** + * + * _ScbCcrSet - write the CCR all at once + * + * This routine writes the given value to the Configuration Control Register. + * + * RETURNS: N/A + */ static inline void ScbCcrSet(uint32_t val /* value to write to CCR */ ) @@ -564,17 +564,17 @@ static inline void ScbCcrSet(uint32_t val /* value to write to CCR */ __scs.scb.ccr.val = val; } -/******************************************************************************* -* -* _ScbExcPrioGet - obtain priority of an exception -* -* Only works with exceptions 4 to 15, ie. do not use this for interrupts, which -* are exceptions 16+. -* -* Exceptions 1 to 3 priorities are fixed (-3, -2, -1). -* -* RETURNS: priority of exception -*/ +/** + * + * _ScbExcPrioGet - obtain priority of an exception + * + * Only works with exceptions 4 to 15, ie. do not use this for interrupts, which + * are exceptions 16+. + * + * Exceptions 1 to 3 priorities are fixed (-3, -2, -1). + * + * RETURNS: priority of exception + */ static inline uint8_t _ScbExcPrioGet(uint8_t exc /* exception number, 4 to 15 */ ) @@ -584,20 +584,20 @@ static inline uint8_t _ScbExcPrioGet(uint8_t exc /* exception number, 4 to 15 */ return __scs.scb.shpr[exc - 4]; } -/******************************************************************************* -* -* _ScbExcPrioSet - set priority of an exception -* -* Only works with exceptions 4 to 15, ie. do not use this for interrupts, which -* are exceptions 16+. -* -* Note that the processor might not implement all 8 bits, in which case the -* lower N bits are ignored. -* -* Exceptions 1 to 3 priorities are fixed (-3, -2, -1). -* -* RETURNS: N/A -*/ +/** + * + * _ScbExcPrioSet - set priority of an exception + * + * Only works with exceptions 4 to 15, ie. do not use this for interrupts, which + * are exceptions 16+. + * + * Note that the processor might not implement all 8 bits, in which case the + * lower N bits are ignored. + * + * Exceptions 1 to 3 priorities are fixed (-3, -2, -1). + * + * RETURNS: N/A + */ static inline void _ScbExcPrioSet(uint8_t exc, /* exception number, 4 to 15 */ uint8_t pri /* priority, 0 to 255 */ @@ -608,620 +608,620 @@ static inline void _ScbExcPrioSet(uint8_t exc, /* exception number, 4 to 15 */ __scs.scb.shpr[exc - 4] = pri; } -/******************************************************************************* -* -* _ScbUsageFaultEnable - enable usage fault exceptions -* -* This routine enables usage faults. -* By default, the CPU does not raise usage fault exceptions. -* -* RETURNS: N/A -*/ +/** + * + * _ScbUsageFaultEnable - enable usage fault exceptions + * + * This routine enables usage faults. 
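/*
 * A sketch of programming a system exception priority with _ScbExcPrioSet()
 * as documented above: only exceptions 4 to 15 may be set, and the hardware
 * may ignore the low-order bits of the value. Exception 14 (PendSV on
 * Cortex-M) is used here as an illustrative target.
 */
static inline void pendsvPrioLowest(void)
{
	_ScbExcPrioSet(14, 0xff); /* request the lowest configurable priority */
}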
+ * By default, the CPU does not raise usage fault exceptions. + * + * RETURNS: N/A + */ static inline void _ScbUsageFaultEnable(void) { __scs.scb.shcsr.bit.usgfaultena = 1; } -/******************************************************************************* -* -* _ScbUsageFaultDisable - disable usage fault exceptions -* -* This routine disables usage faults. -* This is the default behaviour. -* -* RETURNS: N/A -*/ +/** + * + * _ScbUsageFaultDisable - disable usage fault exceptions + * + * This routine disables usage faults. + * This is the default behaviour. + * + * RETURNS: N/A + */ static inline void _ScbUsageFaultDisable(void) { __scs.scb.shcsr.bit.usgfaultena = 0; } -/******************************************************************************* -* -* _ScbBusFaultEnable - enable bus fault exceptions -* -* This routine enables bus faults. -* By default, the CPU does not raise bus fault exceptions. -* -* RETURNS: N/A -*/ +/** + * + * _ScbBusFaultEnable - enable bus fault exceptions + * + * This routine enables bus faults. + * By default, the CPU does not raise bus fault exceptions. + * + * RETURNS: N/A + */ static inline void _ScbBusFaultEnable(void) { __scs.scb.shcsr.bit.busfaultena = 1; } -/******************************************************************************* -* -* _ScbBusFaultDisable - disable bus fault exceptions -* -* This routine disables bus faults. -* This is the default behaviour. -* -* RETURNS: N/A -*/ +/** + * + * _ScbBusFaultDisable - disable bus fault exceptions + * + * This routine disables bus faults. + * This is the default behaviour. + * + * RETURNS: N/A + */ static inline void _ScbBusFaultDisable(void) { __scs.scb.shcsr.bit.busfaultena = 0; } -/******************************************************************************* -* -* _ScbMemFaultEnable - enable MPU faults exceptions -* -* This routine enables the MPU faults. -* By default, the CPU does not raise MPU fault exceptions. -* -* RETURNS: N/A -*/ +/** + * + * _ScbMemFaultEnable - enable MPU faults exceptions + * + * This routine enables the MPU faults. + * By default, the CPU does not raise MPU fault exceptions. + * + * RETURNS: N/A + */ static inline void _ScbMemFaultEnable(void) { __scs.scb.shcsr.bit.memfaultena = 1; } -/******************************************************************************* -* -* _ScbMemFaultDisable - disable MPU fault exceptions -* -* This routine disables MPU faults. -* This is the default behaviour. -* -* RETURNS: N/A -*/ +/** + * + * _ScbMemFaultDisable - disable MPU fault exceptions + * + * This routine disables MPU faults. + * This is the default behaviour. + * + * RETURNS: N/A + */ static inline void _ScbMemFaultDisable(void) { __scs.scb.shcsr.bit.memfaultena = 0; } -/******************************************************************************* -* -* _ScbHardFaultIsBusErrOnVectorRead - find out if a hard fault is caused by -* a bus error on vector read -* -* This routine determines if a hard fault is caused by a bus error during -* a vector table read operation. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbHardFaultIsBusErrOnVectorRead - find out if a hard fault is caused by + * a bus error on vector read + * + * This routine determines if a hard fault is caused by a bus error during + * a vector table read operation. 
+ * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbHardFaultIsBusErrOnVectorRead(void) { return __scs.scb.hfsr.bit.vecttbl; } -/******************************************************************************* -* -* _ScbHardFaultIsForced - find out if a fault was escalated to hard fault -* -* Happens if a fault cannot be triggered because of priority or because it was -* disabled. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbHardFaultIsForced - find out if a fault was escalated to hard fault + * + * Happens if a fault cannot be triggered because of priority or because it was + * disabled. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbHardFaultIsForced(void) { return __scs.scb.hfsr.bit.forced; } -/******************************************************************************* -* -* _ScbHardFaultAllFaultsReset - clear all hard faults (HFSR register) -* -* HFSR register is a 'write-one-to-clear' (W1C) register. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbHardFaultAllFaultsReset - clear all hard faults (HFSR register) + * + * HFSR register is a 'write-one-to-clear' (W1C) register. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbHardFaultAllFaultsReset(void) { return __scs.scb.hfsr.val = 0xffff; } -/******************************************************************************* -* -* _ScbIsMemFault - find out if a hard fault is an MPU fault -* -* This routine determines if a hard fault is an MPU fault. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbIsMemFault - find out if a hard fault is an MPU fault + * + * This routine determines if a hard fault is an MPU fault. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbIsMemFault(void) { return !!__scs.scb.cfsr.byte.mmfsr.val; } -/******************************************************************************* -* -* _ScbMemFaultIsMmfarValid - find out if the MMFAR register contains a valid -* value -* -* The MMFAR register contains the faulting address on an MPU fault. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbMemFaultIsMmfarValid - find out if the MMFAR register contains a valid + * value + * + * The MMFAR register contains the faulting address on an MPU fault. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbMemFaultIsMmfarValid(void) { return !!__scs.scb.cfsr.byte.mmfsr.bit.mmarvalid; } -/******************************************************************************* -* -* _ScbMemFaultMmfarReset - invalid the value in MMFAR -* -* This routine invalidates the MMFAR value. This should be done after -* processing an MPU fault. -* -* RETURNS: N/A -*/ +/** + * + * _ScbMemFaultMmfarReset - invalid the value in MMFAR + * + * This routine invalidates the MMFAR value. This should be done after + * processing an MPU fault. + * + * RETURNS: N/A + */ static inline void _ScbMemFaultMmfarReset(void) { __scs.scb.cfsr.byte.mmfsr.bit.mmarvalid = 0; } -/******************************************************************************* -* -* _ScbMemFaultAllFaultsReset - clear all MPU faults (MMFSR register) -* -* CFSR/MMFSR register is a 'write-one-to-clear' (W1C) register. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbMemFaultAllFaultsReset - clear all MPU faults (MMFSR register) + * + * CFSR/MMFSR register is a 'write-one-to-clear' (W1C) register. 
+ * + * RETURNS: 1 if so, 0 otherwise + */ static inline void _ScbMemFaultAllFaultsReset(void) { __scs.scb.cfsr.byte.mmfsr.val = 0xfe; } -/******************************************************************************* -* -* _ScbMemFaultIsStacking - find out if an MPU fault is a stacking fault -* -* This routine determines if an MPU fault is a stacking fault. -* This may occur upon exception entry. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbMemFaultIsStacking - find out if an MPU fault is a stacking fault + * + * This routine determines if an MPU fault is a stacking fault. + * This may occur upon exception entry. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbMemFaultIsStacking(void) { return !!__scs.scb.cfsr.byte.mmfsr.bit.mstkerr; } -/******************************************************************************* -* -* _ScbMemFaultIsUnstacking - find out if an MPU fault is an unstacking fault -* -* This routine determines if an MPU fault is an unstacking fault. -* This may occur upon exception exit. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbMemFaultIsUnstacking - find out if an MPU fault is an unstacking fault + * + * This routine determines if an MPU fault is an unstacking fault. + * This may occur upon exception exit. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbMemFaultIsUnstacking(void) { return !!__scs.scb.cfsr.byte.mmfsr.bit.munstkerr; } -/******************************************************************************* -* -* _ScbMemFaultIsDataAccessViolation - find out if an MPU fault is a data access -* violation -* -* If this routine returns 1, read the MMFAR register via _ScbMemFaultAddrGet() -* to get the faulting address. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbMemFaultIsDataAccessViolation - find out if an MPU fault is a data access + * violation + * + * If this routine returns 1, read the MMFAR register via _ScbMemFaultAddrGet() + * to get the faulting address. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbMemFaultIsDataAccessViolation(void) { return !!__scs.scb.cfsr.byte.mmfsr.bit.daccviol; } -/******************************************************************************* -* -* _ScbMemFaultIsInstrAccessViolation - find out if an MPU fault is an -* instruction access violation -* -* This routine determines if an MPU fault is due to an instruction access -* violation. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbMemFaultIsInstrAccessViolation - find out if an MPU fault is an + * instruction access violation + * + * This routine determines if an MPU fault is due to an instruction access + * violation. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbMemFaultIsInstrAccessViolation(void) { return !!__scs.scb.cfsr.byte.mmfsr.bit.iaccviol; } -/******************************************************************************* -* -* _ScbMemFaultAddrGet - find out the faulting address on an MPU fault -* -* RETURNS: the faulting address -*/ +/** + * + * _ScbMemFaultAddrGet - find out the faulting address on an MPU fault + * + * RETURNS: the faulting address + */ static inline uint32_t _ScbMemFaultAddrGet(void) { return __scs.scb.mmfar; } -/******************************************************************************* -* -* _ScbIsBusFault - find out if a hard fault is a bus fault -* -* This routine determines if a hard fault is a bus fault. 
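/*
 * A sketch of the MPU-fault inspection sequence suggested by the routines
 * above: confirm the fault is an MPU fault, read the faulting address while
 * MMFAR is valid, then clear the write-one-to-clear status bits. The helper
 * name and the report callback are illustrative.
 */
static inline void memFaultReport(void (*report)(uint32_t addr))
{
	if (!_ScbIsMemFault()) {
		return;
	}
	if (_ScbMemFaultIsMmfarValid()) {
		report(_ScbMemFaultAddrGet()); /* faulting data address from MMFAR */
		_ScbMemFaultMmfarReset();      /* invalidate MMFAR once processed */
	}
	_ScbMemFaultAllFaultsReset();          /* clear the MMFSR status bits */
}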
-* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbIsBusFault - find out if a hard fault is a bus fault + * + * This routine determines if a hard fault is a bus fault. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbIsBusFault(void) { return !!__scs.scb.cfsr.byte.bfsr.val; } -/******************************************************************************* -* -* _ScbBusFaultIsBfarValid - find out if the BFAR register contains a valid -* value -* -* The BFAR register contains the faulting address on bus fault. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbBusFaultIsBfarValid - find out if the BFAR register contains a valid + * value + * + * The BFAR register contains the faulting address on bus fault. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbBusFaultIsBfarValid(void) { return !!__scs.scb.cfsr.byte.bfsr.bit.bfarvalid; } -/******************************************************************************* -* -* _ScbMemFaultBfarReset - invalid the value in BFAR -* -* This routine clears/invalidates the Bus Fault Address Register. -* It should be done after processing a bus fault. -* -* RETURNS: N/A -*/ +/** + * + * _ScbMemFaultBfarReset - invalid the value in BFAR + * + * This routine clears/invalidates the Bus Fault Address Register. + * It should be done after processing a bus fault. + * + * RETURNS: N/A + */ static inline void _ScbBusFaultBfarReset(void) { __scs.scb.cfsr.byte.bfsr.bit.bfarvalid = 0; } -/******************************************************************************* -* -* _ScbBusFaultAllFaultsReset - clear all bus faults (BFSR register) -* -* CFSR/BFSR register is a 'write-one-to-clear' (W1C) register. -* -* RETURNS: N/A -*/ +/** + * + * _ScbBusFaultAllFaultsReset - clear all bus faults (BFSR register) + * + * CFSR/BFSR register is a 'write-one-to-clear' (W1C) register. + * + * RETURNS: N/A + */ static inline void _ScbBusFaultAllFaultsReset(void) { __scs.scb.cfsr.byte.bfsr.val = 0xfe; } -/******************************************************************************* -* -* _ScbBusFaultIsStacking - find out if a bus fault is a stacking fault -* -* This routine determines if a bus fault is a stacking fault. -* This may occurs upon exception entry. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbBusFaultIsStacking - find out if a bus fault is a stacking fault + * + * This routine determines if a bus fault is a stacking fault. + * This may occurs upon exception entry. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbBusFaultIsStacking(void) { return !!__scs.scb.cfsr.byte.bfsr.bit.stkerr; } -/******************************************************************************* -* -* _ScbBusFaultIsUnstacking - find out if a bus fault is an unstacking fault -* -* This routine determines if a bus fault is an unstacking fault. -* This may occur upon exception exit. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbBusFaultIsUnstacking - find out if a bus fault is an unstacking fault + * + * This routine determines if a bus fault is an unstacking fault. + * This may occur upon exception exit. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbBusFaultIsUnstacking(void) { return !!__scs.scb.cfsr.byte.bfsr.bit.unstkerr; } -/******************************************************************************* -* -* _ScbBusFaultIsImprecise - find out if a bus fault is an imprecise error -* -* This routine determines if a bus fault is an imprecise error. 
-* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbBusFaultIsImprecise - find out if a bus fault is an imprecise error + * + * This routine determines if a bus fault is an imprecise error. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbBusFaultIsImprecise(void) { return !!__scs.scb.cfsr.byte.bfsr.bit.impreciserr; } -/******************************************************************************* -* -* _ScbBusFaultIsPrecise - find out if a bus fault is an precise error -* -* Read the BFAR register via _ScbBusFaultAddrGet() if this routine returns 1, -* as it will contain the faulting address. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbBusFaultIsPrecise - find out if a bus fault is an precise error + * + * Read the BFAR register via _ScbBusFaultAddrGet() if this routine returns 1, + * as it will contain the faulting address. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbBusFaultIsPrecise(void) { return !!__scs.scb.cfsr.byte.bfsr.bit.preciserr; } -/******************************************************************************* -* -* _ScbBusFaultIsInstrBusErr - find out if a bus fault is an instruction bus -* error -* -* This routine determines if a bus fault is an instruction bus error. -* It is signalled only if the instruction is issued. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbBusFaultIsInstrBusErr - find out if a bus fault is an instruction bus + * error + * + * This routine determines if a bus fault is an instruction bus error. + * It is signalled only if the instruction is issued. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbBusFaultIsInstrBusErr(void) { return !!__scs.scb.cfsr.byte.bfsr.bit.ibuserr; } -/******************************************************************************* -* -* _ScbBusFaultAddrGet - get the faulting address on a precise bus fault -* -* This routine returns the faulting address for a precise bus fault. -* -* RETURNS: the faulting address -*/ +/** + * + * _ScbBusFaultAddrGet - get the faulting address on a precise bus fault + * + * This routine returns the faulting address for a precise bus fault. + * + * RETURNS: the faulting address + */ static inline uint32_t _ScbBusFaultAddrGet(void) { return __scs.scb.bfar; } -/******************************************************************************* -* -* _ScbIsUsageFault - find out if a hard fault is a usage fault -* -* This routine determines if a hard fault is a usage fault. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbIsUsageFault - find out if a hard fault is a usage fault + * + * This routine determines if a hard fault is a usage fault. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbIsUsageFault(void) { return !!__scs.scb.cfsr.byte.ufsr.val; } -/******************************************************************************* -* -* _ScbUsageFaultIsDivByZero - find out if a usage fault is a 'divide by zero' -* fault -* -* This routine determines if a usage fault is a 'divde by zero' fault. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbUsageFaultIsDivByZero - find out if a usage fault is a 'divide by zero' + * fault + * + * This routine determines if a usage fault is a 'divde by zero' fault. 
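The bus fault accessors follow the same pattern: a precise fault carries a valid BFAR, an imprecise one does not. A minimal sketch under the same assumptions (hypothetical helper name, SCB definitions in scope):

static void dump_bus_fault(void)		/* hypothetical helper */
{
	if (!_ScbIsBusFault()) {
		return;
	}

	if (_ScbBusFaultIsPrecise() && _ScbBusFaultIsBfarValid()) {
		uint32_t bad_addr = _ScbBusFaultAddrGet();

		(void)bad_addr;			/* precise fault: BFAR holds the address */
		_ScbBusFaultBfarReset();
	} else if (_ScbBusFaultIsImprecise()) {
		/* imprecise fault: no usable faulting address */
	}

	_ScbBusFaultAllFaultsReset();		/* BFSR is W1C */
}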
+ * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbUsageFaultIsDivByZero(void) { return !!__scs.scb.cfsr.byte.ufsr.bit.divbyzero; } -/******************************************************************************* -* -* _ScbUsageFaultIsUnaligned - find out if a usage fault is a unaligned access -* error -* -* This routine determines if a usage fault is an unaligned access error. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbUsageFaultIsUnaligned - find out if a usage fault is an unaligned access + * error + * + * This routine determines if a usage fault is an unaligned access error. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbUsageFaultIsUnaligned(void) { return !!__scs.scb.cfsr.byte.ufsr.bit.unaligned; } -/******************************************************************************* -* -* _ScbUsageFaultIsNoCp - find out if a usage fault is a coprocessor access -* error -* -* This routine determines if a usage fault is caused by a coprocessor access. -* This happens if the coprocessor is either absent or disabled. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbUsageFaultIsNoCp - find out if a usage fault is a coprocessor access + * error + * + * This routine determines if a usage fault is caused by a coprocessor access. + * This happens if the coprocessor is either absent or disabled. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbUsageFaultIsNoCp(void) { return !!__scs.scb.cfsr.byte.ufsr.bit.nocp; } -/******************************************************************************* -* -* _ScbUsageFaultIsInvalidPcLoad - find out if a usage fault is a invalid PC -* load error -* -* Happens if the the instruction address on an exception return is not -* halfword-aligned. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbUsageFaultIsInvalidPcLoad - find out if a usage fault is an invalid PC + * load error + * + * Happens if the instruction address on an exception return is not + * halfword-aligned. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbUsageFaultIsInvalidPcLoad(void) { return !!__scs.scb.cfsr.byte.ufsr.bit.invpc; } -/******************************************************************************* -* -* _ScbUsageFaultIsInvalidState - find out if a usage fault is a invalid state -* error -* -* Happens if the the instruction address loaded in the PC via a branch, LDR or -* POP, or if the instruction address installed in a exception vector, does not -* have bit 0 set, ie., is not halfword-aligned. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbUsageFaultIsInvalidState - find out if a usage fault is an invalid state + * error + * + * Happens if the instruction address loaded in the PC via a branch, LDR or + * POP, or if the instruction address installed in an exception vector, does not + * have bit 0 set, i.e., is not halfword-aligned. + * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbUsageFaultIsInvalidState(void) { return !!__scs.scb.cfsr.byte.ufsr.bit.invstate; } -/******************************************************************************* -* -* _ScbUsageFaultIsUndefinedInstr - find out if a usage fault is a undefined -* instruction error -* -* The processor tried to execute an invalid opcode. -* -* RETURNS: 1 if so, 0 otherwise -*/ +/** + * + * _ScbUsageFaultIsUndefinedInstr - find out if a usage fault is an undefined + * instruction error + * + * The processor tried to execute an invalid opcode.
+ * + * RETURNS: 1 if so, 0 otherwise + */ static inline int _ScbUsageFaultIsUndefinedInstr(void) { return !!__scs.scb.cfsr.byte.ufsr.bit.undefinstr; } -/******************************************************************************* -* -* _ScbUsageFaultDivByZeroReset - clear the 'division by zero' fault -* -* CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. -* -* RETURNS: N/A -*/ +/** + * + * _ScbUsageFaultDivByZeroReset - clear the 'division by zero' fault + * + * CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. + * + * RETURNS: N/A + */ static inline void _ScbUsageFaultDivByZeroReset(void) { __scs.scb.cfsr.byte.ufsr.bit.divbyzero = 1; } -/******************************************************************************* -* -* _ScbUsageFaultUnalignedReset - clear the 'unaligned access' fault -* -* CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. -* -* RETURNS: N/A -*/ +/** + * + * _ScbUsageFaultUnalignedReset - clear the 'unaligned access' fault + * + * CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. + * + * RETURNS: N/A + */ static inline void _ScbUsageFaultUnalignedReset(void) { __scs.scb.cfsr.byte.ufsr.bit.unaligned = 1; } -/******************************************************************************* -* -* _ScbUsageFaultNoCpReset - clear the 'no co-processor' fault -* -* CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. -* -* RETURNS: N/A -*/ +/** + * + * _ScbUsageFaultNoCpReset - clear the 'no co-processor' fault + * + * CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. + * + * RETURNS: N/A + */ static inline void _ScbUsageFaultNoCpReset(void) { __scs.scb.cfsr.byte.ufsr.bit.nocp = 1; } -/******************************************************************************* -* -* _ScbUsageFaultInvalidPcLoadReset - clear the 'invalid PC load ' fault -* -* CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. -* -* RETURNS: N/A -*/ +/** + * + * _ScbUsageFaultInvalidPcLoadReset - clear the 'invalid PC load ' fault + * + * CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. + * + * RETURNS: N/A + */ static inline void _ScbUsageFaultInvalidPcLoadReset(void) { __scs.scb.cfsr.byte.ufsr.bit.invpc = 1; } -/******************************************************************************* -* -* _ScbUsageFaultInvalidStateReset - clear the 'invalid state' fault -* -* CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. -* -* RETURNS: N/A -*/ +/** + * + * _ScbUsageFaultInvalidStateReset - clear the 'invalid state' fault + * + * CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. + * + * RETURNS: N/A + */ static inline void _ScbUsageFaultInvalidStateReset(void) { __scs.scb.cfsr.byte.ufsr.bit.invstate = 1; } -/******************************************************************************* -* -* _ScbUsageFaultUndefinedInstrReset - clear the 'undefined instruction' fault -* -* CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. -* -* RETURNS: N/A -*/ +/** + * + * _ScbUsageFaultUndefinedInstrReset - clear the 'undefined instruction' fault + * + * CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. + * + * RETURNS: N/A + */ static inline void _ScbUsageFaultUndefinedInstrReset(void) { __scs.scb.cfsr.byte.ufsr.bit.undefinstr = 1; } -/******************************************************************************* -* -* _ScbUsageFaultAllFaultsReset - clear all usage faults (UFSR register) -* -* CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. 
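The usage fault checks and their per-bit W1C resets above pair up one-to-one. A minimal sketch with a hypothetical helper name:

static void dump_usage_fault(void)		/* hypothetical helper */
{
	if (!_ScbIsUsageFault()) {
		return;
	}

	if (_ScbUsageFaultIsDivByZero()) {
		_ScbUsageFaultDivByZeroReset();
	}
	if (_ScbUsageFaultIsUnaligned()) {
		_ScbUsageFaultUnalignedReset();
	}
	if (_ScbUsageFaultIsUndefinedInstr()) {
		_ScbUsageFaultUndefinedInstrReset();
	}
	/* the remaining UFSR bits (nocp, invpc, invstate) are handled the same way */
}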
-* -* RETURNS: N/A -*/ +/** + * + * _ScbUsageFaultAllFaultsReset - clear all usage faults (UFSR register) + * + * CFSR/UFSR register is a 'write-one-to-clear' (W1C) register. + * + * RETURNS: N/A + */ static inline void _ScbUsageFaultAllFaultsReset(void) { diff --git a/include/arch/arm/CortexM/scripts/linker.cmd b/include/arch/arm/CortexM/scripts/linker.cmd index c52453c12f5..80bd9ca95a3 100644 --- a/include/arch/arm/CortexM/scripts/linker.cmd +++ b/include/arch/arm/CortexM/scripts/linker.cmd @@ -33,7 +33,7 @@ /* DESCRIPTION Linker script for the Cortex-M3 BSPs. -*/ + */ #define _LINKER #define _ASMLANGUAGE diff --git a/include/arch/arm/CortexM/scs.h b/include/arch/arm/CortexM/scs.h index e2551385db4..fb50b4ad31f 100644 --- a/include/arch/arm/CortexM/scs.h +++ b/include/arch/arm/CortexM/scs.h @@ -66,7 +66,7 @@ registers is the way to implement it. Note that the 'stir' register, even if not in the 'nvic' part of the SCB, is still considered part of the NVIC and an API for it is provided in nvic.h. -*/ + */ #ifndef _SCS__H_ #define _SCS__H_ @@ -431,110 +431,110 @@ extern volatile struct __scs __scs; /* API */ -/******************************************************************************* -* -* _ScsNumIrqGet - obtain the number of interrupt lines on the target -* -* RETURNS: the number of interrupts -*/ +/** + * + * _ScsNumIrqGet - obtain the number of interrupt lines on the target + * + * RETURNS: the number of interrupts + */ static inline int _ScsNumIrqGet(void) { return 32 * (__scs.ictr.bit.intlinesnum + 1); } -/******************************************************************************* -* -* _ScsIntMultiCycleInstDisable - disable load/store multiple instructions -* -* From the ARM manuals: -* -* LDM/STM instructions increase the interrupt latency of the processor because -* they must complete before the processor can stack the current state and invoke -* the interrupt handler. -* -* RETURNS: N/A -*/ +/** + * + * _ScsIntMultiCycleInstDisable - disable load/store multiple instructions + * + * From the ARM manuals: + * + * LDM/STM instructions increase the interrupt latency of the processor because + * they must complete before the processor can stack the current state and invoke + * the interrupt handler. + * + * RETURNS: N/A + */ static inline void _ScsIntMultiCycleInstDisable(void) { __scs.actlr.bit.dismcycint = 1; } -/******************************************************************************* -* -* _ScsIntMultiCycleInstEnable - enable load/store multiple instructions -* -* See _ScsIntMultiCycleInstDisable(). -* -* RETURNS: N/A -*/ +/** + * + * _ScsIntMultiCycleInstEnable - enable load/store multiple instructions + * + * See _ScsIntMultiCycleInstDisable(). + * + * RETURNS: N/A + */ static inline void _ScsIntMultiCycleInstEnable(void) { __scs.actlr.bit.dismcycint = 0; } -/******************************************************************************* -* -* _ScsWriteBufDisable - disable write buffer -* -* From the ARM manuals: -* -* Disables write buffer use during default memory map accesses. This causes all -* BusFaults to be precise BusFaults but decreases performance because any store -* to memory must complete before the processor can execute the next instruction. -* -* RETURNS: N/A -*/ +/** + * + * _ScsWriteBufDisable - disable write buffer + * + * From the ARM manuals: + * + * Disables write buffer use during default memory map accesses. 
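As a usage note, the LDM/STM knob above is typically toggled around a latency-sensitive region and restored afterwards. A minimal sketch; the helper name and the notion of a critical region are illustrative:

static void run_low_latency_region(void)	/* hypothetical helper */
{
	_ScsIntMultiCycleInstDisable();	/* bound interrupt latency: no LDM/STM to wait out */

	/* ... latency-critical work ... */

	_ScsIntMultiCycleInstEnable();	/* restore the default for throughput */
}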
This causes all + * BusFaults to be precise BusFaults but decreases performance because any store + * to memory must complete before the processor can execute the next instruction. + * + * RETURNS: N/A + */ static inline void _ScsWriteBufDisable(void) { __scs.actlr.bit.disdefwbuf = 1; } -/******************************************************************************* -* -* _ScsWriteBufEnable - enable write buffer -* -* See _ScsWriteBufDisable(). -* -* RETURNS: N/A -*/ +/** + * + * _ScsWriteBufEnable - enable write buffer + * + * See _ScsWriteBufDisable(). + * + * RETURNS: N/A + */ static inline void _ScsWriteBufEnable(void) { __scs.actlr.bit.disdefwbuf = 0; } -/******************************************************************************* -* -* _ScsFoldItDisable - disable IT folding -* -* From the ARM manuals: -* -* In some situations, the processor can start executing the first instruction -* in an IT block while it is still executing the IT instruction. This behavior -* is called IT folding, and improves performance, However, IT folding can cause -* jitter in looping. If a task must avoid jitter, set the DISFOLD bit to 1 -* before executing the task, to disable IT folding. -* -* RETURNS: N/A -*/ +/** + * + * _ScsFoldItDisable - disable IT folding + * + * From the ARM manuals: + * + * In some situations, the processor can start executing the first instruction + * in an IT block while it is still executing the IT instruction. This behavior + * is called IT folding, and improves performance, However, IT folding can cause + * jitter in looping. If a task must avoid jitter, set the DISFOLD bit to 1 + * before executing the task, to disable IT folding. + * + * RETURNS: N/A + */ static inline void _ScsFoldItDisable(void) { __scs.actlr.bit.disfold = 1; } -/******************************************************************************* -* -* _ScsFoldItEnable - enable IT folding -* -* See _ScsFoldItDisable(). -* -* RETURNS: N/A -*/ +/** + * + * _ScsFoldItEnable - enable IT folding + * + * See _ScsFoldItDisable(). + * + * RETURNS: N/A + */ static inline void _ScsFoldItEnable(void) { diff --git a/include/arch/arm/arch.h b/include/arch/arm/arch.h index a47559c7682..cfc0aee1a33 100644 --- a/include/arch/arm/arch.h +++ b/include/arch/arm/arch.h @@ -35,7 +35,7 @@ DESCRIPTION This header contains the ARM specific nanokernel interface. It is included by the nanokernel interface architecture-abstraction header (nanokernel/cpu.h) -*/ + */ #ifndef _ARM_ARCH__H_ #define _ARM_ARCH__H_ diff --git a/include/arch/x86/arch.h b/include/arch/x86/arch.h index 26694b8816a..508f4a568d1 100644 --- a/include/arch/x86/arch.h +++ b/include/arch/x86/arch.h @@ -34,7 +34,7 @@ DESCRIPTION This header contains the IA-32 specific nanokernel interface. It is included by the generic nanokernel interface header (nanokernel.h) -*/ + */ #ifndef _ARCH_IFACE_H #define _ARCH_IFACE_H @@ -105,26 +105,26 @@ typedef struct s_isrList { unsigned int dpl; /* Privilege level associated with ISR/stub */ } ISR_LIST; -/******************************************************************************* -* -* NANO_CPU_INT_REGISTER - connect a routine to an interrupt vector -* -* This macro "connects" the specified routine, , to the specified interrupt -* vector, using the descriptor privilege level . On the IA-32 -* architecture, an interrupt vector is a value from 0 to 255. This macro -* populates the special intList section with the address of the routine, the -* vector number and the descriptor privilege level. 
The genIdt tool then picks -* up this information and generates an actual IDT entry with this information -* properly encoded. This macro replaces the _IntVecSet () routine in static -* interrupt systems. -* -* The argument specifies the privilege level for the interrupt-gate -* descriptor; (hardware) interrupts and exceptions should specify a level of 0, -* whereas handlers for user-mode software generated interrupts should specify 3. -* -* RETURNS: N/A -* -*/ +/** + * + * NANO_CPU_INT_REGISTER - connect a routine to an interrupt vector + * + * This macro "connects" the specified routine, , to the specified interrupt + * vector, using the descriptor privilege level . On the IA-32 + * architecture, an interrupt vector is a value from 0 to 255. This macro + * populates the special intList section with the address of the routine, the + * vector number and the descriptor privilege level. The genIdt tool then picks + * up this information and generates an actual IDT entry with this information + * properly encoded. This macro replaces the _IntVecSet () routine in static + * interrupt systems. + * + * The argument specifies the privilege level for the interrupt-gate + * descriptor; (hardware) interrupts and exceptions should specify a level of 0, + * whereas handlers for user-mode software generated interrupts should specify 3. + * + * RETURNS: N/A + * + */ #define NANO_CPU_INT_REGISTER(r, v, d) \ ISR_LIST __attribute__((section(".intList"))) MK_ISR_NAME(r) = {&r, v, d} @@ -138,7 +138,7 @@ typedef struct s_isrList { _NODATA_SECTION(.intStubSect) NANO_INT_STUB(s) -/******************************************************************************* +/** * * IRQ_CONNECT_STATIC - connect a routine to interrupt number * @@ -155,7 +155,7 @@ typedef struct s_isrList { NANO_CPU_INT_REGISTER(_##device##_##isr##_stub, INT_VEC_IRQ0 + (irq), priority) -/******************************************************************************* +/** * * IRQ_CONFIG - configure interrupt for the device * @@ -274,36 +274,36 @@ void _int_latency_start(void); void _int_latency_stop(void); #endif -/******************************************************************************* -* -* irq_lock_inline - disable all interrupts on the CPU (inline) -* -* This routine disables interrupts. It can be called from either interrupt, -* task or fiber level. This routine returns an architecture-dependent -* lock-out key representing the "interrupt disable state" prior to the call; -* this key can be passed to irq_unlock_inline() to re-enable interrupts. -* -* The lock-out key should only be used as the argument to the -* irq_unlock_inline() API. It should never be used to manually re-enable -* interrupts or to inspect or manipulate the contents of the source register. -* -* WARNINGS -* Invoking a kernel routine with interrupts locked may result in -* interrupts being re-enabled for an unspecified period of time. If the -* called routine blocks, interrupts will be re-enabled while another -* context executes, or while the system is idle. -* -* The "interrupt disable state" is an attribute of a context. Thus, if a -* fiber or task disables interrupts and subsequently invokes a kernel -* routine that causes the calling context to block, the interrupt -* disable state will be restored when the context is later rescheduled -* for execution. -* -* RETURNS: An architecture-dependent lock-out key representing the -* "interrupt disable state" prior to the call. 
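Concretely, the registration macro above just drops an ISR_LIST entry into the .intList section for the genIdt tool to consume. A minimal usage sketch; the stub name and vector number are made up, and dpl 0 is used because this is a hardware interrupt:

/* Illustrative only: my_device_stub and vector 33 are hypothetical. */
extern void my_device_stub(void);

NANO_CPU_INT_REGISTER(my_device_stub, 33, 0);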
-* -* \NOMANUAL -*/ +/** + * + * irq_lock_inline - disable all interrupts on the CPU (inline) + * + * This routine disables interrupts. It can be called from either interrupt, + * task or fiber level. This routine returns an architecture-dependent + * lock-out key representing the "interrupt disable state" prior to the call; + * this key can be passed to irq_unlock_inline() to re-enable interrupts. + * + * The lock-out key should only be used as the argument to the + * irq_unlock_inline() API. It should never be used to manually re-enable + * interrupts or to inspect or manipulate the contents of the source register. + * + * WARNINGS + * Invoking a kernel routine with interrupts locked may result in + * interrupts being re-enabled for an unspecified period of time. If the + * called routine blocks, interrupts will be re-enabled while another + * context executes, or while the system is idle. + * + * The "interrupt disable state" is an attribute of a context. Thus, if a + * fiber or task disables interrupts and subsequently invokes a kernel + * routine that causes the calling context to block, the interrupt + * disable state will be restored when the context is later rescheduled + * for execution. + * + * RETURNS: An architecture-dependent lock-out key representing the + * "interrupt disable state" prior to the call. + * + * \NOMANUAL + */ static inline __attribute__((always_inline)) unsigned int irq_lock_inline(void) @@ -318,20 +318,20 @@ static inline __attribute__((always_inline)) } -/******************************************************************************* -* -* irq_unlock_inline - enable all interrupts on the CPU (inline) -* -* This routine re-enables interrupts on the CPU. The parameter -* is an architecture-dependent lock-out key that is returned by a previous -* invocation of irq_lock_inline(). -* -* This routine can be called from either interrupt, task or fiber level. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * irq_unlock_inline - enable all interrupts on the CPU (inline) + * + * This routine re-enables interrupts on the CPU. The parameter + * is an architecture-dependent lock-out key that is returned by a previous + * invocation of irq_lock_inline(). + * + * This routine can be called from either interrupt, task or fiber level. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline __attribute__((always_inline)) void irq_unlock_inline(unsigned int key) diff --git a/include/arch/x86/asm_inline_gcc.h b/include/arch/x86/asm_inline_gcc.h index 5a290bcb138..ee475c3cb47 100644 --- a/include/arch/x86/asm_inline_gcc.h +++ b/include/arch/x86/asm_inline_gcc.h @@ -44,36 +44,36 @@ #include #include -/******************************************************************************* -* -* _do_irq_lock_inline - disable all interrupts on the CPU (inline) -* -* This routine disables interrupts. It can be called from either interrupt, -* task or fiber level. This routine returns an architecture-dependent -* lock-out key representing the "interrupt disable state" prior to the call; -* this key can be passed to irq_unlock_inline() to re-enable interrupts. -* -* The lock-out key should only be used as the argument to the -* irq_unlock_inline() API. It should never be used to manually re-enable -* interrupts or to inspect or manipulate the contents of the source register. -* -* WARNINGS -* Invoking a kernel routine with interrupts locked may result in -* interrupts being re-enabled for an unspecified period of time. 
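The inline lock/unlock pair above is used in the usual save/restore pattern: keep the key opaque, keep the locked region short and non-blocking, and hand the key back only to irq_unlock_inline(). A minimal sketch; the shared counter is illustrative:

static unsigned int shared_counter;		/* illustrative shared state */

static void bump_shared_counter(void)
{
	unsigned int key = irq_lock_inline();	/* saves the "interrupt disable state" */

	shared_counter++;			/* short, non-blocking critical section */

	irq_unlock_inline(key);			/* restore; never re-enable manually */
}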
If the -* called routine blocks, interrupts will be re-enabled while another -* context executes, or while the system is idle. -* -* The "interrupt disable state" is an attribute of a context. Thus, if a -* fiber or task disables interrupts and subsequently invokes a kernel -* routine that causes the calling context to block, the interrupt -* disable state will be restored when the context is later rescheduled -* for execution. -* -* RETURNS: An architecture-dependent lock-out key representing the -* "interrupt disable state" prior to the call. -* -* \NOMANUAL -*/ +/** + * + * _do_irq_lock_inline - disable all interrupts on the CPU (inline) + * + * This routine disables interrupts. It can be called from either interrupt, + * task or fiber level. This routine returns an architecture-dependent + * lock-out key representing the "interrupt disable state" prior to the call; + * this key can be passed to irq_unlock_inline() to re-enable interrupts. + * + * The lock-out key should only be used as the argument to the + * irq_unlock_inline() API. It should never be used to manually re-enable + * interrupts or to inspect or manipulate the contents of the source register. + * + * WARNINGS + * Invoking a kernel routine with interrupts locked may result in + * interrupts being re-enabled for an unspecified period of time. If the + * called routine blocks, interrupts will be re-enabled while another + * context executes, or while the system is idle. + * + * The "interrupt disable state" is an attribute of a context. Thus, if a + * fiber or task disables interrupts and subsequently invokes a kernel + * routine that causes the calling context to block, the interrupt + * disable state will be restored when the context is later rescheduled + * for execution. + * + * RETURNS: An architecture-dependent lock-out key representing the + * "interrupt disable state" prior to the call. + * + * \NOMANUAL + */ static inline __attribute__((always_inline)) unsigned int _do_irq_lock_inline(void) @@ -93,17 +93,17 @@ static inline __attribute__((always_inline)) } -/******************************************************************************* -* -* _do_irq_unlock_inline - enable all interrupts on the CPU (inline) -* -* This routine can be called from either interrupt, task or fiber level. -* Invoked by kernel or by irq_unlock_inline() -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _do_irq_unlock_inline - enable all interrupts on the CPU (inline) + * + * This routine can be called from either interrupt, task or fiber level. + * Invoked by kernel or by irq_unlock_inline() + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline __attribute__((always_inline)) void _do_irq_unlock_inline(void) @@ -115,24 +115,24 @@ static inline __attribute__((always_inline)) } -/******************************************************************************* -* -* find_first_set_inline - find first set bit searching from the LSB (inline) -* -* This routine finds the first bit set in the argument passed it and -* returns the index of that bit. Bits are numbered starting -* at 1 from the least significant bit to 32 for the most significant bit. -* A return value of zero indicates that the value passed is zero. -* -* RETURNS: bit position from 1 to 32, or 0 if the argument is zero. -* -* INTERNAL -* For Intel64 (x86_64) architectures, the 'cmovzl' can be removed -* and leverage the fact that the 'bsfl' doesn't modify the destination operand -* when the source operand is zero. 
The "bitpos" variable can be preloaded -* into the destination register, and given the unconditional ++bitpos that -* is performed after the 'cmovzl', the correct results are yielded. -*/ +/** + * + * find_first_set_inline - find first set bit searching from the LSB (inline) + * + * This routine finds the first bit set in the argument passed it and + * returns the index of that bit. Bits are numbered starting + * at 1 from the least significant bit to 32 for the most significant bit. + * A return value of zero indicates that the value passed is zero. + * + * RETURNS: bit position from 1 to 32, or 0 if the argument is zero. + * + * INTERNAL + * For Intel64 (x86_64) architectures, the 'cmovzl' can be removed + * and leverage the fact that the 'bsfl' doesn't modify the destination operand + * when the source operand is zero. The "bitpos" variable can be preloaded + * into the destination register, and given the unconditional ++bitpos that + * is performed after the 'cmovzl', the correct results are yielded. + */ static inline __attribute__((always_inline)) unsigned int find_first_set_inline (unsigned int op) @@ -166,24 +166,24 @@ static inline __attribute__((always_inline)) } -/******************************************************************************* -* -* find_last_set_inline - find first set bit searching from the MSB (inline) -* -* This routine finds the first bit set in the argument passed it and -* returns the index of that bit. Bits are numbered starting -* at 1 from the least significant bit to 32 for the most significant bit. -* A return value of zero indicates that the value passed is zero. -* -* RETURNS: bit position from 1 to 32, or 0 if the argument is zero. -* -* INTERNAL -* For Intel64 (x86_64) architectures, the 'cmovzl' can be removed -* and leverage the fact that the 'bsfl' doesn't modify the destination operand -* when the source operand is zero. The "bitpos" variable can be preloaded -* into the destination register, and given the unconditional ++bitpos that -* is performed after the 'cmovzl', the correct results are yielded. -*/ +/** + * + * find_last_set_inline - find first set bit searching from the MSB (inline) + * + * This routine finds the first bit set in the argument passed it and + * returns the index of that bit. Bits are numbered starting + * at 1 from the least significant bit to 32 for the most significant bit. + * A return value of zero indicates that the value passed is zero. + * + * RETURNS: bit position from 1 to 32, or 0 if the argument is zero. + * + * INTERNAL + * For Intel64 (x86_64) architectures, the 'cmovzl' can be removed + * and leverage the fact that the 'bsfl' doesn't modify the destination operand + * when the source operand is zero. The "bitpos" variable can be preloaded + * into the destination register, and given the unconditional ++bitpos that + * is performed after the 'cmovzl', the correct results are yielded. 
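Note the 1-based convention documented above: bit 0 of the argument is reported as position 1, and a zero argument yields 0. A small sketch exercising both routines with an illustrative value:

static void ffs_examples(void)			/* hypothetical helper */
{
	unsigned int lsb = find_first_set_inline(0x00000110);	/* bit 4 set -> returns 5 */
	unsigned int msb = find_last_set_inline(0x00000110);	/* bit 8 set -> returns 9 */
	unsigned int none = find_first_set_inline(0);		/* no bit set -> returns 0 */

	(void)lsb;
	(void)msb;
	(void)none;
}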
+ */ static inline inline __attribute__((always_inline)) unsigned int find_last_set_inline (unsigned int op) @@ -216,10 +216,10 @@ static inline inline __attribute__((always_inline)) } -/******************************************************** -* -* _NanoTscRead - read timestamp register ensuring serialization -*/ +/** + * + * _NanoTscRead - read timestamp register ensuring serialization + */ static inline uint64_t _NanoTscRead(void) { @@ -250,7 +250,7 @@ static inline uint64_t _NanoTscRead(void) } -/******************************************************************************* +/** * * _do_read_cpu_timestamp - get a 32 bit CPU timestamp counter * @@ -268,17 +268,17 @@ static inline inline __attribute__((always_inline)) } -/******************************************************************************* -* -* sys_out8 - output a byte to an IA-32 I/O port -* -* This function issues the 'out' instruction to write a byte to the specified -* I/O port. -* -* RETURNS: N/A -* -* NOMANUAL -*/ +/** + * + * sys_out8 - output a byte to an IA-32 I/O port + * + * This function issues the 'out' instruction to write a byte to the specified + * I/O port. + * + * RETURNS: N/A + * + * NOMANUAL + */ static inline inline __attribute__((always_inline)) void sys_out8(unsigned char data, unsigned int port) @@ -287,17 +287,17 @@ static inline inline __attribute__((always_inline)) } -/******************************************************************************* -* -* sys_in8 - input a byte from an IA-32 I/O port -* -* This function issues the 'in' instruction to read a byte from the specified -* I/O port. -* -* RETURNS: the byte read from the specified I/O port -* -* NOMANUAL -*/ +/** + * + * sys_in8 - input a byte from an IA-32 I/O port + * + * This function issues the 'in' instruction to read a byte from the specified + * I/O port. + * + * RETURNS: the byte read from the specified I/O port + * + * NOMANUAL + */ static inline inline __attribute__((always_inline)) unsigned char sys_in8(unsigned int port) @@ -309,17 +309,17 @@ static inline inline __attribute__((always_inline)) } -/******************************************************************************* -* -* sys_out16 - output a word to an IA-32 I/O port -* -* This function issues the 'out' instruction to write a word to the -* specified I/O port. -* -* RETURNS: N/A -* -* NOMANUAL -*/ +/** + * + * sys_out16 - output a word to an IA-32 I/O port + * + * This function issues the 'out' instruction to write a word to the + * specified I/O port. + * + * RETURNS: N/A + * + * NOMANUAL + */ static inline inline __attribute__((always_inline)) void sys_out16(unsigned short data, unsigned int port) @@ -328,17 +328,17 @@ static inline inline __attribute__((always_inline)) } -/******************************************************************************* -* -* sys_in16 - input a word from an IA-32 I/O port -* -* This function issues the 'in' instruction to read a word from the -* specified I/O port. -* -* RETURNS: the word read from the specified I/O port -* -* NOMANUAL -*/ +/** + * + * sys_in16 - input a word from an IA-32 I/O port + * + * This function issues the 'in' instruction to read a word from the + * specified I/O port. 
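Each of the port I/O helpers above maps onto a single 'out' or 'in' instruction. A minimal sketch that writes a byte to a device register and reads it back; the port number 0x3f8 and the probe value are illustrative, not taken from this tree:

static unsigned char probe_port(void)		/* hypothetical helper */
{
	unsigned int port = 0x3f8;		/* illustrative I/O port address */

	sys_out8(0xaa, port);			/* one 'out' instruction */
	return sys_in8(port);			/* one 'in' instruction */
}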
+ * + * RETURNS: the word read from the specified I/O port + * + * NOMANUAL + */ static inline inline __attribute__((always_inline)) unsigned short sys_in16(unsigned int port) @@ -350,17 +350,17 @@ static inline inline __attribute__((always_inline)) } -/******************************************************************************* -* -* sys_out32 - output a long word to an IA-32 I/O port -* -* This function issues the 'out' instruction to write a long word to the -* specified I/O port. -* -* RETURNS: N/A -* -* NOMANUAL -*/ +/** + * + * sys_out32 - output a long word to an IA-32 I/O port + * + * This function issues the 'out' instruction to write a long word to the + * specified I/O port. + * + * RETURNS: N/A + * + * NOMANUAL + */ static inline inline __attribute__((always_inline)) void sys_out32(unsigned int data, unsigned int port) @@ -369,17 +369,17 @@ static inline inline __attribute__((always_inline)) } -/******************************************************************************* -* -* sys_in32 - input a long word from an IA-32 I/O port -* -* This function issues the 'in' instruction to read a long word from the -* specified I/O port. -* -* RETURNS: the long read from the specified I/O port -* -* NOMANUAL -*/ +/** + * + * sys_in32 - input a long word from an IA-32 I/O port + * + * This function issues the 'in' instruction to read a long word from the + * specified I/O port. + * + * RETURNS: the long read from the specified I/O port + * + * NOMANUAL + */ static inline inline __attribute__((always_inline)) unsigned long sys_in32(unsigned int port) diff --git a/include/arch/x86/linker-common-sections.h b/include/arch/x86/linker-common-sections.h index e4c3d87e235..6b7f30d35f1 100644 --- a/include/arch/x86/linker-common-sections.h +++ b/include/arch/x86/linker-common-sections.h @@ -59,7 +59,7 @@ undefined symbol error. Please do not change the order of the section as the nanokernel expects this order when programming the MMU. -*/ + */ #define _LINKER #define _ASMLANGUAGE /* Needed to include mmustructs.h */ diff --git a/include/arch/x86/linker-defs-arch.h b/include/arch/x86/linker-defs-arch.h index d80fdd3e65c..4ea2d09cde9 100644 --- a/include/arch/x86/linker-defs-arch.h +++ b/include/arch/x86/linker-defs-arch.h @@ -33,7 +33,7 @@ /* DESCRIPTION Commonly used macros and defines for linker script. -*/ + */ #ifndef _LINKERDEFSARCH_H #define _LINKERDEFSARCH_H diff --git a/include/bluetooth/conn.h b/include/bluetooth/conn.h index 8a617f63ed4..a55219b9321 100644 --- a/include/bluetooth/conn.h +++ b/include/bluetooth/conn.h @@ -54,7 +54,7 @@ struct bt_conn *bt_conn_get(struct bt_conn *conn); * Decrement the reference count of a connection object. * * @param conn Connection object. -*/ + */ void bt_conn_put(struct bt_conn *conn); /** @brief Look up an existing connection by address. diff --git a/include/drivers/k20_mcg.h b/include/drivers/k20_mcg.h index 22fc61270ae..c389196cbdd 100644 --- a/include/drivers/k20_mcg.h +++ b/include/drivers/k20_mcg.h @@ -34,7 +34,7 @@ DESCRIPTION This module defines the Multipurpose Clock Generator (MCG) and Oscillator (OSC) registers for the K20 Family of microprocessors. 
-*/ + */ #ifndef _K20MCG_H_ #define _K20MCG_H_ diff --git a/include/drivers/k20_pcr.h b/include/drivers/k20_pcr.h index 94b3be2454c..d9d78bc544e 100644 --- a/include/drivers/k20_pcr.h +++ b/include/drivers/k20_pcr.h @@ -34,7 +34,7 @@ DESCRIPTION This module defines the PCR (Port/Pin Control/Configuration Registers) for the K20 Family of microprocessors -*/ + */ #ifndef _K20PCR_H_ #define _K20PCR_H_ diff --git a/include/drivers/k20_sim.h b/include/drivers/k20_sim.h index aa85ba92e25..3aad1581b08 100644 --- a/include/drivers/k20_sim.h +++ b/include/drivers/k20_sim.h @@ -34,7 +34,7 @@ DESCRIPTION This module defines the SIM (System Integration Module) Registers for the K20 Family of microprocessors -*/ + */ #ifndef _K20SIM_H_ #define _K20SIM_H_ diff --git a/include/drivers/k20_uart.h b/include/drivers/k20_uart.h index 67bc03cccd4..6022f2c880c 100644 --- a/include/drivers/k20_uart.h +++ b/include/drivers/k20_uart.h @@ -33,7 +33,7 @@ /* DESCRIPTION This module defines the UART Registers for the K20 Family of microprocessors -*/ + */ #ifndef _K20UART_H_ #define _K20UART_H_ diff --git a/include/drivers/k20_watchdog.h b/include/drivers/k20_watchdog.h index f625c9e4528..4750dcd2fb5 100644 --- a/include/drivers/k20_watchdog.h +++ b/include/drivers/k20_watchdog.h @@ -33,7 +33,7 @@ /* DESCRIPTION This module defines Watch Dog Registers for the K20 Family of microprocessors -*/ + */ #ifndef _K20WDOG_H_ #define _K20WDOG_H_ @@ -83,15 +83,15 @@ typedef volatile struct { uint16_t presc; /* 0x16 */ } K20_WDOG_t; -/***********************************************************************/ +/**/ /**< Macro to enable all interrupts. */ #define EnableInterrupts __asm__(" CPSIE i"); /**< Macro to disable all interrupts. */ #define DisableInterrupts __asm__(" CPSID i"); -/***********************************************************************/ +/**/ -/******************************************************************************* +/** * * wdog_unlock - Watchdog timer unlock routine. * @@ -124,7 +124,7 @@ static ALWAYS_INLINE void wdog_unlock(K20_WDOG_t *wdog_p) EnableInterrupts; } -/******************************************************************************* +/** * * wdog_disable - Watchdog timer disable routine * diff --git a/include/drivers/k6x_mpu.h b/include/drivers/k6x_mpu.h index e29484ab7c1..834a1104ca1 100644 --- a/include/drivers/k6x_mpu.h +++ b/include/drivers/k6x_mpu.h @@ -36,7 +36,7 @@ This module defines the Memory Protection Unit (MPU) Registers for the K6x Family of microprocessors. NOTE: Not all the registers are currently defined here - only those that are currently used. -*/ + */ #ifndef _K6xMPU_H_ #define _K6xMPU_H_ diff --git a/include/drivers/k6x_pmc.h b/include/drivers/k6x_pmc.h index 0cb57394e99..b7246828df2 100644 --- a/include/drivers/k6x_pmc.h +++ b/include/drivers/k6x_pmc.h @@ -36,7 +36,7 @@ This module defines the Power Management Controller (PMC) registers for the K6x Family of microprocessors. NOTE: Not all the registers are currently defined here - only those that are currently used. 
-*/ + */ #ifndef _K6xPMC_H_ #define _K6xPMC_H_ diff --git a/include/drivers/pci/pci.h b/include/drivers/pci/pci.h index 51c0b27ed18..905f565e058 100644 --- a/include/drivers/pci/pci.h +++ b/include/drivers/pci/pci.h @@ -33,7 +33,7 @@ /* DESCRIPTION Module declares routines of PCI bus initialization and query -*/ + */ #ifndef _PCI_H_ #define _PCI_H_ diff --git a/include/drivers/pci/pci_mgr.h b/include/drivers/pci/pci_mgr.h index 77b2d780bbe..d7de1eb8c29 100644 --- a/include/drivers/pci/pci_mgr.h +++ b/include/drivers/pci/pci_mgr.h @@ -110,7 +110,7 @@ device supports multiple functions). Bits 7 through 2 select the specific | Device Number | Function Number | Register Number | 00 | +---------------------------------------------------------------------------+ -*/ + */ union pci_addr_reg { struct { @@ -161,7 +161,7 @@ device supports multiple functions). Bits 7 through 2 select the specific | Device Number | Function Number | Register Number | 00 | +---------------------------------------------------------------------------+ -*/ + */ union pcie_addr_reg { struct { @@ -277,7 +277,7 @@ Header Type 0x01 (PCI-to-PCI bridge): | 3C | Bridge Control | Interrupt PIN | Interrupt Line| +-----------------------------------------------------------------------------+ -*/ + */ union pci_dev { @@ -568,7 +568,7 @@ Generic Capability register set header: +----------+----------------+----------------+----------------+---------------+ | 00 | Capability specific data | Next Pointer | Cap ID | +-----------------------------------------------------------------------------+ -*/ + */ union pci_cap_hdr { struct { @@ -622,7 +622,7 @@ MSI Capability register set (64-bit): | 0C | | Message Data Register | +-----------------------------------------------------------------------------+ -*/ + */ struct _pci_msi_hdr { /* common MSI header */ diff --git a/include/drivers/rand32.h b/include/drivers/rand32.h index f133e57b8af..317eb879f85 100644 --- a/include/drivers/rand32.h +++ b/include/drivers/rand32.h @@ -41,7 +41,7 @@ sys_rand32_get(). However, if it does not do so a project requiring random numbers must implement these routines, or (for testing purposes only) enable the TEST_RANDOM_GENERATOR configuration option. -*/ + */ #ifndef __INCrand32h #define __INCrand32h diff --git a/include/drivers/system_timer.h b/include/drivers/system_timer.h index 762c2f3f3db..4a0e6dfc071 100644 --- a/include/drivers/system_timer.h +++ b/include/drivers/system_timer.h @@ -34,7 +34,7 @@ DESCRIPTION Declare API implemented by system timer driver and used by kernel components. -*/ + */ #ifndef _TIMER__H_ #define _TIMER__H_ diff --git a/include/linker-defs.h b/include/linker-defs.h index d56365d2df9..0b4be013f33 100644 --- a/include/linker-defs.h +++ b/include/linker-defs.h @@ -36,7 +36,7 @@ This file may be included by: - Linker script files: for linker section declarations - C files: for external declaration of address or size of linker section - Assembly files: for external declaration of address or size of linker section -*/ + */ #ifndef _LINKERDEFS_H #define _LINKERDEFS_H diff --git a/include/linker-tool-gcc.h b/include/linker-tool-gcc.h index 70f2e0542da..dd04b0053e8 100644 --- a/include/linker-tool-gcc.h +++ b/include/linker-tool-gcc.h @@ -34,7 +34,7 @@ DESCRIPTION This header file defines the necessary macros used by the linker script for use with the GCC linker. 
-*/ + */ #ifndef __LINKER_TOOL_GCC_H #define __LINKER_TOOL_GCC_H diff --git a/include/linker-tool.h b/include/linker-tool.h index b691a6bd971..7276d6e1dca 100644 --- a/include/linker-tool.h +++ b/include/linker-tool.h @@ -34,7 +34,7 @@ DESCRIPTION This header file is used to automatically select the proper set of macro definitions (based on the toolchain) for the linker script. -*/ + */ #ifndef __LINKER_TOOL_H #define __LINKER_TOOL_H diff --git a/include/microkernel/command_packet.h b/include/microkernel/command_packet.h index d31c2b46867..0b0d7fb7224 100644 --- a/include/microkernel/command_packet.h +++ b/include/microkernel/command_packet.h @@ -43,30 +43,30 @@ extern "C" { #define CMD_PKT_SIZE_IN_WORDS (19) -/******************************************************************************* -* -* CMD_PKT_SET_INSTANCE - define an instance of a command packet set -* -* This macro is used to create an instance of a command packet set in the -* global namespace. Each instance of the set may have its own unique number -* of command packets. -* -* INTERNAL -* It is critical that the word corresponding to the [alloc] field in the -* equivalent struct k_args command packet be zero so that the system knows that the -* command packet is not part of the free list. -*/ +/** + * + * CMD_PKT_SET_INSTANCE - define an instance of a command packet set + * + * This macro is used to create an instance of a command packet set in the + * global namespace. Each instance of the set may have its own unique number + * of command packets. + * + * INTERNAL + * It is critical that the word corresponding to the [alloc] field in the + * equivalent struct k_args command packet be zero so that the system knows that the + * command packet is not part of the free list. + */ #define CMD_PKT_SET_INSTANCE(name, num) \ uint32_t name[2 + CMD_PKT_SIZE_IN_WORDS * (num)] = {num, 0}; -/******************************************************************************* -* -* CMD_PKT_SET - wrapper for accessing a command packet set -* -* As a command packet set is instantiated as an array of uint32_t, it is -* necessary to typecast a command packet set before accessing it. -*/ +/** + * + * CMD_PKT_SET - wrapper for accessing a command packet set + * + * As a command packet set is instantiated as an array of uint32_t, it is + * necessary to typecast a command packet set before accessing it. + */ #define CMD_PKT_SET(name) (*(struct cmd_pkt_set *)(name)) diff --git a/include/misc/__assert.h b/include/misc/__assert.h index a4446a2c574..ccb503bd05d 100644 --- a/include/misc/__assert.h +++ b/include/misc/__assert.h @@ -77,7 +77,7 @@ The third and fourth parameters are the parameters it passes to __ASSERT(). The __ASSERT_NO_MSG() macro can be used to perform an assertion that reports the failed test and its location, but lacks additional debugging information provided to assist the user in diagnosing the problem; its use is discouraged. -*/ + */ #ifndef ___ASSERT__H_ #define ___ASSERT__H_ diff --git a/include/misc/dlist.h b/include/misc/dlist.h index a60570ed5f2..9f42ec5e611 100644 --- a/include/misc/dlist.h +++ b/include/misc/dlist.h @@ -37,7 +37,7 @@ Doubly-linked list implementation. The lists are expected to be initialized such that both the head and tail pointers point to the list itself. Initializing the lists in such a fashion simplifies the adding and removing of nodes to/from the list. 
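The two command packet macros above are meant to be used together: CMD_PKT_SET_INSTANCE reserves the storage and CMD_PKT_SET casts it for access. A minimal sketch, assuming a hypothetical driver that owns a private set of two packets:

/* Illustrative only: the set name and its size are hypothetical. */
CMD_PKT_SET_INSTANCE(my_driver_cmd_pkts, 2);

static void use_cmd_pkt_set(void)
{
	struct cmd_pkt_set *set = &CMD_PKT_SET(my_driver_cmd_pkts);

	(void)set;	/* command packets are then handed out from this set */
}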
-*/ + */ #ifndef _misc_dlist__h_ #define _misc_dlist__h_ diff --git a/include/misc/lists_c.h b/include/misc/lists_c.h index 9bacd939f82..37c352107bc 100644 --- a/include/misc/lists_c.h +++ b/include/misc/lists_c.h @@ -37,9 +37,9 @@ extern "C" { #endif -/******************************************************************************* +/** Example code from list insertion etc -*******************************************************************************/ + *******************************************************************************/ #include diff --git a/include/misc/util.h b/include/misc/util.h index 581de6b83f7..093ac82b9c6 100644 --- a/include/misc/util.h +++ b/include/misc/util.h @@ -33,7 +33,7 @@ /* DESCRIPTION Misc utilities usable by nanokernel, microkernel, and application code. -*/ + */ #ifndef _UTIL__H_ #define _UTIL__H_ diff --git a/include/sections.h b/include/sections.h index 8698102c9e3..bb2c6f1a1c3 100644 --- a/include/sections.h +++ b/include/sections.h @@ -33,7 +33,7 @@ /* DESCRIPTION Linker Section declarations used by linker script, C files and Assembly files. -*/ + */ #ifndef _SECTIONS_H #define _SECTIONS_H diff --git a/include/sw_isr_table.h b/include/sw_isr_table.h index 2ea68d7f33b..290c3fbb322 100644 --- a/include/sw_isr_table.h +++ b/include/sw_isr_table.h @@ -33,7 +33,7 @@ /* DESCRIPTION Data types for a software-managed ISR table, with a parameter per-ISR. -*/ + */ #ifndef _SW_ISR_TABLE__H_ #define _SW_ISR_TABLE__H_ diff --git a/include/sys_clock.h b/include/sys_clock.h index db630bdd8fa..4b93bf35a8e 100644 --- a/include/sys_clock.h +++ b/include/sys_clock.h @@ -35,7 +35,7 @@ DESCRIPTION Declare variables used by both system timer device driver and kernel components that use timer functionality. -*/ + */ #ifndef _SYS_CLOCK__H_ #define _SYS_CLOCK__H_ diff --git a/include/toolchain.h b/include/toolchain.h index d264adac05b..d16327bf4aa 100644 --- a/include/toolchain.h +++ b/include/toolchain.h @@ -36,7 +36,7 @@ This file contains various macros to abstract compiler capabilities that utilize toolchain specific attributes and/or pragmas. \NOMANUAL -*/ + */ #ifndef _TOOLCHAIN_H #define _TOOLCHAIN_H diff --git a/include/toolchain/common.h b/include/toolchain/common.h index 8dcc808d1f6..984bc141f0f 100644 --- a/include/toolchain/common.h +++ b/include/toolchain/common.h @@ -35,7 +35,7 @@ DESCRIPTION Macros to abstract compiler capabilities (common to all toolchains). \NOMANUAL -*/ + */ /* * Generate a reference to an external symbol. diff --git a/include/toolchain/gcc.h b/include/toolchain/gcc.h index 0d7fd32d353..dae69cd898a 100644 --- a/include/toolchain/gcc.h +++ b/include/toolchain/gcc.h @@ -35,7 +35,7 @@ Macros to abstract compiler capabilities for GCC toolchain. \NOMANUAL -*/ + */ #include diff --git a/kernel/microkernel/include/k_pipe_util.h b/kernel/microkernel/include/k_pipe_util.h index 10c974d5488..f65ddd57134 100644 --- a/kernel/microkernel/include/k_pipe_util.h +++ b/kernel/microkernel/include/k_pipe_util.h @@ -37,9 +37,9 @@ #define CANCEL_TIMERS -/***********************************/ +/**/ /* TARGET channels functionality: */ -/***********************************/ +/**/ typedef uint32_t REQ_TYPE; #define _ALLREQ ((REQ_TYPE)0x0000FF00) diff --git a/kernel/microkernel/k_command_packet.c b/kernel/microkernel/k_command_packet.c index 40e5720054b..2c49c419783 100644 --- a/kernel/microkernel/k_command_packet.c +++ b/kernel/microkernel/k_command_packet.c @@ -50,7 +50,7 @@ is implicitly released once the command packet has been processed. 
Thus, it is important that each command packet be processed in a near-FIFO order to prevent corruption of command packets that are already in use. To this end, drivers that have an ISR component should use their own command packet set. -*/ + */ #include #include @@ -67,7 +67,7 @@ that have an ISR component should use their own command packet set. uint32_t _k_test_cmd_pkt_size [0 - ((CMD_PKT_SIZE_IN_WORDS * sizeof(uint32_t)) != sizeof(struct k_args))]; -/******************************************************************************* +/** * * _cmd_pkt_get - get the next command packet * @@ -93,12 +93,12 @@ cmdPkt_t *_cmd_pkt_get( return &pSet->cmdPkt[index]; } -/******************************************************************************* -* -* _k_task_call - send command packet to be processed by K_swapper -* -* RETURNS: N/A -*/ +/** + * + * _k_task_call - send command packet to be processed by K_swapper + * + * RETURNS: N/A + */ void _k_task_call(struct k_args *cmd_packet) { diff --git a/kernel/microkernel/k_event.c b/kernel/microkernel/k_event.c index 5c656df9003..2890215f585 100644 --- a/kernel/microkernel/k_event.c +++ b/kernel/microkernel/k_event.c @@ -37,12 +37,12 @@ extern struct evstr _k_event_list[]; -/******************************************************************************* -* -* _k_event_handler_set - perform set event handler request -* -* RETURNS: N/A -*/ +/** + * + * _k_event_handler_set - perform set event handler request + * + * RETURNS: N/A + */ void _k_event_handler_set(struct k_args *A) { @@ -71,22 +71,22 @@ void _k_event_handler_set(struct k_args *A) } } -/******************************************************************************* -* -* task_event_set_handler - set event handler request -* -* This routine specifies the event handler that runs (in the context of the -* K_swapper fiber) when the associated event is signaled. Specifying a non-NULL -* handler installs a new handler, while specifying a NULL event handler removes -* the existing event handler. -* -* A new event handler cannot be installed if one already exists for that event; -* the old handler must be removed first. However, it is permitted to replace -* the NULL event handler with itself. -* -* RETURNS: RC_FAIL if an event handler exists or the event number is invalid, -* else RC_OK -*/ +/** + * + * task_event_set_handler - set event handler request + * + * This routine specifies the event handler that runs (in the context of the + * K_swapper fiber) when the associated event is signaled. Specifying a non-NULL + * handler installs a new handler, while specifying a NULL event handler removes + * the existing event handler. + * + * A new event handler cannot be installed if one already exists for that event; + * the old handler must be removed first. However, it is permitted to replace + * the NULL event handler with itself. 
+ * + * RETURNS: RC_FAIL if an event handler exists or the event number is invalid, + * else RC_OK + */ int task_event_set_handler(kevent_t event, /* event upon which to reigster */ kevent_handler_t handler /* function pointer to handler */ @@ -101,12 +101,12 @@ int task_event_set_handler(kevent_t event, /* event upon which to reigster * return A.Time.rcode; } -/******************************************************************************* -* -* _k_event_test_timeout - finish handling a test for event request that timed out -* -* RETURNS: N/A -*/ +/** + * + * _k_event_test_timeout - finish handling a test for event request that timed out + * + * RETURNS: N/A + */ void _k_event_test_timeout(struct k_args *A) { @@ -119,12 +119,12 @@ void _k_event_test_timeout(struct k_args *A) _k_state_bit_reset(A->Ctxt.proc, TF_EVNT); } -/******************************************************************************* -* -* _k_event_test - perform test for event request -* -* RETURNS: N/A -*/ +/** + * + * _k_event_test - perform test for event request + * + * RETURNS: N/A + */ void _k_event_test(struct k_args *A) { @@ -166,14 +166,14 @@ void _k_event_test(struct k_args *A) } } -/******************************************************************************* -* -* _task_event_recv - test for event request -* -* This routine tests an event to see if it has been signaled. -* -* RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively -*/ +/** + * + * _task_event_recv - test for event request + * + * This routine tests an event to see if it has been signaled. + * + * RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively + */ int _task_event_recv( kevent_t event, /* event for which to test */ @@ -189,16 +189,16 @@ int _task_event_recv( return A.Time.rcode; } -/******************************************************************************* -* -* _k_do_event_signal - signal an event -* -* Lowest level event signalling routine, which is invoked directly when the -* signal is issued by a task and indirectly when the signal is issued by a -* fiber or ISR. The specified event number must be valid. -* -* RETURNS: N/A -*/ +/** + * + * _k_do_event_signal - signal an event + * + * Lowest level event signalling routine, which is invoked directly when the + * signal is issued by a task and indirectly when the signal is issued by a + * fiber or ISR. The specified event number must be valid. + * + * RETURNS: N/A + */ void _k_do_event_signal(kevent_t event) { @@ -233,12 +233,12 @@ void _k_do_event_signal(kevent_t event) #endif } -/******************************************************************************* -* -* _k_event_signal - perform signal an event request -* -* RETURNS: N/A -*/ +/** + * + * _k_event_signal - perform signal an event request + * + * RETURNS: N/A + */ void _k_event_signal(struct k_args *A) { @@ -252,16 +252,16 @@ void _k_event_signal(struct k_args *A) } } -/******************************************************************************* -* -* task_event_send - signal an event request -* -* This routine signals the specified event from a task. If an event handler -* is installed for that event, it will run; if no event handler is installed, -* any task waiting on the event is released. -* -* RETURNS: RC_FAIL if event number is invalid, else RC_OK -*/ +/** + * + * task_event_send - signal an event request + * + * This routine signals the specified event from a task. 
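Tying the event routines above together: a handler (or NULL, to remove one) is installed with task_event_set_handler(), and task_event_send() then either runs that handler in the K_swapper fiber or releases a waiting task. A minimal sketch using only the documented return codes; the event id is assumed to be supplied by the project configuration:

static void event_example(kevent_t event_id)	/* hypothetical helper; id supplied elsewhere */
{
	/* passing NULL removes whatever handler is currently installed */
	(void)task_event_set_handler(event_id, NULL);

	/* with no handler installed, any task waiting on the event is released */
	if (task_event_send(event_id) != RC_OK) {
		/* invalid event number */
	}
}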
If an event handler + * is installed for that event, it will run; if no event handler is installed, + * any task waiting on the event is released. + * + * RETURNS: RC_FAIL if event number is invalid, else RC_OK + */ int task_event_send(kevent_t event /* event to signal */ ) @@ -274,7 +274,7 @@ int task_event_send(kevent_t event /* event to signal */ return A.Time.rcode; } -/******************************************************************************* +/** * * fiber_event_send - signal an event from a fiber * @@ -285,7 +285,7 @@ int task_event_send(kevent_t event /* event to signal */ FUNC_ALIAS(isr_event_send, fiber_event_send, void); -/******************************************************************************* +/** * * isr_event_send - signal an event from an ISR * diff --git a/kernel/microkernel/k_fifo.c b/kernel/microkernel/k_fifo.c index 8e91f991a78..df4b9e4afcf 100644 --- a/kernel/microkernel/k_fifo.c +++ b/kernel/microkernel/k_fifo.c @@ -35,12 +35,12 @@ #include #include -/******************************************************************************* -* -* _k_fifo_enque_reply - finish performing an incomplete FIFO enqueue request -* -* RETURNS: N/A -*/ +/** + * + * _k_fifo_enque_reply - finish performing an incomplete FIFO enqueue request + * + * RETURNS: N/A + */ void _k_fifo_enque_reply(struct k_args *A) { @@ -60,12 +60,12 @@ void _k_fifo_enque_reply(struct k_args *A) _k_state_bit_reset(A->Ctxt.proc, TF_ENQU); } -/******************************************************************************* -* -* _k_fifo_enque_request - perform a FIFO enqueue request -* -* RETURNS: N/A -*/ +/** + * + * _k_fifo_enque_request - perform a FIFO enqueue request + * + * RETURNS: N/A + */ void _k_fifo_enque_request(struct k_args *A) { @@ -136,14 +136,14 @@ void _k_fifo_enque_request(struct k_args *A) } } } -/******************************************************************************* -* -* _task_fifo_put - FIFO enqueue request -* -* This routine puts an entry at the end of the FIFO queue. -* -* RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively -*/ +/** + * + * _task_fifo_put - FIFO enqueue request + * + * This routine puts an entry at the end of the FIFO queue. + * + * RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively + */ int _task_fifo_put(kfifo_t queue, /* FIFO queue */ void *data, /* ptr to data to add to queue */ @@ -162,12 +162,12 @@ int _task_fifo_put(kfifo_t queue, /* FIFO queue */ return A.Time.rcode; } -/******************************************************************************* -* -* _k_fifo_deque_reply - finish performing an incomplete FIFO dequeue request -* -* RETURNS: N/A -*/ +/** + * + * _k_fifo_deque_reply - finish performing an incomplete FIFO dequeue request + * + * RETURNS: N/A + */ void _k_fifo_deque_reply(struct k_args *A) { @@ -187,12 +187,12 @@ void _k_fifo_deque_reply(struct k_args *A) _k_state_bit_reset(A->Ctxt.proc, TF_DEQU); } -/******************************************************************************* -* -* _k_fifo_deque_request - perform FIFO dequeue request -* -* RETURNS: N/A -*/ +/** + * + * _k_fifo_deque_request - perform FIFO dequeue request + * + * RETURNS: N/A + */ void _k_fifo_deque_request(struct k_args *A) { @@ -266,17 +266,17 @@ void _k_fifo_deque_request(struct k_args *A) } } -/******************************************************************************* -* -* _task_fifo_get - FIFO dequeue request -* -* This routine tries to read a data element from the FIFO. 
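A hedged sketch of the FIFO enqueue/dequeue entry points documented here. MY_FIFO, the TICKS_UNLIMITED/TICKS_NONE timeout constants, and the assumption that the FIFO was configured with 4-byte entries are not part of this patch; applications would normally reach these routines through public task_fifo_*() wrappers.

        #include <microkernel.h>

        void demo_fifo(void)
        {
                uint32_t out = 42;
                uint32_t in;

                /* enqueue, waiting for space if the FIFO is currently full */
                if (_task_fifo_put(MY_FIFO, &out, TICKS_UNLIMITED) != RC_OK) {
                        return;
                }

                /* dequeue the oldest entry without waiting */
                if (_task_fifo_get(MY_FIFO, &in, TICKS_NONE) == RC_OK) {
                        /* 'in' now holds a copy of the oldest entry */
                }
        }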
-* -* If the FIFO is not empty, the oldest entry is removed and copied to the -* address provided by the caller. -* -* RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively -*/ +/** + * + * _task_fifo_get - FIFO dequeue request + * + * This routine tries to read a data element from the FIFO. + * + * If the FIFO is not empty, the oldest entry is removed and copied to the + * address provided by the caller. + * + * RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively + */ int _task_fifo_get(kfifo_t queue, /* FIFO queue */ void *data, /* where to store FIFO entry */ @@ -295,12 +295,12 @@ int _task_fifo_get(kfifo_t queue, /* FIFO queue */ return A.Time.rcode; } -/******************************************************************************* -* -* _k_fifo_ioctl - perform miscellaneous FIFO request -* -* RETURNS: N/A -*/ +/** + * + * _k_fifo_ioctl - perform miscellaneous FIFO request + * + * RETURNS: N/A + */ void _k_fifo_ioctl(struct k_args *A) { @@ -335,16 +335,16 @@ void _k_fifo_ioctl(struct k_args *A) A->Time.rcode = Q->Nused; } -/******************************************************************************* -* -* _task_fifo_ioctl - miscellaneous FIFO request -* -* Depending upon the chosen operation, this routine will ... -* 1. = 0 : query the number of FIFO entries -* 2. = 1 : purge the FIFO of its entries -* -* RETURNS: # of FIFO entries on query; RC_OK on purge -*/ +/** + * + * _task_fifo_ioctl - miscellaneous FIFO request + * + * Depending upon the chosen operation, this routine will ... + * 1. = 0 : query the number of FIFO entries + * 2. = 1 : purge the FIFO of its entries + * + * RETURNS: # of FIFO entries on query; RC_OK on purge + */ int _task_fifo_ioctl(kfifo_t queue, /* FIFO queue */ int op /* 0: status query; 1: purge */ diff --git a/kernel/microkernel/k_idle.c b/kernel/microkernel/k_idle.c index b388c540b65..80fab76637c 100644 --- a/kernel/microkernel/k_idle.c +++ b/kernel/microkernel/k_idle.c @@ -34,7 +34,7 @@ DESCRIPTION Microkernel idle logic. Different forms of idling are performed by the idle task, depending on how the kernel is configured. -*/ + */ #include #include @@ -66,19 +66,19 @@ static extern uint32_t _k_workload_scale; #define MSEC_PER_SEC 1000 -/******************************************************************************* -* -* workload_loop - shared code between workload calibration and monitoring -* -* Perform idle task "dummy work". -* -* This routine increments _k_workload_i and checks it against _k_workload_n1. -* _k_workload_n1 is updated by the system tick handler, and both are kept -* in close synchronization. -* -* RETURNS: N/A -* -*/ +/** + * + * workload_loop - shared code between workload calibration and monitoring + * + * Perform idle task "dummy work". + * + * This routine increments _k_workload_i and checks it against _k_workload_n1. + * _k_workload_n1 is updated by the system tick handler, and both are kept + * in close synchronization. + * + * RETURNS: N/A + * + */ static void workload_loop(void) { @@ -100,16 +100,16 @@ static void workload_loop(void) } } -/******************************************************************************* -* -* _k_workload_monitor_calibrate - calibrate the workload monitoring subsystem -* -* Measures the time required to do a fixed amount of "dummy work", and -* sets default values for the workload measuring period. 
-* -* RETURNS: N/A -* -*/ +/** + * + * _k_workload_monitor_calibrate - calibrate the workload monitoring subsystem + * + * Measures the time required to do a fixed amount of "dummy work", and + * sets default values for the workload measuring period. + * + * RETURNS: N/A + * + */ void _k_workload_monitor_calibrate(void) { @@ -133,17 +133,17 @@ void _k_workload_monitor_calibrate(void) _k_workload_ticks = 100; } -/******************************************************************************* -* -* _k_workload_monitor_update - workload monitor tick handler -* -* If workload monitor is configured this routine updates the global variables -* it uses to record the passage of time. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _k_workload_monitor_update - workload monitor tick handler + * + * If workload monitor is configured this routine updates the global variables + * it uses to record the passage of time. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _k_workload_monitor_update(void) { @@ -156,29 +156,29 @@ void _k_workload_monitor_update(void) } } -/******************************************************************************* -* -* _k_workload_monitor_idle_start - workload monitor "start idling" handler -* -* Records time when idle task was selected for execution by the microkernel. -* -* RETURNS: N/A -*/ +/** + * + * _k_workload_monitor_idle_start - workload monitor "start idling" handler + * + * Records time when idle task was selected for execution by the microkernel. + * + * RETURNS: N/A + */ void _k_workload_monitor_idle_start(void) { _k_workload_start_time = timer_read(); } -/******************************************************************************* -* -* _k_workload_monitor_idle_end - workload monitor "end idling" handler -* -* Records time when idle task was no longer selected for execution by the -* microkernel, and updates amount of time spent idling. -* -* RETURNS: N/A -*/ +/** + * + * _k_workload_monitor_idle_end - workload monitor "end idling" handler + * + * Records time when idle task was no longer selected for execution by the + * microkernel, and updates amount of time spent idling. + * + * RETURNS: N/A + */ void _k_workload_monitor_idle_end(void) { @@ -187,14 +187,14 @@ void _k_workload_monitor_idle_end(void) (_k_workload_end_time - _k_workload_start_time)) / _k_workload_delta; } -/******************************************************************************* -* -* _k_workload_get - process request to read the processor workload -* -* Computes workload, or uses 0 if workload monitoring is not configured. -* -* RETURNS: N/A -*/ +/** + * + * _k_workload_get - process request to read the processor workload + * + * Computes workload, or uses 0 if workload monitoring is not configured. + * + * RETURNS: N/A + */ void _k_workload_get(struct k_args *P) { @@ -229,22 +229,22 @@ void _k_workload_get(struct k_args *P) #endif /* CONFIG_WORKLOAD_MONITOR */ -/******************************************************************************* -* -* task_workload_get - read the processor workload -* -* This routine returns the workload as a number ranging from 0 to 1000. -* -* Each unit equals 0.1% of the time the idle task was not scheduled by the -* microkernel during the period set by sys_workload_time_slice_set(). -* -* IMPORTANT: This workload monitor ignores any time spent servicing ISRs and -* fibers! 
Thus, a system which has no meaningful task work to do may spend -* up to 100% of its time servicing ISRs and fibers, yet report a workload of 0% -* because the idle task is always the task selected by the microkernel. -* -* RETURNS: workload -*/ +/** + * + * task_workload_get - read the processor workload + * + * This routine returns the workload as a number ranging from 0 to 1000. + * + * Each unit equals 0.1% of the time the idle task was not scheduled by the + * microkernel during the period set by sys_workload_time_slice_set(). + * + * IMPORTANT: This workload monitor ignores any time spent servicing ISRs and + * fibers! Thus, a system which has no meaningful task work to do may spend + * up to 100% of its time servicing ISRs and fibers, yet report a workload of 0% + * because the idle task is always the task selected by the microkernel. + * + * RETURNS: workload + */ int task_workload_get(void) { @@ -255,14 +255,14 @@ int task_workload_get(void) return A.Args.u1.rval; } -/******************************************************************************* -* -* sys_workload_time_slice_set - set workload period -* -* This routine specifies the workload measuring period for task_workload_get(). -* -* RETURNS: N/A -*/ +/** + * + * sys_workload_time_slice_set - set workload period + * + * This routine specifies the workload measuring period for task_workload_get(). + * + * RETURNS: N/A + */ void sys_workload_time_slice_set(int32_t t) { @@ -303,18 +303,18 @@ extern void nano_cpu_set_idle(int32_t ticks); int32_t _sys_idle_threshold_ticks = CONFIG_TICKLESS_IDLE_THRESH; #endif /* CONFIG_TICKLESS_IDLE */ -/******************************************************************************* -* -* _sys_power_save_idle - power management policy when kernel begins idling -* -* This routine implements the power management policy based on the time -* until the timer expires, in system ticks. -* Routine is invoked from the idle task with interrupts disabled -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _sys_power_save_idle - power management policy when kernel begins idling + * + * This routine implements the power management policy based on the time + * until the timer expires, in system ticks. + * Routine is invoked from the idle task with interrupts disabled + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _sys_power_save_idle(int32_t ticks) { @@ -348,18 +348,18 @@ void _sys_power_save_idle(int32_t ticks) #endif /* CONFIG_ADVANCED_IDLE */ } -/******************************************************************************* -* -* _sys_power_save_idle_exit - power management policy when kernel stops idling -* -* This routine is invoked when the kernel leaves the idle state. -* Routine can be modified to wake up other devices. -* The routine is invoked from interrupt context, with interrupts disabled. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _sys_power_save_idle_exit - power management policy when kernel stops idling + * + * This routine is invoked when the kernel leaves the idle state. + * Routine can be modified to wake up other devices. + * The routine is invoked from interrupt context, with interrupts disabled. 
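A short sketch of the workload-monitor API documented above; the assumption that the time-slice argument is in ticks, and the microkernel.h header, are not confirmed by this patch.

        #include <microkernel.h>

        void report_cpu_load(void)
        {
                int load;

                /* measure over 100-tick periods (units assumed) */
                sys_workload_time_slice_set(100);

                load = task_workload_get();   /* 0..1000, i.e. units of 0.1% */

                /* e.g. load == 257 means the idle task was starved for
                 * 25.7% of the measuring period */
                (void)load;
        }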
+ * + * RETURNS: N/A + * + * \NOMANUAL + */ void _sys_power_save_idle_exit(int32_t ticks) { @@ -374,16 +374,16 @@ void _sys_power_save_idle_exit(int32_t ticks) #endif /* CONFIG_TICKLESS_IDLE */ } -/******************************************************************************* -* -* _get_next_timer_expiry - obtain number of ticks until next timer expires -* -* Must be called with interrupts locked to prevent the timer queues from -* changing. -* -* RETURNS: Number of ticks until next timer expires. -* -*/ +/** + * + * _get_next_timer_expiry - obtain number of ticks until next timer expires + * + * Must be called with interrupts locked to prevent the timer queues from + * changing. + * + * RETURNS: Number of ticks until next timer expires. + * + */ static inline int32_t _get_next_timer_expiry(void) { @@ -397,18 +397,18 @@ static inline int32_t _get_next_timer_expiry(void) } #endif -/******************************************************************************* -* -* _power_save - power saving when idle -* -* If the BSP sets the _sys_power_save_flag flag, this routine will call the -* _sys_power_save_idle() routine in an infinite loop. If the flag is not set, -* this routine will fall through and _k_kernel_idle() will try the next idling -* mechanism. -* -* RETURNS: N/A -* -*/ +/** + * + * _power_save - power saving when idle + * + * If the BSP sets the _sys_power_save_flag flag, this routine will call the + * _sys_power_save_idle() routine in an infinite loop. If the flag is not set, + * this routine will fall through and _k_kernel_idle() will try the next idling + * mechanism. + * + * RETURNS: N/A + * + */ static void _power_save(void) { @@ -447,15 +447,15 @@ static void _power_save(void) #define DO_IDLE_WORK() do { /* do nothing */ } while (0) #endif -/******************************************************************************* -* -* _k_kernel_idle - microkernel idle task -* -* If power save is on, we sleep; if power save is off, we "busy wait". -* -* RETURNS: N/A -* -*/ +/** + * + * _k_kernel_idle - microkernel idle task + * + * If power save is on, we sleep; if power save is off, we "busy wait". + * + * RETURNS: N/A + * + */ int _k_kernel_idle(void) { diff --git a/kernel/microkernel/k_irq.c b/kernel/microkernel/k_irq.c index f53466b6616..1ee5199585f 100644 --- a/kernel/microkernel/k_irq.c +++ b/kernel/microkernel/k_irq.c @@ -49,7 +49,7 @@ These routines perform error checking to ensure that an IRQ object can only be allocated by a single task, and that subsequent operations on that IRQ object are only performed by that task. This checking is necessary to ensure that a task cannot impact the operation of an IRQ object it does not own. -*/ + */ #if (CONFIG_MAX_NUM_TASK_IRQS > 0) @@ -102,22 +102,22 @@ static struct task_irq_info task_irq_object[MAX_TASK_IRQS] = { extern const kevent_t _TaskIrqEvt0_objId; -/******************************************************************************* -* -* task_irq_int_handler - ISR for task IRQ objects -* -* This ISR handles interrupts generated by registered task IRQ objects. -* -* The ISR triggers an event signal specified by the event number associated -* with a particular task IRQ object; the interrupt for the task IRQ object -* is then disabled. The parameter provided to the ISR is a structure that -* contains information about the objects's vector, IRQ, and event number. -* -* This ISR does not facilitate an int acknowledgment as it presumes that an -* End of Interrupt (EOI) routine is provided by the PIC that is being used. 
-* -* RETURNS: N/A -*/ +/** + * + * task_irq_int_handler - ISR for task IRQ objects + * + * This ISR handles interrupts generated by registered task IRQ objects. + * + * The ISR triggers an event signal specified by the event number associated + * with a particular task IRQ object; the interrupt for the task IRQ object + * is then disabled. The parameter provided to the ISR is a structure that + * contains information about the objects's vector, IRQ, and event number. + * + * This ISR does not facilitate an int acknowledgment as it presumes that an + * End of Interrupt (EOI) routine is provided by the PIC that is being used. + * + * RETURNS: N/A + */ static void task_irq_int_handler( void *parameter /* ptr to task IRQ object */ @@ -129,16 +129,16 @@ static void task_irq_int_handler( irq_disable(irq_obj_ptr->irq); } -/******************************************************************************* -* -* task_irq_free - free a task IRQ object -* -* The task IRQ object's interrupt is disabled, and the associated event -* is flushed; the object's interrupt vector is then freed, and the object's -* global array entry is marked as unused. -* -* RETURNS: N/A -*/ +/** + * + * task_irq_free - free a task IRQ object + * + * The task IRQ object's interrupt is disabled, and the associated event + * is flushed; the object's interrupt vector is then freed, and the object's + * global array entry is marked as unused. + * + * RETURNS: N/A + */ void task_irq_free(kirq_t irq_obj /* IRQ object identifier */ ) @@ -153,14 +153,14 @@ void task_irq_free(kirq_t irq_obj /* IRQ object identifier */ task_irq_object[irq_obj].taskId = INVALID_TASK; } -/******************************************************************************* -* -* task_irq_ack - re-enable a task IRQ object's interrupt -* -* This re-enables the interrupt for a task IRQ object. -* -* RETURNS: N/A -*/ +/** + * + * task_irq_ack - re-enable a task IRQ object's interrupt + * + * This re-enables the interrupt for a task IRQ object. + * + * RETURNS: N/A + */ void task_irq_ack(kirq_t irq_obj /* IRQ object identifier */ ) @@ -172,14 +172,14 @@ void task_irq_ack(kirq_t irq_obj /* IRQ object identifier */ irq_enable(task_irq_object[irq_obj].irq); } -/******************************************************************************* -* -* _task_irq_test - determine if a task IRQ object has had an interrupt -* -* This tests a task IRQ object to see if it has signalled an interrupt. -* -* RETURNS: RC_OK, RC_FAIL, or RC_TIME -*/ +/** + * + * _task_irq_test - determine if a task IRQ object has had an interrupt + * + * This tests a task IRQ object to see if it has signalled an interrupt. + * + * RETURNS: RC_OK, RC_FAIL, or RC_TIME + */ int _task_irq_test(kirq_t irq_obj, /* IRQ object identifier */ int32_t time /* time to wait (in ticks) */ @@ -192,14 +192,14 @@ int _task_irq_test(kirq_t irq_obj, /* IRQ object identifier */ return _task_event_recv(task_irq_object[irq_obj].event, time); } -/******************************************************************************* -* -* _k_task_irq_alloc - allocate a task IRQ object -* -* This routine allocates a task IRQ object to a task. -* -* RETURNS: ptr to allocated task IRQ object if successful, NULL if not -*/ +/** + * + * _k_task_irq_alloc - allocate a task IRQ object + * + * This routine allocates a task IRQ object to a task. 
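A sketch of a device task using a task IRQ object, per the routines documented here. Only the irq_obj parameter of task_irq_alloc() is visible in this patch, so the additional (irq, priority) arguments, the TICKS_UNLIMITED constant, and the object/IRQ numbers are assumptions for illustration.

        #include <microkernel.h>

        #define MY_IRQ_OBJ  0     /* hypothetical task IRQ object identifier */
        #define MY_IRQ      11    /* hypothetical IRQ line */
        #define MY_IRQ_PRIO 2     /* hypothetical interrupt priority */

        void demo_task_irq(void)
        {
                uint32_t vector = task_irq_alloc(MY_IRQ_OBJ, MY_IRQ, MY_IRQ_PRIO);

                if (vector == INVALID_VECTOR) {
                        return;
                }

                /* block until the ISR signals the object's event; the ISR
                 * leaves the interrupt disabled until it is acknowledged */
                if (_task_irq_test(MY_IRQ_OBJ, TICKS_UNLIMITED) == RC_OK) {
                        /* ... service the device ... */
                        task_irq_ack(MY_IRQ_OBJ);   /* re-enable the interrupt */
                }

                task_irq_free(MY_IRQ_OBJ);          /* release vector and object */
        }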
+ * + * RETURNS: ptr to allocated task IRQ object if successful, NULL if not + */ static int _k_task_irq_alloc( void *arg /* ptr to registration request arguments */ @@ -235,18 +235,18 @@ static int _k_task_irq_alloc( return (int)irq_obj_ptr; } -/******************************************************************************* -* -* task_irq_alloc - register a task IRQ object -* -* This routine connects a task IRQ object to a system interrupt based -* upon the specified IRQ and priority values. -* -* IRQ allocation is done via K_swapper so that simultaneous allocation -* requests are single-threaded. -* -* RETURNS: assigned interrupt vector if successful, INVALID_VECTOR if not -*/ +/** + * + * task_irq_alloc - register a task IRQ object + * + * This routine connects a task IRQ object to a system interrupt based + * upon the specified IRQ and priority values. + * + * IRQ allocation is done via K_swapper so that simultaneous allocation + * requests are single-threaded. + * + * RETURNS: assigned interrupt vector if successful, INVALID_VECTOR if not + */ uint32_t task_irq_alloc( kirq_t irq_obj, /* IRQ object identifier */ diff --git a/kernel/microkernel/k_mailbox.c b/kernel/microkernel/k_mailbox.c index bcabc31baf8..1d26ea9e39a 100644 --- a/kernel/microkernel/k_mailbox.c +++ b/kernel/microkernel/k_mailbox.c @@ -40,22 +40,22 @@ #include #include -/******************************************************************************* -* -* ISASYNCMSG - determines if mailbox message is synchronous or asynchronous -* -* Returns a non-zero value if the specified message contains a valid pool ID, -* indicating that it is an asynchronous message. -*/ +/** + * + * ISASYNCMSG - determines if mailbox message is synchronous or asynchronous + * + * Returns a non-zero value if the specified message contains a valid pool ID, + * indicating that it is an asynchronous message. 
+ */ #define ISASYNCMSG(message) ((message)->tx_block.poolid != 0) -/******************************************************************************* -* -* copy_packet - copy a packet -* -* RETURNS: N/A -*/ +/** + * + * copy_packet - copy a packet + * + * RETURNS: N/A + */ static void copy_packet(struct k_args **out, struct k_args *in) { @@ -70,12 +70,12 @@ static void copy_packet(struct k_args **out, struct k_args *in) (*out)->Ctxt.args = in; } -/******************************************************************************* -* -* match - determine if there is a match between the mailbox sender and receiver -* -* RETURNS: matched message size, or -1 if no match -*/ +/** + * + * match - determine if there is a match between the mailbox sender and receiver + * + * RETURNS: matched message size, or -1 if no match + */ static int match(struct k_args *Reader, struct k_args *Writer) { @@ -126,12 +126,12 @@ static int match(struct k_args *Reader, struct k_args *Writer) return -1; /* There was no match */ } -/******************************************************************************* -* -* prepare_transfer - -* -* RETURNS: true or false -*/ +/** + * + * prepare_transfer - + * + * RETURNS: true or false + */ static bool prepare_transfer(struct k_args *move, struct k_args *reader, @@ -210,12 +210,12 @@ static bool prepare_transfer(struct k_args *move, } } -/******************************************************************************* -* -* transfer - -* -* RETURNS: N/A -*/ +/** + * + * transfer - + * + * RETURNS: N/A + */ static void transfer(struct k_args *pMvdReq) { @@ -226,12 +226,12 @@ static void transfer(struct k_args *pMvdReq) FREEARGS(pMvdReq); } -/******************************************************************************* -* -* _k_mbox_send_ack - process the acknowledgment to a mailbox send request -* -* RETURNS: N/A -*/ +/** + * + * _k_mbox_send_ack - process the acknowledgment to a mailbox send request + * + * RETURNS: N/A + */ void _k_mbox_send_ack(struct k_args *pCopyWriter) { @@ -298,12 +298,12 @@ void _k_mbox_send_ack(struct k_args *pCopyWriter) } } -/******************************************************************************* -* -* _k_mbox_send_reply - process the timeout for a mailbox send request -* -* RETURNS: N/A -*/ +/** + * + * _k_mbox_send_reply - process the timeout for a mailbox send request + * + * RETURNS: N/A + */ void _k_mbox_send_reply(struct k_args *pCopyWriter) { @@ -314,12 +314,12 @@ void _k_mbox_send_reply(struct k_args *pCopyWriter) SENDARGS(pCopyWriter); } -/******************************************************************************* -* -* _k_mbox_send_request - process a mailbox send request -* -* RETURNS: N/A -*/ +/** + * + * _k_mbox_send_request - process a mailbox send request + * + * RETURNS: N/A + */ void _k_mbox_send_request(struct k_args *Writer) { @@ -474,14 +474,14 @@ void _k_mbox_send_request(struct k_args *Writer) } } -/******************************************************************************* -* -* _task_mbox_put - send a message to a mailbox -* -* This routine sends a message to a mailbox and looks for a matching receiver. -* -* RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively -*/ +/** + * + * _task_mbox_put - send a message to a mailbox + * + * This routine sends a message to a mailbox and looks for a matching receiver. 
+ * + * RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively + */ int _task_mbox_put(kmbox_t mbox, /* mailbox */ kpriority_t prio, /* priority of data transfer */ @@ -515,16 +515,16 @@ int _task_mbox_put(kmbox_t mbox, /* mailbox */ return A.Time.rcode; } -/******************************************************************************* -* -* _k_mbox_receive_ack - process a mailbox receive acknowledgment -* -* This routine processes a mailbox receive acknowledgment. -* -* INTERNAL: This routine frees the packet -* -* RETURNS: N/A -*/ +/** + * + * _k_mbox_receive_ack - process a mailbox receive acknowledgment + * + * This routine processes a mailbox receive acknowledgment. + * + * INTERNAL: This routine frees the packet + * + * RETURNS: N/A + */ void _k_mbox_receive_ack(struct k_args *pCopyReader) { @@ -545,12 +545,12 @@ void _k_mbox_receive_ack(struct k_args *pCopyReader) FREEARGS(pCopyReader); } -/******************************************************************************* -* -* _k_mbox_receive_reply - process the timeout for a mailbox receive request -* -* RETURNS: N/A -*/ +/** + * + * _k_mbox_receive_reply - process the timeout for a mailbox receive request + * + * RETURNS: N/A + */ void _k_mbox_receive_reply(struct k_args *pCopyReader) { @@ -563,12 +563,12 @@ void _k_mbox_receive_reply(struct k_args *pCopyReader) #endif } -/******************************************************************************* -* -* _k_mbox_receive_request - process a mailbox receive request -* -* RETURNS: N/A -*/ +/** + * + * _k_mbox_receive_request - process a mailbox receive request + * + * RETURNS: N/A + */ void _k_mbox_receive_request(struct k_args *Reader) { @@ -690,13 +690,13 @@ void _k_mbox_receive_request(struct k_args *Reader) } } -/******************************************************************************* -* -* _task_mbox_get - gets struct k_msg message header structure information -* from a mailbox -* -* RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively -*/ +/** + * + * _task_mbox_get - gets struct k_msg message header structure information + * from a mailbox + * + * RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively + */ int _task_mbox_get(kmbox_t mbox, /* mailbox */ struct k_msg *M, /* pointer to message */ @@ -724,16 +724,16 @@ int _task_mbox_get(kmbox_t mbox, /* mailbox */ return A.Time.rcode; } -/******************************************************************************* -* -* _task_mbox_put_async - send a message asynchronously to a mailbox -* -* This routine sends a message to a mailbox and does not wait for a matching -* receiver. There is no exchange header returned to the sender. When the data -* has been transferred to the receiver, the semaphore signaling is performed. -* -* RETURNS: N/A -*/ +/** + * + * _task_mbox_put_async - send a message asynchronously to a mailbox + * + * This routine sends a message to a mailbox and does not wait for a matching + * receiver. There is no exchange header returned to the sender. When the data + * has been transferred to the receiver, the semaphore signaling is performed. 
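A sender-side sketch of the synchronous mailbox path documented above. The struct k_msg field names (tx_data, size, rx_task), the ANYTASK and TICKS_UNLIMITED constants, and the trailing (message, timeout) parameters of _task_mbox_put() are assumptions; this patch only shows the exchange header being passed by pointer.

        #include <microkernel.h>

        void demo_mbox_send(void)
        {
                char payload[16] = "hello";
                struct k_msg msg;

                msg.tx_data = payload;        /* data offered to the receiver (assumed field) */
                msg.size = sizeof(payload);   /* maximum bytes to transfer (assumed field) */
                msg.rx_task = ANYTASK;        /* any task may receive it (assumed constant) */

                /* block until a matching receiver has taken the message */
                (void)_task_mbox_put(MY_MAILBOX, 1 /* transfer priority */,
                                     &msg, TICKS_UNLIMITED);
        }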
+ * + * RETURNS: N/A + */ void _task_mbox_put_async(kmbox_t mbox, /* mailbox to which to send message */ kpriority_t prio, /* priority of data transfer */ @@ -767,12 +767,12 @@ void _task_mbox_put_async(kmbox_t mbox, /* mailbox to which to send message */ KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* _k_mbox_receive_data - process a mailbox receive data request -* -* RETURNS: N/A -*/ +/** + * + * _k_mbox_receive_data - process a mailbox receive data request + * + * RETURNS: N/A + */ void _k_mbox_receive_data(struct k_args *Starter) { @@ -812,18 +812,18 @@ void _k_mbox_receive_data(struct k_args *Starter) } } -/******************************************************************************* -* -* _task_mbox_data_get - get message data -* -* This routine is called for either of the two following purposes: -* 1. To transfer data if the call to task_mbox_get() resulted in a non-zero size -* field in the struct k_msg header structure. -* 2. To wake up and release a transmitting task that is blocked on a call to -* task_mbox_put[wait|wait_timeout](). -* -* RETURNS: N/A -*/ +/** + * + * _task_mbox_data_get - get message data + * + * This routine is called for either of the two following purposes: + * 1. To transfer data if the call to task_mbox_get() resulted in a non-zero size + * field in the struct k_msg header structure. + * 2. To wake up and release a transmitting task that is blocked on a call to + * task_mbox_put[wait|wait_timeout](). + * + * RETURNS: N/A + */ void _task_mbox_data_get(struct k_msg *M /* message from which to get data */ ) @@ -845,13 +845,13 @@ void _task_mbox_data_get(struct k_msg *M /* message from which to get data */ KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* _task_mbox_data_get_async_block - get the mailbox data and place -* in a memory pool block -* -* RETURNS: RC_OK upon success, RC_FAIL upon failure, or RC_TIME upon timeout -*/ +/** + * + * _task_mbox_data_get_async_block - get the mailbox data and place + * in a memory pool block + * + * RETURNS: RC_OK upon success, RC_FAIL upon failure, or RC_TIME upon timeout + */ int _task_mbox_data_get_async_block(struct k_msg *message, struct k_block *rxblock, @@ -944,12 +944,12 @@ int _task_mbox_data_get_async_block(struct k_msg *message, return RC_OK; /* task_mbox_data_get() doesn't return anything */ } -/******************************************************************************* -* -* _k_mbox_send_data - process a mailbox send data request -* -* RETURNS: N/A -*/ +/** + * + * _k_mbox_send_data - process a mailbox send data request + * + * RETURNS: N/A + */ void _k_mbox_send_data(struct k_args *Starter) { diff --git a/kernel/microkernel/k_memory_map.c b/kernel/microkernel/k_memory_map.c index 304f5359f80..623b72fd152 100644 --- a/kernel/microkernel/k_memory_map.c +++ b/kernel/microkernel/k_memory_map.c @@ -33,14 +33,14 @@ #include #include -/******************************************************************************* -* -* _k_mem_map_init - initialize kernel memory map subsystem -* -* Perform any initialization of memory maps that wasn't done at build time. -* -* RETURNS: N/A -*/ +/** + * + * _k_mem_map_init - initialize kernel memory map subsystem + * + * Perform any initialization of memory maps that wasn't done at build time. 
+ * + * RETURNS: N/A + */ void _k_mem_map_init(void) { @@ -70,12 +70,12 @@ void _k_mem_map_init(void) } } -/******************************************************************************* -* -* _k_mem_map_alloc_timeout - finish handling a memory map block request that timed out -* -* RETURNS: N/A -*/ +/** + * + * _k_mem_map_alloc_timeout - finish handling a memory map block request that timed out + * + * RETURNS: N/A + */ void _k_mem_map_alloc_timeout(struct k_args *A) { @@ -85,12 +85,12 @@ void _k_mem_map_alloc_timeout(struct k_args *A) _k_state_bit_reset(A->Ctxt.proc, TF_ALLO); } -/******************************************************************************* -* -* _k_mem_map_alloc - perform allocate memory map block request -* -* RETURNS: N/A -*/ +/** + * + * _k_mem_map_alloc - perform allocate memory map block request + * + * RETURNS: N/A + */ void _k_mem_map_alloc(struct k_args *A) { @@ -130,14 +130,14 @@ void _k_mem_map_alloc(struct k_args *A) A->Time.rcode = RC_FAIL; } -/******************************************************************************* -* -* _task_mem_map_alloc - allocate memory map block request -* -* This routine is used to request a block of memory from the memory map. -* -* RETURNS: RC_OK, RC_FAIL, RC_TIME on success, error, timeout respectively -*/ +/** + * + * _task_mem_map_alloc - allocate memory map block request + * + * This routine is used to request a block of memory from the memory map. + * + * RETURNS: RC_OK, RC_FAIL, RC_TIME on success, error, timeout respectively + */ int _task_mem_map_alloc(kmemory_map_t mmap, /* memory map from which to request block */ void **mptr, /* pointer to requested block of memory */ @@ -154,12 +154,12 @@ int _task_mem_map_alloc(kmemory_map_t mmap, /* memory map from which to request return A.Time.rcode; } -/******************************************************************************* -* -* _k_mem_map_dealloc - perform return memory map block request -* -* RETURNS: N/A -*/ +/** + * + * _k_mem_map_dealloc - perform return memory map block request + * + * RETURNS: N/A + */ void _k_mem_map_dealloc(struct k_args *A) { @@ -194,16 +194,16 @@ void _k_mem_map_dealloc(struct k_args *A) M->Nused--; } -/******************************************************************************* -* -* _task_mem_map_free - return memory map block request -* -* This routine returns a block to the specified memory map. If a higher -* priority task is waiting for a block from the same map a task switch -* takes place. -* -* RETURNS: N/A -*/ +/** + * + * _task_mem_map_free - return memory map block request + * + * This routine returns a block to the specified memory map. If a higher + * priority task is waiting for a block from the same map a task switch + * takes place. + * + * RETURNS: N/A + */ void _task_mem_map_free(kmemory_map_t mmap, /* memory map */ void **mptr /* block of memory to return */ @@ -217,14 +217,14 @@ void _task_mem_map_free(kmemory_map_t mmap, /* memory map */ KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* task_mem_map_used_get - read the number of used blocks in a memory map -* -* This routine returns the number of blocks in use for the memory map. -* -* RETURNS: number of used blocks -*/ +/** + * + * task_mem_map_used_get - read the number of used blocks in a memory map + * + * This routine returns the number of blocks in use for the memory map. 
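A minimal sketch of the memory map block API documented here, assuming MY_MAP is a map defined by the project configuration and that TICKS_UNLIMITED and microkernel.h exist as usual; the routine signatures themselves are the ones shown in this patch.

        #include <microkernel.h>

        void demo_mem_map(void)
        {
                void *block;
                int in_use;

                /* wait until one of the map's fixed-size blocks is free */
                if (_task_mem_map_alloc(MY_MAP, &block, TICKS_UNLIMITED) != RC_OK) {
                        return;
                }

                /* ... use the block ... */

                _task_mem_map_free(MY_MAP, &block);

                in_use = task_mem_map_used_get(MY_MAP);   /* blocks currently allocated */
                (void)in_use;
        }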
+ * + * RETURNS: number of used blocks + */ int task_mem_map_used_get(kmemory_map_t mmap /* memory map */ ) diff --git a/kernel/microkernel/k_memory_pool.c b/kernel/microkernel/k_memory_pool.c index 2edece693d0..2916a86e616 100644 --- a/kernel/microkernel/k_memory_pool.c +++ b/kernel/microkernel/k_memory_pool.c @@ -43,14 +43,14 @@ #define AUTODEFRAG AD_AFTER_SEARCH4BIGGERBLOCK -/******************************************************************************* -* -* _k_mem_pool_init - initialize kernel memory pool subsystem -* -* Perform any initialization of memory pool that wasn't done at build time. -* -* RETURNS: N/A -*/ +/** + * + * _k_mem_pool_init - initialize kernel memory pool subsystem + * + * Perform any initialization of memory pool that wasn't done at build time. + * + * RETURNS: N/A + */ void _k_mem_pool_init(void) { @@ -99,14 +99,14 @@ void _k_mem_pool_init(void) } } -/******************************************************************************* -* -* search_bp - ??? -* -* marks ptr as free block in the given list [MYSTERIOUS LEGACY COMMENT] -* -* RETURNS: N/A -*/ +/** + * + * search_bp - ??? + * + * marks ptr as free block in the given list [MYSTERIOUS LEGACY COMMENT] + * + * RETURNS: N/A + */ static void search_bp(char *ptr, struct pool_struct *P, int index) { @@ -134,12 +134,12 @@ static void search_bp(char *ptr, struct pool_struct *P, int index) } } -/******************************************************************************* -* -* defrag - defragmentation algorithm for memory pool -* -* RETURNS: N/A -*/ +/** + * + * defrag - defragmentation algorithm for memory pool + * + * RETURNS: N/A + */ static void defrag(struct pool_struct *P, int ifraglevel_start, @@ -190,12 +190,12 @@ static void defrag(struct pool_struct *P, } } -/******************************************************************************* -* -* _k_defrag - perform defragment memory pool request -* -* RETURNS: N/A -*/ +/** + * + * _k_defrag - perform defragment memory pool request + * + * RETURNS: N/A + */ void _k_defrag(struct k_args *A) { @@ -223,14 +223,14 @@ void _k_defrag(struct k_args *A) } } -/******************************************************************************* -* -* task_mem_pool_defragment - defragment memory pool request -* -* This routine concatenates unused memory in a memory pool. -* -* RETURNS: N/A -*/ +/** + * + * task_mem_pool_defragment - defragment memory pool request + * + * This routine concatenates unused memory in a memory pool. + * + * RETURNS: N/A + */ void task_mem_pool_defragment(kmemory_pool_t Pid /* pool to defragment */ ) @@ -242,14 +242,14 @@ void task_mem_pool_defragment(kmemory_pool_t Pid /* pool to defragment */ KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* search_block_on_frag_level - allocate block using specified fragmentation level -* -* This routine attempts to allocate a free block. [NEED TO EXPAND THIS] -* -* RETURNS: pointer to allocated block, or NULL if none available -*/ +/** + * + * search_block_on_frag_level - allocate block using specified fragmentation level + * + * This routine attempts to allocate a free block. 
[NEED TO EXPAND THIS] + * + * RETURNS: pointer to allocated block, or NULL if none available + */ static char *search_block_on_frag_level(struct pool_block *pfraglevelinfo, int *piblockindex) @@ -324,17 +324,17 @@ static char *search_block_on_frag_level(struct pool_block *pfraglevelinfo, return found; } -/******************************************************************************* -* -* get_block_recusive - recursively get a block, doing fragmentation if necessary -* -* [NEED A BETTER DESCRIPTION HERE] -* -* not implemented: check if we go below the minimal number of blocks with -* the maximum size -* -* RETURNS: pointer to allocated block, or NULL if none available -*/ +/** + * + * get_block_recusive - recursively get a block, doing fragmentation if necessary + * + * [NEED A BETTER DESCRIPTION HERE] + * + * not implemented: check if we go below the minimal number of blocks with + * the maximum size + * + * RETURNS: pointer to allocated block, or NULL if none available + */ static char *get_block_recusive(struct pool_struct *P, int index, int startindex) { @@ -408,17 +408,17 @@ static char *get_block_recusive(struct pool_struct *P, int index, int startindex return NULL; /* now we have to report failure: no block available */ } -/******************************************************************************* -* -* _k_block_waiters_get - examine tasks that are waiting for memory pool blocks -* -* This routine attempts to satisfy any incomplete block allocation requests for -* the specified memory pool. It can be invoked either by the explicit freeing -* of a used block or as a result of defragmenting the pool (which may create -* one or more new, larger blocks). -* -* RETURNS: N/A -*/ +/** + * + * _k_block_waiters_get - examine tasks that are waiting for memory pool blocks + * + * This routine attempts to satisfy any incomplete block allocation requests for + * the specified memory pool. It can be invoked either by the explicit freeing + * of a used block or as a result of defragmenting the pool (which may create + * one or more new, larger blocks). + * + * RETURNS: N/A + */ void _k_block_waiters_get(struct k_args *A) { @@ -478,12 +478,12 @@ void _k_block_waiters_get(struct k_args *A) FREEARGS(A); } -/******************************************************************************* -* -* _k_mem_pool_block_get_timeout_handle - finish handling an allocate block request that timed out -* -* RETURNS: N/A -*/ +/** + * + * _k_mem_pool_block_get_timeout_handle - finish handling an allocate block request that timed out + * + * RETURNS: N/A + */ void _k_mem_pool_block_get_timeout_handle(struct k_args *A) { @@ -493,12 +493,12 @@ void _k_mem_pool_block_get_timeout_handle(struct k_args *A) _k_state_bit_reset(A->Ctxt.proc, TF_GTBL); } -/******************************************************************************* -* -* _k_mem_pool_block_get - perform allocate memory pool block request -* -* RETURNS: N/A -*/ +/** + * + * _k_mem_pool_block_get - perform allocate memory pool block request + * + * RETURNS: N/A + */ void _k_mem_pool_block_get(struct k_args *A) { @@ -555,15 +555,15 @@ void _k_mem_pool_block_get(struct k_args *A) } } -/******************************************************************************* -* -* _task_mem_pool_alloc - allocate memory pool block request -* -* This routine allocates a free block from the specified memory pool, ensuring -* that its size is at least as big as the size requested (in bytes). 
-* -* RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively -*/ +/** + * + * _task_mem_pool_alloc - allocate memory pool block request + * + * This routine allocates a free block from the specified memory pool, ensuring + * that its size is at least as big as the size requested (in bytes). + * + * RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively + */ int _task_mem_pool_alloc(struct k_block *blockptr, /* ptr to requested block */ kmemory_pool_t poolid, /* pool from which to get block */ @@ -589,15 +589,15 @@ int _task_mem_pool_alloc(struct k_block *blockptr, /* ptr to requested block */ return A.Time.rcode; } -/******************************************************************************* -* -* _k_mem_pool_block_release - perform return memory pool block request -* -* Marks a block belonging to a pool as free; if there are waiters that can use -* the the block it is passed to a waiting task. -* -* RETURNS: N/A -*/ +/** + * + * _k_mem_pool_block_release - perform return memory pool block request + * + * Marks a block belonging to a pool as free; if there are waiters that can use + * the the block it is passed to a waiting task. + * + * RETURNS: N/A + */ void _k_mem_pool_block_release(struct k_args *A) { @@ -659,17 +659,17 @@ void _k_mem_pool_block_release(struct k_args *A) } } -/******************************************************************************* -* -* task_mem_pool_free - return memory pool block request -* -* This routine returns a block to a memory pool. -* -* The struct k_block structure contains the block details, including the pool to -* which it should be returned. -* -* RETURNS: N/A -*/ +/** + * + * task_mem_pool_free - return memory pool block request + * + * This routine returns a block to a memory pool. + * + * The struct k_block structure contains the block details, including the pool to + * which it should be returned. 
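A sketch of the memory pool block API documented above. MY_POOL, the (size, timeout) tail of _task_mem_pool_alloc()'s parameter list, and the pointer_to_data field of struct k_block are assumptions made for illustration.

        #include <microkernel.h>

        void demo_mem_pool(void)
        {
                struct k_block block;

                /* request a block of at least 64 bytes, waiting if necessary */
                if (_task_mem_pool_alloc(&block, MY_POOL, 64, TICKS_UNLIMITED) != RC_OK) {
                        return;
                }

                /* block.pointer_to_data (assumed field) is the usable buffer,
                 * which may be larger than the 64 bytes requested */

                task_mem_pool_free(&block);           /* k_block records the owning pool */

                task_mem_pool_defragment(MY_POOL);    /* optionally coalesce free space */
        }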
+ * + * RETURNS: N/A + */ void task_mem_pool_free(struct k_block *blockptr /* pointer to block to free */ ) diff --git a/kernel/microkernel/k_move_data.c b/kernel/microkernel/k_move_data.c index 2f24cd9ca30..712f3b66695 100644 --- a/kernel/microkernel/k_move_data.c +++ b/kernel/microkernel/k_move_data.c @@ -36,12 +36,12 @@ #include #include -/******************************************************************************* -* -* mvdreq_docont - -* -* RETURNS: N/A -*/ +/** + * + * mvdreq_docont - + * + * RETURNS: N/A + */ static void mvdreq_docont(struct k_args *Cont) { @@ -54,12 +54,12 @@ static void mvdreq_docont(struct k_args *Cont) } } -/******************************************************************************* -* -* mvdreq_copy - perform movedata request -* -* RETURNS: N/A -*/ +/** + * + * mvdreq_copy - perform movedata request + * + * RETURNS: N/A + */ static void mvdreq_copy(struct moved_req *ReqArgs) { @@ -72,12 +72,12 @@ static void mvdreq_copy(struct moved_req *ReqArgs) mvdreq_docont(ReqArgs->Extra.Setup.ContRcv); } -/******************************************************************************* -* -* _k_movedata_request - process a movedata request -* -* RETURNS: N/A -*/ +/** + * + * _k_movedata_request - process a movedata request + * + * RETURNS: N/A + */ void _k_movedata_request(struct k_args *Req) { diff --git a/kernel/microkernel/k_mutex.c b/kernel/microkernel/k_mutex.c index ffecd9cffa7..5a939345f4e 100644 --- a/kernel/microkernel/k_mutex.c +++ b/kernel/microkernel/k_mutex.c @@ -53,23 +53,23 @@ to follow this nested model may result in tasks running at unexpected priority levels (too high, or too low). NOMANUAL -*/ + */ #include #include #include -/******************************************************************************* -* -* _k_mutex_lock_reply - reply to a mutex lock request (LOCK_TMO, LOCK_RPL) -* -* This routine replies to a mutex lock request. This will occur if either -* the waiting task times out or acquires the mutex lock. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _k_mutex_lock_reply - reply to a mutex lock request (LOCK_TMO, LOCK_RPL) + * + * This routine replies to a mutex lock request. This will occur if either + * the waiting task times out or acquires the mutex lock. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _k_mutex_lock_reply( struct k_args *A /* pointer to mutex lock reply request arguments */ @@ -144,18 +144,18 @@ void _k_mutex_lock_reply( _k_state_bit_reset(A->Ctxt.proc, TF_LOCK); } -/******************************************************************************* -* -* _k_mutex_lock_request - process a mutex lock request -* -* This routine processes a mutex lock request (LOCK_REQ). If the mutex -* is already locked, and the timeout is non-zero then the priority inheritance -* algorithm may be applied to prevent priority inversion scenarios. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _k_mutex_lock_request - process a mutex lock request + * + * This routine processes a mutex lock request (LOCK_REQ). If the mutex + * is already locked, and the timeout is non-zero then the priority inheritance + * algorithm may be applied to prevent priority inversion scenarios. 
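A sketch of the task-level mutex entry points documented in this file (see the _task_mutex_lock() and _task_mutex_unlock() wrappers below); MY_MUTEX is assumed to come from the project configuration, while the signatures and return codes are the ones shown in this patch.

        #include <microkernel.h>

        void demo_mutex(void)
        {
                /* bounded wait: give up after 10 ticks */
                if (_task_mutex_lock(MY_MUTEX, 10) != RC_OK) {
                        return;   /* RC_TIME on timeout, RC_FAIL on error */
                }

                /* ... critical section; per the nesting note above, any
                 * nested locks taken here must be released in reverse order ... */

                _task_mutex_unlock(MY_MUTEX);
        }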
+ * + * RETURNS: N/A + * + * \NOMANUAL + */ void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock request arguments */ @@ -267,14 +267,14 @@ void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock } } -/******************************************************************************* -* -* _task_mutex_lock - mutex lock kernel service -* -* This routine is the entry to the mutex lock kernel service. -* -* RETURNS: RC_OK on success, RC_FAIL on error, RC_TIME on timeout -*/ +/** + * + * _task_mutex_lock - mutex lock kernel service + * + * This routine is the entry to the mutex lock kernel service. + * + * RETURNS: RC_OK on success, RC_FAIL on error, RC_TIME on timeout + */ int _task_mutex_lock( kmutex_t mutex, /* mutex to lock */ @@ -291,19 +291,19 @@ int _task_mutex_lock( return A.Time.rcode; } -/******************************************************************************* -* -* _k_mutex_unlock - process a mutex unlock request -* -* This routine processes a mutex unlock request (UNLOCK). If the mutex -* was involved in priority inheritance, then it will change the priority level -* of the current owner to the priority level it had when it acquired the -* mutex. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _k_mutex_unlock - process a mutex unlock request + * + * This routine processes a mutex unlock request (UNLOCK). If the mutex + * was involved in priority inheritance, then it will change the priority level + * of the current owner to the priority level it had when it acquired the + * mutex. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock request arguments */ @@ -385,14 +385,14 @@ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock } } -/******************************************************************************* -* -* _task_mutex_unlock - mutex unlock kernel service -* -* This routine is the entry to the mutex unlock kernel service. -* -* RETURNS: N/A -*/ +/** + * + * _task_mutex_unlock - mutex unlock kernel service + * + * This routine is the entry to the mutex unlock kernel service. + * + * RETURNS: N/A + */ void _task_mutex_unlock(kmutex_t mutex /* mutex to unlock */ ) diff --git a/kernel/microkernel/k_nop.c b/kernel/microkernel/k_nop.c index 3ccda5954f4..ec75e747f49 100644 --- a/kernel/microkernel/k_nop.c +++ b/kernel/microkernel/k_nop.c @@ -38,32 +38,32 @@ This service is primarily used by other kernel services that need a way to resume the execution of a kernel request that could not be completed in a single invocation of the K_swapper fiber. However, it can also be used by a task to measure the overhead involved in issuing a kernel service request. -*/ + */ #include #include #include -/******************************************************************************* -* -* _k_nop - perform "do nothing" kernel request -* -* RETURNS: N/A -*/ +/** + * + * _k_nop - perform "do nothing" kernel request + * + * RETURNS: N/A + */ void _k_nop(struct k_args *A) { ARG_UNUSED(A); } -/******************************************************************************* -* -* _task_nop - "do nothing" kernel request -* -* This routine is a request for the K_swapper to run a "do nothing" routine. -* -* RETURNS: N/A -*/ +/** + * + * _task_nop - "do nothing" kernel request + * + * This routine is a request for the K_swapper to run a "do nothing" routine. 
+ * + * RETURNS: N/A + */ void _task_nop(void) { diff --git a/kernel/microkernel/k_offload.c b/kernel/microkernel/k_offload.c index aa73d53d365..07ec979217c 100644 --- a/kernel/microkernel/k_offload.c +++ b/kernel/microkernel/k_offload.c @@ -33,33 +33,33 @@ #include #include -/******************************************************************************* -* -* _k_offload_to_fiber - process an "offload to fiber" request -* -* This routine simply invokes the requested function from within the context -* of the K_swapper() fiber and saves the result. -* -* RETURNS: N/A -*/ +/** + * + * _k_offload_to_fiber - process an "offload to fiber" request + * + * This routine simply invokes the requested function from within the context + * of the K_swapper() fiber and saves the result. + * + * RETURNS: N/A + */ void _k_offload_to_fiber(struct k_args *A) { A->Args.u1.rval = (*A->Args.u1.func)(A->Args.u1.argp); } -/******************************************************************************* -* -* task_offload_to_fiber - issue a custom call from within K_swapper() -* -* @func: function to call from within K_swapper() -* @argp: argument to pass to custom function -* -* This routine issues a request to execute a function from within the context -* of the K_swapper() fiber. -* -* RETURNS: return value from custom call -*/ +/** + * + * task_offload_to_fiber - issue a custom call from within K_swapper() + * + * @func: function to call from within K_swapper() + * @argp: argument to pass to custom function + * + * This routine issues a request to execute a function from within the context + * of the K_swapper() fiber. + * + * RETURNS: return value from custom call + */ int task_offload_to_fiber(int (*func)(), void *argp) { diff --git a/kernel/microkernel/k_pipe.c b/kernel/microkernel/k_pipe.c index 57eb69e2a7e..dde0db22707 100644 --- a/kernel/microkernel/k_pipe.c +++ b/kernel/microkernel/k_pipe.c @@ -35,15 +35,15 @@ #include #include -/******************************************************************************* -* -* _k_pipe_init - initialize kernel pipe subsystem -* -* Performs any initialization of statically-defined pipes that wasn't done -* at build time. (Note: most pipe structure fields are set to zero by sysgen.) -* -* RETURNS: N/A -*/ +/** + * + * _k_pipe_init - initialize kernel pipe subsystem + * + * Performs any initialization of statically-defined pipes that wasn't done + * at build time. (Note: most pipe structure fields are set to zero by sysgen.) + * + * RETURNS: N/A + */ void _k_pipe_init(void) { @@ -57,15 +57,15 @@ void _k_pipe_init(void) } } -/******************************************************************************* -* -* _task_pipe_get - pipe read request -* -* This routine attempts to read data into a memory buffer area from the -* specified pipe. -* -* RETURNS: RC_OK, RC_INCOMPLETE, RC_FAIL, RC_TIME, or RC_ALIGNMENT -*/ +/** + * + * _task_pipe_get - pipe read request + * + * This routine attempts to read data into a memory buffer area from the + * specified pipe. + * + * RETURNS: RC_OK, RC_INCOMPLETE, RC_FAIL, RC_TIME, or RC_ALIGNMENT + */ int _task_pipe_get(kpipe_t Id, void *pBuffer, int iNbrBytesToRead, int *piNbrBytesRead, @@ -112,15 +112,15 @@ int _task_pipe_get(kpipe_t Id, void *pBuffer, return A.Time.rcode; } -/******************************************************************************* -* -* _task_pipe_put - pipe write request -* -* This routine attempts to write data from a memory buffer area to the -* specified pipe. 
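A sketch of task_offload_to_fiber(), whose signature appears above; the helper function and the table it inspects are hypothetical.

        #include <microkernel.h>

        /* runs inside the K_swapper fiber, so it must not block */
        static int count_nonzero(void *argp)
        {
                int *table = argp;
                int i, n = 0;

                for (i = 0; i < 4; i++) {
                        if (table[i] != 0) {
                                n++;
                        }
                }
                return n;
        }

        void demo_offload(void)
        {
                static int table[4] = { 1, 0, 2, 0 };

                /* executes count_nonzero() from the K_swapper fiber, serialized
                 * with other kernel requests, and returns its result */
                int result = task_offload_to_fiber((int (*)())count_nonzero, table);

                (void)result;
        }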
-* -* RETURNS: RC_OK, RC_INCOMPLETE, RC_FAIL, RC_TIME, or RC_ALIGNMENT -*/ +/** + * + * _task_pipe_put - pipe write request + * + * This routine attempts to write data from a memory buffer area to the + * specified pipe. + * + * RETURNS: RC_OK, RC_INCOMPLETE, RC_FAIL, RC_TIME, or RC_ALIGNMENT + */ int _task_pipe_put(kpipe_t Id, void *pBuffer, int iNbrBytesToWrite, int *piNbrBytesWritten, @@ -167,16 +167,16 @@ int _task_pipe_put(kpipe_t Id, void *pBuffer, return A.Time.rcode; } -/******************************************************************************* -* -* _task_pipe_put_async - asynchronous pipe write request -* -* This routine attempts to write data from a memory pool block to the -* specified pipe. (Note that partial transfers and timeouts are not -* supported, unlike the case for synchronous write requests.) -* -* RETURNS: RC_OK, RC_FAIL, or RC_ALIGNMENT -*/ +/** + * + * _task_pipe_put_async - asynchronous pipe write request + * + * This routine attempts to write data from a memory pool block to the + * specified pipe. (Note that partial transfers and timeouts are not + * supported, unlike the case for synchronous write requests.) + * + * RETURNS: RC_OK, RC_FAIL, or RC_ALIGNMENT + */ int _task_pipe_put_async(kpipe_t Id, struct k_block Block, int iReqSize2Xfer, ksem_t Sema) diff --git a/kernel/microkernel/k_pipe_buffer.c b/kernel/microkernel/k_pipe_buffer.c index 8a9b8865cfe..94f54f5b259 100644 --- a/kernel/microkernel/k_pipe_buffer.c +++ b/kernel/microkernel/k_pipe_buffer.c @@ -34,7 +34,7 @@ /* Implementation remarks: - when using a floating end pointer: do not use pChBuff->iBuffsize for (Buff->pEnd - pChBuff->pBegin) -*/ + */ #include #include @@ -67,9 +67,9 @@ static void pipe_intrusion_check(struct chbuff *pChBuff, unsigned char *pBegin, int iSize); -/*******************/ -/* Markers -********************/ +/** + * Markers + */ static int MarkerFindFree(struct marker aMarkers[]) { @@ -217,7 +217,7 @@ static void MarkersClear(struct marker_list *pMarkerList) pMarkerList->iAWAMarker = -1; } -/********************************************************************************/ +/**/ /* note on setting/clearing markers/guards: @@ -232,14 +232,14 @@ static void MarkersClear(struct marker_list *pMarkerList) (*) we need to housekeep how much markers there are or we can inspect the guard (**) for this, the complete markers table needs to be investigated -*/ + */ -/***************************************/ +/**/ /* This function will see if one or more 'areas' in the buffer can be made available (either for writing xor reading). Note: such a series of areas starts from the beginning. 
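A sketch of the synchronous pipe entry points documented above. The option/timeout tail of the parameter lists, the _ALL_N option value, and MY_PIPE are assumptions; only the leading parameters of _task_pipe_put() and _task_pipe_get() are visible in this patch.

        #include <microkernel.h>

        void demo_pipe(void)
        {
                char out[32] = "pipe payload";
                char in[32];
                int written = 0;
                int bytes_read = 0;

                /* _ALL_N: only succeed if every byte is transferred */
                if (_task_pipe_put(MY_PIPE, out, sizeof(out), &written,
                                   _ALL_N, TICKS_UNLIMITED) != RC_OK) {
                        return;
                }

                if (_task_pipe_get(MY_PIPE, in, sizeof(in), &bytes_read,
                                   _ALL_N, TICKS_UNLIMITED) == RC_OK) {
                        /* bytes_read bytes were copied out of the pipe into 'in' */
                }
        }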
-*/ + */ static int ScanMarkers(struct marker_list *pMarkerList, int *piSizeBWA, int *piSizeAWA, int *piNbrPendingXfers) { @@ -301,9 +301,9 @@ static int ScanMarkers(struct marker_list *pMarkerList, return pMarkerList->iFirstMarker; } -/*******************/ -/* General -********************/ +/** + * General + */ void BuffInit(unsigned char *pBuffer, int *piBuffSize, struct chbuff *pChBuff) { @@ -479,9 +479,9 @@ int BuffFull(struct chbuff *pChBuff) return (pChBuff->iBuffSize == iAvailDataTotal); } -/*******************/ -/* Buffer en-queuing: -********************/ +/** + * Buffer en-queuing: + */ static int AsyncEnQRegstr(struct chbuff *pChBuff, int iSize) { @@ -589,9 +589,9 @@ void BuffEnQA_End(struct chbuff *pChBuff, int iTransferID, AsyncEnQFinished(pChBuff, iTransferID); } -/**********************/ -/* Buffer de-queuing: */ -/**********************/ +/** + * Buffer de-queuing: + */ static int AsyncDeQRegstr(struct chbuff *pChBuff, int iSize) { @@ -699,9 +699,9 @@ void BuffDeQA_End(struct chbuff *pChBuff, int iTransferID, AsyncDeQFinished(pChBuff, iTransferID); } -/**********************/ -/* Buffer instrusion */ -/**********************/ +/** + * Buffer instrusion + */ static bool AreasCheck4Intrusion(unsigned char *pBegin1, int iSize1, unsigned char *pBegin2, int iSize2) diff --git a/kernel/microkernel/k_pipe_get.c b/kernel/microkernel/k_pipe_get.c index 209d491a447..7040bdfebc3 100644 --- a/kernel/microkernel/k_pipe_get.c +++ b/kernel/microkernel/k_pipe_get.c @@ -36,12 +36,12 @@ #include #include -/******************************************************************************* -* -* _k_pipe_get_request - process request command for a pipe get operation -* -* RETURNS: N/A -*/ +/** + * + * _k_pipe_get_request - process request command for a pipe get operation + * + * RETURNS: N/A + */ void _k_pipe_get_request(struct k_args *RequestOrig) { @@ -188,12 +188,12 @@ void _k_pipe_get_request(struct k_args *RequestOrig) } } -/******************************************************************************* -* -* _k_pipe_get_timeout - process timeout command for a pipe get operation -* -* RETURNS: N/A -*/ +/** + * + * _k_pipe_get_timeout - process timeout command for a pipe get operation + * + * RETURNS: N/A + */ void _k_pipe_get_timeout(struct k_args *ReqProc) { @@ -208,12 +208,12 @@ void _k_pipe_get_timeout(struct k_args *ReqProc) } } -/******************************************************************************* -* -* _k_pipe_get_reply - process reply command for a pipe get operation -* -* RETURNS: N/A -*/ +/** + * + * _k_pipe_get_reply - process reply command for a pipe get operation + * + * RETURNS: N/A + */ void _k_pipe_get_reply(struct k_args *ReqProc) { @@ -259,12 +259,12 @@ void _k_pipe_get_reply(struct k_args *ReqProc) FREEARGS(ReqProc); } -/******************************************************************************* -* -* _k_pipe_get_ack - process acknowledgment command for a pipe get operation -* -* RETURNS: N/A -*/ +/** + * + * _k_pipe_get_ack - process acknowledgment command for a pipe get operation + * + * RETURNS: N/A + */ void _k_pipe_get_ack(struct k_args *Request) { diff --git a/kernel/microkernel/k_pipe_put.c b/kernel/microkernel/k_pipe_put.c index 245515e34ca..b33fb94e199 100644 --- a/kernel/microkernel/k_pipe_put.c +++ b/kernel/microkernel/k_pipe_put.c @@ -37,12 +37,12 @@ #include -/******************************************************************************* -* -* _k_pipe_put_request - process request command for a pipe put operation -* -* RETURNS: N/A -*/ +/** + * 
+ * _k_pipe_put_request - process request command for a pipe put operation + * + * RETURNS: N/A + */ void _k_pipe_put_request(struct k_args *RequestOrig) { @@ -207,12 +207,12 @@ void _k_pipe_put_request(struct k_args *RequestOrig) } } -/******************************************************************************* -* -* _k_pipe_put_timeout - perform timeout command for a pipe put operation -* -* RETURNS: N/A -*/ +/** + * + * _k_pipe_put_timeout - perform timeout command for a pipe put operation + * + * RETURNS: N/A + */ void _k_pipe_put_timeout(struct k_args *ReqProc) { @@ -227,12 +227,12 @@ void _k_pipe_put_timeout(struct k_args *ReqProc) } } -/******************************************************************************* -* -* _k_pipe_put_reply - process reply command for a pipe put operation -* -* RETURNS: N/A -*/ +/** + * + * _k_pipe_put_reply - process reply command for a pipe put operation + * + * RETURNS: N/A + */ void _k_pipe_put_reply(struct k_args *ReqProc) { @@ -280,12 +280,12 @@ void _k_pipe_put_reply(struct k_args *ReqProc) FREEARGS(ReqProc); } -/******************************************************************************* -* -* _k_pipe_put_ack - process acknowledgment command for a pipe put operation -* -* RETURNS: N/A -*/ +/** + * + * _k_pipe_put_ack - process acknowledgment command for a pipe put operation + * + * RETURNS: N/A + */ void _k_pipe_put_ack(struct k_args *Request) { diff --git a/kernel/microkernel/k_pipe_xfer.c b/kernel/microkernel/k_pipe_xfer.c index 38f4f63abec..bc6e699236e 100644 --- a/kernel/microkernel/k_pipe_xfer.c +++ b/kernel/microkernel/k_pipe_xfer.c @@ -43,23 +43,23 @@ #define _X_TO_N (_0_TO_N | _1_TO_N) /* -* - artefacts: ??? -* - non-optimal: + * - artefacts: ??? + * - non-optimal: * from single requester to multiple requesters : basic function is K_ProcWR() K_ProcWR() copies remaining data into buffer; better would be to possibly copy the remaining data to the next requester (if there is one) * ... -*/ + */ -/******************************************************************************* -* -* _k_pipe_movedata_ack - -* -* RETURNS: N/A -*/ +/** + * + * _k_pipe_movedata_ack - + * + * RETURNS: N/A + */ void _k_pipe_movedata_ack(struct k_args *pEOXfer) { @@ -205,16 +205,16 @@ void _k_pipe_movedata_ack(struct k_args *pEOXfer) } } -/******************************************************************************* -* -* move_priority_compute - determines priority for data move operation -* -* Uses priority level of most important participant. -* -* Note: It's OK to have one or two participants, but there can't be none! -* -* RETURNS: N/A -*/ +/** + * + * move_priority_compute - determines priority for data move operation + * + * Uses priority level of most important participant. + * + * Note: It's OK to have one or two participants, but there can't be none! 
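The "most important participant" rule stated above reduces to taking the numerically lower of the two priorities, since 0 is the highest priority in this kernel. A minimal sketch of that rule, assuming the k_args priority field is named Prio and that at least one participant is non-NULL (the quoted move_priority_compute() in the patch is the authoritative version):

static kpriority_t move_priority_compute_sketch(struct k_args *pWriter,
						struct k_args *pReader)
{
	kpriority_t priority;

	if (pWriter == NULL) {
		priority = pReader->Prio;	/* only a reader participates */
	} else if (pReader == NULL) {
		priority = pWriter->Prio;	/* only a writer participates */
	} else {
		/* both present: keep the more important (lower) value */
		priority = (pWriter->Prio < pReader->Prio) ?
			   pWriter->Prio : pReader->Prio;
	}

	return priority;
}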
+ * + * RETURNS: N/A + */ static kpriority_t move_priority_compute(struct k_args *pWriter, struct k_args *pReader) @@ -233,12 +233,12 @@ static kpriority_t move_priority_compute(struct k_args *pWriter, return move_priority; } -/******************************************************************************* -* -* setup_movedata - -* -* RETURNS: N/A -*/ +/** + * + * setup_movedata - + * + * RETURNS: N/A + */ static void setup_movedata(struct k_args *A, struct pipe_struct *pPipe, XFER_TYPE XferType, @@ -408,16 +408,16 @@ static int WriterInProgressIsBlocked(struct pipe_struct *pPipe, } } -/******************************************************************************* -* -* pipe_read - read from the channel -* -* This routine reads from the channel. If is NULL, then it uses -* as the reader. Otherwise it takes the reader from the channel -* structure. -* -* RETURNS: N/A -*/ +/** + * + * pipe_read - read from the channel + * + * This routine reads from the channel. If is NULL, then it uses + * as the reader. Otherwise it takes the reader from the channel + * structure. + * + * RETURNS: N/A + */ static void pipe_read(struct pipe_struct *pPipe, struct k_args *pNewReader) { @@ -477,16 +477,16 @@ static void pipe_read(struct pipe_struct *pPipe, struct k_args *pNewReader) } while (--numIterations != 0); } -/******************************************************************************* -* -* pipe_write - write to the channel -* -* This routine writes to the channel. If is NULL, then it uses -* as the writer. Otherwise it takes the writer from the channel -* structure. -* -* RETURNS: N/A -*/ +/** + * + * pipe_write - write to the channel + * + * This routine writes to the channel. If is NULL, then it uses + * as the writer. Otherwise it takes the writer from the channel + * structure. 
+ * + * RETURNS: N/A + */ static void pipe_write(struct pipe_struct *pPipe, struct k_args *pNewWriter) { @@ -548,12 +548,12 @@ static void pipe_write(struct pipe_struct *pPipe, struct k_args *pNewWriter) } while (--numIterations != 0); } -/******************************************************************************* -* -* pipe_xfer_status_update - update the channel transfer status -* -* RETURNS: N/A -*/ +/** + * + * pipe_xfer_status_update - update the channel transfer status + * + * RETURNS: N/A + */ static void pipe_xfer_status_update( struct k_args *pActor, /* ptr to struct k_args to be used by actor */ @@ -575,12 +575,12 @@ static void pipe_xfer_status_update( } } -/******************************************************************************* -* -* pipe_read_write - read and/or write from/to the channel -* -* RETURNS: N/A -*/ +/** + * + * pipe_read_write - read and/or write from/to the channel + * + * RETURNS: N/A + */ static void pipe_read_write( struct pipe_struct *pPipe, /* ptr to channel structure */ diff --git a/kernel/microkernel/k_semaphore.c b/kernel/microkernel/k_semaphore.c index 6cf3c9d0f94..1c5463954c9 100644 --- a/kernel/microkernel/k_semaphore.c +++ b/kernel/microkernel/k_semaphore.c @@ -36,12 +36,12 @@ #include -/******************************************************************************* -* -* signal_semaphore - common code for signaling a semaphore -* -* RETURNS: N/A -*/ +/** + * + * signal_semaphore - common code for signaling a semaphore + * + * RETURNS: N/A + */ static void signal_semaphore(int n, struct sem_struct *S) { @@ -94,12 +94,12 @@ static void signal_semaphore(int n, struct sem_struct *S) } } -/******************************************************************************* -* -* _k_sem_group_wait - finish handling incomplete waits on semaphores -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_group_wait - finish handling incomplete waits on semaphores + * + * RETURNS: N/A + */ void _k_sem_group_wait(struct k_args *R) { @@ -111,17 +111,17 @@ void _k_sem_group_wait(struct k_args *R) } } -/******************************************************************************* -* -* _k_sem_group_wait_cancel - handle cancellation of a semaphore involved in a -* semaphore group wait request -* -* This routine only applies to semaphore group wait requests. It is invoked -* for each semaphore in the semaphore group that "lost" the semaphore group -* wait request. -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_group_wait_cancel - handle cancellation of a semaphore involved in a + * semaphore group wait request + * + * This routine only applies to semaphore group wait requests. It is invoked + * for each semaphore in the semaphore group that "lost" the semaphore group + * wait request. + * + * RETURNS: N/A + */ void _k_sem_group_wait_cancel(struct k_args *A) { @@ -177,16 +177,16 @@ void _k_sem_group_wait_cancel(struct k_args *A) } } -/******************************************************************************* -* -* _k_sem_group_wait_accept - handle acceptance of the ready semaphore request -* -* This routine only applies to semaphore group wait requests. It handles -* the request for the one semaphore in the group that "wins" the semaphore -* group wait request. -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_group_wait_accept - handle acceptance of the ready semaphore request + * + * This routine only applies to semaphore group wait requests. It handles + * the request for the one semaphore in the group that "wins" the semaphore + * group wait request. 
+ * + * RETURNS: N/A + */ void _k_sem_group_wait_accept(struct k_args *A) { @@ -216,12 +216,12 @@ void _k_sem_group_wait_accept(struct k_args *A) /* ERROR */ } -/******************************************************************************* -* -* _k_sem_group_wait_timeout - handle semaphore group timeout request -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_group_wait_timeout - handle semaphore group timeout request + * + * RETURNS: N/A + */ void _k_sem_group_wait_timeout(struct k_args *A) { @@ -247,16 +247,16 @@ void _k_sem_group_wait_timeout(struct k_args *A) } } -/******************************************************************************* -* -* _k_sem_group_ready - handle semaphore ready request -* -* This routine only applies to semaphore group wait requests. It identifies -* the one semaphore in the group that "won" the semaphore group wait request -* before triggering the semaphore group timeout handler. -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_group_ready - handle semaphore ready request + * + * This routine only applies to semaphore group wait requests. It identifies + * the one semaphore in the group that "won" the semaphore group wait request + * before triggering the semaphore group timeout handler. + * + * RETURNS: N/A + */ void _k_sem_group_ready(struct k_args *R) { @@ -275,12 +275,12 @@ void _k_sem_group_ready(struct k_args *R) FREEARGS(R); } -/******************************************************************************* -* -* _k_sem_wait_reply - reply to a semaphore wait request -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_wait_reply - reply to a semaphore wait request + * + * RETURNS: N/A + */ void _k_sem_wait_reply(struct k_args *A) { @@ -297,13 +297,13 @@ void _k_sem_wait_reply(struct k_args *A) _k_state_bit_reset(A->Ctxt.proc, TF_SEMA); } -/******************************************************************************* -* -* _k_sem_group_wait_request - handle internal wait request on a semaphore involved in a -* semaphore group wait request -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_group_wait_request - handle internal wait request on a semaphore involved in a + * semaphore group wait request + * + * RETURNS: N/A + */ void _k_sem_group_wait_request(struct k_args *A) { @@ -339,15 +339,15 @@ void _k_sem_group_wait_request(struct k_args *A) signal_semaphore(0, S); } -/******************************************************************************* -* -* _k_sem_group_wait_any - handle semaphore group wait request -* -* This routine splits the single semaphore group wait request into several -* internal wait requests--one for each semaphore in the group. -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_group_wait_any - handle semaphore group wait request + * + * This routine splits the single semaphore group wait request into several + * internal wait requests--one for each semaphore in the group. 
+ * + * RETURNS: N/A + */ void _k_sem_group_wait_any(struct k_args *A) { @@ -388,12 +388,12 @@ void _k_sem_group_wait_any(struct k_args *A) #endif } -/******************************************************************************* -* -* _k_sem_wait_request - handle semaphore test and wait request -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_wait_request - handle semaphore test and wait request + * + * RETURNS: N/A + */ void _k_sem_wait_request(struct k_args *A) { @@ -425,18 +425,18 @@ void _k_sem_wait_request(struct k_args *A) } } -/******************************************************************************* -* -* _task_sem_take - test a semaphore -* -* This routine tests a semaphore to see if it has been signaled. If the signal -* count is greater than zero, it is decremented. -* -* @param sema Semaphore to test. -* @param time Maximum number of ticks to wait. -* -* RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively -*/ +/** + * + * _task_sem_take - test a semaphore + * + * This routine tests a semaphore to see if it has been signaled. If the signal + * count is greater than zero, it is decremented. + * + * @param sema Semaphore to test. + * @param time Maximum number of ticks to wait. + * + * RETURNS: RC_OK, RC_FAIL, RC_TIME on success, failure, timeout respectively + */ int _task_sem_take(ksem_t sema, int32_t time) { @@ -449,21 +449,21 @@ int _task_sem_take(ksem_t sema, int32_t time) return A.Time.rcode; } -/******************************************************************************* -* -* _task_sem_group_take - test multiple semaphores -* -* This routine tests a group of semaphores. A semaphore group is an array of -* semaphore names terminated by the predefined constant ENDLIST. -* -* It returns the ID of the first semaphore in the group whose signal count is -* greater than zero, and decrements the signal count. -* -* @param group Group of semaphores to test. -* @param time Maximum number of ticks to wait. -* -* RETURNS: N/A -*/ +/** + * + * _task_sem_group_take - test multiple semaphores + * + * This routine tests a group of semaphores. A semaphore group is an array of + * semaphore names terminated by the predefined constant ENDLIST. + * + * It returns the ID of the first semaphore in the group whose signal count is + * greater than zero, and decrements the signal count. + * + * @param group Group of semaphores to test. + * @param time Maximum number of ticks to wait. 
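A usage sketch for the two take operations documented above; it is not taken from the patched sources. The <microkernel.h> umbrella header is an assumption, the underscore-prefixed entry points are called directly because the public wrapper names are not shown in this patch, and the semaphore objects are passed in as parameters because they would ordinarily come from the project definition file:

#include <microkernel.h>	/* assumed umbrella header for the microkernel APIs */

/* 'sema' and 'group' stand in for project-defined objects; 'group' must be
 * an ENDLIST-terminated array of semaphore names.
 */
void take_examples(ksem_t sema, ksemg_t group)
{
	ksem_t winner;

	/* wait at most 10 ticks for 'sema' to be signaled */
	if (_task_sem_take(sema, 10) == RC_OK) {
		/* the signal count was > 0 and has been decremented */
	} else {
		/* RC_TIME on timeout, RC_FAIL otherwise */
	}

	/* wait on the whole group; the return value identifies the member
	 * whose signal count was decremented
	 */
	winner = _task_sem_group_take(group, 10);
	(void)winner;
}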
+ * + * RETURNS: N/A + */ ksem_t _task_sem_group_take(ksemg_t group, int32_t time) { @@ -477,12 +477,12 @@ ksem_t _task_sem_group_take(ksemg_t group, int32_t time) return A.Args.s1.sema; } -/******************************************************************************* -* -* _k_sem_signal - handle semaphore signal request -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_signal - handle semaphore signal request + * + * RETURNS: N/A + */ void _k_sem_signal(struct k_args *A) { @@ -491,12 +491,12 @@ void _k_sem_signal(struct k_args *A) signal_semaphore(1, _k_sem_list + OBJ_INDEX(Sid)); } -/******************************************************************************* -* -* _k_sem_group_signal - handle signal semaphore group request -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_group_signal - handle signal semaphore group request + * + * RETURNS: N/A + */ void _k_sem_group_signal(struct k_args *A) { @@ -507,16 +507,16 @@ void _k_sem_group_signal(struct k_args *A) } } -/******************************************************************************* -* -* task_sem_give - signal a semaphore -* -* This routine signals the specified semaphore. -* -* @param sema Semaphore to signal. -* -* RETURNS: N/A -*/ +/** + * + * task_sem_give - signal a semaphore + * + * This routine signals the specified semaphore. + * + * @param sema Semaphore to signal. + * + * RETURNS: N/A + */ void task_sem_give(ksem_t sema) { @@ -527,23 +527,23 @@ void task_sem_give(ksem_t sema) KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* task_sem_group_give - signal a group of semaphores -* -* This routine signals a group of semaphores. A semaphore group is an array of -* semaphore names terminated by the predefined constant ENDLIST. -* -* If the semaphore list of waiting tasks is empty, the signal count is -* incremented, otherwise the highest priority waiting task is released. -* -* Using task_sem_group_give() is faster than using multiple single signals, -* and ensures all signals take place before other tasks run. -* -* @param group Group of semaphores to signal. -* -* RETURNS: N/A -*/ +/** + * + * task_sem_group_give - signal a group of semaphores + * + * This routine signals a group of semaphores. A semaphore group is an array of + * semaphore names terminated by the predefined constant ENDLIST. + * + * If the semaphore list of waiting tasks is empty, the signal count is + * incremented, otherwise the highest priority waiting task is released. + * + * Using task_sem_group_give() is faster than using multiple single signals, + * and ensures all signals take place before other tasks run. + * + * @param group Group of semaphores to signal. + * + * RETURNS: N/A + */ void task_sem_group_give(ksemg_t group) { @@ -554,34 +554,34 @@ void task_sem_group_give(ksemg_t group) KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* fiber_sem_give - signal a semaphore from a fiber -* -* This routine (to only be called from a fiber) signals a semaphore. It -* requires a statically allocated command packet (from a command packet set) -* that is implicitly released once the command packet has been processed. -* To signal a semaphore from a task, task_sem_give() should be used instead. -* -* RETURNS: N/A -*/ +/** + * + * fiber_sem_give - signal a semaphore from a fiber + * + * This routine (to only be called from a fiber) signals a semaphore. 
It + * requires a statically allocated command packet (from a command packet set) + * that is implicitly released once the command packet has been processed. + * To signal a semaphore from a task, task_sem_give() should be used instead. + * + * RETURNS: N/A + */ FUNC_ALIAS(isr_sem_give, fiber_sem_give, void); -/******************************************************************************* -* -* isr_sem_give - signal a semaphore from an ISR -* -* This routine (to only be called from an ISR) signals a semaphore. It -* requires a statically allocated command packet (from a command packet set) -* that is implicitly released once the command packet has been processed. -* To signal a semaphore from a task, task_sem_give() should be used instead. -* -* @param sema Semaphore to signal. -* @param pSet Pointer to command packet set. -* -* RETURNS: N/A -*/ +/** + * + * isr_sem_give - signal a semaphore from an ISR + * + * This routine (to only be called from an ISR) signals a semaphore. It + * requires a statically allocated command packet (from a command packet set) + * that is implicitly released once the command packet has been processed. + * To signal a semaphore from a task, task_sem_give() should be used instead. + * + * @param sema Semaphore to signal. + * @param pSet Pointer to command packet set. + * + * RETURNS: N/A + */ void isr_sem_give(ksem_t sema, struct cmd_pkt_set *pSet) { @@ -600,12 +600,12 @@ void isr_sem_give(ksem_t sema, struct cmd_pkt_set *pSet) nano_isr_stack_push(&_k_command_stack, (uint32_t)pCommand); } -/******************************************************************************* -* -* _k_sem_reset - handle semaphore reset request -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_reset - handle semaphore reset request + * + * RETURNS: N/A + */ void _k_sem_reset(struct k_args *A) { @@ -614,12 +614,12 @@ void _k_sem_reset(struct k_args *A) _k_sem_list[OBJ_INDEX(Sid)].Level = 0; } -/******************************************************************************* -* -* _k_sem_group_reset - handle semaphore group reset request -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_group_reset - handle semaphore group reset request + * + * RETURNS: N/A + */ void _k_sem_group_reset(struct k_args *A) { @@ -630,16 +630,16 @@ void _k_sem_group_reset(struct k_args *A) } } -/******************************************************************************* -* -* task_sem_reset - reset semaphore count to zero -* -* This routine resets the signal count of the specified semaphore to zero. -* -* @param sema Semaphore to reset. -* -* RETURNS: N/A -*/ +/** + * + * task_sem_reset - reset semaphore count to zero + * + * This routine resets the signal count of the specified semaphore to zero. + * + * @param sema Semaphore to reset. + * + * RETURNS: N/A + */ void task_sem_reset(ksem_t sema) { @@ -650,18 +650,18 @@ void task_sem_reset(ksem_t sema) KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* task_sem_group_reset - reset a group of semaphores -* -* This routine resets the signal count for a group of semaphores. A semaphore -* group is an array of semaphore names terminated by the predefined constant -* ENDLIST. -* -* @param group Group of semaphores to reset. -* -* RETURNS: N/A -*/ +/** + * + * task_sem_group_reset - reset a group of semaphores + * + * This routine resets the signal count for a group of semaphores. A semaphore + * group is an array of semaphore names terminated by the predefined constant + * ENDLIST. 
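A companion sketch for the group-oriented calls described above, assuming <microkernel.h> as the umbrella header and that a ksemg_t is simply a pointer to an ENDLIST-terminated array of semaphore names; the two member semaphores are parameters rather than project-file objects:

#include <microkernel.h>	/* assumed umbrella header */

void group_signal_example(ksem_t sem_a, ksem_t sem_b)
{
	/* a semaphore group: array of semaphore names terminated by ENDLIST */
	ksem_t group[3];

	group[0] = sem_a;
	group[1] = sem_b;
	group[2] = ENDLIST;

	/* signal every member; per the description above this is faster than
	 * two task_sem_give() calls and completes before other tasks run
	 */
	task_sem_group_give(group);

	/* later: discard any pending signals on all members */
	task_sem_group_reset(group);
}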
+ * + * @param group Group of semaphores to reset. + * + * RETURNS: N/A + */ void task_sem_group_reset(ksemg_t group) { @@ -672,12 +672,12 @@ void task_sem_group_reset(ksemg_t group) KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* _k_sem_inquiry - handle semaphore inquiry request -* -* RETURNS: N/A -*/ +/** + * + * _k_sem_inquiry - handle semaphore inquiry request + * + * RETURNS: N/A + */ void _k_sem_inquiry(struct k_args *A) { @@ -689,16 +689,16 @@ void _k_sem_inquiry(struct k_args *A) A->Time.rcode = S->Level; } -/******************************************************************************* -* -* task_sem_count_get - read the semaphore signal count -* -* This routine reads the signal count of the specified semaphore. -* -* @param sema Semaphore to query. -* -* RETURNS: signal count -*/ +/** + * + * task_sem_count_get - read the semaphore signal count + * + * This routine reads the signal count of the specified semaphore. + * + * @param sema Semaphore to query. + * + * RETURNS: signal count + */ int task_sem_count_get(ksem_t sema) { diff --git a/kernel/microkernel/k_server.c b/kernel/microkernel/k_server.c index 6fb00b5ef2f..25f2a3cb090 100644 --- a/kernel/microkernel/k_server.c +++ b/kernel/microkernel/k_server.c @@ -37,7 +37,7 @@ from tasks (and, less commonly, fibers and ISRs). The requests are service by a high priority fiber, thereby ensuring that requests are processed in a timely manner and in a single threaded manner that prevents simultaneous requests from interfering with each other. -*/ + */ #include #include @@ -50,16 +50,16 @@ interfering with each other. extern const kernelfunc _k_server_dispatch_table[]; -/******************************************************************************* -* -* next_task_select - select task to be executed by microkernel -* -* Locates that highest priority task queue that is non-empty and chooses the -* task at the head of that queue. It's guaranteed that there will always be -* a non-empty queue, since the idle task is always executable. -* -* RETURNS: pointer to selected task -*/ +/** + * + * next_task_select - select task to be executed by microkernel + * + * Locates that highest priority task queue that is non-empty and chooses the + * task at the head of that queue. It's guaranteed that there will always be + * a non-empty queue, since the idle task is always executable. + * + * RETURNS: pointer to selected task + */ static struct k_proc *next_task_select(void) { @@ -85,17 +85,17 @@ static struct k_proc *next_task_select(void) return _k_task_priority_list[K_PrioListIdx].Head; } -/******************************************************************************* -* -* K_swapper - the microkernel thread entry point -* -* This function implements the microkernel fiber. It waits for command -* packets to arrive on its stack channel. It executes all commands on the -* stack and then sets up the next task that is ready to run. Next it -* goes to wait on further inputs on its stack channel. -* -* RETURNS: Does not return. -*/ +/** + * + * K_swapper - the microkernel thread entry point + * + * This function implements the microkernel fiber. It waits for command + * packets to arrive on its stack channel. It executes all commands on the + * stack and then sets up the next task that is ready to run. Next it + * goes to wait on further inputs on its stack channel. + * + * RETURNS: Does not return. 
+ */ FUNC_NORETURN void K_swapper(int parameter1, /* not used */ int parameter2 /* not used */ diff --git a/kernel/microkernel/k_task.c b/kernel/microkernel/k_task.c index f6a642ae5a2..f15b871c011 100644 --- a/kernel/microkernel/k_task.c +++ b/kernel/microkernel/k_task.c @@ -43,29 +43,29 @@ #include -/******************************************************************************* -* -* task_id_get - get task identifer -* -* RETURNS: identifier for current task -*/ +/** + * + * task_id_get - get task identifer + * + * RETURNS: identifier for current task + */ ktask_t task_id_get(void) { return _k_current_task->Ident; } -/******************************************************************************* -* -* _k_state_bit_reset - reset the specified task state bits -* -* This routine resets the specified task state bits. When a task's state bits -* are zero, the task may be scheduled to run. The tasks's state bits are a -* bitmask of the TF_xxx bits. Each TF_xxx bit indicates a reason why the task -* must not be scheduled to run. -* -* RETURNS: N/A -*/ +/** + * + * _k_state_bit_reset - reset the specified task state bits + * + * This routine resets the specified task state bits. When a task's state bits + * are zero, the task may be scheduled to run. The tasks's state bits are a + * bitmask of the TF_xxx bits. Each TF_xxx bit indicates a reason why the task + * must not be scheduled to run. + * + * RETURNS: N/A + */ void _k_state_bit_reset(struct k_proc *X, /* ptr to task */ uint32_t bits /* bitmask of TF_xxx @@ -107,17 +107,17 @@ void _k_state_bit_reset(struct k_proc *X, /* ptr to task */ #endif } -/******************************************************************************* -* -* _k_state_bit_set - set specified task state bits -* -* This routine sets the specified task state bits. When a task's state bits -* are non-zero, the task will not be scheduled to run. The task's state bits -* are a bitmask of the TF_xxx bits. Each TF_xxx bit indicates a reason why -* the task must not be scheduled to run. -* -* RETURNS: N/A -*/ +/** + * + * _k_state_bit_set - set specified task state bits + * + * This routine sets the specified task state bits. When a task's state bits + * are non-zero, the task will not be scheduled to run. The task's state bits + * are a bitmask of the TF_xxx bits. Each TF_xxx bit indicates a reason why + * the task must not be scheduled to run. + * + * RETURNS: N/A + */ void _k_state_bit_set( struct k_proc *task_ptr, @@ -193,12 +193,12 @@ void _k_state_bit_set( #endif } -/******************************************************************************* -* -* start_task - initialize and start a task -* -* RETURNS: N/A -*/ +/** + * + * start_task - initialize and start a task + * + * RETURNS: N/A + */ static void start_task(struct k_proc *X, /* ptr to task control block */ void (*func)(void) /* entry point for task */ @@ -235,14 +235,14 @@ static void start_task(struct k_proc *X, /* ptr to task control block */ _k_state_bit_reset(X, TF_STOP | TF_TERM); } -/******************************************************************************* -* -* abort_task - abort a task -* -* This routine aborts the specified task. -* -* RETURNS: N/A -*/ +/** + * + * abort_task - abort a task + * + * This routine aborts the specified task. 
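The selection rule spelled out for next_task_select() above can be restated in a few lines. This is a simplified sketch only: NUM_TASK_PRIORITIES is a placeholder, the declarations of _k_task_priority_list and struct k_proc come from the microkernel's private headers, and the quoted routine additionally starts its scan from the cached K_PrioListIdx rather than from zero:

static struct k_proc *next_task_select_sketch(void)
{
	int prio;

	/* scan per-priority queues from most to least important and take the
	 * task at the head of the first non-empty one
	 */
	for (prio = 0; prio < NUM_TASK_PRIORITIES; prio++) {
		if (_k_task_priority_list[prio].Head != NULL) {
			return _k_task_priority_list[prio].Head;
		}
	}

	return NULL;	/* unreachable: the idle task is always runnable */
}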
+ * + * RETURNS: N/A + */ static void abort_task(struct k_proc *X) { @@ -263,17 +263,17 @@ static void abort_task(struct k_proc *X) } #ifndef CONFIG_ARCH_HAS_TASK_ABORT -/******************************************************************************* -* -* _TaskAbort - microkernel handler for fatal task errors -* -* To be invoked when a task aborts implicitly, either by returning from its -* entry point or due to a software or hardware fault. -* -* RETURNS: does not return -* -* \NOMANUAL -*/ +/** + * + * _TaskAbort - microkernel handler for fatal task errors + * + * To be invoked when a task aborts implicitly, either by returning from its + * entry point or due to a software or hardware fault. + * + * RETURNS: does not return + * + * \NOMANUAL + */ FUNC_NORETURN void _TaskAbort(void) { @@ -289,23 +289,23 @@ FUNC_NORETURN void _TaskAbort(void) } #endif -/******************************************************************************* -* -* task_abort_handler_set - install an abort handler -* -* This routine installs an abort handler for the calling task. -* -* The abort handler is run when the calling task is aborted by a _TaskAbort() -* or task_group_abort() call. -* -* Each call to task_abort_handler_set() replaces the previously installed -* handler. -* -* To remove an abort handler, set the parameter to NULL as below: -* task_abort_handler_set (NULL) -* -* RETURNS: N/A -*/ +/** + * + * task_abort_handler_set - install an abort handler + * + * This routine installs an abort handler for the calling task. + * + * The abort handler is run when the calling task is aborted by a _TaskAbort() + * or task_group_abort() call. + * + * Each call to task_abort_handler_set() replaces the previously installed + * handler. + * + * To remove an abort handler, set the parameter to NULL as below: + * task_abort_handler_set (NULL) + * + * RETURNS: N/A + */ void task_abort_handler_set(void (*func)(void) /* abort handler */ ) @@ -313,16 +313,16 @@ void task_abort_handler_set(void (*func)(void) /* abort handler */ _k_current_task->fabort = func; } -/******************************************************************************* -* -* _k_task_op - handle a task operation request -* -* This routine handles any one of the following task operation requests: -* starting either a kernel or user task, aborting a task, suspending a task, -* resuming a task, blocking a task or unblocking a task -* -* RETURNS: N/A -*/ +/** + * + * _k_task_op - handle a task operation request + * + * This routine handles any one of the following task operation requests: + * starting either a kernel or user task, aborting a task, suspending a task, + * resuming a task, blocking a task or unblocking a task + * + * RETURNS: N/A + */ void _k_task_op(struct k_args *A) { @@ -351,12 +351,12 @@ void _k_task_op(struct k_args *A) } } -/******************************************************************************* -* -* _task_ioctl - task operations -* -* RETURNS: N/A -*/ +/** + * + * _task_ioctl - task operations + * + * RETURNS: N/A + */ void _task_ioctl(ktask_t task, /* task on which to operate */ int opt /* task operation */ @@ -370,16 +370,16 @@ void _task_ioctl(ktask_t task, /* task on which to operate */ KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* _k_task_group_op - handle task group operation request -* -* This routine handles any one of the following task group operations requests: -* starting either kernel or user tasks, aborting tasks, suspending tasks, -* resuming tasks, 
blocking tasks or unblocking tasks -* -* RETURNS: N/A -*/ +/** + * + * _k_task_group_op - handle task group operation request + * + * This routine handles any one of the following task group operations requests: + * starting either kernel or user tasks, aborting tasks, suspending tasks, + * resuming tasks, blocking tasks or unblocking tasks + * + * RETURNS: N/A + */ void _k_task_group_op(struct k_args *A) { @@ -422,12 +422,12 @@ void _k_task_group_op(struct k_args *A) } -/******************************************************************************* -* -* _task_group_ioctl - task group operations -* -* RETURNS: N/A -*/ +/** + * + * _task_group_ioctl - task group operations + * + * RETURNS: N/A + */ void _task_group_ioctl(ktask_group_t group, /* task group */ int opt /* operation */ @@ -441,60 +441,60 @@ void _task_group_ioctl(ktask_group_t group, /* task group */ KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* task_group_mask_get - get task groups for task -* -* RETURNS: task groups associated with current task -*/ +/** + * + * task_group_mask_get - get task groups for task + * + * RETURNS: task groups associated with current task + */ kpriority_t task_group_mask_get(void) { return _k_current_task->Group; } -/******************************************************************************* -* -* task_group_join - add task to task group(s) -* -* RETURNS: N/A -*/ +/** + * + * task_group_join - add task to task group(s) + * + * RETURNS: N/A + */ void task_group_join(uint32_t groups) { _k_current_task->Group |= groups; } -/******************************************************************************* -* -* task_group_leave - remove task from task group(s) -* -* RETURNS: N/A -*/ +/** + * + * task_group_leave - remove task from task group(s) + * + * RETURNS: N/A + */ void task_group_leave(uint32_t groups) { _k_current_task->Group &= ~groups; } -/******************************************************************************* -* -* task_priority_get - get task priority -* -* RETURNS: priority of current task -*/ +/** + * + * task_priority_get - get task priority + * + * RETURNS: priority of current task + */ kpriority_t task_priority_get(void) { return _k_current_task->Prio; } -/******************************************************************************* -* -* _k_task_priority_set - handle task set priority request -* -* RETURNS: N/A -*/ +/** + * + * _k_task_priority_set - handle task set priority request + * + * RETURNS: N/A + */ void _k_task_priority_set(struct k_args *A) { @@ -509,20 +509,20 @@ void _k_task_priority_set(struct k_args *A) FREEARGS(A); } -/******************************************************************************* -* -* task_priority_set - set the priority of a task -* -* This routine changes the priority of the specified task. -* -* The call has immediate effect. If the calling task is no longer the highest -* priority runnable task, a task switch occurs. -* -* The priority should be specified in the range 0 to 62. 0 is the highest -* priority. -* -* RETURNS: N/A -*/ +/** + * + * task_priority_set - set the priority of a task + * + * This routine changes the priority of the specified task. + * + * The call has immediate effect. If the calling task is no longer the highest + * priority runnable task, a task switch occurs. + * + * The priority should be specified in the range 0 to 62. 0 is the highest + * priority. 
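A usage sketch for task_priority_set(), assuming the <microkernel.h> umbrella header; it temporarily raises the calling task's own priority and then restores it:

#include <microkernel.h>	/* assumed umbrella header */

void priority_boost_example(void)
{
	kpriority_t saved = task_priority_get();

	/* 0 is the highest priority, 62 the lowest; the change takes effect
	 * immediately and may cause a task switch
	 */
	task_priority_set(task_id_get(), 5);

	/* ... time-critical work ... */

	task_priority_set(task_id_get(), saved);
}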
+ * + * RETURNS: N/A + */ void task_priority_set(ktask_t task, /* task whose priority is to be set */ kpriority_t prio /* new priority */ @@ -536,12 +536,12 @@ void task_priority_set(ktask_t task, /* task whose priority is to be set */ KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* _k_task_yield - handle task yield request -* -* RETURNS: N/A -*/ +/** + * + * _k_task_yield - handle task yield request + * + * RETURNS: N/A + */ void _k_task_yield(struct k_args *A) { @@ -557,17 +557,17 @@ void _k_task_yield(struct k_args *A) } } -/******************************************************************************* -* -* task_yield - yield the CPU to another task -* -* This routine yields the processor to the next equal priority task that is -* runnable. Using task_yield(), it is possible to achieve the effect of round -* robin scheduling. If no task with the same priority is runnable then no task -* switch occurs and the calling task resumes execution. -* -* RETURNS: N/A -*/ +/** + * + * task_yield - yield the CPU to another task + * + * This routine yields the processor to the next equal priority task that is + * runnable. Using task_yield(), it is possible to achieve the effect of round + * robin scheduling. If no task with the same priority is runnable then no task + * switch occurs and the calling task resumes execution. + * + * RETURNS: N/A + */ void task_yield(void) { @@ -577,20 +577,20 @@ void task_yield(void) KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* task_entry_set - set the entry point of a task -* -* This routine sets the entry point of a task to a given routine. It is only -* needed if the entry point is different from that specified in the project -* file. It must be called before task_start() to have any effect, so it -* cannot work with members of the EXE group or of any group that automatically -* starts when the application is loaded. -* -* The routine is executed when the task is started -* -* RETURNS: N/A -*/ +/** + * + * task_entry_set - set the entry point of a task + * + * This routine sets the entry point of a task to a given routine. It is only + * needed if the entry point is different from that specified in the project + * file. It must be called before task_start() to have any effect, so it + * cannot work with members of the EXE group or of any group that automatically + * starts when the application is loaded. + * + * The routine is executed when the task is started + * + * RETURNS: N/A + */ void task_entry_set(ktask_t task, /* task */ void (*func)(void) /* entry point */ diff --git a/kernel/microkernel/k_ticker.c b/kernel/microkernel/k_ticker.c index 7b7d3c78bca..72b6665be39 100644 --- a/kernel/microkernel/k_ticker.c +++ b/kernel/microkernel/k_ticker.c @@ -33,7 +33,7 @@ /* DESCRIPTION This module implements the microkernel's tick event handler. -*/ + */ #include #include @@ -69,50 +69,50 @@ int sys_clock_us_per_tick; int sys_clock_hw_cycles_per_tick; #endif -/******************************************************************************* -* -* task_cycle_get_32 - read the processor's high precision timer -* -* This routine reads the processor's high precision timer. It reads the -* counter register on the timer device. This counter register increments -* at a relatively high rate (e.g. 20 MHz), and thus is considered a -* "high resolution" timer. 
This is in contrast to nano_tick_get_32() and -* task_tick_get_32() which return the value of the kernel ticks variable. -* -* RETURNS: current high precision clock value -*/ +/** + * + * task_cycle_get_32 - read the processor's high precision timer + * + * This routine reads the processor's high precision timer. It reads the + * counter register on the timer device. This counter register increments + * at a relatively high rate (e.g. 20 MHz), and thus is considered a + * "high resolution" timer. This is in contrast to nano_tick_get_32() and + * task_tick_get_32() which return the value of the kernel ticks variable. + * + * RETURNS: current high precision clock value + */ uint32_t task_cycle_get_32(void) { return timer_read(); } -/******************************************************************************* -* -* task_tick_get_32 - read the current system clock value -* -* This routine returns the lower 32-bits of the current system clock value -* as measured in ticks. -* -* RETURNS: lower 32-bit of the current system clock value -*/ +/** + * + * task_tick_get_32 - read the current system clock value + * + * This routine returns the lower 32-bits of the current system clock value + * as measured in ticks. + * + * RETURNS: lower 32-bit of the current system clock value + */ int32_t task_tick_get_32(void) { return (int32_t)_k_sys_clock_tick_count; } -/******************************************************************************* -* -* task_tick_get - read the current system clock value -* -* This routine returns the current system clock value as measured in ticks. -* -* Interrupts are locked while updating clock since some CPUs do not support -* native atomic operations on 64 bit values. -* -* RETURNS: current system clock value -*/ +/** + * + * task_tick_get - read the current system clock value + * + * This routine returns the current system clock value as measured in ticks. + * + * Interrupts are locked while updating clock since some CPUs do not support + * native atomic operations on 64 bit values. + * + * RETURNS: current system clock value + */ int64_t task_tick_get(void) { @@ -124,15 +124,15 @@ int64_t task_tick_get(void) return ticks; } -/******************************************************************************* -* -* sys_clock_increment - increment system clock by "N" ticks -* -* Interrupts are locked while updating clock since some CPUs do not support -* native atomic operations on 64 bit values. -* -* RETURNS: N/A -*/ +/** + * + * sys_clock_increment - increment system clock by "N" ticks + * + * Interrupts are locked while updating clock since some CPUs do not support + * native atomic operations on 64 bit values. + * + * RETURNS: N/A + */ static void sys_clock_increment(int inc) { @@ -142,17 +142,17 @@ static void sys_clock_increment(int inc) irq_unlock_inline(key); } -/******************************************************************************* -* -* _TlDebugUpdate - task level debugging tick handler -* -* If task level debugging is configured this routine updates the low resolution -* debugging timer and determines if task level processing should be suspended. -* -* RETURNS: 0 if task level processing should be halted or 1 if not -* -* \NOMANUAL -*/ +/** + * + * _TlDebugUpdate - task level debugging tick handler + * + * If task level debugging is configured this routine updates the low resolution + * debugging timer and determines if task level processing should be suspended. 
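A small measurement sketch using the clock accessors documented above (umbrella header assumed): the 32-bit tick counter gives coarse, wrap-prone readings, while the cycle counter gives the high-resolution view.

#include <microkernel.h>	/* assumed umbrella header */

void measure_example(void)
{
	int32_t start_ticks, elapsed_ticks;
	uint32_t start_cycles, elapsed_cycles;

	start_ticks = task_tick_get_32();
	start_cycles = task_cycle_get_32();

	/* ... work being measured ... */

	elapsed_ticks = task_tick_get_32() - start_ticks;
	elapsed_cycles = task_cycle_get_32() - start_cycles;

	(void)elapsed_ticks;
	(void)elapsed_cycles;
}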
+ * + * RETURNS: 0 if task level processing should be halted or 1 if not + * + * \NOMANUAL + */ #ifdef CONFIG_TASK_DEBUG uint32_t __noinit _k_debug_sys_clock_tick_count; @@ -166,17 +166,17 @@ static inline int _TlDebugUpdate(int32_t ticks) #define _TlDebugUpdate(ticks) 1 #endif -/******************************************************************************* -* -* _TimeSliceUpdate - tick handler time slice logic -* -* This routine checks to see if it is time for the current task -* to relinquish control, and yields CPU if so. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _TimeSliceUpdate - tick handler time slice logic + * + * This routine checks to see if it is time for the current task + * to relinquish control, and yields CPU if so. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static inline void _TimeSliceUpdate(void) { @@ -192,21 +192,21 @@ static inline void _TimeSliceUpdate(void) #endif /* CONFIG_TIMESLICING */ } -/******************************************************************************* -* -* _SysIdleElapsedTicksGet - get elapsed ticks -* -* If tickless idle support is configured this routine returns the number -* of ticks since going idle and then resets the global elapsed tick counter back -* to zero indicating all elapsed ticks have been consumed. This is done with -* interrupts locked to prevent the timer ISR from modifying the global elapsed -* tick counter. -* If tickless idle support is not configured in it simply returns 1. -* -* RETURNS: number of ticks to process -* -* \NOMANUAL -*/ +/** + * + * _SysIdleElapsedTicksGet - get elapsed ticks + * + * If tickless idle support is configured this routine returns the number + * of ticks since going idle and then resets the global elapsed tick counter back + * to zero indicating all elapsed ticks have been consumed. This is done with + * interrupts locked to prevent the timer ISR from modifying the global elapsed + * tick counter. + * If tickless idle support is not configured in it simply returns 1. + * + * RETURNS: number of ticks to process + * + * \NOMANUAL + */ static inline int32_t _SysIdleElapsedTicksGet(void) { @@ -225,15 +225,15 @@ static inline int32_t _SysIdleElapsedTicksGet(void) #endif } -/******************************************************************************* -* -* _k_ticker - microkernel tick handler -* -* This routine informs other microkernel subsystems that a tick event has -* occurred. -* -* RETURNS: 1 -*/ +/** + * + * _k_ticker - microkernel tick handler + * + * This routine informs other microkernel subsystems that a tick event has + * occurred. + * + * RETURNS: 1 + */ int _k_ticker(int event) { @@ -256,34 +256,34 @@ int _k_ticker(int event) #ifdef CONFIG_TIMESLICING -/******************************************************************************* -* -* sys_scheduler_time_slice_set - set time slicing period and scope -* -* This routine controls how task time slicing is performed by the task -* scheduler, by specifying the maximum time slice length (in ticks) and -* the highest priority task level for which time slicing is performed. -* -* To enable time slicing, a non-zero time slice length must be specified. -* The task scheduler then ensures that no executing task runs for more than -* the specified number of ticks before giving other tasks of that priority -* a chance to execute. (However, any task whose priority is higher than the -* specified task priority level is exempted, and may execute as long as -* desired without being pre-empted due to time slicing.) 
-* -* Time slicing only limits that maximum amount of time a task may continuously -* execute. Once the scheduler selects a task for execution, there is no minimum -* guaranteed time the task will execute before tasks of greater or equal -* priority are scheduled. -* -* If the currently executing task is the only one of that priority eligible -* for execution this routine has no effect, as that task will be immediately -* rescheduled once the slice period expires. -* -* To disable timeslicing, call the API with both parameters set to zero. -* -* RETURNS: N/A -*/ +/** + * + * sys_scheduler_time_slice_set - set time slicing period and scope + * + * This routine controls how task time slicing is performed by the task + * scheduler, by specifying the maximum time slice length (in ticks) and + * the highest priority task level for which time slicing is performed. + * + * To enable time slicing, a non-zero time slice length must be specified. + * The task scheduler then ensures that no executing task runs for more than + * the specified number of ticks before giving other tasks of that priority + * a chance to execute. (However, any task whose priority is higher than the + * specified task priority level is exempted, and may execute as long as + * desired without being pre-empted due to time slicing.) + * + * Time slicing only limits that maximum amount of time a task may continuously + * execute. Once the scheduler selects a task for execution, there is no minimum + * guaranteed time the task will execute before tasks of greater or equal + * priority are scheduled. + * + * If the currently executing task is the only one of that priority eligible + * for execution this routine has no effect, as that task will be immediately + * rescheduled once the slice period expires. + * + * To disable timeslicing, call the API with both parameters set to zero. + * + * RETURNS: N/A + */ void sys_scheduler_time_slice_set(int32_t t, kpriority_t p) { @@ -293,15 +293,15 @@ void sys_scheduler_time_slice_set(int32_t t, kpriority_t p) #endif /* CONFIG_TIMESLICING */ -/******************************************************************************* -* -* _k_time_elapse - handle elapsed ticks calculation request -* -* This routine, called by K_swapper(), handles the request for calculating the -* time elapsed since the specified reference time. -* -* RETURNS: N/A -*/ +/** + * + * _k_time_elapse - handle elapsed ticks calculation request + * + * This routine, called by K_swapper(), handles the request for calculating the + * time elapsed since the specified reference time. + * + * RETURNS: N/A + */ void _k_time_elapse(struct k_args *P) { @@ -311,25 +311,25 @@ void _k_time_elapse(struct k_args *P) P->Args.c1.time1 = now; } -/******************************************************************************* -* -* task_tick_delta - return ticks between calls -* -* This function is meant to be used in contained fragments of code. The first -* call to it in a particular code fragment fills in a reference time variable -* which then gets passed and updated every time the function is called. From -* the second call on, the delta between the value passed to it and the current -* tick count is the return value. Since the first call is meant to only fill in -* the reference time, its return value should be discarded. -* -* Since a code fragment that wants to use task_tick_delta() passes in its -* own reference time variable, multiple code fragments can make use of this -* function concurrently. 
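A configuration sketch for the time-slicing control described above; CONFIG_TIMESLICING must be enabled and the umbrella header is assumed. With these arguments, tasks at priority level 10 and at numerically higher (less important) levels share the CPU in 5-tick slices, while more important tasks are never preempted by slicing:

#include <microkernel.h>	/* assumed umbrella header */

void timeslice_config_example(void)
{
	/* 5-tick slices for priority levels 10 through 62; levels 0 to 9 are
	 * exempt from time slicing
	 */
	sys_scheduler_time_slice_set(5, 10);

	/* both arguments zero: time slicing disabled again */
	sys_scheduler_time_slice_set(0, 0);
}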
-* -* Note that it is not necessary to allocate a timer to use this call. -* -* RETURNS: elapsed time in system ticks -*/ +/** + * + * task_tick_delta - return ticks between calls + * + * This function is meant to be used in contained fragments of code. The first + * call to it in a particular code fragment fills in a reference time variable + * which then gets passed and updated every time the function is called. From + * the second call on, the delta between the value passed to it and the current + * tick count is the return value. Since the first call is meant to only fill in + * the reference time, its return value should be discarded. + * + * Since a code fragment that wants to use task_tick_delta() passes in its + * own reference time variable, multiple code fragments can make use of this + * function concurrently. + * + * Note that it is not necessary to allocate a timer to use this call. + * + * RETURNS: elapsed time in system ticks + */ int64_t task_tick_delta(int64_t *reftime /* pointer to reference time */ ) diff --git a/kernel/microkernel/k_timer.c b/kernel/microkernel/k_timer.c index 436c3f0cab0..0329fba4109 100644 --- a/kernel/microkernel/k_timer.c +++ b/kernel/microkernel/k_timer.c @@ -43,47 +43,47 @@ extern struct k_timer _k_timer_blocks[]; struct k_timer *_k_timer_list_head = NULL; struct k_timer *_k_timer_list_tail = NULL; -/******************************************************************************* -* -* _timer_id_to_ptr - convert timer pointer to timer object identifier -* -* This routine converts a timer pointer into a timer object identifier. -* -* This algorithm relies on the fact that subtracting two pointers that point -* to elements of an array returns the difference between the array subscripts -* of those elements. (That is, "&a[j]-&a[i]" returns "j-i".) -* -* This algorithm also set the upper 16 bits of the object identifier -* to the same value utilized by the microkernel system generator. -* -* RETURNS: timer object identifier -*/ +/** + * + * _timer_id_to_ptr - convert timer pointer to timer object identifier + * + * This routine converts a timer pointer into a timer object identifier. + * + * This algorithm relies on the fact that subtracting two pointers that point + * to elements of an array returns the difference between the array subscripts + * of those elements. (That is, "&a[j]-&a[i]" returns "j-i".) + * + * This algorithm also set the upper 16 bits of the object identifier + * to the same value utilized by the microkernel system generator. + * + * RETURNS: timer object identifier + */ static inline ktimer_t _timer_ptr_to_id(struct k_timer *timer) { return (ktimer_t)(0x00010000u + (uint32_t)(timer - &_k_timer_blocks[0])); } -/******************************************************************************* -* -* _timer_id_to_ptr - convert timer object identifier to timer pointer -* -* This routine converts a timer object identifier into a timer pointer. -* -* RETURNS: timer pointer -*/ +/** + * + * _timer_id_to_ptr - convert timer object identifier to timer pointer + * + * This routine converts a timer object identifier into a timer pointer. 
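A worked instance of the pointer arithmetic described above, assuming OBJ_INDEX() recovers the low 16 bits of the identifier (its definition is not shown in this patch):

/* With timer == &_k_timer_blocks[3]:
 *
 *   timer - &_k_timer_blocks[0]   == 3            (subscript difference)
 *   _timer_ptr_to_id(timer)       == 0x00010003   (index plus generator tag)
 *
 * and, going the other way:
 *
 *   OBJ_INDEX(0x00010003)         == 3            (assumed to strip the tag)
 *   _timer_id_to_ptr(0x00010003)  == &_k_timer_blocks[3]
 */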
+ * + * RETURNS: timer pointer + */ static inline struct k_timer *_timer_id_to_ptr(ktimer_t timer) { return &_k_timer_blocks[OBJ_INDEX(timer)]; } -/******************************************************************************* -* -* _k_timer_enlist - insert a timer into the timer queue -* -* RETURNS: N/A -*/ +/** + * + * _k_timer_enlist - insert a timer into the timer queue + * + * RETURNS: N/A + */ void _k_timer_enlist(struct k_timer *T) { @@ -110,12 +110,12 @@ void _k_timer_enlist(struct k_timer *T) T->Back = Q; } -/******************************************************************************* -* -* _k_timer_delist - remove a timer from the timer queue -* -* RETURNS: N/A -*/ +/** + * + * _k_timer_delist - remove a timer from the timer queue + * + * RETURNS: N/A + */ void _k_timer_delist(struct k_timer *T) { @@ -134,14 +134,14 @@ void _k_timer_delist(struct k_timer *T) T->duration = -1; } -/******************************************************************************* -* -* _k_timeout_alloc - allocate timer used for command packet timeout -* -* Allocates timer for command packet and inserts it into the timer queue. -* -* RETURNS: N/A -*/ +/** + * + * _k_timeout_alloc - allocate timer used for command packet timeout + * + * Allocates timer for command packet and inserts it into the timer queue. + * + * RETURNS: N/A + */ void _k_timeout_alloc(struct k_args *P) { @@ -155,18 +155,18 @@ void _k_timeout_alloc(struct k_args *P) P->Time.timer = T; } -/******************************************************************************* -* -* _k_timeout_cancel - cancel timer used for command packet timeout -* -* Cancels timer (if not already expired), then reschedules the command packet -* for further processing. -* -* The command that is processed following cancellation is typically NOT the -* command that would have occurred had the timeout expired on its own. -* -* RETURNS: N/A -*/ +/** + * + * _k_timeout_cancel - cancel timer used for command packet timeout + * + * Cancels timer (if not already expired), then reschedules the command packet + * for further processing. + * + * The command that is processed following cancellation is typically NOT the + * command that would have occurred had the timeout expired on its own. + * + * RETURNS: N/A + */ void _k_timeout_cancel(struct k_args *A) { @@ -178,14 +178,14 @@ void _k_timeout_cancel(struct k_args *A) } } -/******************************************************************************* -* -* _k_timeout_free - free timer used for command packet timeout -* -* Cancels timer (if not already expired), then frees it. -* -* RETURNS: N/A -*/ +/** + * + * _k_timeout_free - free timer used for command packet timeout + * + * Cancels timer (if not already expired), then frees it. + * + * RETURNS: N/A + */ void _k_timeout_free(struct k_timer *T) { @@ -194,25 +194,25 @@ void _k_timeout_free(struct k_timer *T) FREETIMER(T); } -/******************************************************************************* -* -* _k_timer_list_update - handle expired timers -* -* Process the sorted list of timers associated with waiting tasks and -* activate each task whose timer has now expired. -* -* With tickless idle, a tick announcement may encompass multiple ticks. -* Due to limitations of the underlying timer driver, the number of elapsed -* ticks may -- under very rare circumstances -- exceed the first timer's -* remaining tick count, although never by more a single tick. 
This means that -* a task timer may occasionally expire one tick later than it was scheduled to, -* and that a periodic timer may exhibit a slow, ever-increasing degree of drift -* from the main system timer over long intervals. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _k_timer_list_update - handle expired timers + * + * Process the sorted list of timers associated with waiting tasks and + * activate each task whose timer has now expired. + * + * With tickless idle, a tick announcement may encompass multiple ticks. + * Due to limitations of the underlying timer driver, the number of elapsed + * ticks may -- under very rare circumstances -- exceed the first timer's + * remaining tick count, although never by more a single tick. This means that + * a task timer may occasionally expire one tick later than it was scheduled to, + * and that a periodic timer may exhibit a slow, ever-increasing degree of drift + * from the main system timer over long intervals. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _k_timer_list_update(int ticks) { @@ -244,17 +244,17 @@ void _k_timer_list_update(int ticks) } } -/******************************************************************************* -* -* _k_timer_alloc - handle timer allocation request -* -* This routine, called by K_swapper(), handles the request for allocating a -* timer. -* -* @param P Pointer to timer allocation request arguments. -* -* RETURNS: N/A -*/ +/** + * + * _k_timer_alloc - handle timer allocation request + * + * This routine, called by K_swapper(), handles the request for allocating a + * timer. + * + * @param P Pointer to timer allocation request arguments. + * + * RETURNS: N/A + */ void _k_timer_alloc(struct k_args *P) { @@ -269,12 +269,12 @@ void _k_timer_alloc(struct k_args *P) T->duration = -1; /* -1 indicates that timer is disabled */ } -/******************************************************************************* -* -* task_timer_alloc - allocate a timer and return its object identifier -* -* RETURNS: timer identifier -*/ +/** + * + * task_timer_alloc - allocate a timer and return its object identifier + * + * RETURNS: timer identifier + */ ktimer_t task_timer_alloc(void) { @@ -286,15 +286,15 @@ ktimer_t task_timer_alloc(void) return _timer_ptr_to_id(A.Args.c1.timer); } -/******************************************************************************* -* -* _k_timer_dealloc - handle timer deallocation request -* -* This routine, called by K_swapper(), handles the request for deallocating a -* timer. -* -* RETURNS: N/A -*/ +/** + * + * _k_timer_dealloc - handle timer deallocation request + * + * This routine, called by K_swapper(), handles the request for deallocating a + * timer. + * + * RETURNS: N/A + */ void _k_timer_dealloc(struct k_args *P) { @@ -308,17 +308,17 @@ void _k_timer_dealloc(struct k_args *P) FREEARGS(A); } -/******************************************************************************* -* -* task_timer_free - deallocate a timer -* -* This routine frees the resources associated with the timer. If a timer was -* started, it has to be stopped using task_timer_stop() before it can be freed. -* -* @param timer Timer to deallocate. -* -* RETURNS: N/A -*/ +/** + * + * task_timer_free - deallocate a timer + * + * This routine frees the resources associated with the timer. If a timer was + * started, it has to be stopped using task_timer_stop() before it can be freed. + * + * @param timer Timer to deallocate. 
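A lifecycle sketch for the allocation calls documented above (umbrella header assumed); task_timer_start() and task_timer_stop() are described further below:

#include <microkernel.h>	/* assumed umbrella header */

void timer_lifecycle_example(void)
{
	ktimer_t timer = task_timer_alloc();

	/* ... arm and use the timer, e.g. with task_timer_start() ... */

	/* a timer that was started must be stopped before it is freed */
	task_timer_stop(timer);
	task_timer_free(timer);
}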
+ * + * RETURNS: N/A + */ void task_timer_free(ktimer_t timer) { @@ -329,17 +329,17 @@ void task_timer_free(ktimer_t timer) KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* _k_timer_start - handle start timer request -* -* This routine, called by K_swapper(), handles the start timer request from -* both task_timer_start() and task_timer_restart(). -* -* @param P Pointer to timer start request arguments. -* -* RETURNS: N/A -*/ +/** + * + * _k_timer_start - handle start timer request + * + * This routine, called by K_swapper(), handles the start timer request from + * both task_timer_start() and task_timer_restart(). + * + * @param P Pointer to timer start request arguments. + * + * RETURNS: N/A + */ void _k_timer_start(struct k_args *P) { @@ -378,30 +378,30 @@ void _k_timer_start(struct k_args *P) _k_timer_enlist(T); } -/******************************************************************************* -* -* task_timer_start - start or restart the specified low resolution timer -* -* This routine starts or restarts the specified low resolution timer. -* -* When the specified number of ticks, set by , expires, the semaphore -* is signalled. The timer repeats the expiration/signal cycle each time -* ticks has elapsed. -* -* Setting to 0 stops the timer at the end of the initial delay. -* Setting to 0 will cause an initial delay equal to the repetition -* interval. If both and are set to 0, or if one or both of -* the values is invalid (negative), then this kernel API acts like a -* task_timer_stop(): if the allocated timer was still running (from a -* previous call), it will be cancelled; if not, nothing will happen. -* -* @param timer Timer to start. -* @param duration Initial delay in ticks. -* @param period Repetition interval in ticks. -* @param sema Semaphore to signal. -* -* RETURNS: N/A -*/ +/** + * + * task_timer_start - start or restart the specified low resolution timer + * + * This routine starts or restarts the specified low resolution timer. + * + * When the specified number of ticks, set by , expires, the semaphore + * is signalled. The timer repeats the expiration/signal cycle each time + * ticks has elapsed. + * + * Setting to 0 stops the timer at the end of the initial delay. + * Setting to 0 will cause an initial delay equal to the repetition + * interval. If both and are set to 0, or if one or both of + * the values is invalid (negative), then this kernel API acts like a + * task_timer_stop(): if the allocated timer was still running (from a + * previous call), it will be cancelled; if not, nothing will happen. + * + * @param timer Timer to start. + * @param duration Initial delay in ticks. + * @param period Repetition interval in ticks. + * @param sema Semaphore to signal. + * + * RETURNS: N/A + */ void task_timer_start(ktimer_t timer, int32_t duration, int32_t period, ksem_t sema) @@ -416,18 +416,18 @@ void task_timer_start(ktimer_t timer, int32_t duration, int32_t period, KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* task_timer_restart - restart a timer -* -* This routine restarts the timer specified by . -* -* @param timer Timer to restart. -* @param duration Initial delay. -* @param period Repetition interval. -* -* RETURNS: N/A -*/ +/** + * + * task_timer_restart - restart a timer + * + * This routine restarts the timer specified by . + * + * @param timer Timer to restart. + * @param duration Initial delay. + * @param period Repetition interval. 
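Editorial sketch, not part of the patch: a minimal example of the task_timer_* API documented above. The microkernel.h header name, the TIMER_SEM semaphore, the task_sem_take_wait() call, and all tick counts are assumptions for illustration only.

#include <microkernel.h>

/* TIMER_SEM is assumed to be a semaphore defined in the project file. */
void periodic_worker(void)
{
	ktimer_t timer = task_timer_alloc();
	int i;

	/* 10-tick initial delay, then signal TIMER_SEM every 100 ticks */
	task_timer_start(timer, 10, 100, TIMER_SEM);

	for (i = 0; i < 5; i++) {
		task_sem_take_wait(TIMER_SEM);	/* block until the timer fires */
		/* ... periodic processing ... */
	}

	task_timer_stop(timer);		/* a started timer must be stopped ... */
	task_timer_free(timer);		/* ... before it can be freed */
}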
+ * + * RETURNS: N/A + */ void task_timer_restart(ktimer_t timer, int32_t duration, int32_t period) { @@ -441,15 +441,15 @@ void task_timer_restart(ktimer_t timer, int32_t duration, int32_t period) KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* _k_timer_stop - handle stop timer request -* -* This routine, called by K_swapper(), handles the request for stopping a -* timer. -* -* RETURNS: N/A -*/ +/** + * + * _k_timer_stop - handle stop timer request + * + * This routine, called by K_swapper(), handles the request for stopping a + * timer. + * + * RETURNS: N/A + */ void _k_timer_stop(struct k_args *P) { @@ -459,17 +459,17 @@ void _k_timer_stop(struct k_args *P) _k_timer_delist(T); } -/******************************************************************************* -* -* task_timer_stop - stop a timer -* -* This routine stops the specified timer. If the timer period has already -* elapsed, the call has no effect. -* -* @param timer Timer to stop. -* -* RETURNS: N/A -*/ +/** + * + * task_timer_stop - stop a timer + * + * This routine stops the specified timer. If the timer period has already + * elapsed, the call has no effect. + * + * @param timer Timer to stop. + * + * RETURNS: N/A + */ void task_timer_stop(ktimer_t timer) { @@ -480,15 +480,15 @@ void task_timer_stop(ktimer_t timer) KERNEL_ENTRY(&A); } -/******************************************************************************* -* -* _k_task_wakeup - handle internally issued task wakeup request -* -* This routine, called by K_swapper(), handles the request for waking a task -* at the end of its sleep period. -* -* RETURNS: N/A -*/ +/** + * + * _k_task_wakeup - handle internally issued task wakeup request + * + * This routine, called by K_swapper(), handles the request for waking a task + * at the end of its sleep period. + * + * RETURNS: N/A + */ void _k_task_wakeup(struct k_args *P) { @@ -502,15 +502,15 @@ void _k_task_wakeup(struct k_args *P) _k_state_bit_reset(X, TF_TIME); } -/******************************************************************************* -* -* _k_task_sleep - handle task sleep request -* -* This routine, called by K_swapper(), handles the request for putting a task -* to sleep. -* -* RETURNS: N/A -*/ +/** + * + * _k_task_sleep - handle task sleep request + * + * This routine, called by K_swapper(), handles the request for putting a task + * to sleep. + * + * RETURNS: N/A + */ void _k_task_sleep(struct k_args *P) { @@ -533,18 +533,18 @@ void _k_task_sleep(struct k_args *P) _k_state_bit_set(_k_current_task, TF_TIME); } -/******************************************************************************* -* -* task_sleep - sleep for a number of ticks -* -* This routine suspends the calling task for the specified number of timer -* ticks. When the task is awakened, it is rescheduled according to its -* priority. -* -* @param ticks Number of ticks for which to sleep. -* -* RETURNS: N/A -*/ +/** + * + * task_sleep - sleep for a number of ticks + * + * This routine suspends the calling task for the specified number of timer + * ticks. When the task is awakened, it is rescheduled according to its + * priority. + * + * @param ticks Number of ticks for which to sleep. 
+ * + * RETURNS: N/A + */ void task_sleep(int32_t ticks) { diff --git a/kernel/nanokernel/compiler_stack_protect.c b/kernel/nanokernel/compiler_stack_protect.c index 74a1b64505d..7bea0e907ec 100644 --- a/kernel/nanokernel/compiler_stack_protect.c +++ b/kernel/nanokernel/compiler_stack_protect.c @@ -38,7 +38,7 @@ CONFIG_STACK_CANARIES=y. When this feature is enabled, the compiler generated code refers to function __stack_chk_fail and global variable __stack_chk_guard. -*/ + */ #include /* compiler specific configurations */ @@ -48,14 +48,14 @@ function __stack_chk_fail and global variable __stack_chk_guard. #include #include -/******************************************************************************* -* -* _StackCheckHandler - stack canary error handler -* -* This function is invoked when a stack canary error is detected. -* -* RETURNS: Does not return -*/ +/** + * + * _StackCheckHandler - stack canary error handler + * + * This function is invoked when a stack canary error is detected. + * + * RETURNS: Does not return + */ void FUNC_NORETURN _StackCheckHandler(void) { @@ -78,13 +78,13 @@ void FUNC_NORETURN _StackCheckHandler(void) void __noinit *__stack_chk_guard; -/******************************************************************************* -* -* __stack_chk_fail - Referenced by GCC compiler generated code -* -* This routine is invoked when a stack canary error is detected, indicating -* a buffer overflow or stack corruption problem. -*/ +/** + * + * __stack_chk_fail - Referenced by GCC compiler generated code + * + * This routine is invoked when a stack canary error is detected, indicating + * a buffer overflow or stack corruption problem. + */ FUNC_ALIAS(_StackCheckHandler, __stack_chk_fail, void); #endif diff --git a/kernel/nanokernel/ctors.c b/kernel/nanokernel/ctors.c index 0da7e920dbb..8a59dab09ca 100644 --- a/kernel/nanokernel/ctors.c +++ b/kernel/nanokernel/ctors.c @@ -44,7 +44,7 @@ initialization has completed. Although ctors are traditionally a C++ feature, normal C code can use them too. No destructor support (dtors) is provided. -*/ + */ /* What a constructor function pointer looks like */ @@ -61,14 +61,14 @@ typedef void (*CtorFuncPtr)(void); extern CtorFuncPtr __CTOR_LIST__[]; extern CtorFuncPtr __CTOR_END__[]; -/****************************************************************************** -* -* _Ctors - invoke all C++ style global object constructors -* -* This function is invoked by _Cstart(), which is implemented in the BSP. It -* invokes all routines that have been tagged using NANO_INIT_xxx, in order -* of priority (i.e. lowest numbered to highest numbered). -*/ +/** + * + * _Ctors - invoke all C++ style global object constructors + * + * This function is invoked by _Cstart(), which is implemented in the BSP. It + * invokes all routines that have been tagged using NANO_INIT_xxx, in order + * of priority (i.e. lowest numbered to highest numbered). + */ void _Ctors(void) { diff --git a/kernel/nanokernel/idle.c b/kernel/nanokernel/idle.c index 630330c80db..d42ac041ad8 100644 --- a/kernel/nanokernel/idle.c +++ b/kernel/nanokernel/idle.c @@ -34,7 +34,7 @@ DESCRIPTION This module provides routines to set the idle field in the nanokernel data structure. -*/ + */ #ifdef CONFIG_ADVANCED_POWER_MANAGEMENT @@ -43,16 +43,16 @@ data structure. 
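Editorial sketch, not part of the patch: a conceptual illustration of the stack-canary instrumentation GCC emits when CONFIG_STACK_CANARIES=y, tying together __stack_chk_guard and the __stack_chk_fail alias of _StackCheckHandler shown above. The function, its buffer, and the strcpy() overflow source are invented; real instrumentation is generated by the compiler, not hand-written.

#include <string.h>

extern void *__stack_chk_guard;		/* global canary value, set at boot */
extern void __stack_chk_fail(void);	/* aliased to _StackCheckHandler */

void instrumented_function(const char *input)
{
	void *canary = __stack_chk_guard;	/* prologue: copy canary onto the stack */
	char buf[16];

	strcpy(buf, input);			/* an overflow here clobbers the copy */

	if (canary != __stack_chk_guard) {	/* epilogue: verify the copy */
		__stack_chk_fail();		/* corruption detected: fatal error */
	}
}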
#include #include -/******************************************************************************* -* -* nano_cpu_set_idle - indicate that nanokernel is idling in tickless mode -* -* Sets the nanokernel data structure idle field to a non-zero value. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * nano_cpu_set_idle - indicate that nanokernel is idling in tickless mode + * + * Sets the nanokernel data structure idle field to a non-zero value. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void nano_cpu_set_idle(int32_t ticks) { diff --git a/kernel/nanokernel/include/gen_offset.h b/kernel/nanokernel/include/gen_offset.h index a38fae8c890..319e7f9fee8 100644 --- a/kernel/nanokernel/include/gen_offset.h +++ b/kernel/nanokernel/include/gen_offset.h @@ -99,7 +99,7 @@ $ nm offsets.o \NOMANUAL -*/ + */ #ifndef _GEN_OFFSET_H #define _GEN_OFFSET_H diff --git a/kernel/nanokernel/include/nano_internal.h b/kernel/nanokernel/include/nano_internal.h index eb5d1fe1e62..96d7f859fd2 100644 --- a/kernel/nanokernel/include/nano_internal.h +++ b/kernel/nanokernel/include/nano_internal.h @@ -33,7 +33,7 @@ /* DESCRIPTION This file contains private nanokernel APIs that are not architecture-specific. -*/ + */ #ifndef _NANO_INTERNAL__H_ #define _NANO_INTERNAL__H_ diff --git a/kernel/nanokernel/int_latency_bench.c b/kernel/nanokernel/int_latency_bench.c index 68a3bcc330b..454054e81e7 100644 --- a/kernel/nanokernel/int_latency_bench.c +++ b/kernel/nanokernel/int_latency_bench.c @@ -66,16 +66,16 @@ static uint32_t int_latency_bench_ready = 0; /* min amount of time it takes from HW interrupt generation to 'C' handler */ uint32_t _hw_irq_to_c_handler_latency = ULONG_MAX; -/******************************************************************************* -* -* intLatencyStart - start tracking time spent with interrupts locked -* -* calls to lock interrupt can nest, so this routine can be called numerous -* times before interrupt are unlocked -* -* RETURNS: N/A -* -*/ +/** + * + * intLatencyStart - start tracking time spent with interrupts locked + * + * calls to lock interrupt can nest, so this routine can be called numerous + * times before interrupt are unlocked + * + * RETURNS: N/A + * + */ void _int_latency_start(void) { @@ -87,15 +87,15 @@ void _int_latency_start(void) int_lock_unlock_nest++; } -/******************************************************************************* -* -* intLatencyStop - stop accumulating time spent for when interrupts are locked -* -* This is only call once when the interrupt are being reenabled -* -* RETURNS: N/A -* -*/ +/** + * + * intLatencyStop - stop accumulating time spent for when interrupts are locked + * + * This is only call once when the interrupt are being reenabled + * + * RETURNS: N/A + * + */ void _int_latency_stop(void) { @@ -140,13 +140,13 @@ void _int_latency_stop(void) } } -/******************************************************************************* -* -* int_latency_init - initialize interrupt latency benchmark -* -* RETURNS: N/A -* -*/ +/** + * + * int_latency_init - initialize interrupt latency benchmark + * + * RETURNS: N/A + * + */ void int_latency_init(void) { @@ -191,15 +191,15 @@ void int_latency_init(void) } } -/******************************************************************************* -* -* int_latency_show - dumps interrupt latency values -* -* The interrupt latency value measures -* -* RETURNS: N/A -* -*/ +/** + * + * int_latency_show - dumps interrupt latency values + * + * The interrupt latency value measures + * + * RETURNS: N/A + * + */ void 
int_latency_show(void) { diff --git a/kernel/nanokernel/nano_context.c b/kernel/nanokernel/nano_context.c index 6b062930027..c12ea9f1b39 100644 --- a/kernel/nanokernel/nano_context.c +++ b/kernel/nanokernel/nano_context.c @@ -34,7 +34,7 @@ DESCRIPTION This module provides general purpose context support, with applies to both tasks or fibers. -*/ + */ #include #include @@ -42,29 +42,29 @@ tasks or fibers. #include #include -/******************************************************************************* -* -* context_self_get - return the currently executing context -* -* This routine returns a pointer to the context control block of the currently -* executing context. It is cast to a nano_context_id_t for use publically. -* -* RETURNS: nano_context_id_t of the currently executing context. -*/ +/** + * + * context_self_get - return the currently executing context + * + * This routine returns a pointer to the context control block of the currently + * executing context. It is cast to a nano_context_id_t for use publically. + * + * RETURNS: nano_context_id_t of the currently executing context. + */ nano_context_id_t context_self_get(void) { return _nanokernel.current; } -/******************************************************************************* -* -* context_type_get - return the type of the currently executing context -* -* This routine returns the type of context currently executing. -* -* RETURNS: nano_context_type_t of the currently executing context. -*/ +/** + * + * context_type_get - return the type of the currently executing context + * + * This routine returns the type of context currently executing. + * + * RETURNS: nano_context_type_t of the currently executing context. + */ nano_context_type_t context_type_get(void) { @@ -77,48 +77,48 @@ nano_context_type_t context_type_get(void) return NANO_CTX_FIBER; } -/******************************************************************************* -* -* _context_essential_set - mark context as essential to system -* -* This function tags the running fiber or task as essential to system -* option; exceptions raised by this context will be treated as a fatal -* system error. -* -* RETURNS: N/A -*/ +/** + * + * _context_essential_set - mark context as essential to system + * + * This function tags the running fiber or task as essential to system + * option; exceptions raised by this context will be treated as a fatal + * system error. + * + * RETURNS: N/A + */ void _context_essential_set(void) { _nanokernel.current->flags |= ESSENTIAL; } -/******************************************************************************* -* -* _context_essential_clear - mark context as not essential to system -* -* This function tags the running fiber or task as not essential to system -* option; exceptions raised by this context may be recoverable. -* (This is the default tag for a context.) -* -* RETURNS: N/A -*/ +/** + * + * _context_essential_clear - mark context as not essential to system + * + * This function tags the running fiber or task as not essential to system + * option; exceptions raised by this context may be recoverable. + * (This is the default tag for a context.) + * + * RETURNS: N/A + */ void _context_essential_clear(void) { _nanokernel.current->flags &= ~ESSENTIAL; } -/******************************************************************************* -* -* _context_essential_check - is the specified context essential? -* -* This routine indicates if the specified context is an essential system -* context. 
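Editorial sketch, not part of the patch: a brief use of the context query APIs documented above. The nanokernel.h header name is an assumption, and NANO_CTX_TASK / NANO_CTX_ISR are assumed to be the sibling values of the NANO_CTX_FIBER constant visible in the diff.

#include <nanokernel.h>

void report_execution_context(void)
{
	nano_context_id_t id = context_self_get();	/* caller's CCS as an opaque id */

	switch (context_type_get()) {
	case NANO_CTX_FIBER:
		/* running in a fiber */
		break;
	case NANO_CTX_TASK:		/* assumed constant */
		/* running in a task */
		break;
	default:			/* NANO_CTX_ISR, assumed constant */
		break;
	}

	(void)id;
}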
A NULL context pointer indicates that the current context is -* to be queried. -* -* RETURNS: Non-zero if specified context is essential, zero if it is not -*/ +/** + * + * _context_essential_check - is the specified context essential? + * + * This routine indicates if the specified context is an essential system + * context. A NULL context pointer indicates that the current context is + * to be queried. + * + * RETURNS: Non-zero if specified context is essential, zero if it is not + */ int _context_essential_check(tCCS *pCtx /* pointer to context */ ) @@ -128,16 +128,16 @@ int _context_essential_check(tCCS *pCtx /* pointer to context */ #ifdef CONFIG_CONTEXT_CUSTOM_DATA -/******************************************************************************* -* -* context_custom_data_set - set context's custom data -* -* This routine sets the custom data value for the current task or fiber. -* Custom data is not used by the kernel itself, and is freely available -* for the context to use as it sees fit. -* -* RETURNS: N/A -*/ +/** + * + * context_custom_data_set - set context's custom data + * + * This routine sets the custom data value for the current task or fiber. + * Custom data is not used by the kernel itself, and is freely available + * for the context to use as it sees fit. + * + * RETURNS: N/A + */ void context_custom_data_set(void *value /* new value */ ) @@ -145,14 +145,14 @@ void context_custom_data_set(void *value /* new value */ _nanokernel.current->custom_data = value; } -/******************************************************************************* -* -* context_custom_data_get - get context's custom data -* -* This function returns the custom data value for the current task or fiber. -* -* RETURNS: current handle value -*/ +/** + * + * context_custom_data_get - get context's custom data + * + * This function returns the custom data value for the current task or fiber. + * + * RETURNS: current handle value + */ void *context_custom_data_get(void) { @@ -162,21 +162,21 @@ void *context_custom_data_get(void) #endif /* CONFIG_CONTEXT_CUSTOM_DATA */ #if defined(CONFIG_CONTEXT_MONITOR) -/******************************************************************************* -* -* _context_exit - context exit routine -* -* This function is invoked when the specified context is aborted, either -* normally or abnormally. It is called for the termination of any context, -* (fibers and tasks). -* -* This routine must be invoked from a fiber to guarantee that the list -* of contexts does not change in mid-operation. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _context_exit - context exit routine + * + * This function is invoked when the specified context is aborted, either + * normally or abnormally. It is called for the termination of any context, + * (fibers and tasks). + * + * This routine must be invoked from a fiber to guarantee that the list + * of contexts does not change in mid-operation. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _context_exit(tCCS *pContext) { @@ -200,30 +200,30 @@ void _context_exit(tCCS *pContext) } #endif /* CONFIG_CONTEXT_MONITOR */ -/******************************************************************************* -* -* _context_entry - common context entry point function for kernel contexts -* -* This function serves as the entry point for _all_ kernel contexts, i.e. both -* task and fiber contexts are instantiated such that initial execution starts -* here. 
-* -* This routine invokes the actual task or fiber entry point function and -* passes it three arguments. It also handles graceful termination of the -* task or fiber if the entry point function ever returns. -* -* INTERNAL -* The 'noreturn' attribute is applied to this function so that the compiler -* can dispense with generating the usual preamble that is only required for -* functions that actually return. -* -* The analogous entry point function for user-mode task contexts is called -* _ContextUsrEntryRtn(). -* -* RETURNS: Does not return -* -* \NOMANUAL -*/ +/** + * + * _context_entry - common context entry point function for kernel contexts + * + * This function serves as the entry point for _all_ kernel contexts, i.e. both + * task and fiber contexts are instantiated such that initial execution starts + * here. + * + * This routine invokes the actual task or fiber entry point function and + * passes it three arguments. It also handles graceful termination of the + * task or fiber if the entry point function ever returns. + * + * INTERNAL + * The 'noreturn' attribute is applied to this function so that the compiler + * can dispense with generating the usual preamble that is only required for + * functions that actually return. + * + * The analogous entry point function for user-mode task contexts is called + * _ContextUsrEntryRtn(). + * + * RETURNS: Does not return + * + * \NOMANUAL + */ FUNC_NORETURN void _context_entry( _ContextEntry pEntry, /* address of app entry point function */ diff --git a/kernel/nanokernel/nano_fiber.c b/kernel/nanokernel/nano_fiber.c index 2f2943424b0..be6490d0585 100644 --- a/kernel/nanokernel/nano_fiber.c +++ b/kernel/nanokernel/nano_fiber.c @@ -34,26 +34,26 @@ DESCRIPTION This module provides various nanokernel fiber related primitives, either in the form of an actual function or an alias to a function. -*/ + */ #include #include #include #include -/******************************************************************************* -* -* _nano_fiber_schedule - add a fiber to the list of runnable fibers -* -* The list of runnable fibers is maintained via a single linked list -* in priority order. Numerically lower priorities represent higher priority -* contexts. -* -* Interrupts must already be locked to ensure list cannot change -* while this routine is executing! -* -* RETURNS: N/A -*/ +/** + * + * _nano_fiber_schedule - add a fiber to the list of runnable fibers + * + * The list of runnable fibers is maintained via a single linked list + * in priority order. Numerically lower priorities represent higher priority + * contexts. + * + * Interrupts must already be locked to ensure list cannot change + * while this routine is executing! + * + * RETURNS: N/A + */ void _nano_fiber_schedule(tCCS *ccs) { @@ -81,26 +81,26 @@ FUNC_ALIAS(_fiber_start, fiber_fiber_start, void); FUNC_ALIAS(_fiber_start, task_fiber_start, void); FUNC_ALIAS(_fiber_start, fiber_start, void); -/******************************************************************************* -* -* _fiber_start - initialize and start a fiber context -* -* This routine initilizes and starts a fiber context; it can be called from -* either a fiber or a task context. When this routine is called from a -* task, the newly created fiber will start executing immediately. 
-* -* INTERNAL -* Given that this routine is _not_ ISR-callable, the following code is used -* to differentiate between a task and fiber context: -* -* if ((_nanokernel.current->flags & TASK) == TASK) -* -* Given that the _fiber_start() primitive is not considered real-time -* performance critical, a runtime check to differentiate between a calling -* task or fiber is performed in order to conserve footprint. -* -* RETURNS: N/A -*/ +/** + * + * _fiber_start - initialize and start a fiber context + * + * This routine initilizes and starts a fiber context; it can be called from + * either a fiber or a task context. When this routine is called from a + * task, the newly created fiber will start executing immediately. + * + * INTERNAL + * Given that this routine is _not_ ISR-callable, the following code is used + * to differentiate between a task and fiber context: + * + * if ((_nanokernel.current->flags & TASK) == TASK) + * + * Given that the _fiber_start() primitive is not considered real-time + * performance critical, a runtime check to differentiate between a calling + * task or fiber is performed in order to conserve footprint. + * + * RETURNS: N/A + */ void _fiber_start(char *pStack, unsigned stackSize, /* stack size in bytes */ @@ -145,19 +145,19 @@ void _fiber_start(char *pStack, irq_unlock(imask); } -/******************************************************************************* -* -* fiber_yield - yield the current context -* -* Invocation of this routine results in the current context yielding to -* another context of the same or higher priority. If there doesn't exist -* any other contexts of the same or higher priority that are runnable, this -* routine will return immediately. -* -* This routine can only be called from a fiber context. -* -* RETURNS: N/A -*/ +/** + * + * fiber_yield - yield the current context + * + * Invocation of this routine results in the current context yielding to + * another context of the same or higher priority. If there doesn't exist + * any other contexts of the same or higher priority that are runnable, this + * routine will return immediately. + * + * This routine can only be called from a fiber context. + * + * RETURNS: N/A + */ void fiber_yield(void) { @@ -177,16 +177,16 @@ void fiber_yield(void) irq_unlock_inline(imask); } -/******************************************************************************* -* -* _nano_fiber_swap - pass control from the currently executing fiber -* -* This routine is used when a fiber voluntarily gives up control of the CPU. -* -* This routine can only be called from a fiber context. -* -* RETURNS: This function never returns -*/ +/** + * + * _nano_fiber_swap - pass control from the currently executing fiber + * + * This routine is used when a fiber voluntarily gives up control of the CPU. + * + * This routine can only be called from a fiber context. + * + * RETURNS: This function never returns + */ FUNC_NORETURN void _nano_fiber_swap(void) { @@ -212,20 +212,20 @@ FUNC_NORETURN void _nano_fiber_swap(void) } #ifndef CONFIG_ARCH_HAS_NANO_FIBER_ABORT -/******************************************************************************* -* -* fiber_abort - abort the currently executing fiber -* -* This routine is used to abort the currrently executing fiber. This can occur -* because: -* - the fiber has explicitly aborted itself (by calling this routine), -* - the fiber has implicitly aborted itself (by returning from its entry point), -* - the fiber has encountered a fatal exception. 
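Editorial sketch, not part of the patch: starting a fiber from a task with the task_fiber_start() alias and yielding inside it, per the _fiber_start() and fiber_yield() documentation above. The nanokernel.h header, the full argument list (stack, stack size, entry point, two entry arguments, priority, options), and all numeric values are assumptions based on the partial prototype visible in the diff.

#include <nanokernel.h>

#define DEMO_STACK_SIZE 512
#define DEMO_PRIORITY   5

static char demo_fiber_stack[DEMO_STACK_SIZE];

static void demo_fiber_entry(int arg1, int arg2)	/* entry signature assumed */
{
	while (1) {
		/* ... cooperative work ... */
		fiber_yield();	/* let runnable fibers of equal/higher priority run */
	}
}

void start_demo_fiber(void)	/* called from a task context */
{
	task_fiber_start(demo_fiber_stack, DEMO_STACK_SIZE,
			 demo_fiber_entry, 0, 0,	/* entry point and its two args */
			 DEMO_PRIORITY, 0);		/* priority and options (order assumed) */
}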
-* -* This routine can only be called from a fiber context. -* -* RETURNS: This function never returns -*/ +/** + * + * fiber_abort - abort the currently executing fiber + * + * This routine is used to abort the currrently executing fiber. This can occur + * because: + * - the fiber has explicitly aborted itself (by calling this routine), + * - the fiber has implicitly aborted itself (by returning from its entry point), + * - the fiber has encountered a fatal exception. + * + * This routine can only be called from a fiber context. + * + * RETURNS: This function never returns + */ FUNC_NORETURN void fiber_abort(void) { diff --git a/kernel/nanokernel/nano_fifo.c b/kernel/nanokernel/nano_fifo.c index 8dc539f8145..88b1a1acdc4 100644 --- a/kernel/nanokernel/nano_fifo.c +++ b/kernel/nanokernel/nano_fifo.c @@ -44,29 +44,29 @@ the following APIs: INTERNAL In some cases the compiler "alias" attribute is used to map two or more APIs to the same function, since they have identical implementations. -*/ + */ #include #include #include #include -/******************************************************************************* -* -* nano_fifo_init - initialize a nanokernel multiple-waiter fifo (fifo) object -* -* This function initializes a nanokernel multiple-waiter fifo (fifo) object -* structure. -* -* It may be called from either a fiber or task context. -* -* RETURNS: N/A -* -* INTERNAL -* Although the existing implementation will support invocation from an ISR -* context, for future flexibility, this API will be restricted from ISR -* level invocation. -*/ +/** + * + * nano_fifo_init - initialize a nanokernel multiple-waiter fifo (fifo) object + * + * This function initializes a nanokernel multiple-waiter fifo (fifo) object + * structure. + * + * It may be called from either a fiber or task context. + * + * RETURNS: N/A + * + * INTERNAL + * Although the existing implementation will support invocation from an ISR + * context, for future flexibility, this API will be restricted from ISR + * level invocation. + */ void nano_fifo_init( struct nano_fifo *fifo /* fifo to initialize */ @@ -94,12 +94,12 @@ void nano_fifo_init( FUNC_ALIAS(_fifo_put_non_preemptible, nano_isr_fifo_put, void); FUNC_ALIAS(_fifo_put_non_preemptible, nano_fiber_fifo_put, void); -/******************************************************************************* -* -* enqueue_data - internal routine to append data to a fifo -* -* RETURNS: N/A -*/ +/** + * + * enqueue_data - internal routine to append data to a fifo + * + * RETURNS: N/A + */ static inline void enqueue_data(struct nano_fifo *fifo, void *data) { @@ -108,25 +108,25 @@ static inline void enqueue_data(struct nano_fifo *fifo, void *data) *(int *)data = 0; } -/******************************************************************************* -* -* _fifo_put_non_preemptible - append an element to a fifo (no context switch) -* -* This routine adds an element to the end of a fifo object; it may be called -* from either either a fiber or an ISR context. A fiber pending on the fifo -* object will be made ready, but will NOT be scheduled to execute. -* -* If a fiber is waiting on the fifo, the address of the element is returned to -* the waiting fiber. Otherwise, the element is linked to the end of the list. -* -* RETURNS: N/A -* -* INTERNAL -* This function is capable of supporting invocations from both a fiber and an -* ISR context. 
However, the nano_isr_fifo_put and nano_fiber_fifo_put aliases -* are created to support any required implementation differences in the future -* without introducing a source code migration issue. -*/ +/** + * + * _fifo_put_non_preemptible - append an element to a fifo (no context switch) + * + * This routine adds an element to the end of a fifo object; it may be called + * from either either a fiber or an ISR context. A fiber pending on the fifo + * object will be made ready, but will NOT be scheduled to execute. + * + * If a fiber is waiting on the fifo, the address of the element is returned to + * the waiting fiber. Otherwise, the element is linked to the end of the list. + * + * RETURNS: N/A + * + * INTERNAL + * This function is capable of supporting invocations from both a fiber and an + * ISR context. However, the nano_isr_fifo_put and nano_fiber_fifo_put aliases + * are created to support any required implementation differences in the future + * without introducing a source code migration issue. + */ void _fifo_put_non_preemptible( struct nano_fifo *fifo, /* fifo on which to interact */ @@ -149,19 +149,19 @@ void _fifo_put_non_preemptible( irq_unlock_inline(imask); } -/******************************************************************************* -* -* nano_task_fifo_put - add an element to the end of a fifo -* -* This routine adds an element to the end of a fifo object; it can be called -* from only a task context. A fiber pending on the fifo object will be made -* ready, and will preempt the running task immediately. -* -* If a fiber is waiting on the fifo, the address of the element is returned to -* the waiting fiber. Otherwise, the element is linked to the end of the list. -* -* RETURNS: N/A -*/ +/** + * + * nano_task_fifo_put - add an element to the end of a fifo + * + * This routine adds an element to the end of a fifo object; it can be called + * from only a task context. A fiber pending on the fifo object will be made + * ready, and will preempt the running task immediately. + * + * If a fiber is waiting on the fifo, the address of the element is returned to + * the waiting fiber. Otherwise, the element is linked to the end of the list. + * + * RETURNS: N/A + */ void nano_task_fifo_put( struct nano_fifo *fifo, /* fifo on which to interact */ @@ -186,15 +186,15 @@ void nano_task_fifo_put( irq_unlock_inline(imask); } -/******************************************************************************* -* -* nano_fifo_put - add an element to the end of a fifo -* -* This is a convenience wrapper for the context-specific APIs. This is -* helpful whenever the exact scheduling context is not known, but should -* be avoided when the context is known up-front (to avoid unnecessary -* overhead). -*/ +/** + * + * nano_fifo_put - add an element to the end of a fifo + * + * This is a convenience wrapper for the context-specific APIs. This is + * helpful whenever the exact scheduling context is not known, but should + * be avoided when the context is known up-front (to avoid unnecessary + * overhead). 
+ */ void nano_fifo_put(struct nano_fifo *fifo, void *data) { static void (*func[3])(struct nano_fifo *fifo, void *data) = { @@ -208,12 +208,12 @@ FUNC_ALIAS(_fifo_get, nano_fiber_fifo_get, void *); FUNC_ALIAS(_fifo_get, nano_task_fifo_get, void *); FUNC_ALIAS(_fifo_get, nano_fifo_get, void *); -/******************************************************************************* -* -* dequeue_data - internal routine to remove data from a fifo -* -* RETURNS: the data item removed -*/ +/** + * + * dequeue_data - internal routine to remove data from a fifo + * + * RETURNS: the data item removed + */ static inline void *dequeue_data(struct nano_fifo *fifo) { @@ -233,26 +233,26 @@ static inline void *dequeue_data(struct nano_fifo *fifo) return data; } -/******************************************************************************* -* -* _fifo_get - get an element from the head a fifo -* -* Remove the head element from the specified nanokernel multiple-waiter fifo -* linked list fifo; it may be called from a fiber, task, or ISR context. -* -* If no elements are available, NULL is returned. The first word in the -* element contains invalid data because that memory location was used to store -* a pointer to the next element in the linked list. -* -* RETURNS: Pointer to head element in the list if available, otherwise NULL -* -* INTERNAL -* This function is capable of supporting invocations from fiber, task, and ISR -* contexts. However, the nano_isr_fifo_get, nano_task_fifo_get, and -* nano_fiber_fifo_get aliases are created to support any required -* implementation differences in the future without introducing a source code -* migration issue. -*/ +/** + * + * _fifo_get - get an element from the head a fifo + * + * Remove the head element from the specified nanokernel multiple-waiter fifo + * linked list fifo; it may be called from a fiber, task, or ISR context. + * + * If no elements are available, NULL is returned. The first word in the + * element contains invalid data because that memory location was used to store + * a pointer to the next element in the linked list. + * + * RETURNS: Pointer to head element in the list if available, otherwise NULL + * + * INTERNAL + * This function is capable of supporting invocations from fiber, task, and ISR + * contexts. However, the nano_isr_fifo_get, nano_task_fifo_get, and + * nano_fiber_fifo_get aliases are created to support any required + * implementation differences in the future without introducing a source code + * migration issue. + */ void *_fifo_get( struct nano_fifo *fifo /* fifo on which to interact */ @@ -271,25 +271,25 @@ void *_fifo_get( return data; } -/******************************************************************************* -* -* nano_fiber_fifo_get_wait - get the head element of a fifo, wait if emtpy -* -* Remove the head element from the specified system-level multiple-waiter -* fifo; it can only be called from a fiber context. -* -* If no elements are available, the calling fiber will pend until an element -* is put onto the fifo. -* -* The first word in the element contains invalid data because that memory -* location was used to store a pointer to the next element in the linked list. -* -* RETURNS: Pointer to head element in the list -* -* INTERNAL There exists a separate nano_task_fifo_get_wait() implementation -* since a task context cannot pend on a nanokernel object. Instead tasks will -* poll the fifo object. 
-*/ +/** + * + * nano_fiber_fifo_get_wait - get the head element of a fifo, wait if emtpy + * + * Remove the head element from the specified system-level multiple-waiter + * fifo; it can only be called from a fiber context. + * + * If no elements are available, the calling fiber will pend until an element + * is put onto the fifo. + * + * The first word in the element contains invalid data because that memory + * location was used to store a pointer to the next element in the linked list. + * + * RETURNS: Pointer to head element in the list + * + * INTERNAL There exists a separate nano_task_fifo_get_wait() implementation + * since a task context cannot pend on a nanokernel object. Instead tasks will + * poll the fifo object. + */ void *nano_fiber_fifo_get_wait( struct nano_fifo *fifo /* fifo on which to interact */ @@ -312,21 +312,21 @@ void *nano_fiber_fifo_get_wait( return data; } -/******************************************************************************* -* -* nano_task_fifo_get_wait - get the head element of a fifo, poll if empty -* -* Remove the head element from the specified system-level multiple-waiter -* fifo; it can only be called from a task context. -* -* If no elements are available, the calling task will poll until an -* until an element is put onto the fifo. -* -* The first word in the element contains invalid data because that memory -* location was used to store a pointer to the next element in the linked list. -* -* RETURNS: Pointer to head element in the list -*/ +/** + * + * nano_task_fifo_get_wait - get the head element of a fifo, poll if empty + * + * Remove the head element from the specified system-level multiple-waiter + * fifo; it can only be called from a task context. + * + * If no elements are available, the calling task will poll until an + * until an element is put onto the fifo. + * + * The first word in the element contains invalid data because that memory + * location was used to store a pointer to the next element in the linked list. + * + * RETURNS: Pointer to head element in the list + */ void *nano_task_fifo_get_wait( struct nano_fifo *fifo /* fifo on which to interact */ @@ -360,17 +360,17 @@ void *nano_task_fifo_get_wait( return data; } -/******************************************************************************* -* -* nano_fifo_get_wait - get the head element of a fifo, poll/pend if empty -* -* This is a convenience wrapper for the context-specific APIs. This is -* helpful whenever the exact scheduling context is not known, but should -* be avoided when the context is known up-front (to avoid unnecessary -* overhead). -* -* It's only valid to call this API from a fiber or a task. -*/ +/** + * + * nano_fifo_get_wait - get the head element of a fifo, poll/pend if empty + * + * This is a convenience wrapper for the context-specific APIs. This is + * helpful whenever the exact scheduling context is not known, but should + * be avoided when the context is known up-front (to avoid unnecessary + * overhead). + * + * It's only valid to call this API from a fiber or a task. + */ void *nano_fifo_get_wait(struct nano_fifo *fifo) { static void *(*func[3])(struct nano_fifo *fifo) = { diff --git a/kernel/nanokernel/nano_init.c b/kernel/nanokernel/nano_init.c index 86495f977a9..e44dd5d7db3 100644 --- a/kernel/nanokernel/nano_init.c +++ b/kernel/nanokernel/nano_init.c @@ -33,7 +33,7 @@ /* DESCRIPTION This module contains routines that are used to initialize the nanokernel. 
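Editorial sketch, not part of the patch: a producer/consumer pairing for the nano_fifo API documented in the nano_fifo.c hunks above, illustrating the convention that the first word of each queued element is reserved for the link pointer. The nanokernel.h header, the message structure, and the function names are invented; the fifo is assumed to be initialized before the consumer fiber pends on it.

#include <nanokernel.h>

struct demo_msg {
	void *link;	/* first word: consumed by the fifo as the link pointer */
	int payload;
};

static struct nano_fifo demo_fifo;
static struct demo_msg demo_msg_item;

void demo_fifo_init(void)		/* task or fiber context */
{
	nano_fifo_init(&demo_fifo);
}

void demo_producer_task(void)
{
	demo_msg_item.payload = 42;
	nano_task_fifo_put(&demo_fifo, &demo_msg_item);	/* may ready and preempt to a fiber */
}

void demo_consumer_fiber(int unused1, int unused2)
{
	struct demo_msg *m = nano_fiber_fifo_get_wait(&demo_fifo);	/* pends while empty */

	/* m->link is now invalid (it held the list pointer); m->payload is intact */
	(void)m;
}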
-*/ + */ #include #include @@ -120,7 +120,7 @@ extern void _Ctors(void); #define initialize_nano_timeouts() do { } while ((0)) #endif -/********************************************************************** +/** * * In the nanokernel only configuration we still want to run the * app_{early,late}_init levels to maintain the correct semantics. In @@ -148,18 +148,18 @@ static void _main(void) #endif -/******************************************************************************* -* -* nano_init - initializes nanokernel data structures -* -* This routine initializes various nanokernel data structures, including -* the background (or idle) task and any architecture-specific initialization. -* -* Note that all fields of "_nanokernel" are set to zero on entry, which may -* be all the initialization many of them require. -* -* RETURNS: N/A -*/ +/** + * + * nano_init - initializes nanokernel data structures + * + * This routine initializes various nanokernel data structures, including + * the background (or idle) task and any architecture-specific initialization. + * + * Note that all fields of "_nanokernel" are set to zero on entry, which may + * be all the initialization many of them require. + * + * RETURNS: N/A + */ static void nano_init(tCCS *dummyOutContext) { @@ -219,7 +219,7 @@ static void nano_init(tCCS *dummyOutContext) } #ifdef CONFIG_STACK_CANARIES -/******************************************************************************* +/** * * STACK_CANARY_INIT - initialize the kernel's stack canary * @@ -254,16 +254,16 @@ extern void *__stack_chk_guard; #define STACK_CANARY_INIT() #endif /* CONFIG_STACK_CANARIES */ -/******************************************************************************* -* -* _Cstart - initialize nanokernel -* -* This routine is invoked by the BSP when the system is ready to run C code. -* The processor must be running in 32-bit mode, and the BSS must have been -* cleared/zeroed. -* -* RETURNS: Does not return -*/ +/** + * + * _Cstart - initialize nanokernel + * + * This routine is invoked by the BSP when the system is ready to run C code. + * The processor must be running in 32-bit mode, and the BSS must have been + * cleared/zeroed. + * + * RETURNS: Does not return + */ FUNC_NORETURN void _Cstart(void) { diff --git a/kernel/nanokernel/nano_lifo.c b/kernel/nanokernel/nano_lifo.c index 7ee9b1bee19..38c05e94aad 100644 --- a/kernel/nanokernel/nano_lifo.c +++ b/kernel/nanokernel/nano_lifo.c @@ -43,29 +43,29 @@ the following APIs: INTERNAL In some cases the compiler "alias" attribute is used to map two or more APIs to the same function, since they have identical implementations. -*/ + */ #include #include #include #include -/******************************************************************************* -* -* nano_lifo_init - initialize a nanokernel linked list lifo object -* -* This function initializes a nanokernel system-level linked list lifo -* object structure. -* -* It may be called from either a fiber or task context. -* -* RETURNS: N/A -* -* INTERNAL -* Although the existing implementation will support invocation from an ISR -* context, for future flexibility, this API will be restricted from ISR -* level invocation. -*/ +/** + * + * nano_lifo_init - initialize a nanokernel linked list lifo object + * + * This function initializes a nanokernel system-level linked list lifo + * object structure. + * + * It may be called from either a fiber or task context. 
+ * + * RETURNS: N/A + * + * INTERNAL + * Although the existing implementation will support invocation from an ISR + * context, for future flexibility, this API will be restricted from ISR + * level invocation. + */ void nano_lifo_init( struct nano_lifo *lifo /* lifo to initialize */ @@ -78,22 +78,22 @@ void nano_lifo_init( FUNC_ALIAS(_lifo_put_non_preemptible, nano_isr_lifo_put, void); FUNC_ALIAS(_lifo_put_non_preemptible, nano_fiber_lifo_put, void); -/******************************************************************************* -* -* _lifo_put_non_preemptible - prepend an element to a lifo (no context switch) -* -* This routine adds an element to the head of a lifo object; it may be -* called from either a fiber or an ISR context. A fiber pending on the lifo -* object will be made ready, but will NOT be scheduled to execute. -* -* RETURNS: N/A -* -* INTERNAL -* This function is capable of supporting invocations from both a fiber and an -* ISR context. However, the nano_isr_lifo_put and nano_fiber_lifo_put aliases -* are created to support any required implementation differences in the future -* without introducing a source code migration issue. -*/ +/** + * + * _lifo_put_non_preemptible - prepend an element to a lifo (no context switch) + * + * This routine adds an element to the head of a lifo object; it may be + * called from either a fiber or an ISR context. A fiber pending on the lifo + * object will be made ready, but will NOT be scheduled to execute. + * + * RETURNS: N/A + * + * INTERNAL + * This function is capable of supporting invocations from both a fiber and an + * ISR context. However, the nano_isr_lifo_put and nano_fiber_lifo_put aliases + * are created to support any required implementation differences in the future + * without introducing a source code migration issue. + */ void _lifo_put_non_preemptible( struct nano_lifo *lifo, /* lifo on which to put */ @@ -116,18 +116,18 @@ void _lifo_put_non_preemptible( irq_unlock_inline(imask); } -/******************************************************************************* -* -* nano_task_lifo_put - add an element to the head of a linked list lifo -* -* This routine adds an element to the head of a lifo object; it can be -* called only from a task context. A fiber pending on the lifo -* object will be made ready, and will preempt the running task immediately. -* -* This routine is only callable by a task. -* -* RETURNS: N/A -*/ +/** + * + * nano_task_lifo_put - add an element to the head of a linked list lifo + * + * This routine adds an element to the head of a lifo object; it can be + * called only from a task context. A fiber pending on the lifo + * object will be made ready, and will preempt the running task immediately. + * + * This routine is only callable by a task. + * + * RETURNS: N/A + */ void nano_task_lifo_put( struct nano_lifo *lifo, /* lifo on which to put */ @@ -156,26 +156,26 @@ FUNC_ALIAS(_lifo_get, nano_isr_lifo_get, void *); FUNC_ALIAS(_lifo_get, nano_fiber_lifo_get, void *); FUNC_ALIAS(_lifo_get, nano_task_lifo_get, void *); -/******************************************************************************* -* -* _lifo_get - remove the first element from a linked list lifo -* -* Remove the first element from the specified nanokernel linked list lifo; -* it may be called from a fiber, task, or ISR context. -* -* If no elements are available, NULL is returned. The first word in the -* element contains invalid data because that memory location was used to store -* a pointer to the next element in the linked list. 
-* -* RETURNS: Pointer to first element in the list if available, otherwise NULL -* -* INTERNAL -* This function is capable of supporting invocations from fiber, task, and ISR -* contexts. However, the nano_isr_lifo_get, nano_task_lifo_get, and -* nano_fiber_lifo_get aliases are created to support any required -* implementation differences in the future without introducing a source code -* migration issue. -*/ +/** + * + * _lifo_get - remove the first element from a linked list lifo + * + * Remove the first element from the specified nanokernel linked list lifo; + * it may be called from a fiber, task, or ISR context. + * + * If no elements are available, NULL is returned. The first word in the + * element contains invalid data because that memory location was used to store + * a pointer to the next element in the linked list. + * + * RETURNS: Pointer to first element in the list if available, otherwise NULL + * + * INTERNAL + * This function is capable of supporting invocations from fiber, task, and ISR + * contexts. However, the nano_isr_lifo_get, nano_task_lifo_get, and + * nano_fiber_lifo_get aliases are created to support any required + * implementation differences in the future without introducing a source code + * migration issue. + */ void *_lifo_get( struct nano_lifo *lifo /* lifo on which to receive */ @@ -196,26 +196,26 @@ void *_lifo_get( return data; } -/******************************************************************************* -* -* nano_fiber_lifo_get_wait - get the first element from a LIFO, wait if empty -* -* Remove the first element from the specified system-level linked list lifo; -* it can only be called from a fiber context. -* -* If no elements are available, the calling fiber will pend until an element -* is put onto the list. -* -* The first word in the element contains invalid data because that memory -* location was used to store a pointer to the next element in the linked list. -* -* RETURNS: Pointer to first element in the list -* -* INTERNAL -* There exists a separate nano_task_lifo_get_wait() implementation since a -* task context cannot pend on a nanokernel object. Instead, tasks will poll -* the lifo object. -*/ +/** + * + * nano_fiber_lifo_get_wait - get the first element from a LIFO, wait if empty + * + * Remove the first element from the specified system-level linked list lifo; + * it can only be called from a fiber context. + * + * If no elements are available, the calling fiber will pend until an element + * is put onto the list. + * + * The first word in the element contains invalid data because that memory + * location was used to store a pointer to the next element in the linked list. + * + * RETURNS: Pointer to first element in the list + * + * INTERNAL + * There exists a separate nano_task_lifo_get_wait() implementation since a + * task context cannot pend on a nanokernel object. Instead, tasks will poll + * the lifo object. + */ void *nano_fiber_lifo_get_wait( struct nano_lifo *lifo /* lifo on which to receive */ @@ -238,21 +238,21 @@ void *nano_fiber_lifo_get_wait( return data; } -/******************************************************************************* -* -* nano_task_lifo_get_wait - get the first element from a lifo, poll if empty -* -* Remove the first element from the specified nanokernel linked list lifo; it -* can only be called from a task context. -* -* If no elements are available, the calling task will poll until an element is -* put onto the list. 
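Editorial sketch, not part of the patch: the nano_lifo API documented above mirrors the fifo, so a compressed free-list example follows; the names, sizes, and nanokernel.h header are assumptions, and the same first-word link convention applies.

#include <nanokernel.h>

struct demo_block {
	void *link;	/* first word: overwritten by the lifo link pointer while queued */
	char data[32];
};

static struct nano_lifo demo_free_list;
static struct demo_block demo_pool[4];

void demo_free_list_init(void)		/* called once from a task */
{
	int i;

	nano_lifo_init(&demo_free_list);
	for (i = 0; i < 4; i++) {
		nano_task_lifo_put(&demo_free_list, &demo_pool[i]);
	}
}

void demo_block_user_fiber(int unused1, int unused2)
{
	struct demo_block *blk = nano_fiber_lifo_get_wait(&demo_free_list);	/* pends if empty */

	/* ... use blk->data ... */
	nano_fiber_lifo_put(&demo_free_list, blk);	/* return the block to the free list */
}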
-* -* The first word in the element contains invalid data because that memory -* location was used to store a pointer to the next element in the linked list. -* -* RETURNS: Pointer to first element in the list -*/ +/** + * + * nano_task_lifo_get_wait - get the first element from a lifo, poll if empty + * + * Remove the first element from the specified nanokernel linked list lifo; it + * can only be called from a task context. + * + * If no elements are available, the calling task will poll until an element is + * put onto the list. + * + * The first word in the element contains invalid data because that memory + * location was used to store a pointer to the next element in the linked list. + * + * RETURNS: Pointer to first element in the list + */ void *nano_task_lifo_get_wait( struct nano_lifo *lifo /* lifo on which to interact */ @@ -287,17 +287,17 @@ void *nano_task_lifo_get_wait( return data; } -/******************************************************************************* -* -* _nano_fiber_lifo_get_panic - get first element from lifo and panic if NULL -* -* Get the first element from the specified lifo but generate a fatal error -* if the element is NULL. -* -* RETURNS: Pointer to first element in the list -* -* \NOMANUAL -*/ +/** + * + * _nano_fiber_lifo_get_panic - get first element from lifo and panic if NULL + * + * Get the first element from the specified lifo but generate a fatal error + * if the element is NULL. + * + * RETURNS: Pointer to first element in the list + * + * \NOMANUAL + */ void *_nano_fiber_lifo_get_panic(struct nano_lifo *lifo) { diff --git a/kernel/nanokernel/nano_sema.c b/kernel/nanokernel/nano_sema.c index 74a8a81dd20..11a2244f6fe 100644 --- a/kernel/nanokernel/nano_sema.c +++ b/kernel/nanokernel/nano_sema.c @@ -49,29 +49,29 @@ having to pend on the semaphore. INTERNAL In some cases the compiler "alias" attribute is used to map two or more APIs to the same function, since they have identical implementations. -*/ + */ #include #include #include #include -/******************************************************************************* -* -* nano_sem_init - initialize a nanokernel semaphore object -* -* This function initializes a nanokernel semaphore object structure. After -* initialization, the semaphore count will be 0. -* -* It may be called from either a fiber or task context. -* -* RETURNS: N/A -* -* INTERNAL -* Although the existing implementation will support invocation from an ISR -* context, for future flexibility, this API will be restricted from ISR -* level invocation. -*/ +/** + * + * nano_sem_init - initialize a nanokernel semaphore object + * + * This function initializes a nanokernel semaphore object structure. After + * initialization, the semaphore count will be 0. + * + * It may be called from either a fiber or task context. + * + * RETURNS: N/A + * + * INTERNAL + * Although the existing implementation will support invocation from an ISR + * context, for future flexibility, this API will be restricted from ISR + * level invocation. 
+ */ void nano_sem_init( struct nano_sem *sem /* semaphore object to initialize */ @@ -90,23 +90,23 @@ FUNC_ALIAS(_sem_give_non_preemptible, nano_fiber_sem_give, void); #define set_sem_available(ccs) do { } while ((0)) #endif -/******************************************************************************* -* -* _sem_give_non_preemptible - give a nanokernel semaphore (no context switch) -* -* This routine performs a "give" operation on a nanokernel sempahore object; -* it may be call from either a fiber or an ISR context. A fiber pending on -* the semaphore object will be made ready, but will NOT be scheduled to -* execute. -* -* RETURNS: N/A -* -* INTERNAL -* This function is capable of supporting invocations from both a fiber and an -* ISR context. However, the nano_isr_sem_give and nano_fiber_sem_give aliases -* are created to support any required implementation differences in the future -* without introducing a source code migration issue. -*/ +/** + * + * _sem_give_non_preemptible - give a nanokernel semaphore (no context switch) + * + * This routine performs a "give" operation on a nanokernel sempahore object; + * it may be call from either a fiber or an ISR context. A fiber pending on + * the semaphore object will be made ready, but will NOT be scheduled to + * execute. + * + * RETURNS: N/A + * + * INTERNAL + * This function is capable of supporting invocations from both a fiber and an + * ISR context. However, the nano_isr_sem_give and nano_fiber_sem_give aliases + * are created to support any required implementation differences in the future + * without introducing a source code migration issue. + */ void _sem_give_non_preemptible( struct nano_sem *sem /* semaphore on which to signal */ @@ -127,17 +127,17 @@ void _sem_give_non_preemptible( irq_unlock_inline(imask); } -/******************************************************************************* -* -* nano_task_sem_give - give a nanokernel semaphore -* -* This routine performs a "give" operation on a nanokernel sempahore object; -* it can only be called from a task context. A fiber pending on the -* semaphore object will be made ready, and will preempt the running task -* immediately. -* -* RETURNS: N/A -*/ +/** + * + * nano_task_sem_give - give a nanokernel semaphore + * + * This routine performs a "give" operation on a nanokernel sempahore object; + * it can only be called from a task context. A fiber pending on the + * semaphore object will be made ready, and will preempt the running task + * immediately. + * + * RETURNS: N/A + */ void nano_task_sem_give( struct nano_sem *sem /* semaphore on which to signal */ @@ -160,15 +160,15 @@ void nano_task_sem_give( irq_unlock_inline(imask); } -/******************************************************************************* -* -* nano_sem_give - give a nanokernel semaphore -* -* This is a convenience wrapper for the context-specific APIs. This is -* helpful whenever the exact scheduling context is not known, but should -* be avoided when the context is known up-front (to avoid unnecessary -* overhead). -*/ +/** + * + * nano_sem_give - give a nanokernel semaphore + * + * This is a convenience wrapper for the context-specific APIs. This is + * helpful whenever the exact scheduling context is not known, but should + * be avoided when the context is known up-front (to avoid unnecessary + * overhead). 
+ */ void nano_sem_give(struct nano_sem *sem) { @@ -182,18 +182,18 @@ FUNC_ALIAS(_sem_take, nano_isr_sem_take, int); FUNC_ALIAS(_sem_take, nano_fiber_sem_take, int); FUNC_ALIAS(_sem_take, nano_task_sem_take, int); -/******************************************************************************* -* -* _sem_take - take a nanokernel semaphore, fail if unavailable -* -* Attempt to take a nanokernel sempahore; it may be called from a fiber, task, -* or ISR context. -* -* If the semaphore is not available, this function returns immediately, i.e. -* a wait (pend) operation will NOT be performed. -* -* RETURNS: 1 if semaphore is available, 0 otherwise -*/ +/** + * + * _sem_take - take a nanokernel semaphore, fail if unavailable + * + * Attempt to take a nanokernel sempahore; it may be called from a fiber, task, + * or ISR context. + * + * If the semaphore is not available, this function returns immediately, i.e. + * a wait (pend) operation will NOT be performed. + * + * RETURNS: 1 if semaphore is available, 0 otherwise + */ int _sem_take( struct nano_sem *sem /* semaphore on which to test */ @@ -210,23 +210,23 @@ int _sem_take( return avail; } -/******************************************************************************* -* -* nano_fiber_sem_take_wait - test a nanokernel semaphore, wait if unavailable -* -* Take a nanokernel sempahore; it can only be called from a fiber context. -* -* If the nanokernel semaphore is not available, i.e. the event counter -* is 0, the calling fiber context will wait (pend) until the semaphore is -* given (via nano_fiber_sem_give/nano_task_sem_give/nano_isr_sem_give). -* -* RETURNS: N/A -* -* INTERNAL -* There exists a separate nano_task_sem_take_wait() implementation since a task -* context cannot pend on a nanokernel object. Instead, tasks will poll -* the sempahore object. -*/ +/** + * + * nano_fiber_sem_take_wait - test a nanokernel semaphore, wait if unavailable + * + * Take a nanokernel sempahore; it can only be called from a fiber context. + * + * If the nanokernel semaphore is not available, i.e. the event counter + * is 0, the calling fiber context will wait (pend) until the semaphore is + * given (via nano_fiber_sem_give/nano_task_sem_give/nano_isr_sem_give). + * + * RETURNS: N/A + * + * INTERNAL + * There exists a separate nano_task_sem_take_wait() implementation since a task + * context cannot pend on a nanokernel object. Instead, tasks will poll + * the sempahore object. + */ void nano_fiber_sem_take_wait( struct nano_sem *sem /* semaphore on which to wait */ @@ -244,18 +244,18 @@ void nano_fiber_sem_take_wait( } } -/******************************************************************************* -* -* nano_task_sem_take_wait - take a nanokernel semaphore, poll if unavailable -* -* Take a nanokernel sempahore; it can only be called from a task context. -* -* If the nanokernel semaphore is not available, i.e. the event counter -* is 0, the calling task will poll until the semaphore is given -* (via nano_fiber_sem_give/nano_task_sem_give/nano_isr_sem_give). -* -* RETURNS: N/A -*/ +/** + * + * nano_task_sem_take_wait - take a nanokernel semaphore, poll if unavailable + * + * Take a nanokernel sempahore; it can only be called from a task context. + * + * If the nanokernel semaphore is not available, i.e. the event counter + * is 0, the calling task will poll until the semaphore is given + * (via nano_fiber_sem_give/nano_task_sem_give/nano_isr_sem_give). 
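A minimal usage sketch of the semaphore APIs documented in these comments, pairing a task-context give with a fiber-context take; the fiber/task creation and header includes are assumed rather than shown in this patch:

/* illustrative sketch only -- relevant nanokernel header assumed */
struct nano_sem ready_sem;

void setup(void)
{
	nano_sem_init(&ready_sem);            /* count starts at 0 */
}

void producer_task(void)                      /* task context */
{
	nano_task_sem_give(&ready_sem);       /* readies a pending fiber, which
	                                       * then preempts this task */
}

void consumer_fiber(void)                     /* fiber context */
{
	nano_fiber_sem_take_wait(&ready_sem); /* pends until the give */
	/* ... handle the signalled work ... */
}

The convenience wrappers nano_sem_give() and nano_sem_take_wait() would also work here, at the cost of the context-dispatch overhead noted above.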
+ * + * RETURNS: N/A + */ void nano_task_sem_take_wait( struct nano_sem *sem /* semaphore on which to wait */ @@ -285,17 +285,17 @@ void nano_task_sem_take_wait( irq_unlock_inline(imask); } -/******************************************************************************* -* -* nano_sem_take_wait - take a nanokernel semaphore, poll/pend if not available -* -* This is a convenience wrapper for the context-specific APIs. This is -* helpful whenever the exact scheduling context is not known, but should -* be avoided when the context is known up-front (to avoid unnecessary -* overhead). -* -* It's only valid to call this API from a fiber or a task. -*/ +/** + * + * nano_sem_take_wait - take a nanokernel semaphore, poll/pend if not available + * + * This is a convenience wrapper for the context-specific APIs. This is + * helpful whenever the exact scheduling context is not known, but should + * be avoided when the context is known up-front (to avoid unnecessary + * overhead). + * + * It's only valid to call this API from a fiber or a task. + */ void nano_sem_take_wait(struct nano_sem *sem) { static void (*func[3])(struct nano_sem *sem) = { diff --git a/kernel/nanokernel/nano_stack.c b/kernel/nanokernel/nano_stack.c index df6c88ff65c..308c55a301d 100644 --- a/kernel/nanokernel/nano_stack.c +++ b/kernel/nanokernel/nano_stack.c @@ -44,27 +44,27 @@ the following APIs: INTERNAL In some cases the compiler "alias" attribute is used to map two or more APIs to the same function, since they have identical implementations. -*/ + */ #include #include #include -/******************************************************************************* -* -* nano_stack_init - initialize a nanokernel stack object -* -* This function initializes a nanokernel stack object structure. -* -* It may be called from either a fiber or a task context. -* -* RETURNS: N/A -* -* INTERNAL -* Although the existing implementation will support invocation from an ISR -* context, for future flexibility, this API will be restricted from ISR -* level invocation. -*/ +/** + * + * nano_stack_init - initialize a nanokernel stack object + * + * This function initializes a nanokernel stack object structure. + * + * It may be called from either a fiber or a task context. + * + * RETURNS: N/A + * + * INTERNAL + * Although the existing implementation will support invocation from an ISR + * context, for future flexibility, this API will be restricted from ISR + * level invocation. + */ void nano_stack_init( struct nano_stack *stack, /* stack to initialize */ @@ -78,22 +78,22 @@ void nano_stack_init( FUNC_ALIAS(_stack_push_non_preemptible, nano_isr_stack_push, void); FUNC_ALIAS(_stack_push_non_preemptible, nano_fiber_stack_push, void); -/******************************************************************************* -* -* _stack_push_non_preemptible - push data onto a stack (no context switch) -* -* This routine pushes a data item onto a stack object; it may be called from -* either a fiber or ISR context. A fiber pending on the stack object will be -* made ready, but will NOT be scheduled to execute. -* -* RETURNS: N/A -* -* INTERNAL -* This function is capable of supporting invocations from both a fiber and an -* ISR context. However, the nano_isr_stack_push and nano_fiber_stack_push -* aliases are created to support any required implementation differences in -* the future without introducing a source code migration issue. 
-*/ +/** + * + * _stack_push_non_preemptible - push data onto a stack (no context switch) + * + * This routine pushes a data item onto a stack object; it may be called from + * either a fiber or ISR context. A fiber pending on the stack object will be + * made ready, but will NOT be scheduled to execute. + * + * RETURNS: N/A + * + * INTERNAL + * This function is capable of supporting invocations from both a fiber and an + * ISR context. However, the nano_isr_stack_push and nano_fiber_stack_push + * aliases are created to support any required implementation differences in + * the future without introducing a source code migration issue. + */ void _stack_push_non_preemptible( struct nano_stack *stack, /* stack on which to interact */ @@ -118,16 +118,16 @@ void _stack_push_non_preemptible( irq_unlock_inline(imask); } -/******************************************************************************* -* -* nano_task_stack_push - push data onto a nanokernel stack -* -* This routine pushes a data item onto a stack object; it may be called only -* from a task context. A fiber pending on the stack object will be -* made ready, and will preempt the running task immediately. -* -* RETURNS: N/A -*/ +/** + * + * nano_task_stack_push - push data onto a nanokernel stack + * + * This routine pushes a data item onto a stack object; it may be called only + * from a task context. A fiber pending on the stack object will be + * made ready, and will preempt the running task immediately. + * + * RETURNS: N/A + */ void nano_task_stack_push( struct nano_stack *stack, /* stack on which to interact */ @@ -158,26 +158,26 @@ FUNC_ALIAS(_stack_pop, nano_isr_stack_pop, int); FUNC_ALIAS(_stack_pop, nano_fiber_stack_pop, int); FUNC_ALIAS(_stack_pop, nano_task_stack_pop, int); -/******************************************************************************* -* -* _stack_pop - pop data from a nanokernel stack -* -* Pop the first data word from a nanokernel stack object; it may be called -* from a fiber, task, or ISR context. -* -* If the stack is not empty, a data word is popped and copied to the provided -* address and a non-zero value is returned. If the stack is empty, -* zero is returned. -* -* RETURNS: 1 if stack is not empty, 0 otherwise -* -* INTERNAL -* This function is capable of supporting invocations from fiber, task, and -* ISR contexts. However, the nano_isr_stack_pop, nano_task_stack_pop, and -* nano_fiber_stack_pop aliases are created to support any required -* implementation differences in the future without intoducing a source code -* migration issue. -*/ +/** + * + * _stack_pop - pop data from a nanokernel stack + * + * Pop the first data word from a nanokernel stack object; it may be called + * from a fiber, task, or ISR context. + * + * If the stack is not empty, a data word is popped and copied to the provided + * address and a non-zero value is returned. If the stack is empty, + * zero is returned. + * + * RETURNS: 1 if stack is not empty, 0 otherwise + * + * INTERNAL + * This function is capable of supporting invocations from fiber, task, and + * ISR contexts. However, the nano_isr_stack_pop, nano_task_stack_pop, and + * nano_fiber_stack_pop aliases are created to support any required + * implementation differences in the future without intoducing a source code + * migration issue. 
+ */ int _stack_pop( struct nano_stack *stack, /* stack on which to interact */ @@ -199,23 +199,23 @@ int _stack_pop( return rv; } -/******************************************************************************* -* -* nano_fiber_stack_pop_wait - pop data from a nanokernel stack, wait if empty -* -* Pop the first data word from a nanokernel stack object; it can only be -* called from a fiber context -* -* If data is not available the calling fiber will pend until data is pushed -* onto the stack. -* -* RETURNS: the data popped from the stack -* -* INTERNAL -* There exists a separate nano_task_stack_pop_wait() implementation since a -* task context cannot pend on a nanokernel object. Instead tasks will poll the -* the stack object. -*/ +/** + * + * nano_fiber_stack_pop_wait - pop data from a nanokernel stack, wait if empty + * + * Pop the first data word from a nanokernel stack object; it can only be + * called from a fiber context + * + * If data is not available the calling fiber will pend until data is pushed + * onto the stack. + * + * RETURNS: the data popped from the stack + * + * INTERNAL + * There exists a separate nano_task_stack_pop_wait() implementation since a + * task context cannot pend on a nanokernel object. Instead tasks will poll the + * the stack object. + */ uint32_t nano_fiber_stack_pop_wait( struct nano_stack *stack /* stack on which to interact */ @@ -238,18 +238,18 @@ uint32_t nano_fiber_stack_pop_wait( return data; } -/******************************************************************************* -* -* nano_task_stack_pop_wait - pop data from a nanokernel stack, poll if empty -* -* Pop the first data word from a nanokernel stack; it can only be called -* from a task context. -* -* If data is not available the calling task will poll until data is pushed -* onto the stack. -* -* RETURNS: the data popped from the stack -*/ +/** + * + * nano_task_stack_pop_wait - pop data from a nanokernel stack, poll if empty + * + * Pop the first data word from a nanokernel stack; it can only be called + * from a task context. + * + * If data is not available the calling task will poll until data is pushed + * onto the stack. 
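A sketch of the stack push/pop pairing documented above. The push routines are assumed to take the 32-bit data word as their second argument (matching the uint32_t returned by the pop routines); initialization of the stack's storage area via nano_stack_init() is omitted because its full signature lies outside this hunk:

struct nano_stack cmd_stack;                  /* assumed already initialized */

void cmd_isr(void *unused)                    /* ISR context: never blocks */
{
	(void)unused;
	nano_isr_stack_push(&cmd_stack, 0x1234);
}

void cmd_fiber(void)                          /* fiber context: pends when empty */
{
	for (;;) {
		uint32_t cmd = nano_fiber_stack_pop_wait(&cmd_stack);
		/* ... act on cmd ... */
		(void)cmd;
	}
}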
+ * + * RETURNS: the data popped from the stack + */ uint32_t nano_task_stack_pop_wait( struct nano_stack *stack /* stack on which to interact */ diff --git a/kernel/nanokernel/nano_sys_clock.c b/kernel/nanokernel/nano_sys_clock.c index 07424992ebc..23a30455f90 100644 --- a/kernel/nanokernel/nano_sys_clock.c +++ b/kernel/nanokernel/nano_sys_clock.c @@ -53,13 +53,13 @@ int sys_clock_hw_cycles_per_tick; /* updated by timer driver for tickless, stays at 1 for non-tickless */ uint32_t _sys_idle_elapsed_ticks = 1; -/******************************************************************************* -* -* nano_time_init - constructor that initializes nanokernel time tracking system -* -* RETURNS: N/A -* -*/ +/** + * + * nano_time_init - constructor that initializes nanokernel time tracking system + * + * RETURNS: N/A + * + */ void nano_time_init(void) { @@ -72,26 +72,26 @@ SYS_PREKERNEL_INIT(nano_time_init, 250); int64_t _nano_ticks = 0; -/******************************************************************************* -* -* nano_tick_get_32 - return the lower part of the current system tick count -* -* RETURNS: the current system tick count -* -*/ +/** + * + * nano_tick_get_32 - return the lower part of the current system tick count + * + * RETURNS: the current system tick count + * + */ uint32_t nano_tick_get_32(void) { return (uint32_t)_nano_ticks; } -/******************************************************************************* -* -* nano_tick_get - return the current system tick count -* -* RETURNS: the current system tick count -* -*/ +/** + * + * nano_tick_get - return the current system tick count + * + * RETURNS: the current system tick count + * + */ int64_t nano_tick_get(void) { @@ -108,47 +108,47 @@ int64_t nano_tick_get(void) return tmp_nano_ticks; } -/******************************************************************************* -* -* nano_cycle_get_32 - return a high resolution timestamp -* -* RETURNS: the current timer hardware count -* -*/ +/** + * + * nano_cycle_get_32 - return a high resolution timestamp + * + * RETURNS: the current timer hardware count + * + */ uint32_t nano_cycle_get_32(void) { return timer_read(); } -/******************************************************************************* -* -* nano_tick_delta - return number of ticks since a reference time -* -* This function is meant to be used in contained fragments of code. The first -* call to it in a particular code fragment fills in a reference time variable -* which then gets passed and updated every time the function is called. From -* the second call on, the delta between the value passed to it and the current -* tick count is the return value. Since the first call is meant to only fill in -* the reference time, its return value should be discarded. -* -* Since a code fragment that wants to use nano_tick_delta passes in its -* own reference time variable, multiple code fragments can make use of this -* function concurrently. -* -* e.g. -* uint64_t reftime; -* (void) nano_tick_delta(&reftime); /# prime it #/ -* [do stuff] -* x = nano_tick_delta(&reftime); /# how long since priming #/ -* [do more stuff] -* y = nano_tick_delta(&reftime); /# how long since [do stuff] #/ -* -* RETURNS: tick count since reference time; undefined for first invocation -* -* NOTE: We use inline function for both 64-bit and 32-bit functions. -* Compiler optimizes out 64-bit result handling in 32-bit version. 
-*/ +/** + * + * nano_tick_delta - return number of ticks since a reference time + * + * This function is meant to be used in contained fragments of code. The first + * call to it in a particular code fragment fills in a reference time variable + * which then gets passed and updated every time the function is called. From + * the second call on, the delta between the value passed to it and the current + * tick count is the return value. Since the first call is meant to only fill in + * the reference time, its return value should be discarded. + * + * Since a code fragment that wants to use nano_tick_delta passes in its + * own reference time variable, multiple code fragments can make use of this + * function concurrently. + * + * e.g. + * uint64_t reftime; + * (void) nano_tick_delta(&reftime); /# prime it #/ + * [do stuff] + * x = nano_tick_delta(&reftime); /# how long since priming #/ + * [do more stuff] + * y = nano_tick_delta(&reftime); /# how long since [do stuff] #/ + * + * RETURNS: tick count since reference time; undefined for first invocation + * + * NOTE: We use inline function for both 64-bit and 32-bit functions. + * Compiler optimizes out 64-bit result handling in 32-bit version. + */ static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime) { @@ -170,24 +170,24 @@ static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime) return delta; } -/******************************************************************************* -* -* nano_tick_delta - return number of ticks since a reference time -* -* RETURNS: tick count since reference time; undefined for first invocation -*/ +/** + * + * nano_tick_delta - return number of ticks since a reference time + * + * RETURNS: tick count since reference time; undefined for first invocation + */ int64_t nano_tick_delta(int64_t *reftime) { return _nano_tick_delta(reftime); } -/******************************************************************************* -* -* nano_tick_delta_32 - return 32-bit number of ticks since a reference time -* -* RETURNS: 32-bit tick count since reference time; undefined for first invocation -*/ +/** + * + * nano_tick_delta_32 - return 32-bit number of ticks since a reference time + * + * RETURNS: 32-bit tick count since reference time; undefined for first invocation + */ uint32_t nano_tick_delta_32(int64_t *reftime) { @@ -234,16 +234,16 @@ static inline void handle_expired_nano_timers(int ticks) #endif #if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS) -/******************************************************************************* -* -* _nano_sys_clock_tick_announce - announce a tick to the nanokernel -* -* This function is only to be called by the system clock timer driver when a -* tick is to be announced to the nanokernel. It takes care of dequeuing the -* timers that have expired and wake up the fibers pending on them. -* -* RETURNS: N/A -*/ +/** + * + * _nano_sys_clock_tick_announce - announce a tick to the nanokernel + * + * This function is only to be called by the system clock timer driver when a + * tick is to be announced to the nanokernel. It takes care of dequeuing the + * timers that have expired and wake up the fibers pending on them. 
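Written out as compilable code, the /# ... #/ fragment from the nano_tick_delta() comment above looks like this (note the reference variable is an int64_t, matching the declared parameter type):

void measure_fragment(void)
{
	int64_t reftime;
	int64_t x, y;

	(void) nano_tick_delta(&reftime);     /* prime it */
	/* ... do stuff ... */
	x = nano_tick_delta(&reftime);        /* ticks since priming */
	/* ... do more stuff ... */
	y = nano_tick_delta(&reftime);        /* ticks since the previous call */
	(void)x;
	(void)y;
}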
+ * + * RETURNS: N/A + */ void _nano_sys_clock_tick_announce(uint32_t ticks) { diff --git a/kernel/nanokernel/nano_timer.c b/kernel/nanokernel/nano_timer.c index eafc5fd8996..f677f6b1f64 100644 --- a/kernel/nanokernel/nano_timer.c +++ b/kernel/nanokernel/nano_timer.c @@ -34,21 +34,21 @@ struct nano_timer *_nano_timer_list = NULL; -/******************************************************************************* -* -* nano_timer_init - initialize a nanokernel timer object -* -* This function initializes a nanokernel timer object structure. -* -* It may be called from either a fiber or task context. -* -* The passed to this function must have enough space for a pointer -* in its first field, that may be overwritten when the timer expires, plus -* whatever data the user wishes to store and recover when the timer expires. -* -* RETURNS: N/A -* -*/ +/** + * + * nano_timer_init - initialize a nanokernel timer object + * + * This function initializes a nanokernel timer object structure. + * + * It may be called from either a fiber or task context. + * + * The passed to this function must have enough space for a pointer + * in its first field, that may be overwritten when the timer expires, plus + * whatever data the user wishes to store and recover when the timer expires. + * + * RETURNS: N/A + * + */ void nano_timer_init(struct nano_timer *timer, void *userData) { @@ -56,43 +56,43 @@ void nano_timer_init(struct nano_timer *timer, void *userData) timer->userData = userData; } -/******************************************************************************* -* -* nano_fiber_timer_start - start a nanokernel timer from a fiber -* -* This function starts a previously initialized nanokernel timer object. -* The timer will expire in system clock ticks. -* -* RETURNS: N/A -* -*/ +/** + * + * nano_fiber_timer_start - start a nanokernel timer from a fiber + * + * This function starts a previously initialized nanokernel timer object. + * The timer will expire in system clock ticks. + * + * RETURNS: N/A + * + */ FUNC_ALIAS(_timer_start, nano_fiber_timer_start, void); -/******************************************************************************* -* -* nano_task_timer_start - start a nanokernel timer from a task -* -* This function starts a previously initialized nanokernel timer object. -* The timer will expire in system clock ticks. -* -* RETURNS: N/A -* -*/ +/** + * + * nano_task_timer_start - start a nanokernel timer from a task + * + * This function starts a previously initialized nanokernel timer object. + * The timer will expire in system clock ticks. + * + * RETURNS: N/A + * + */ FUNC_ALIAS(_timer_start, nano_task_timer_start, void); -/******************************************************************************* -* -* _timer_start - start a nanokernel timer (generic implementation) -* -* This function starts a previously initialized nanokernel timer object. -* The timer will expire in system clock ticks. -* -* RETURNS: N/A -* -* NOMANUAL -*/ +/** + * + * _timer_start - start a nanokernel timer (generic implementation) + * + * This function starts a previously initialized nanokernel timer object. + * The timer will expire in system clock ticks. 
+ * + * RETURNS: N/A + * + * NOMANUAL + */ void _timer_start(struct nano_timer *timer, /* timer to start */ int ticks /* number of system ticks @@ -127,16 +127,16 @@ void _timer_start(struct nano_timer *timer, /* timer to start */ irq_unlock_inline(imask); } -/******************************************************************************* -* -* _timer_stop - stop a nanokernel timer (generic implementation) -* -* This function stops a previously started nanokernel timer object. -* -* RETURNS: N/A -* -* NOMANUAL -*/ +/** + * + * _timer_stop - stop a nanokernel timer (generic implementation) + * + * This function stops a previously started nanokernel timer object. + * + * RETURNS: N/A + * + * NOMANUAL + */ static void _timer_stop(struct nano_timer *timer /* timer to stop */ ) @@ -176,15 +176,15 @@ static void _timer_stop(struct nano_timer *timer /* timer to stop */ irq_unlock_inline(imask); } -/******************************************************************************* -* -* nano_fiber_timer_stop - stop a nanokernel timer from a fiber -* -* This function stops a previously started nanokernel timer object. -* -* RETURNS: N/A -* -*/ +/** + * + * nano_fiber_timer_stop - stop a nanokernel timer from a fiber + * + * This function stops a previously started nanokernel timer object. + * + * RETURNS: N/A + * + */ void nano_fiber_timer_stop(struct nano_timer *timer /* timer to stop */ ) @@ -198,15 +198,15 @@ void nano_fiber_timer_stop(struct nano_timer *timer /* timer to stop */ } } -/******************************************************************************* -* -* nano_task_timer_stop - stop a nanokernel timer from a task -* -* This function stops a previously started nanokernel timer object. -* -* RETURNS: N/A -* -*/ +/** + * + * nano_task_timer_stop - stop a nanokernel timer from a task + * + * This function stops a previously started nanokernel timer object. + * + * RETURNS: N/A + * + */ void nano_task_timer_stop(struct nano_timer *timer /* timer to stop */ ) @@ -220,17 +220,17 @@ void nano_task_timer_stop(struct nano_timer *timer /* timer to stop */ } } -/******************************************************************************* -* -* nano_fiber_timer_test - make the current fiber check for a timer expiry -* -* This function will check if a timer has expired. The timer must -* have been initialized by nano_timer_init() and started via either -* nano_fiber_timer_start() or nano_task_timer_start() first. -* -* RETURNS: pointer to timer initialization data, or NULL if timer not expired -* -*/ +/** + * + * nano_fiber_timer_test - make the current fiber check for a timer expiry + * + * This function will check if a timer has expired. The timer must + * have been initialized by nano_timer_init() and started via either + * nano_fiber_timer_start() or nano_task_timer_start() first. + * + * RETURNS: pointer to timer initialization data, or NULL if timer not expired + * + */ void *nano_fiber_timer_test(struct nano_timer *timer /* timer to check */ ) @@ -238,18 +238,18 @@ void *nano_fiber_timer_test(struct nano_timer *timer /* timer to check */ return nano_fiber_lifo_get(&timer->lifo); } -/******************************************************************************* -* -* nano_fiber_timer_wait - make the current fiber wait for a timer to expire -* -* This function will pend on a timer if it hasn't expired yet. 
The timer must -* have been initialized by nano_timer_init() and started via either -* nano_fiber_timer_start() or nano_task_timer_start() first and must not -* have been stopped via nano_task_timer_stop() or nano_fiber_timer_stop(). -* -* RETURNS: pointer to timer initialization data -* -*/ +/** + * + * nano_fiber_timer_wait - make the current fiber wait for a timer to expire + * + * This function will pend on a timer if it hasn't expired yet. The timer must + * have been initialized by nano_timer_init() and started via either + * nano_fiber_timer_start() or nano_task_timer_start() first and must not + * have been stopped via nano_task_timer_stop() or nano_fiber_timer_stop(). + * + * RETURNS: pointer to timer initialization data + * + */ void *nano_fiber_timer_wait(struct nano_timer *timer /* timer to pend on */ ) @@ -257,17 +257,17 @@ void *nano_fiber_timer_wait(struct nano_timer *timer /* timer to pend on */ return nano_fiber_lifo_get_wait(&timer->lifo); } -/******************************************************************************* -* -* nano_task_timer_test - make the current task check for a timer expiry -* -* This function will check if a timer has expired. The timer must -* have been initialized by nano_timer_init() and started via either -* nano_fiber_timer_start() or nano_task_timer_start() first. -* -* RETURNS: pointer to timer initialization data, or NULL if timer not expired -* -*/ +/** + * + * nano_task_timer_test - make the current task check for a timer expiry + * + * This function will check if a timer has expired. The timer must + * have been initialized by nano_timer_init() and started via either + * nano_fiber_timer_start() or nano_task_timer_start() first. + * + * RETURNS: pointer to timer initialization data, or NULL if timer not expired + * + */ void *nano_task_timer_test(struct nano_timer *timer /* timer to check */ ) @@ -275,18 +275,18 @@ void *nano_task_timer_test(struct nano_timer *timer /* timer to check */ return nano_task_lifo_get(&timer->lifo); } -/******************************************************************************* -* -* nano_task_timer_wait - make the current task wait for a timer to expire -* -* This function will pend on a timer if it hasn't expired yet. The timer must -* have been initialized by nano_timer_init() and started via either -* nano_fiber_timer_start() or nano_task_timer_start() first and must not -* have been stopped via nano_task_timer_stop() or nano_fiber_timer_stop(). -* -* RETURNS: pointer to timer initialization data -* -*/ +/** + * + * nano_task_timer_wait - make the current task wait for a timer to expire + * + * This function will pend on a timer if it hasn't expired yet. The timer must + * have been initialized by nano_timer_init() and started via either + * nano_fiber_timer_start() or nano_task_timer_start() first and must not + * have been stopped via nano_task_timer_stop() or nano_fiber_timer_stop(). 
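A sketch of the one-shot timer flow these comments describe, from a fiber context; per the nano_timer_init() comment, the first word of the user data block is reserved for the kernel's link pointer, so the block below leaves one extra word for user data:

struct nano_timer delay_timer;
void *timer_data[2];                          /* word 0 reserved for the kernel */

void fiber_delay(int ticks)
{
	nano_timer_init(&delay_timer, timer_data);
	nano_fiber_timer_start(&delay_timer, ticks);
	(void) nano_fiber_timer_wait(&delay_timer);   /* pends until expiry */
}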
+ * + * RETURNS: pointer to timer initialization data + * + */ void *nano_task_timer_wait(struct nano_timer *timer /* timer to pend on */ ) diff --git a/kernel/nanokernel/version.c b/kernel/nanokernel/version.c index 6996dd667e9..28d7761d039 100644 --- a/kernel/nanokernel/version.c +++ b/kernel/nanokernel/version.c @@ -35,15 +35,15 @@ static uint32_t kernel_version = KERNELVERSION; -/******************************************************************************* -* -* sys_kernel_version_get - return the kernel version of the present build -* -* The kernel version is a four-byte value, whose format is decribed in the -* file "kernel_version.h". -* -* RETURNS: kernel version -*/ +/** + * + * sys_kernel_version_get - return the kernel version of the present build + * + * The kernel version is a four-byte value, whose format is decribed in the + * file "kernel_version.h". + * + * RETURNS: kernel version + */ uint32_t sys_kernel_version_get(void) { diff --git a/lib/libc/minimal/source/stdout/prf.c b/lib/libc/minimal/source/stdout/prf.c index 4cc17d3f151..5c908ec3dfe 100644 --- a/lib/libc/minimal/source/stdout/prf.c +++ b/lib/libc/minimal/source/stdout/prf.c @@ -500,18 +500,18 @@ static int _to_float(char *buf, uint32_t double_temp[], int full, int c, return buf - start; } -/******************************************************************************* -* -* _isdigit - is the input value an ASCII digit character? -* -* This function provides a traditional implementation of the isdigit() -* primitive that is footprint conversative, i.e. it does not utilize a -* lookup table. -* -* RETURNS: non-zero if input integer in an ASCII digit character -* -* INTERNAL -*/ +/** + * + * _isdigit - is the input value an ASCII digit character? + * + * This function provides a traditional implementation of the isdigit() + * primitive that is footprint conversative, i.e. it does not utilize a + * lookup table. 
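The body of _isdigit() falls outside this hunk; a footprint-conservative implementation of the kind the comment describes would presumably be a plain range check rather than a lookup table, along these lines (assumed, shown only for context):

static inline int _isdigit(int c)
{
	return (c >= '0') && (c <= '9');
}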
+ * + * RETURNS: non-zero if input integer in an ASCII digit character + * + * INTERNAL + */ static inline int _isdigit(int c) { diff --git a/lib/libc/minimal/source/string/string.c b/lib/libc/minimal/source/string/string.c index ca12b9047f4..75d2cde2828 100644 --- a/lib/libc/minimal/source/string/string.c +++ b/lib/libc/minimal/source/string/string.c @@ -32,12 +32,12 @@ #include -/******************************************************************************* -* -* strcpy - copy a string -* -* RETURNS: pointer to destination buffer -*/ +/** + * + * strcpy - copy a string + * + * RETURNS: pointer to destination buffer + */ char *strcpy(char *restrict d, const char *restrict s) { @@ -54,12 +54,12 @@ char *strcpy(char *restrict d, const char *restrict s) return dest; } -/******************************************************************************* -* -* strncpy - copy part of a string -* -* RETURNS: pointer to destination buffer -*/ +/** + * + * strncpy - copy part of a string + * + * RETURNS: pointer to destination buffer + */ char *strncpy(char *restrict d, const char *restrict s, size_t n) { @@ -81,12 +81,12 @@ char *strncpy(char *restrict d, const char *restrict s, size_t n) return dest; } -/******************************************************************************* -* -* strchr - string scanning operation -* -* RETURNS: pointer to 1st instance of found byte, or NULL if not found -*/ +/** + * + * strchr - string scanning operation + * + * RETURNS: pointer to 1st instance of found byte, or NULL if not found + */ char *strchr(const char *s, int c) { @@ -98,12 +98,12 @@ char *strchr(const char *s, int c) return (*s == tmp) ? (char *) s : NULL; } -/******************************************************************************* -* -* strlen - get string length -* -* RETURNS: number of bytes in string -*/ +/** + * + * strlen - get string length + * + * RETURNS: number of bytes in string + */ size_t strlen(const char *s) { @@ -117,12 +117,12 @@ size_t strlen(const char *s) return n; } -/******************************************************************************* -* -* strcmp - compare two strings -* -* RETURNS: negative # if < , 0 if == , else positive # -*/ +/** + * + * strcmp - compare two strings + * + * RETURNS: negative # if < , 0 if == , else positive # + */ int strcmp(const char *s1, const char *s2) { @@ -134,12 +134,12 @@ int strcmp(const char *s1, const char *s2) return *s1 - *s2; } -/******************************************************************************* -* -* strncmp - compare part of two strings -* -* RETURNS: negative # if < , 0 if == , else positive # -*/ +/** + * + * strncmp - compare part of two strings + * + * RETURNS: negative # if < , 0 if == , else positive # + */ int strncmp(const char *s1, const char *s2, size_t n) { @@ -152,12 +152,12 @@ int strncmp(const char *s1, const char *s2, size_t n) return (n == 0) ? 
0 : (*s1 - *s2); } -/******************************************************************************* -* -* memcmp - compare two memory areas -* -* RETURNS: negative # if < , 0 if == , else positive # -*/ +/** + * + * memcmp - compare two memory areas + * + * RETURNS: negative # if < , 0 if == , else positive # + */ int memcmp(const void *m1, const void *m2, size_t n) { @@ -175,12 +175,12 @@ int memcmp(const void *m1, const void *m2, size_t n) return *c1 - *c2; } -/******************************************************************************* -* -* memmove - copy bytes in memory with overlapping areas -* -* RETURNS: pointer to destination buffer -*/ +/** + * + * memmove - copy bytes in memory with overlapping areas + * + * RETURNS: pointer to destination buffer + */ void *memmove(void *d, const void *s, size_t n) { @@ -210,12 +210,12 @@ void *memmove(void *d, const void *s, size_t n) return d; } -/******************************************************************************* -* -* memcpy - copy bytes in memory -* -* RETURNS: pointer to start of destination buffer -*/ +/** + * + * memcpy - copy bytes in memory + * + * RETURNS: pointer to start of destination buffer + */ void *memcpy(void *restrict d, const void *restrict s, size_t n) { @@ -260,12 +260,12 @@ void *memcpy(void *restrict d, const void *restrict s, size_t n) return d; } -/******************************************************************************* -* -* memset - set bytes in memory -* -* RETURNS: pointer to start of buffer -*/ +/** + * + * memset - set bytes in memory + * + * RETURNS: pointer to start of buffer + */ void *memset(void *buf, int c, size_t n) { diff --git a/misc/printk.c b/misc/printk.c index 8a5b2940163..352722fdc4b 100644 --- a/misc/printk.c +++ b/misc/printk.c @@ -34,7 +34,7 @@ DESCRIPTION Low-level debugging output. BSP installs a character output routine at init time. If no routine is installed, a nop routine is called. -*/ + */ #include #include @@ -46,12 +46,12 @@ init time. If no routine is installed, a nop routine is called. static void _printk_dec_ulong(const unsigned long num); static void _printk_hex_ulong(const unsigned long num); -/****************************************************************************** -* -* _nop_char_out - default character output routine that does nothing -* -* RETURNS: 0 -*/ +/** + * + * _nop_char_out - default character output routine that does nothing + * + * RETURNS: 0 + */ static int _nop_char_out(int c /* character to swallow */ ) @@ -65,15 +65,15 @@ static int _nop_char_out(int c /* character to swallow */ static int (*_char_out)(int) = _nop_char_out; -/****************************************************************************** -* -* __printk_hook_install - install the character output routine for printk -* -* To be called by the BSP's console driver at init time. Installs a routine -* that outputs one ASCII character at a time. -* -* RETURNS: N/A -*/ +/** + * + * __printk_hook_install - install the character output routine for printk + * + * To be called by the BSP's console driver at init time. Installs a routine + * that outputs one ASCII character at a time. + * + * RETURNS: N/A + */ void __printk_hook_install(int (*fn)(int) /* putc routine to install */ ) @@ -81,14 +81,14 @@ void __printk_hook_install(int (*fn)(int) /* putc routine to install */ _char_out = fn; } -/****************************************************************************** -* -* _vprintk - printk internals -* -* See printk() for description. 
-* -* RETURNS: N/A -*/ +/** + * + * _vprintk - printk internals + * + * See printk() for description. + * + * RETURNS: N/A + */ static inline void _vprintk(const char *fmt, /* format string */ va_list ap /* variable parameters */ @@ -154,23 +154,23 @@ static inline void _vprintk(const char *fmt, /* format string */ } } -/****************************************************************************** -* -* printk - output a string -* -* Output a string on output installed by BSP at init time. Some printf-like -* formatting is available. -* -* Available formatting: -* - %x/%X: outputs a 32-bit number in ABCDWXYZ format. All eight digits -* are printed: if less than 8 characters are needed, leading zeroes -* are displayed. -* - %s: output a null-terminated string -* - %p: pointer, same as %x -* - %d/%i/%u: outputs a 32-bit number in unsigned decimal format. -* -* RETURNS: N/A -*/ +/** + * + * printk - output a string + * + * Output a string on output installed by BSP at init time. Some printf-like + * formatting is available. + * + * Available formatting: + * - %x/%X: outputs a 32-bit number in ABCDWXYZ format. All eight digits + * are printed: if less than 8 characters are needed, leading zeroes + * are displayed. + * - %s: output a null-terminated string + * - %p: pointer, same as %x + * - %d/%i/%u: outputs a 32-bit number in unsigned decimal format. + * + * RETURNS: N/A + */ void printk(const char *fmt, /* formatted string to output */ ...) @@ -182,15 +182,15 @@ void printk(const char *fmt, /* formatted string to output */ va_end(ap); } -/****************************************************************************** -* -* _printk_hex_ulong - output an unsigned long in hex format -* -* Output an unsigned long on output installed by BSP at init time. Should be -* able to handle an unsigned long of any size, 32 or 64 bit. -* -* RETURNS: N/A -*/ +/** + * + * _printk_hex_ulong - output an unsigned long in hex format + * + * Output an unsigned long on output installed by BSP at init time. Should be + * able to handle an unsigned long of any size, 32 or 64 bit. + * + * RETURNS: N/A + */ static void _printk_hex_ulong( const unsigned long num /* number to output */ @@ -205,15 +205,15 @@ static void _printk_hex_ulong( } } -/****************************************************************************** -* -* _printk_dec_ulong - output an unsigned long (32-bit) in decimal format -* -* Output an unsigned long on output installed by BSP at init time. Only works -* with 32-bit values. -* -* RETURNS: N/A -*/ +/** + * + * _printk_dec_ulong - output an unsigned long (32-bit) in decimal format + * + * Output an unsigned long on output installed by BSP at init time. Only works + * with 32-bit values. + * + * RETURNS: N/A + */ static void _printk_dec_ulong( const unsigned long num /* number to output */ @@ -236,14 +236,14 @@ static void _printk_dec_ulong( #else /* CONFIG_PRINTK */ -/****************************************************************************** -* -* printk - output a string -* -* Debugging output is dropped if it is not to be sent to the console. -* -* RETURNS: N/A -*/ +/** + * + * printk - output a string + * + * Debugging output is dropped if it is not to be sent to the console. + * + * RETURNS: N/A + */ void printk(const char *fmt, ...) 
{ diff --git a/net/ip/contiki/ip/uip.h b/net/ip/contiki/ip/uip.h index fcb3fda2117..55ac66edfd6 100644 --- a/net/ip/contiki/ip/uip.h +++ b/net/ip/contiki/ip/uip.h @@ -454,7 +454,7 @@ void uip_reass_over(void); } } \endcode -*/ + */ typedef union { uint32_t u32[(UIP_BUFSIZE + 3) / 4]; @@ -1536,7 +1536,7 @@ CCIF extern uint8_t uip_flags; * \retval 0: nothing to send, * \retval 1: drop pkt * \retval 2: ICMP error message to send -*/ + */ /*static uint8_t uip_ext_hdr_options_process(); */ diff --git a/net/ip/contiki/ip/uipopt.h b/net/ip/contiki/ip/uipopt.h index 7a523b254e8..f2610c53f14 100644 --- a/net/ip/contiki/ip/uipopt.h +++ b/net/ip/contiki/ip/uipopt.h @@ -652,7 +652,7 @@ void uip_log(char *msg); }; typedef struct httpd_state uip_tcp_appstate_t \endcode -*/ + */ /** * \var #define UIP_APPCALL diff --git a/net/ip/contiki/ipv6/multicast/README.md b/net/ip/contiki/ipv6/multicast/README.md index 2108b92f2bf..0aea775529f 100644 --- a/net/ip/contiki/ipv6/multicast/README.md +++ b/net/ip/contiki/ipv6/multicast/README.md @@ -9,14 +9,14 @@ to contiki's uIPv6 engine. Currently, two modes are supported: -* 'Stateless Multicast RPL Forwarding' (SMRF) + * 'Stateless Multicast RPL Forwarding' (SMRF) RPL in MOP 3 handles group management as per the RPL docs, SMRF is a lightweight engine which handles datagram forwarding. SMRF is documented here: http://dx.doi.org/10.1007/s11277-013-1250-5 and here: http://dx.doi.org/10.1109/PerComW.2012.6197494 -* 'Multicast Forwarding with Trickle' according to the algorithm described + * 'Multicast Forwarding with Trickle' according to the algorithm described in the internet draft: http://tools.ietf.org/html/draft-ietf-roll-trickle-mcast The version of this draft that's currently implementated is documented @@ -32,9 +32,9 @@ To be able to send multicast traffic from the internet to 6LoWPAN nodes or the o way round, we need border routers or other gateway devices to be able to achieve the following: -* Add/Remove Trickle Multicast, RPL or other HBHO headers as necessary for datagrams + * Add/Remove Trickle Multicast, RPL or other HBHO headers as necessary for datagrams entering / exiting the 6LoWPAN -* Advertise multicast group membership to the internet (e.g. with MLD) + * Advertise multicast group membership to the internet (e.g. with MLD) These are currently not implemented and are in the ToDo list. Contributions welcome. 
diff --git a/net/ip/contiki/ipv6/uip-ds6.h b/net/ip/contiki/ipv6/uip-ds6.h index c3e4af86c04..c7f492d4cb3 100644 --- a/net/ip/contiki/ipv6/uip-ds6.h +++ b/net/ip/contiki/ipv6/uip-ds6.h @@ -58,7 +58,7 @@ * - the number of elements requested by the user in contiki configuration (name suffixed by _NBU) * - the number of elements assigned by the system (name suffixed by _NBS) * - the total number of elements is the sum (name suffixed by _NB) -*/ + */ /* Default router list */ #define UIP_DS6_DEFRT_NBS 0 diff --git a/net/ip/contiki/ipv6/uip-nd6.h b/net/ip/contiki/ipv6/uip-nd6.h index c273c3fe7b6..abb25fc8ac7 100644 --- a/net/ip/contiki/ipv6/uip-nd6.h +++ b/net/ip/contiki/ipv6/uip-nd6.h @@ -415,7 +415,7 @@ void uip_nd6_init(void); void uip_appserver_addr_get(uip_ipaddr_t *ipaddr); /*--------------------------------------*/ -/******* ANNEX - message formats ********/ +/** ANNEX - message formats ********/ /*--------------------------------------*/ /* diff --git a/net/ip/contiki/mac/frame802154.c b/net/ip/contiki/mac/frame802154.c index 3cd85d20b4b..ce75ccbff12 100644 --- a/net/ip/contiki/mac/frame802154.c +++ b/net/ip/contiki/mac/frame802154.c @@ -44,11 +44,11 @@ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * -*/ + */ /* * \brief This file is where the main functions that relate to frame * manipulation will reside. -*/ + */ /** * \file @@ -61,7 +61,7 @@ /** * \addtogroup frame802154 * @{ -*/ + */ #include "sys/cc.h" #include "net/mac/frame802154.h" @@ -161,7 +161,7 @@ field_len(frame802154_t *p, field_length_t *flen) * frame to send. * * \return The length of the frame header. -*/ + */ int frame802154_hdrlen(frame802154_t *p) { @@ -181,7 +181,7 @@ frame802154_hdrlen(frame802154_t *p) * \param buf Pointer to the buffer to use for the frame. * * \return The length of the frame header -*/ + */ int frame802154_create(frame802154_t *p, uint8_t *buf) { diff --git a/net/ip/contiki/mac/frame802154.h b/net/ip/contiki/mac/frame802154.h index 64a36712044..1c0fcb2b6fc 100644 --- a/net/ip/contiki/mac/frame802154.h +++ b/net/ip/contiki/mac/frame802154.h @@ -41,7 +41,7 @@ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. -*/ + */ /** * \addtogroup net @@ -59,7 +59,7 @@ * This file converts to and from a structure to a packed 802.15.4 * frame. * -*/ + */ /* Includes */ @@ -125,7 +125,7 @@ * 3. Addressing fields - 4 - 20 bytes - Variable * 4. Aux security header - 0 - 14 bytes - Variable * 5. CRC - 2 bytes - Fixed -*/ + */ /** * \brief Defines the bitfields of the frame control field (FCF). diff --git a/net/ip/contiki/sicslowpan/sicslowpan_compression.h b/net/ip/contiki/sicslowpan/sicslowpan_compression.h index 7f49e922650..072dae06026 100644 --- a/net/ip/contiki/sicslowpan/sicslowpan_compression.h +++ b/net/ip/contiki/sicslowpan/sicslowpan_compression.h @@ -245,7 +245,7 @@ struct sicslowpan_addr_context { * multicast address is mappable to a 9-bit group-id * It is true if the group is the all nodes or all * routers group. 
-*/ + */ #define sicslowpan_is_mcast_addr_compressable(a) \ ((((a)->u16[1]) == 0) && \ (((a)->u16[2]) == 0) && \ diff --git a/net/ip/contiki/sicslowpan/sicslowpan_fragmentation.c b/net/ip/contiki/sicslowpan/sicslowpan_fragmentation.c index e6870528e43..c9216c7d149 100644 --- a/net/ip/contiki/sicslowpan/sicslowpan_fragmentation.c +++ b/net/ip/contiki/sicslowpan/sicslowpan_fragmentation.c @@ -378,7 +378,7 @@ static int reassemble(struct net_mbuf *mbuf) sicslowpan_len(mbuf) = 0; processed_ip_in_len = 0; } -*/ + */ /* * Since we don't support the mesh and broadcast header, the first header * we look for is the fragmentation header diff --git a/samples/include/irq_test_common.h b/samples/include/irq_test_common.h index 2fe687d697d..1ef4f2eb9f9 100644 --- a/samples/include/irq_test_common.h +++ b/samples/include/irq_test_common.h @@ -34,7 +34,7 @@ DESCRIPTION Interrupt stuff, abstracted across CPU architectures. -*/ + */ #ifndef _IRQ_TEST_COMMON__H_ #define _IRQ_TEST_COMMON__H_ @@ -105,11 +105,11 @@ struct isrInitInfo { void *arg[2]; }; -/******************************************************************************* -* -* initIRQ - init interrupts -* -*/ +/** + * + * initIRQ - init interrupts + * + */ static int initIRQ(struct isrInitInfo *i) { diff --git a/samples/include/util_test_common.h b/samples/include/util_test_common.h index 1e3cd59a75c..ad66a211b86 100644 --- a/samples/include/util_test_common.h +++ b/samples/include/util_test_common.h @@ -34,7 +34,7 @@ DESCRIPTION Common utility-type macros for use in the sample projects. -*/ + */ #ifndef _UTIL_TEST_COMMON__H_ #define _UTIL_TEST_COMMON__H_ diff --git a/samples/microkernel/apps/hello_world/src/hello.c b/samples/microkernel/apps/hello_world/src/hello.c index 023d2b3547b..5816739e6de 100644 --- a/samples/microkernel/apps/hello_world/src/hello.c +++ b/samples/microkernel/apps/hello_world/src/hello.c @@ -54,12 +54,12 @@ #define SLEEPTICKS (SLEEPTIME * sys_clock_ticks_per_sec / 1000) /* -* -* \param taskname task identification string -* \param mySem task's own semaphore -* \param otherSem other task's semaphore -* -*/ + * + * \param taskname task identification string + * \param mySem task's own semaphore + * \param otherSem other task's semaphore + * + */ void helloLoop(const char *taskname, ksem_t mySem, ksem_t otherSem) { while (1) { diff --git a/samples/microkernel/apps/philosophers/src/phil_fiber.c b/samples/microkernel/apps/philosophers/src/phil_fiber.c index 6e67be21682..4d3bbb02ed7 100644 --- a/samples/microkernel/apps/philosophers/src/phil_fiber.c +++ b/samples/microkernel/apps/philosophers/src/phil_fiber.c @@ -61,29 +61,29 @@ extern struct nano_sem forks[N_PHILOSOPHERS]; kmutex_t forks[] = {forkMutex0, forkMutex1, forkMutex2, forkMutex3, forkMutex4, forkMutex5}; #endif /* CONFIG_NANOKERNEL */ -/******************************************************************************* -* -* myPrint - print a philosophers state -* -* @param id Philosopher ID. -* @param str EATING or THINKING. -* -* RETURNS: N/A -*/ +/** + * + * myPrint - print a philosophers state + * + * @param id Philosopher ID. + * @param str EATING or THINKING. + * + * RETURNS: N/A + */ static void myPrint(int id, char *str) { PRINTF("\x1b[%d;%dHPhilosopher %d %s\n", id + 1, 1, id, str); } -/******************************************************************************* -* -* myDelay - wait for a number of ticks to elapse -* -* @param ticks Number of ticks to delay. 
-* -* RETURNS: N/A -*/ +/** + * + * myDelay - wait for a number of ticks to elapse + * + * @param ticks Number of ticks to delay. + * + * RETURNS: N/A + */ static void myDelay(int ticks) { @@ -98,15 +98,15 @@ static void myDelay(int ticks) #endif } -/******************************************************************************* -* -* philEntry - entry point to a philosopher's thread -* -* This routine runs as a task in the microkernel environment -* and as a fiber in the nanokernel environment. -* -* RETURNS: N/A -*/ +/** + * + * philEntry - entry point to a philosopher's thread + * + * This routine runs as a task in the microkernel environment + * and as a fiber in the nanokernel environment. + * + * RETURNS: N/A + */ void philEntry(void) { diff --git a/samples/microkernel/apps/philosophers/src/phil_task.c b/samples/microkernel/apps/philosophers/src/phil_task.c index 96e96102475..f4a1dc82e70 100644 --- a/samples/microkernel/apps/philosophers/src/phil_task.c +++ b/samples/microkernel/apps/philosophers/src/phil_task.c @@ -61,12 +61,12 @@ struct nano_sem forks[N_PHILOSOPHERS]; #endif /* CONFIG_NANOKERNEL */ #ifdef CONFIG_NANOKERNEL -/******************************************************************************* -* -* main - nanokernel entry point -* -* RETURNS: does not return -*/ +/** + * + * main - nanokernel entry point + * + * RETURNS: does not return + */ int main(void) { @@ -93,12 +93,12 @@ int main(void) } #else -/******************************************************************************* -* -* philDemo - routine to start dining philosopher demo -* -* RETURNS: does not return -*/ +/** + * + * philDemo - routine to start dining philosopher demo + * + * RETURNS: does not return + */ void philDemo(void) { diff --git a/samples/microkernel/benchmark/app_kernel/src/event_b.c b/samples/microkernel/benchmark/app_kernel/src/event_b.c index b78443997c4..568a9552fcc 100644 --- a/samples/microkernel/benchmark/app_kernel/src/event_b.c +++ b/samples/microkernel/benchmark/app_kernel/src/event_b.c @@ -56,7 +56,7 @@ int example_handler (int event); * Function declarations. 
*/ -/******************************************************************************* +/** * * event_test - event signal speed test * @@ -177,7 +177,7 @@ void event_test(void) } -/******************************************************************************* +/** * * example_handler - event handler for the tests * diff --git a/samples/microkernel/benchmark/app_kernel/src/fifo_b.c b/samples/microkernel/benchmark/app_kernel/src/fifo_b.c index 57b2ebd5e64..b993cf4eb2b 100644 --- a/samples/microkernel/benchmark/app_kernel/src/fifo_b.c +++ b/samples/microkernel/benchmark/app_kernel/src/fifo_b.c @@ -34,7 +34,7 @@ #ifdef FIFO_BENCH -/******************************************************************************* +/** * * queue_test - queue transfer speed test * diff --git a/samples/microkernel/benchmark/app_kernel/src/fifo_r.c b/samples/microkernel/benchmark/app_kernel/src/fifo_r.c index 1857947408d..17a64a19eaa 100644 --- a/samples/microkernel/benchmark/app_kernel/src/fifo_r.c +++ b/samples/microkernel/benchmark/app_kernel/src/fifo_r.c @@ -36,7 +36,7 @@ #ifdef FIFO_BENCH /* queue transfer speed test */ -/******************************************************************************* +/** * * dequtask - data receive task * diff --git a/samples/microkernel/benchmark/app_kernel/src/mailbox_b.c b/samples/microkernel/benchmark/app_kernel/src/mailbox_b.c index 1af847cae60..cf96fbc194b 100644 --- a/samples/microkernel/benchmark/app_kernel/src/mailbox_b.c +++ b/samples/microkernel/benchmark/app_kernel/src/mailbox_b.c @@ -90,7 +90,7 @@ void mailbox_put(uint32_t size, int count, uint32_t *time); * Function declarations. */ -/******************************************************************************* +/** * * mailbox_test - mailbox transfer speed test * @@ -141,7 +141,7 @@ void mailbox_test(void) } -/******************************************************************************* +/** * * mailbox_put - write the number of data chunks into the mailbox * diff --git a/samples/microkernel/benchmark/app_kernel/src/mailbox_r.c b/samples/microkernel/benchmark/app_kernel/src/mailbox_r.c index 3723d567617..0433e33371e 100644 --- a/samples/microkernel/benchmark/app_kernel/src/mailbox_r.c +++ b/samples/microkernel/benchmark/app_kernel/src/mailbox_r.c @@ -46,7 +46,7 @@ int mailbox_get(kmbox_t mailbox,int size,int count,unsigned int* time); /* mailbox transfer speed test */ -/******************************************************************************* +/** * * mailrecvtask - receive task * @@ -81,7 +81,7 @@ void mailrecvtask(void) } -/******************************************************************************* +/** * * mailbox_get - receive data portions from the specified mailbox * diff --git a/samples/microkernel/benchmark/app_kernel/src/master.c b/samples/microkernel/benchmark/app_kernel/src/master.c index 9def107ab90..006ea99b0de 100644 --- a/samples/microkernel/benchmark/app_kernel/src/master.c +++ b/samples/microkernel/benchmark/app_kernel/src/master.c @@ -37,7 +37,7 @@ _B : Is a file that contains a benchmark function _R : Is a file that contains the receiver task of a benchmark function -*/ + */ #include "master.h" @@ -61,7 +61,7 @@ FILE * output_file; */ uint32_t tm_off; -/******************************************************************************* +/** * * kbhit - check for keypress * @@ -76,7 +76,7 @@ int kbhit(void) } -/******************************************************************************* +/** * * init_output - prepares the test output * @@ -99,7 +99,7 @@ void init_output(int 
*continuously, int *autorun) output_file = stdout; } -/******************************************************************************* +/** * * output_close - close output for the test * @@ -116,7 +116,7 @@ void output_close(void) #define WAIT_FOR_USER() {} -/******************************************************************************* +/** * * BenchTask - perform all selected benchmarks * @@ -171,7 +171,7 @@ void BenchTask(void) } -/******************************************************************************* +/** * * dummy_test - dummy test * diff --git a/samples/microkernel/benchmark/app_kernel/src/memmap_b.c b/samples/microkernel/benchmark/app_kernel/src/memmap_b.c index ad6cdcc6d65..aa30ba16873 100644 --- a/samples/microkernel/benchmark/app_kernel/src/memmap_b.c +++ b/samples/microkernel/benchmark/app_kernel/src/memmap_b.c @@ -36,7 +36,7 @@ #ifdef MEMMAP_BENCH -/******************************************************************************* +/** * * memorymap_test - memory map get/free test * diff --git a/samples/microkernel/benchmark/app_kernel/src/mempool_b.c b/samples/microkernel/benchmark/app_kernel/src/mempool_b.c index 7401c165264..8d40ff531d4 100644 --- a/samples/microkernel/benchmark/app_kernel/src/mempool_b.c +++ b/samples/microkernel/benchmark/app_kernel/src/mempool_b.c @@ -34,7 +34,7 @@ #ifdef MEMPOOL_BENCH -/******************************************************************************* +/** * * mempool_test - memory pool get/free test * diff --git a/samples/microkernel/benchmark/app_kernel/src/mutex_b.c b/samples/microkernel/benchmark/app_kernel/src/mutex_b.c index 07fcce47989..03afeb05f62 100644 --- a/samples/microkernel/benchmark/app_kernel/src/mutex_b.c +++ b/samples/microkernel/benchmark/app_kernel/src/mutex_b.c @@ -34,7 +34,7 @@ #ifdef MUTEX_BENCH -/******************************************************************************* +/** * * mutex_test - mutex lock/unlock test * diff --git a/samples/microkernel/benchmark/app_kernel/src/nop_b.c b/samples/microkernel/benchmark/app_kernel/src/nop_b.c index 9c466a7dce9..8576882decc 100644 --- a/samples/microkernel/benchmark/app_kernel/src/nop_b.c +++ b/samples/microkernel/benchmark/app_kernel/src/nop_b.c @@ -38,7 +38,7 @@ extern void _task_nop(void); -/******************************************************************************* +/** * * call_test - Kernel entry timing test * diff --git a/samples/microkernel/benchmark/app_kernel/src/pipe_b.c b/samples/microkernel/benchmark/app_kernel/src/pipe_b.c index e0e75e6ca6b..4bd5152f668 100644 --- a/samples/microkernel/benchmark/app_kernel/src/pipe_b.c +++ b/samples/microkernel/benchmark/app_kernel/src/pipe_b.c @@ -107,7 +107,7 @@ int pipeput(kpipe_t pipe, K_PIPE_OPTION * Function declarations. 
*/ -/******************************************************************************* +/** * * pipe_test - test the pipes transfer speed * @@ -202,7 +202,7 @@ void pipe_test(void) } -/******************************************************************************* +/** * * pipeput - write a data portion to the pipe and measure time * diff --git a/samples/microkernel/benchmark/app_kernel/src/pipe_r.c b/samples/microkernel/benchmark/app_kernel/src/pipe_r.c index a7fcf88d24a..89df6948d99 100644 --- a/samples/microkernel/benchmark/app_kernel/src/pipe_r.c +++ b/samples/microkernel/benchmark/app_kernel/src/pipe_r.c @@ -47,7 +47,7 @@ int pipeget(kpipe_t pipe, K_PIPE_OPTION option, /* pipes transfer speed test */ -/******************************************************************************* +/** * * piperecvtask - receive task * @@ -98,7 +98,7 @@ void piperecvtask(void) } -/******************************************************************************* +/** * * pipeget - read a data portion from the pipe and measure time * diff --git a/samples/microkernel/benchmark/app_kernel/src/receiver.c b/samples/microkernel/benchmark/app_kernel/src/receiver.c index 1e51fe9f12c..37a77537881 100644 --- a/samples/microkernel/benchmark/app_kernel/src/receiver.c +++ b/samples/microkernel/benchmark/app_kernel/src/receiver.c @@ -37,7 +37,7 @@ _B : Is a file that contains a benchmark function _R : Is a file that contains the receiver task of a benchmark function -*/ + */ #include "receiver.h" @@ -48,7 +48,7 @@ void waittask(void); void mailrecvtask(void); void piperecvtask(void); -/******************************************************************************* +/** * * recvtask - main function of the task that receives data in the test * diff --git a/samples/microkernel/benchmark/app_kernel/src/sema_b.c b/samples/microkernel/benchmark/app_kernel/src/sema_b.c index 94c085aa707..7bea6778a5e 100644 --- a/samples/microkernel/benchmark/app_kernel/src/sema_b.c +++ b/samples/microkernel/benchmark/app_kernel/src/sema_b.c @@ -35,7 +35,7 @@ #ifdef SEMA_BENCH -/******************************************************************************* +/** * * sema_test - semaphore signal speed test * diff --git a/samples/microkernel/benchmark/app_kernel/src/sema_r.c b/samples/microkernel/benchmark/app_kernel/src/sema_r.c index 5b02dc2634b..259db13567d 100644 --- a/samples/microkernel/benchmark/app_kernel/src/sema_r.c +++ b/samples/microkernel/benchmark/app_kernel/src/sema_r.c @@ -37,7 +37,7 @@ /* semaphore signal speed test */ -/******************************************************************************* +/** * * waittask - receive task (Wait task) * diff --git a/samples/microkernel/benchmark/boot_time/src/boot_time.c b/samples/microkernel/benchmark/boot_time/src/boot_time.c index a5370e949b5..e53daa85f81 100644 --- a/samples/microkernel/benchmark/boot_time/src/boot_time.c +++ b/samples/microkernel/benchmark/boot_time/src/boot_time.c @@ -37,7 +37,7 @@ Measure boot time for both nanokernel and microkernel project which includes - from _start to main() - from _start to task - from _start to idle (for microkernel) -*/ + */ #ifdef CONFIG_NANOKERNEL #include @@ -121,12 +121,12 @@ void bootTimeTask(void) char __stack fiberStack[512]; -/******************************************************************************* -* -* main - nanokernel entry point -* -* RETURNS: N/A -*/ +/** + * + * main - nanokernel entry point + * + * RETURNS: N/A + */ void main(void) { diff --git a/samples/microkernel/benchmark/footprint/src/microkernel_footprint.c 
b/samples/microkernel/benchmark/footprint/src/microkernel_footprint.c index f2b1ba4b1a2..a580ad9d063 100644 --- a/samples/microkernel/benchmark/footprint/src/microkernel_footprint.c +++ b/samples/microkernel/benchmark/footprint/src/microkernel_footprint.c @@ -128,7 +128,7 @@ static pfunc func_array[] = { #endif /* TEST_max */ }; -/******************************************************************************* +/** * * dummyIsr - dummy ISR * @@ -141,7 +141,7 @@ void dummyIsr(void *unused) } #ifdef TEST_reg -/******************************************************************************* +/** * * isrDummyIntStub - static interrupt stub that invokes dummy ISR * @@ -162,7 +162,7 @@ static void isrDummyIntStub(void *unused) } #endif /* TEST_reg */ -/******************************************************************************* +/** * * fgTaskEntry - entry function for foreground task * diff --git a/samples/microkernel/benchmark/latency_measure/src/main.c b/samples/microkernel/benchmark/latency_measure/src/main.c index 1245d8a7b26..5bd1d442948 100644 --- a/samples/microkernel/benchmark/latency_measure/src/main.c +++ b/samples/microkernel/benchmark/latency_measure/src/main.c @@ -44,7 +44,7 @@ uint32_t tm_off; /* time necessary to read the time */ int errorCount = 0; /* track number of errors */ -/******************************************************************************* +/** * * nanoTest - test latency of nanokernel * @@ -75,7 +75,7 @@ void nanoTest(void) } #ifdef CONFIG_NANOKERNEL -/******************************************************************************* +/** * * main - nanokernel-only testing entry point * @@ -102,7 +102,7 @@ int microSemaLockUnlock(void); int microMutexLockUnlock(void); void microTaskSwitchYield(void); -/******************************************************************************* +/** * * microTest - test latency of microkernel * @@ -132,7 +132,7 @@ void microTest(void) printDashLine(); } -/******************************************************************************* +/** * * main - microkernel testing entry point * diff --git a/samples/microkernel/benchmark/latency_measure/src/micro_int_to_task.c b/samples/microkernel/benchmark/latency_measure/src/micro_int_to_task.c index 4400b950724..c2a78608e9b 100644 --- a/samples/microkernel/benchmark/latency_measure/src/micro_int_to_task.c +++ b/samples/microkernel/benchmark/latency_measure/src/micro_int_to_task.c @@ -46,16 +46,16 @@ static volatile int flagVar = 0; static uint32_t timestamp; -/******************************************************************************* -* -* latencyTestIsr - test ISR used to measure best case interrupt latency -* -* The interrupt handler gets the second timestamp. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * latencyTestIsr - test ISR used to measure best case interrupt latency + * + * The interrupt handler gets the second timestamp. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void latencyTestIsr(void *unused) { @@ -65,17 +65,17 @@ static void latencyTestIsr(void *unused) timestamp = TIME_STAMP_DELTA_GET(0); } -/******************************************************************************* -* -* makeInt - interrupt preparation function -* -* Function makes all the test preparations: registers the interrupt handler, -* gets the first timestamp and invokes the software interrupt. 
-* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * makeInt - interrupt preparation function + * + * Function makes all the test preparations: registers the interrupt handler, + * gets the first timestamp and invokes the software interrupt. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void makeInt(void) { @@ -89,7 +89,7 @@ static void makeInt(void) } } -/******************************************************************************* +/** * * microIntToTask - the test main function * diff --git a/samples/microkernel/benchmark/latency_measure/src/micro_int_to_task_evt.c b/samples/microkernel/benchmark/latency_measure/src/micro_int_to_task_evt.c index c789dc5c583..ffa422d275f 100644 --- a/samples/microkernel/benchmark/latency_measure/src/micro_int_to_task_evt.c +++ b/samples/microkernel/benchmark/latency_measure/src/micro_int_to_task_evt.c @@ -48,16 +48,16 @@ static uint32_t timestamp = 0; -/******************************************************************************* -* -* latencyTestIsr - test ISR used to measure best case interrupt latency -* -* The interrupt handler gets the second timestamp. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * latencyTestIsr - test ISR used to measure best case interrupt latency + * + * The interrupt handler gets the second timestamp. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void latencyTestIsr(void *unused) { @@ -67,7 +67,7 @@ static void latencyTestIsr(void *unused) timestamp = TIME_STAMP_DELTA_GET(0); } -/******************************************************************************* +/** * * microInt - software interrupt generating task * @@ -88,7 +88,7 @@ void microInt(void) task_suspend(task_id_get()); } -/******************************************************************************* +/** * * microIntToTaskEvt - the test main function * diff --git a/samples/microkernel/benchmark/latency_measure/src/micro_sema_lock_release.c b/samples/microkernel/benchmark/latency_measure/src/micro_sema_lock_release.c index 59b59345707..8d500d5665a 100644 --- a/samples/microkernel/benchmark/latency_measure/src/micro_sema_lock_release.c +++ b/samples/microkernel/benchmark/latency_measure/src/micro_sema_lock_release.c @@ -53,7 +53,7 @@ static uint32_t timestamp; -/******************************************************************************* +/** * * microSemaLockUnlock - the function tests semaphore lock/unlock time * @@ -103,7 +103,7 @@ int microSemaLockUnlock(void) return 0; } -/******************************************************************************* +/** * * microMutexLockUnlock - test for the multiple mutex lock/unlock time * diff --git a/samples/microkernel/benchmark/latency_measure/src/micro_task_switch_yield.c b/samples/microkernel/benchmark/latency_measure/src/micro_task_switch_yield.c index 6471a4a9cfa..606b5b6727e 100644 --- a/samples/microkernel/benchmark/latency_measure/src/micro_task_switch_yield.c +++ b/samples/microkernel/benchmark/latency_measure/src/micro_task_switch_yield.c @@ -50,7 +50,7 @@ static int abs(int i) { return (i >= 0) ? 
i : -i; } static uint32_t helper_task_iterations = 0; -/******************************************************************************* +/** * * yieldingTask - helper task for measuring task switch latency using yield * @@ -69,7 +69,7 @@ void yieldingTask(void) } } -/******************************************************************************* +/** * * microTaskSwitchYield - entry point for task context switch using yield test * diff --git a/samples/microkernel/benchmark/latency_measure/src/nano_ctx_switch.c b/samples/microkernel/benchmark/latency_measure/src/nano_ctx_switch.c index f97aacbefab..02c43f2c9c4 100644 --- a/samples/microkernel/benchmark/latency_measure/src/nano_ctx_switch.c +++ b/samples/microkernel/benchmark/latency_measure/src/nano_ctx_switch.c @@ -64,17 +64,17 @@ static volatile uint32_t ctxSwitchCounter = 0; /* context switch balancer. Incremented by one fiber, decremented by another*/ static volatile int ctxSwitchBalancer = 0; -/******************************************************************************* -* -* fiberOne -* -* Fiber makes all the test preparations: registers the interrupt handler, -* gets the first timestamp and invokes the software interrupt. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * fiberOne + * + * Fiber makes all the test preparations: registers the interrupt handler, + * gets the first timestamp and invokes the software interrupt. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void fiberOne(void) { @@ -88,7 +88,7 @@ static void fiberOne(void) timestamp = TIME_STAMP_DELTA_GET(timestamp); } -/******************************************************************************* +/** * * fiberWaiter - check the time when it gets executed after the semaphore * @@ -110,7 +110,7 @@ static void fiberTwo(void) } } -/******************************************************************************* +/** * * nanoCtxSwitch - the test main function * diff --git a/samples/microkernel/benchmark/latency_measure/src/nano_int.c b/samples/microkernel/benchmark/latency_measure/src/nano_int.c index 2327100b1d3..d738aa7ad00 100644 --- a/samples/microkernel/benchmark/latency_measure/src/nano_int.c +++ b/samples/microkernel/benchmark/latency_measure/src/nano_int.c @@ -48,16 +48,16 @@ static char __stack fiberStack[STACKSIZE]; static uint32_t timestamp; -/******************************************************************************* -* -* latencyTestIsr - test ISR used to measure best case interrupt latency -* -* The interrupt handler gets the second timestamp. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * latencyTestIsr - test ISR used to measure best case interrupt latency + * + * The interrupt handler gets the second timestamp. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void latencyTestIsr(void *unused) { @@ -66,17 +66,17 @@ static void latencyTestIsr(void *unused) timestamp = TIME_STAMP_DELTA_GET(timestamp); } -/******************************************************************************* -* -* fiberInt - interrupt preparation fiber -* -* Fiber makes all the test preparations: registers the interrupt handler, -* gets the first timestamp and invokes the software interrupt. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * fiberInt - interrupt preparation fiber + * + * Fiber makes all the test preparations: registers the interrupt handler, + * gets the first timestamp and invokes the software interrupt. 
+ * + * RETURNS: N/A + * + * \NOMANUAL + */ static void fiberInt(void) { @@ -85,7 +85,7 @@ static void fiberInt(void) raiseIntFunc(); } -/******************************************************************************* +/** * * nanoIntLatency - the test main function * diff --git a/samples/microkernel/benchmark/latency_measure/src/nano_int_lock_unlock.c b/samples/microkernel/benchmark/latency_measure/src/nano_int_lock_unlock.c index d8390158e7b..5c225840647 100644 --- a/samples/microkernel/benchmark/latency_measure/src/nano_int_lock_unlock.c +++ b/samples/microkernel/benchmark/latency_measure/src/nano_int_lock_unlock.c @@ -49,7 +49,7 @@ static uint32_t timestamp = 0; -/******************************************************************************* +/** * * nanoIntLockUnlock - the test main function * diff --git a/samples/microkernel/benchmark/latency_measure/src/nano_int_to_fiber.c b/samples/microkernel/benchmark/latency_measure/src/nano_int_to_fiber.c index 2d9bd5b8582..ca83635d6ac 100644 --- a/samples/microkernel/benchmark/latency_measure/src/nano_int_to_fiber.c +++ b/samples/microkernel/benchmark/latency_measure/src/nano_int_to_fiber.c @@ -50,16 +50,16 @@ static volatile int flagVar = 0; static uint32_t timestamp; -/******************************************************************************* -* -* latencyTestIsr - test ISR used to measure best case interrupt latency -* -* The interrupt handler gets the second timestamp. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * latencyTestIsr - test ISR used to measure best case interrupt latency + * + * The interrupt handler gets the second timestamp. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void latencyTestIsr(void *unused) { @@ -69,17 +69,17 @@ static void latencyTestIsr(void *unused) timestamp = TIME_STAMP_DELTA_GET(0); } -/******************************************************************************* -* -* fiberInt - interrupt preparation fiber -* -* Fiber makes all the test preparations: registers the interrupt handler, -* gets the first timestamp and invokes the software interrupt. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * fiberInt - interrupt preparation fiber + * + * Fiber makes all the test preparations: registers the interrupt handler, + * gets the first timestamp and invokes the software interrupt. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void fiberInt(void) { @@ -93,7 +93,7 @@ static void fiberInt(void) } } -/******************************************************************************* +/** * * nanoIntToFiber - the test main function * diff --git a/samples/microkernel/benchmark/latency_measure/src/nano_int_to_fiber_sem.c b/samples/microkernel/benchmark/latency_measure/src/nano_int_to_fiber_sem.c index e414e1bab2b..7066356be9b 100644 --- a/samples/microkernel/benchmark/latency_measure/src/nano_int_to_fiber_sem.c +++ b/samples/microkernel/benchmark/latency_measure/src/nano_int_to_fiber_sem.c @@ -61,16 +61,16 @@ static struct nano_sem testSema; static uint32_t timestamp = 0; -/******************************************************************************* -* -* latencyTestIsr - test ISR used to measure best case interrupt latency -* -* The interrupt handler gets the second timestamp. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * latencyTestIsr - test ISR used to measure best case interrupt latency + * + * The interrupt handler gets the second timestamp. 
+ * + * RETURNS: N/A + * + * \NOMANUAL + */ static void latencyTestIsr(void *unused) { @@ -80,17 +80,17 @@ static void latencyTestIsr(void *unused) timestamp = TIME_STAMP_DELTA_GET(0); } -/******************************************************************************* -* -* fiberInt - interrupt preparation fiber -* -* Fiber makes all the test preparations: registers the interrupt handler, -* gets the first timestamp and invokes the software interrupt. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * fiberInt - interrupt preparation fiber + * + * Fiber makes all the test preparations: registers the interrupt handler, + * gets the first timestamp and invokes the software interrupt. + * + * RETURNS: N/A + * + * \NOMANUAL + */ static void fiberInt(void) { @@ -99,7 +99,7 @@ static void fiberInt(void) fiber_yield(); } -/******************************************************************************* +/** * * fiberWaiter - check the time when it gets executed after the semaphore * @@ -117,7 +117,7 @@ static void fiberWaiter(void) timestamp = TIME_STAMP_DELTA_GET(timestamp); } -/******************************************************************************* +/** * * nanoIntToFiberSem - the test main function * diff --git a/samples/microkernel/benchmark/latency_measure/src/raise_int.c b/samples/microkernel/benchmark/latency_measure/src/raise_int.c index 95e5a3a618c..b1401c819a2 100644 --- a/samples/microkernel/benchmark/latency_measure/src/raise_int.c +++ b/samples/microkernel/benchmark/latency_measure/src/raise_int.c @@ -1388,7 +1388,7 @@ static void (*intFPtr[256])(void) = { genInt248, genInt249, genInt250,genInt251, genInt252, genInt253, genInt254,genInt255}; -/******************************************************************************* +/** * * raiseInt - generate a software interrupt * @@ -1407,7 +1407,7 @@ void raiseInt(uint8_t id) #if defined(CONFIG_CPU_CORTEX_M3_M4) #include -/******************************************************************************* +/** * * raiseInt - generate a software interrupt * diff --git a/samples/microkernel/benchmark/latency_measure/src/utils.c b/samples/microkernel/benchmark/latency_measure/src/utils.c index d4a7cac5ecf..1a4c5dd8699 100644 --- a/samples/microkernel/benchmark/latency_measure/src/utils.c +++ b/samples/microkernel/benchmark/latency_measure/src/utils.c @@ -56,7 +56,7 @@ static ptestIsr pcurrIsrFunc; /* scratchpad for the string used to print on console */ char tmpString[TMP_STRING_SIZE]; -/******************************************************************************* +/** * * initSwInterrupt - initialize the interrupt handler * @@ -78,7 +78,7 @@ int initSwInterrupt(ptestIsr pIsrHdlr) return vector; } -/******************************************************************************* +/** * * setSwInterrupt - set the new ISR for software interrupt * @@ -97,7 +97,7 @@ void setSwInterrupt(ptestIsr pIsrHdlr) pcurrIsrFunc = pIsrHdlr; } -/******************************************************************************* +/** * * raiseIntFunc - generate a software interrupt * diff --git a/samples/microkernel/benchmark/latency_measure/src/utils.h b/samples/microkernel/benchmark/latency_measure/src/utils.h index 02d47638ea5..63c0157124a 100644 --- a/samples/microkernel/benchmark/latency_measure/src/utils.h +++ b/samples/microkernel/benchmark/latency_measure/src/utils.h @@ -57,7 +57,7 @@ extern int errorCount; PRINTF("|%-77s|\n", tmpString); \ } while (0) -/******************************************************************************* +/** * * 
printDashLine - print dash line * @@ -114,7 +114,7 @@ int nanoIntLockUnlock(void); /* pointer to the ISR */ typedef void (*ptestIsr) (void *unused); -/******************************************************************************* +/** * * initSwInterrupt - initialize the interrupt handler * @@ -129,7 +129,7 @@ typedef void (*ptestIsr) (void *unused); int initSwInterrupt(ptestIsr pIsrHdlr); -/******************************************************************************* +/** * * setSwInterrupt - set the new ISR for software interrupt * diff --git a/samples/microkernel/test/test_critical/src/critical.c b/samples/microkernel/test/test_critical/src/critical.c index f4efadf035a..bc35793b02c 100644 --- a/samples/microkernel/test/test_critical/src/critical.c +++ b/samples/microkernel/test/test_critical/src/critical.c @@ -33,7 +33,7 @@ /* DESCRIPTION This module tests the task_offload_to_fiber() API. -*/ + */ #include #include @@ -45,14 +45,14 @@ This module tests the task_offload_to_fiber() API. static uint32_t criticalVar = 0; static uint32_t altTaskIterations = 0; -/******************************************************************************* -* -* criticalRtn - routine to be called from K_swapper() -* -* This routine increments the global variable . -* -* RETURNS: 0 -*/ +/** + * + * criticalRtn - routine to be called from K_swapper() + * + * This routine increments the global variable . + * + * RETURNS: 0 + */ int criticalRtn(void) { @@ -64,14 +64,14 @@ int criticalRtn(void) return 0; } -/******************************************************************************* -* -* criticalLoop - common code for invoking task_offload_to_fiber() -* -* \param count number of critical section calls made thus far -* -* RETURNS: number of critical section calls made by task -*/ +/** + * + * criticalLoop - common code for invoking task_offload_to_fiber() + * + * \param count number of critical section calls made thus far + * + * RETURNS: number of critical section calls made by task + */ uint32_t criticalLoop(uint32_t count) { @@ -86,14 +86,14 @@ uint32_t criticalLoop(uint32_t count) return count; } -/******************************************************************************* -* -* AlternateTask - alternate task -* -* This routine calls task_offload_to_fiber() many times. -* -* RETURNS: N/A -*/ +/** + * + * AlternateTask - alternate task + * + * This routine calls task_offload_to_fiber() many times. + * + * RETURNS: N/A + */ void AlternateTask(void) { @@ -110,16 +110,16 @@ void AlternateTask(void) task_sem_give(REGRESS_SEM); } -/******************************************************************************* -* -* RegressionTask - regression task -* -* This routine calls task_offload_to_fiber() many times. It also checks to -* ensure that the number of times it is called matches the global variable -* . -* -* RETURNS: N/A -*/ +/** + * + * RegressionTask - regression task + * + * This routine calls task_offload_to_fiber() many times. It also checks to + * ensure that the number of times it is called matches the global variable + * . 
+ * + * RETURNS: N/A + */ void RegressionTask(void) { diff --git a/samples/microkernel/test/test_events/src/events.c b/samples/microkernel/test/test_events/src/events.c index 69198b027a2..32b6138fe8f 100644 --- a/samples/microkernel/test/test_events/src/events.c +++ b/samples/microkernel/test/test_events/src/events.c @@ -36,7 +36,7 @@ This modules tests the following event APIs: task_event_set_handler(), task_event_send(), isr_event_send(), task_event_recv(), task_event_recv_wait(), task_event_recv_wait_timeout() -*/ + */ #include #include @@ -65,12 +65,12 @@ extern struct nano_sem fiberSem; /* semaphore that allows test control the fiber extern const int _k_num_events; /* non-public microkernel global variable */ -/******************************************************************************* -* -* isr_event_signal_handler - ISR handler to signal an event -* -* RETURNS: N/A -*/ +/** + * + * isr_event_signal_handler - ISR handler to signal an event + * + * RETURNS: N/A + */ void isr_event_signal_handler(void *data) { @@ -79,24 +79,24 @@ void isr_event_signal_handler(void *data) isr_event_send(pInfo->event); } -/******************************************************************************* -* -* releaseTestFiber - release the test fiber -* -* RETURNS: N/A -*/ +/** + * + * releaseTestFiber - release the test fiber + * + * RETURNS: N/A + */ void releaseTestFiber(void) { nano_task_sem_give(&fiberSem); } -/******************************************************************************* -* -* microObjectsInit - initialize objects used in this microkernel test suite -* -* RETURNS: N/A -*/ +/** + * + * microObjectsInit - initialize objects used in this microkernel test suite + * + * RETURNS: N/A + */ void microObjectsInit(void) { @@ -111,18 +111,18 @@ void microObjectsInit(void) TC_PRINT("Microkernel objects initialized\n"); } -/******************************************************************************* -* -* eventNoWaitTest - test the task_event_recv() API -* -* There are three cases to be tested here. The first is for testing an invalid -* event. The second is for testing for an event when there is one. The third -* is for testing for an event when there are none. Note that the "consumption" -* of the event gets confirmed by the order in which the latter two checks are -* done. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * eventNoWaitTest - test the task_event_recv() API + * + * There are three cases to be tested here. The first is for testing an invalid + * event. The second is for testing for an event when there is one. The third + * is for testing for an event when there are none. Note that the "consumption" + * of the event gets confirmed by the order in which the latter two checks are + * done. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int eventNoWaitTest(void) { @@ -157,17 +157,17 @@ int eventNoWaitTest(void) return TC_PASS; } -/******************************************************************************* -* -* eventWaitTest - test the task_event_recv_wait() API -* -* This test checks task_event_recv_wait() against the following cases: -* 1. There is already an event waiting (signalled from a task and ISR). -* 2. The current task must wait on the event until it is signalled -* from either another task, an ISR or a fiber. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * eventWaitTest - test the task_event_recv_wait() API + * + * This test checks task_event_recv_wait() against the following cases: + * 1. 
There is already an event waiting (signalled from a task and ISR). + * 2. The current task must wait on the event until it is signalled + * from either another task, an ISR or a fiber. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int eventWaitTest(void) { @@ -229,18 +229,18 @@ int eventWaitTest(void) return TC_PASS; } -/******************************************************************************* -* -* eventTimeoutTest - test the task_event_recv_wait_timeout() API -* -* This test checks task_event_recv_wait_timeout() against the following cases: -* 1. The current task times out while waiting for the event. -* 2. There is already an event waiting (signalled from a task). -* 3. The current task must wait on the event until it is signalled -* from either another task, an ISR or a fiber. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * eventTimeoutTest - test the task_event_recv_wait_timeout() API + * + * This test checks task_event_recv_wait_timeout() against the following cases: + * 1. The current task times out while waiting for the event. + * 2. There is already an event waiting (signalled from a task). + * 3. The current task must wait on the event until it is signalled + * from either another task, an ISR or a fiber. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int eventTimeoutTest(void) { @@ -280,17 +280,17 @@ int eventTimeoutTest(void) return TC_PASS; } -/******************************************************************************* -* -* isrEventSignalTest - test the isr_event_send() API -* -* Although other tests have done some testing using isr_event_send(), none -* of them have demonstrated that signalling an event more than once does not -* "queue" events. That is, should two or more signals of the same event occur -* before it is tested, it can only be tested for successfully once. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * isrEventSignalTest - test the isr_event_send() API + * + * Although other tests have done some testing using isr_event_send(), none + * of them have demonstrated that signalling an event more than once does not + * "queue" events. That is, should two or more signals of the same event occur + * before it is tested, it can only be tested for successfully once. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int isrEventSignalTest(void) { @@ -322,16 +322,16 @@ int isrEventSignalTest(void) return TC_PASS; } -/******************************************************************************* -* -* fiberEventSignalTest - test the fiber_event_send() API -* -* Signalling an event by fiber_event_send() more than once does not "queue" -* events. That is, should two or more signals of the same event occur before -* it is tested, it can only be tested for successfully once. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * fiberEventSignalTest - test the fiber_event_send() API + * + * Signalling an event by fiber_event_send() more than once does not "queue" + * events. That is, should two or more signals of the same event occur before + * it is tested, it can only be tested for successfully once. 
+ * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int fiberEventSignalTest(void) { @@ -359,14 +359,14 @@ int fiberEventSignalTest(void) return TC_PASS; } -/******************************************************************************* -* -* eventHandler - handler to run on EVENT_ID event -* -* \param event signalled event -* -* RETURNS: -*/ +/** + * + * eventHandler - handler to run on EVENT_ID event + * + * \param event signalled event + * + * RETURNS: + */ int eventHandler(int event) { @@ -377,14 +377,14 @@ int eventHandler(int event) return handlerRetVal; /* 0 if not to wake waiting task; 1 if to wake */ } -/******************************************************************************* -* -* altEventHandler - handler to run on ALT_EVENT event -* -* \param event signalled event -* -* RETURNS: 1 -*/ +/** + * + * altEventHandler - handler to run on ALT_EVENT event + * + * \param event signalled event + * + * RETURNS: 1 + */ int altEventHandler(int event) { @@ -395,17 +395,17 @@ int altEventHandler(int event) return 1; } -/******************************************************************************* -* -* eventSignalHandlerTest - test the task_event_set_handler() API -* -* This test checks that the event handler is set up properly when -* task_event_set_handler() is called. It shows that event handlers are tied -* to the specified event and that the return value from the handler affects -* whether the event wakes a task waiting upon that event. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * eventSignalHandlerTest - test the task_event_set_handler() API + * + * This test checks that the event handler is set up properly when + * task_event_set_handler() is called. It shows that event handlers are tied + * to the specified event and that the return value from the handler affects + * whether the event wakes a task waiting upon that event. 
+ * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int eventSignalHandlerTest(void) { @@ -518,12 +518,12 @@ int eventSignalHandlerTest(void) return TC_PASS; } -/******************************************************************************* -* -* AlternateTask - alternate task to signal various events to a waiting task -* -* RETURNS: N/A -*/ +/** + * + * AlternateTask - alternate task to signal various events to a waiting task + * + * RETURNS: N/A + */ void AlternateTask(void) { @@ -556,12 +556,12 @@ void AlternateTask(void) task_event_send(EVENT_ID); } -/******************************************************************************* -* -* RegressionTask - main entry point to the test suite -* -* RETURNS: N/A -*/ +/** + * + * RegressionTask - main entry point to the test suite + * + * RETURNS: N/A + */ void RegressionTask(void) { diff --git a/samples/microkernel/test/test_events/src/test_fiber.c b/samples/microkernel/test/test_events/src/test_fiber.c index 87a7c987c7e..b1026fb7870 100644 --- a/samples/microkernel/test/test_events/src/test_fiber.c +++ b/samples/microkernel/test/test_events/src/test_fiber.c @@ -34,7 +34,7 @@ DESCRIPTION The module implements functions for the fiber that tests event signaling -*/ + */ #include #include @@ -49,16 +49,16 @@ struct nano_sem fiberSem; /* semaphore that allows test control the fiber */ static char __stack fiberStack[FIBER_STACK_SIZE]; /* test fiber stack size */ -/******************************************************************************* -* -* testFiberEntry - the test fiber entry function -* -* Fiber waits on the semaphore controlled by the test task -* It signals the event for the eventWaitTest() function -* in single and cycle test, for eventTimeoutTest() -* -* RETURNS: N/A -*/ +/** + * + * testFiberEntry - the test fiber entry function + * + * Fiber waits on the semaphore controlled by the test task + * It signals the event for the eventWaitTest() function + * in single and cycle test, for eventTimeoutTest() + * + * RETURNS: N/A + */ static void testFiberEntry(void) { /* signal event for eventWaitTest() */ @@ -82,12 +82,12 @@ static void testFiberEntry(void) fiber_event_send(EVENT_ID); } -/******************************************************************************* -* -* testFiberInit - initializes variables and starts the test fiber -* -* RETURNS: N/A -*/ +/** + * + * testFiberInit - initializes variables and starts the test fiber + * + * RETURNS: N/A + */ void testFiberInit(void) { diff --git a/samples/microkernel/test/test_fifo/src/fifo.c b/samples/microkernel/test/test_fifo/src/fifo.c index 12526e9668e..4bc0da3b446 100644 --- a/samples/microkernel/test/test_fifo/src/fifo.c +++ b/samples/microkernel/test/test_fifo/src/fifo.c @@ -43,7 +43,7 @@ Scenarios tested include: while it is being dequeued - Verify the data being dequeued are in correct order - Verify the return codes are correct for the APIs -*/ + */ #include #include @@ -62,14 +62,14 @@ Scenarios tested include: static int myData[NUM_OF_ELEMENT]; static int tcRC = TC_PASS; /* test case return code */ -/******************************************************************************* -* -* initMyData - initialize data array -* -* This routine initializes the myData array used in the FIFO tests. -* -* RETURNS: N/A -*/ +/** + * + * initMyData - initialize data array + * + * This routine initializes the myData array used in the FIFO tests. 
+ * + * RETURNS: N/A + */ void initMyData(void) { @@ -78,14 +78,14 @@ void initMyData(void) } /* for */ } /* initMyData */ -/******************************************************************************* -* -* printMyData - print data array -* -* This routine prints myData array. -* -* RETURNS: N/A -*/ +/** + * + * printMyData - print data array + * + * This routine prints myData array. + * + * RETURNS: N/A + */ void printMyData(void) { @@ -95,32 +95,32 @@ void printMyData(void) } /* printMyData */ -/******************************************************************************* -* -* verifyRetValue - verify return value -* -* This routine verifies current value against expected value -* and returns true if they are the same. -* -* \param expectRetValue expect value -* \param currentRetValue current value -* -* RETURNS: true, false -*/ +/** + * + * verifyRetValue - verify return value + * + * This routine verifies current value against expected value + * and returns true if they are the same. + * + * \param expectRetValue expect value + * \param currentRetValue current value + * + * RETURNS: true, false + */ bool verifyRetValue(int expectRetValue, int currentRetValue) { return (expectRetValue == currentRetValue); } /* verifyRetValue */ -/******************************************************************************* -* -* initMicroObjects - initialize microkernel objects -* -* This routine initializes the microkernel objects used in the FIFO tests. -* -* RETURNS: N/A -*/ +/** + * + * initMicroObjects - initialize microkernel objects + * + * This routine initializes the microkernel objects used in the FIFO tests. + * + * RETURNS: N/A + */ void initMicroObjects(void) { @@ -128,20 +128,20 @@ void initMicroObjects(void) printMyData(); } /* initMicroObjects */ -/******************************************************************************* -* -* fillFIFO - fills up the FIFO queue -* -* This routine fills the FIFO queue with myData array. This assumes the -* queue is empty before we put in elements. -* -* \param queue FIFO queue -* \param numElements Number of elements used to inserted into the queue -* -* RETURNS: TC_PASS, TC_FAIL -* -* Also updates tcRC when result is TC_FAIL. -*/ +/** + * + * fillFIFO - fills up the FIFO queue + * + * This routine fills the FIFO queue with myData array. This assumes the + * queue is empty before we put in elements. + * + * \param queue FIFO queue + * \param numElements Number of elements used to inserted into the queue + * + * RETURNS: TC_PASS, TC_FAIL + * + * Also updates tcRC when result is TC_FAIL. 
+ */ int fillFIFO(kfifo_t queue, int numElements) { @@ -185,17 +185,17 @@ exitTest3: } /* fillFIFO */ -/******************************************************************************* -* -* MicroTestFifoTask - task to test FIFO queue -* -* This routine is run in three context switches: -* - it puts an element to the FIFO queue -* - it purges the FIFO queue -* - it dequeues an element from the FIFO queue -* -* RETURNS: N/A -*/ +/** + * + * MicroTestFifoTask - task to test FIFO queue + * + * This routine is run in three context switches: + * - it puts an element to the FIFO queue + * - it purges the FIFO queue + * - it dequeues an element from the FIFO queue + * + * RETURNS: N/A + */ void MicroTestFifoTask(void) { int retValue; /* return value of task_fifo_xxx interface */ @@ -270,21 +270,21 @@ exitTest4: task_sem_give(SEM_TestDone); } -/******************************************************************************* -* -* verifyQueueData - Verifies data in queue is correct -* -* This routine assumes that the queue is full when this function is called. -* It counts the number of elements in the queue, dequeues elements and verifies -* that they are in the right order. Expect the dequeue order as: myData[0], -* myData[1]. -* -* \param loopCnt number of elements passed to the for loop -* -* RETURNS: TC_PASS, TC_FAIL -* -* Also updates tcRC when result is TC_FAIL. -*/ +/** + * + * verifyQueueData - Verifies data in queue is correct + * + * This routine assumes that the queue is full when this function is called. + * It counts the number of elements in the queue, dequeues elements and verifies + * that they are in the right order. Expect the dequeue order as: myData[0], + * myData[1]. + * + * \param loopCnt number of elements passed to the for loop + * + * RETURNS: TC_PASS, TC_FAIL + * + * Also updates tcRC when result is TC_FAIL. + */ int verifyQueueData(int loopCnt) { int result = TC_PASS; /* TC_PASS or TC_FAIL for this function */ @@ -348,20 +348,20 @@ exitTest2: } /* verifyQueueData */ -/******************************************************************************* -* -* RegressionTask - main task to test FIFO queue -* -* This routine initializes data, fills the FIFO queue and verifies the -* data in the queue is in correct order when items are being dequeued. -* It also tests the wait (with and without timeouts) to put data into -* queue when the queue is full. The queue is purged at some point -* and checked to see if the number of elements is correct. -* The get wait interfaces (with and without timeouts) are also tested -* and data verified. -* -* RETURNS: N/A -*/ +/** + * + * RegressionTask - main task to test FIFO queue + * + * This routine initializes data, fills the FIFO queue and verifies the + * data in the queue is in correct order when items are being dequeued. + * It also tests the wait (with and without timeouts) to put data into + * queue when the queue is full. The queue is purged at some point + * and checked to see if the number of elements is correct. + * The get wait interfaces (with and without timeouts) are also tested + * and data verified. 
+ * + * RETURNS: N/A + */ void RegressionTask(void) { diff --git a/samples/microkernel/test/test_fp_sharing/src/float_regs_x86_gcc.h b/samples/microkernel/test/test_fp_sharing/src/float_regs_x86_gcc.h index ded2072d1cb..fbe8430f81d 100644 --- a/samples/microkernel/test/test_fp_sharing/src/float_regs_x86_gcc.h +++ b/samples/microkernel/test/test_fp_sharing/src/float_regs_x86_gcc.h @@ -40,25 +40,25 @@ #include #include "float_context.h" -/******************************************************************************* -* -* _LoadAllFloatRegisters - load all floating point registers -* -* This function loads ALL floating point registers from the memory buffer -* specified by . It is expected that a subsequent call to -* _StoreAllFloatRegisters() will be issued to dump the floating point registers -* to memory. -* -* The format/organization of the FP_REG_SET structure is not important; the -* generic C test code (main.c and fiber.c) merely treat the FP_REG_SET -* (and FP_NONVOLATILE_REG_SET) as an array of bytes. -* -* The only requirement is that the arch specific implementations of -* _LoadAllFloatRegisters(), _StoreAllFloatRegisters(), and -* _LoadThenStoreAllFloatRegisters agree on the format. -* -* RETURNS: N/A -*/ +/** + * + * _LoadAllFloatRegisters - load all floating point registers + * + * This function loads ALL floating point registers from the memory buffer + * specified by . It is expected that a subsequent call to + * _StoreAllFloatRegisters() will be issued to dump the floating point registers + * to memory. + * + * The format/organization of the FP_REG_SET structure is not important; the + * generic C test code (main.c and fiber.c) merely treat the FP_REG_SET + * (and FP_NONVOLATILE_REG_SET) as an array of bytes. + * + * The only requirement is that the arch specific implementations of + * _LoadAllFloatRegisters(), _StoreAllFloatRegisters(), and + * _LoadThenStoreAllFloatRegisters agree on the format. + * + * RETURNS: N/A + */ static inline void _LoadAllFloatRegisters(FP_REG_SET *pFromBuffer) { @@ -109,23 +109,23 @@ static inline void _LoadAllFloatRegisters(FP_REG_SET *pFromBuffer) } -/******************************************************************************* -* -* _LoadThenStoreAllFloatRegisters - load then dump all float registers to memory -* -* This function loads ALL floating point registers from the memory buffer -* specified by , and then stores them back to that buffer. -* -* This routine is called by a high priority context prior to calling a primitive -* that pends and triggers a co-operative context switch to a low priority -* context. Because the kernel doesn't save floating point context for -* co-operative context switches, the x87 FPU register stack must be put back -* in an empty state before the switch occurs in case the next task to perform -* floating point operations was also co-operatively switched out and simply -* inherits the existing x87 FPU state (expecting the stack to be empty). -* -* RETURNS: N/A -*/ +/** + * + * _LoadThenStoreAllFloatRegisters - load then dump all float registers to memory + * + * This function loads ALL floating point registers from the memory buffer + * specified by , and then stores them back to that buffer. + * + * This routine is called by a high priority context prior to calling a primitive + * that pends and triggers a co-operative context switch to a low priority + * context. 
Because the kernel doesn't save floating point context for + * co-operative context switches, the x87 FPU register stack must be put back + * in an empty state before the switch occurs in case the next task to perform + * floating point operations was also co-operatively switched out and simply + * inherits the existing x87 FPU state (expecting the stack to be empty). + * + * RETURNS: N/A + */ static inline void _LoadThenStoreAllFloatRegisters(FP_REG_SET *pFromToBuffer) { @@ -164,17 +164,17 @@ static inline void _LoadThenStoreAllFloatRegisters(FP_REG_SET *pFromToBuffer) } -/******************************************************************************* -* -* _StoreAllFloatRegisters - dump all floating point registers to memory -* -* This function stores ALL floating point registers to the memory buffer -* specified by . It is expected that a previous invocation of -* _LoadAllFloatRegisters() occured to load all the floating point registers -* from a memory buffer. -* -* RETURNS: N/A -*/ +/** + * + * _StoreAllFloatRegisters - dump all floating point registers to memory + * + * This function stores ALL floating point registers to the memory buffer + * specified by . It is expected that a previous invocation of + * _LoadAllFloatRegisters() occured to load all the floating point registers + * from a memory buffer. + * + * RETURNS: N/A + */ static inline void _StoreAllFloatRegisters(FP_REG_SET *pToBuffer) { @@ -201,20 +201,20 @@ static inline void _StoreAllFloatRegisters(FP_REG_SET *pToBuffer) ); } -/******************************************************************************* -* -* _StoreNonVolatileFloatRegisters - dump non-volatile FP registers to memory -* -* This routine is called by a high priority context after resuming execution -* from calling a primitive that will pend and thus result in a co-operative -* context switch to a low priority context. -* -* Only the non-volatile floating point registers are expected to survive across -* a function call, regardless of whether the call results in the context being -* pended. -* -* RETURNS: N/A -*/ +/** + * + * _StoreNonVolatileFloatRegisters - dump non-volatile FP registers to memory + * + * This routine is called by a high priority context after resuming execution + * from calling a primitive that will pend and thus result in a co-operative + * context switch to a low priority context. + * + * Only the non-volatile floating point registers are expected to survive across + * a function call, regardless of whether the call results in the context being + * pended. + * + * RETURNS: N/A + */ void _StoreNonVolatileFloatRegisters(FP_NONVOLATILE_REG_SET *pToBuffer) { diff --git a/samples/microkernel/test/test_fp_sharing/src/main.c b/samples/microkernel/test/test_fp_sharing/src/main.c index 5f810c6e7e7..d16b1113fd4 100644 --- a/samples/microkernel/test/test_fp_sharing/src/main.c +++ b/samples/microkernel/test/test_fp_sharing/src/main.c @@ -54,7 +54,7 @@ a nanoCpuFpDisable() from main(), and then indicate that only x87 FPU registers will be utilized (nanoCpuFpEnable). The fiber context should continue to load ALL non-integer registers, but main() should validate that only the x87 FPU registers are being saved/restored. 
-*/ + */ #if defined(CONFIG_ISA_IA32) #ifndef CONFIG_FLOAT @@ -137,13 +137,13 @@ int fpu_sharing_error; static volatile unsigned int load_store_low_count = 0; static volatile unsigned int load_store_high_count = 0; -/******************************************************************************* -* -* main - -* load_store_low - low priority FPU load/store context -* -* RETURNS: N/A -*/ +/** + * + * main - + * load_store_low - low priority FPU load/store context + * + * RETURNS: N/A + */ #ifdef CONFIG_NANOKERNEL void main(void) @@ -306,12 +306,12 @@ void load_store_low(void) } } -/******************************************************************************* -* -* load_store_high - high priority FPU load/store context -* -* RETURNS: N/A -*/ +/** + * + * load_store_high - high priority FPU load/store context + * + * RETURNS: N/A + */ #ifdef CONFIG_NANOKERNEL void load_store_high(int unused1, int unused2) diff --git a/samples/microkernel/test/test_fp_sharing/src/pi.c b/samples/microkernel/test/test_fp_sharing/src/pi.c index 6777855aa9d..248b4945127 100644 --- a/samples/microkernel/test/test_fp_sharing/src/pi.c +++ b/samples/microkernel/test/test_fp_sharing/src/pi.c @@ -50,7 +50,7 @@ iterations results in an accuracy of 3 decimal places. A reference value of pi is computed once at the start of the test. All subsequent computations must produce the same value, otherwise an error has occurred. -*/ + */ #ifdef CONFIG_MICROKERNEL #include @@ -73,12 +73,12 @@ static double reference_pi = 0.0f; static volatile unsigned int calc_pi_low_count = 0; static volatile unsigned int calc_pi_high_count = 0; -/******************************************************************************* -* -* calculate_pi_low - entry point for the low priority pi compute task -* -* RETURNS: N/A -*/ +/** + * + * calculate_pi_low - entry point for the low priority pi compute task + * + * RETURNS: N/A + */ void calculate_pi_low(void) { @@ -116,12 +116,12 @@ void calculate_pi_low(void) } } -/******************************************************************************* -* -* calculate_pi_high - entry point for the high priority pi compute task -* -* RETURNS: N/A -*/ +/** + * + * calculate_pi_high - entry point for the high priority pi compute task + * + * RETURNS: N/A + */ void calculate_pi_high(void) { diff --git a/samples/microkernel/test/test_libs/src/libraries.c b/samples/microkernel/test/test_libs/src/libraries.c index 63594d6d179..042b6467150 100644 --- a/samples/microkernel/test/test_libs/src/libraries.c +++ b/samples/microkernel/test/test_libs/src/libraries.c @@ -38,7 +38,7 @@ IMPORTANT: The module only ensures that each supported library is present, and that a bare minimum of its functionality is operating correctly. It does NOT guarantee that ALL standards-defined functionality is present, nor does it guarantee that ALL functionality provided is working correctly. -*/ + */ #include #include @@ -57,7 +57,7 @@ it guarantee that ALL functionality provided is working correctly. 
volatile long longMax = LONG_MAX; volatile long longOne = 1L; -/******************************************************************************* +/** * * limitsTest - test implementation-defined constants library * @@ -75,7 +75,7 @@ int limitsTest(void) return TC_PASS; } -/******************************************************************************* +/** * * stdboolTest - test boolean types and values library * @@ -101,7 +101,7 @@ int stdboolTest(void) volatile long longVariable; volatile size_t sizeOfLongVariable = sizeof(longVariable); -/******************************************************************************* +/** * * stddefTest - test standard type definitions library * @@ -127,7 +127,7 @@ int stddefTest(void) volatile uint8_t unsignedByte = 0xff; volatile uint32_t unsignedInt = 0xffffff00; -/******************************************************************************* +/** * * stdintTest - test integer types library * @@ -153,7 +153,7 @@ int stdintTest(void) char buffer[BUFSIZE]; -/******************************************************************************* +/** * * memset_test - test string memset * @@ -174,7 +174,7 @@ int memset_test(void) return TC_PASS; } -/******************************************************************************* +/** * * strlen_test - test string length function * @@ -196,7 +196,7 @@ int strlen_test(void) return TC_PASS; } -/******************************************************************************* +/** * * strcmp_test - test string compare function * @@ -234,7 +234,7 @@ int strcmp_test(void) return TC_PASS; } -/******************************************************************************* +/** * * strncmp_test - test string N compare function * @@ -273,7 +273,7 @@ int strncmp_test(void) } -/******************************************************************************* +/** * * strcpy_test - test string copy function * @@ -296,7 +296,7 @@ int strcpy_test(void) return TC_PASS; } -/******************************************************************************* +/** * * strncpy_test - test string N copy function * @@ -320,7 +320,7 @@ int strncpy_test(void) return TC_PASS; } -/******************************************************************************* +/** * * strchr_test - test string scanning function * @@ -351,7 +351,7 @@ int strchr_test(void) return TC_PASS; } -/******************************************************************************* +/** * * memcmp_test - test memory comparison function * @@ -379,8 +379,8 @@ int memcmp_test(void) return TC_PASS; } -/******************************************************************************* -* +/** + * * stringTest - test string operations library * * RETURNS: TC_PASS or TC_FAIL */ @@ -398,7 +398,7 @@ int stringTest(void) return TC_PASS; } -/******************************************************************************* +/** * * RegressionTask - main task in the test suite * diff --git a/samples/microkernel/test/test_libs/src/main.c b/samples/microkernel/test/test_libs/src/main.c index e121087cf7f..32a47fadbe4 100644 --- a/samples/microkernel/test/test_libs/src/main.c +++ b/samples/microkernel/test/test_libs/src/main.c @@ -42,7 +42,7 @@ then announces the result of the test. NOTE: At present only a single test task is used, but more tasks may be added in the future to enhance test coverage. -*/ + */ #include #include @@ -61,7 +61,7 @@ in the future to enhance test coverage. 
static ksem_t resultSems[] = { SEM_TASKDONE, SEM_TASKFAIL, ENDLIST }; -/******************************************************************************* +/** * * RegressionTaskEntry - entry point for RegressionTask * @@ -78,7 +78,7 @@ void RegressionTaskEntry(void) task_sem_give(resultSems[RegressionTask()]); } -/******************************************************************************* +/** * * MonitorTaskEntry - entry point for MonitorTask * diff --git a/samples/microkernel/test/test_mail/src/mail.c b/samples/microkernel/test/test_mail/src/mail.c index b7700c07299..a3c4363ed38 100644 --- a/samples/microkernel/test/test_mail/src/mail.c +++ b/samples/microkernel/test/test_mail/src/mail.c @@ -56,7 +56,7 @@ are not (yet) tested include: to ensure higher priority messages get preference. - Having receiving tasks of differing priorities waiting on a mailbox. to ensure higher priority tasks get preference. -*/ + */ #include @@ -84,23 +84,23 @@ extern kmbox_t noRcvrMbox; extern kmemory_pool_t testPool; extern kmemory_pool_t smallBlkszPool; -/******************************************************************************* -* -* setMsg_Sender - sets various fields in the message for the sender -* -* Sets the following fields in the message: -* rx_task to receiverTask - destination for the message -* mailbox to inMbox -* -* @param inMsg The message being received. -* @param inMbox Mail box to receive the message. -* @param receiverTask Destination for the message. -* @param dataArea Pointer to (optional) buffer to send. -* @param dataSize Size of (optional) buffer to send. -* @param info Additional (optional) info to send. -* -* RETURNS: N/A -*/ +/** + * + * setMsg_Sender - sets various fields in the message for the sender + * + * Sets the following fields in the message: + * rx_task to receiverTask - destination for the message + * mailbox to inMbox + * + * @param inMsg The message being received. + * @param inMbox Mail box to receive the message. + * @param receiverTask Destination for the message. + * @param dataArea Pointer to (optional) buffer to send. + * @param dataSize Size of (optional) buffer to send. + * @param info Additional (optional) info to send. + * + * RETURNS: N/A + */ static void setMsg_Sender(struct k_msg *inMsg, kmbox_t inMbox, ktask_t receiverTask, void *dataArea, uint32_t dataSize, uint32_t info) @@ -112,24 +112,24 @@ static void setMsg_Sender(struct k_msg *inMsg, kmbox_t inMbox, ktask_t receiverT inMsg->info = info; } -/******************************************************************************* -* -* setMsg_Receiver - sets various fields in the message for the receiver -* -* Sets the following fields in the message: -* rx_data to NULL - to allow message transfer to occur -* size to MSGSIZE -* tx_task to senderTask - receiver tries to get message from this source -* mailbox to inMbox -* -* @param inMsg Message descriptor. -* @param inMbox Mail box to receive from. -* @param senderTask Sending task to receive from. -* @param inBuffer Incoming data area -* @param inBufferSize Size of incoming data area. -* -* RETURNS: N/A -*/ +/** + * + * setMsg_Receiver - sets various fields in the message for the receiver + * + * Sets the following fields in the message: + * rx_data to NULL - to allow message transfer to occur + * size to MSGSIZE + * tx_task to senderTask - receiver tries to get message from this source + * mailbox to inMbox + * + * @param inMsg Message descriptor. + * @param inMbox Mail box to receive from. + * @param senderTask Sending task to receive from. 
+ * @param inBuffer Incoming data area + * @param inBufferSize Size of incoming data area. + * + * RETURNS: N/A + */ static void setMsg_Receiver(struct k_msg *inMsg, kmbox_t inMbox, ktask_t senderTask, void *inBuffer, uint32_t inBufferSize) @@ -143,16 +143,16 @@ static void setMsg_Receiver(struct k_msg *inMsg, kmbox_t inMbox, ktask_t senderT } } -/******************************************************************************* -* -* setMsg_RecvBuf - sets rx_data field in msg and clears buffer -* -* @param inMsg The message being received. -* @param inBuffer Incoming data area. -* @param inBufferSize Size of incoming data area. -* -* RETURNS: N/A -*/ +/** + * + * setMsg_RecvBuf - sets rx_data field in msg and clears buffer + * + * @param inMsg The message being received. + * @param inBuffer Incoming data area. + * @param inBufferSize Size of incoming data area. + * + * RETURNS: N/A + */ static void setMsg_RecvBuf(struct k_msg *inMsg, char *inBuffer, uint32_t inBufferSize) { @@ -163,14 +163,14 @@ static void setMsg_RecvBuf(struct k_msg *inMsg, char *inBuffer, uint32_t inBuffe } } -/******************************************************************************* -* -* MsgSenderTask - task that tests sending of mailbox messages -* -* This routine exercises the task_mbox_put[_wait[_timeout]] APIs. -* -* RETURNS: TC_PASS or TC_FAIL -*/ +/** + * + * MsgSenderTask - task that tests sending of mailbox messages + * + * This routine exercises the task_mbox_put[_wait[_timeout]] APIs. + * + * RETURNS: TC_PASS or TC_FAIL + */ int MsgSenderTask(void) { @@ -358,14 +358,14 @@ int MsgSenderTask(void) return TC_PASS; } -/******************************************************************************* -* -* MsgRcvrTask - task that tests receiving of mailbox messages -* -* This routine exercises the task_mbox_get[_wait[_timeout]] and task_mbox_data_get[xxx] APIs. -* -* RETURNS: TC_PASS or TC_FAIL -*/ +/** + * + * MsgRcvrTask - task that tests receiving of mailbox messages + * + * This routine exercises the task_mbox_get[_wait[_timeout]] and task_mbox_data_get[xxx] APIs. + * + * RETURNS: TC_PASS or TC_FAIL + */ int MsgRcvrTask(void) { diff --git a/samples/microkernel/test/test_mail/src/main.c b/samples/microkernel/test/test_mail/src/main.c index 5160b85ec83..5c15f3523bd 100644 --- a/samples/microkernel/test/test_mail/src/main.c +++ b/samples/microkernel/test/test_mail/src/main.c @@ -40,7 +40,7 @@ Each test task entry point invokes a test routine that returns a success/failure indication, then gives a corresponding semaphore. An additional task monitors these semaphores until it detects a failure or the completion of all test tasks, then announces the result of the test. -*/ + */ #include #include @@ -69,15 +69,15 @@ kmbox_t noRcvrMbox = NORCVRMBOX; kmemory_pool_t testPool = TESTPOOL; kmemory_pool_t smallBlkszPool = SMALLBLKSZPOOL; -/******************************************************************************* -* -* MsgSenderTaskEntry - entry point for MsgSenderTask -* -* This routine signals "task done" or "task fail", based on the return code of -* MsgSenderTask. -* -* RETURNS: N/A -*/ +/** + * + * MsgSenderTaskEntry - entry point for MsgSenderTask + * + * This routine signals "task done" or "task fail", based on the return code of + * MsgSenderTask. 
+ * + * RETURNS: N/A + */ void MsgSenderTaskEntry(void) { @@ -86,15 +86,15 @@ void MsgSenderTaskEntry(void) task_sem_give(resultSems[MsgSenderTask()]); } -/******************************************************************************* -* -* MsgRcvrTaskEntry - entry point for MsgRcvrTask -* -* This routine signals "task done" or "task fail", based on the return code of -* MsgRcvrTask. -* -* RETURNS: N/A -*/ +/** + * + * MsgRcvrTaskEntry - entry point for MsgRcvrTask + * + * This routine signals "task done" or "task fail", based on the return code of + * MsgRcvrTask. + * + * RETURNS: N/A + */ void MsgRcvrTaskEntry(void) { @@ -103,15 +103,15 @@ void MsgRcvrTaskEntry(void) task_sem_give(resultSems[MsgRcvrTask()]); } -/******************************************************************************* -* -* MonitorTaskEntry - entry point for MonitorTask -* -* This routine keeps tabs on the progress of the tasks doing the actual testing -* and generates the final test case summary message. -* -* RETURNS: N/A -*/ +/** + * + * MonitorTaskEntry - entry point for MonitorTask + * + * This routine keeps tabs on the progress of the tasks doing the actual testing + * and generates the final test case summary message. + * + * RETURNS: N/A + */ void MonitorTaskEntry(void) { diff --git a/samples/microkernel/test/test_map/src/map.c b/samples/microkernel/test/test_map/src/map.c index 34bcae439d6..8bc7b3f0e99 100644 --- a/samples/microkernel/test/test_map/src/map.c +++ b/samples/microkernel/test/test_map/src/map.c @@ -42,7 +42,7 @@ NOTE One should ensure that the block is released to the same map from which it was allocated, and is only released once. Using an invalid pointer will have unpredictable side effects. -*/ + */ #include #include @@ -60,18 +60,18 @@ static int tcRC = TC_PASS; /* test case return code */ int testMapGetAllBlocks(void **P); int testMapFreeAllBlocks(void **P); -/******************************************************************************* -* -* verifyRetValue - verify return value -* -* This routine verifies current value against expected value -* and returns true if they are the same. -* -* \param expectRetValue expect value -* \param currentRetValue current value -* -* RETURNS: true, false -*/ +/** + * + * verifyRetValue - verify return value + * + * This routine verifies current value against expected value + * and returns true if they are the same. + * + * \param expectRetValue expect value + * \param currentRetValue current value + * + * RETURNS: true, false + */ bool verifyRetValue(int expectRetValue, int currentRetValue) { @@ -79,16 +79,16 @@ bool verifyRetValue(int expectRetValue, int currentRetValue) } /* verifyRetValue */ -/******************************************************************************* -* -* HelperTask - helper task -* -* This routine gets all blocks from the memory map. It uses semaphores -* SEM_REGRESDONE and SEM_HELPERDONE to synchronize between different parts -* of the test. -* -* RETURNS: N/A -*/ +/** + * + * HelperTask - helper task + * + * This routine gets all blocks from the memory map. It uses semaphores + * SEM_REGRESDONE and SEM_HELPERDONE to synchronize between different parts + * of the test. + * + * RETURNS: N/A + */ void HelperTask(void) { @@ -141,21 +141,21 @@ exitTest1: } /* HelperTask */ -/******************************************************************************* -* -* testMapGetAllBlocks - get all blocks from the memory map -* -* Get all blocks from the memory map. 
It also tries to get one more block -* from the map after the map is empty to verify the error return code. -* -* This routine tests the following: -* -* task_mem_map_alloc(), task_mem_map_used_get() -* -* \param p pointer to pointer of allocated blocks -* -* RETURNS: TC_PASS, TC_FAIL -*/ +/** + * + * testMapGetAllBlocks - get all blocks from the memory map + * + * Get all blocks from the memory map. It also tries to get one more block + * from the map after the map is empty to verify the error return code. + * + * This routine tests the following: + * + * task_mem_map_alloc(), task_mem_map_used_get() + * + * \param p pointer to pointer of allocated blocks + * + * RETURNS: TC_PASS, TC_FAIL + */ int testMapGetAllBlocks(void **p) { @@ -213,21 +213,21 @@ int testMapGetAllBlocks(void **p) return TC_PASS; } /* testMapGetAllBlocks */ -/******************************************************************************* -* -* testMapFreeAllBlocks - free all memeory blocks -* -* This routine frees all memory blocks and also verifies that the number of -* blocks used are correct. -* -* This routine tests the following: -* -* task_mem_map_free(), task_mem_map_used_get() -* -* \param p pointer to pointer of allocated blocks -* -* RETURNS: TC_PASS, TC_FAIL -*/ +/** + * + * testMapFreeAllBlocks - free all memeory blocks + * + * This routine frees all memory blocks and also verifies that the number of + * blocks used are correct. + * + * This routine tests the following: + * + * task_mem_map_free(), task_mem_map_used_get() + * + * \param p pointer to pointer of allocated blocks + * + * RETURNS: TC_PASS, TC_FAIL + */ int testMapFreeAllBlocks(void **p) { @@ -273,16 +273,16 @@ int testMapFreeAllBlocks(void **p) return TC_PASS; } /* testMapFreeAllBlocks */ -/******************************************************************************* -* -* printPointers - print the pointers -* -* This routine prints out the pointers. -* -* \param pointer pointer to pointer of allocated blocks -* -* RETURNS: N/A -*/ +/** + * + * printPointers - print the pointers + * + * This routine prints out the pointers. + * + * \param pointer pointer to pointer of allocated blocks + * + * RETURNS: N/A + */ void printPointers(void **pointer) { TC_PRINT("%s: ", __func__); @@ -295,20 +295,20 @@ void printPointers(void **pointer) } /* printPointers */ -/******************************************************************************* -* -* RegressionTask - main task to test task_mem_map_xxx interfaces -* -* This routine calls testMapGetAllBlocks() to get all memory blocks from the -* map and calls testMapFreeAllBlocks() to free all memory blocks. It also -* tries to wait (with and without timeout) for a memory block. -* -* This routine tests the following: -* -* task_mem_map_alloc_wait, task_mem_map_alloc_wait_timeout -* -* RETURNS: N/A -*/ +/** + * + * RegressionTask - main task to test task_mem_map_xxx interfaces + * + * This routine calls testMapGetAllBlocks() to get all memory blocks from the + * map and calls testMapFreeAllBlocks() to free all memory blocks. It also + * tries to wait (with and without timeout) for a memory block. 
+ * + * This routine tests the following: + * + * task_mem_map_alloc_wait, task_mem_map_alloc_wait_timeout + * + * RETURNS: N/A + */ void RegressionTask(void) { diff --git a/samples/microkernel/test/test_mutex/src/mutex.c b/samples/microkernel/test/test_mutex/src/mutex.c index fad897daac6..4deadbc103d 100644 --- a/samples/microkernel/test/test_mutex/src/mutex.c +++ b/samples/microkernel/test/test_mutex/src/mutex.c @@ -63,7 +63,7 @@ Timeline : RegressionTask (@ priority 25) gives Mutex2 : RegressionTask (@ priority 30) gives Mutex1 : RegressionTask (@ priority 40) sleeps -*/ + */ #include #include @@ -75,12 +75,12 @@ Timeline static int tcRC = TC_PASS; /* test case return code */ -/******************************************************************************* -* -* Task10 - -* -* RETURNS: N/A -*/ +/** + * + * Task10 - + * + * RETURNS: N/A + */ void Task10(void) { @@ -98,12 +98,12 @@ void Task10(void) } /* Task10 */ -/******************************************************************************* -* -* Task15 - -* -* RETURNS: N/A -*/ +/** + * + * Task15 - + * + * RETURNS: N/A + */ void Task15(void) { @@ -130,12 +130,12 @@ void Task15(void) task_mutex_unlock(Mutex4); } -/******************************************************************************* -* -* Task20 - -* -* RETURNS: N/A -*/ +/** + * + * Task20 - + * + * RETURNS: N/A + */ void Task20(void) { @@ -160,12 +160,12 @@ void Task20(void) } /* Task20 */ -/******************************************************************************* -* -* Task25 - -* -* RETURNS: N/A -*/ +/** + * + * Task25 - + * + * RETURNS: N/A + */ void Task25(void) { @@ -183,12 +183,12 @@ void Task25(void) task_mutex_unlock(Mutex2); } /* Task25 */ -/******************************************************************************* -* -* Task30 - -* -* RETURNS: N/A -*/ +/** + * + * Task30 - + * + * RETURNS: N/A + */ void Task30(void) { @@ -214,12 +214,12 @@ void Task30(void) task_mutex_unlock(Mutex1); } -/******************************************************************************* -* -* Task45 - -* -* RETURNS: N/A -*/ +/** + * + * Task45 - + * + * RETURNS: N/A + */ void Task45(void) { @@ -235,14 +235,14 @@ void Task45(void) task_mutex_unlock(Mutex3); } -/******************************************************************************* -* -* RegressionTask - main task to test task_mutex_xxx interfaces -* -* This task will lock on Mutex1, Mutex2, Mutex3 and Mutex4. -* -* RETURNS: N/A -*/ +/** + * + * RegressionTask - main task to test task_mutex_xxx interfaces + * + * This task will lock on Mutex1, Mutex2, Mutex3 and Mutex4. + * + * RETURNS: N/A + */ void RegressionTask(void) { diff --git a/samples/microkernel/test/test_pipe/src/main.c b/samples/microkernel/test/test_pipe/src/main.c index c75caf6bfc5..98fe09dc90a 100644 --- a/samples/microkernel/test/test_pipe/src/main.c +++ b/samples/microkernel/test/test_pipe/src/main.c @@ -40,7 +40,7 @@ Each test task entry point invokes a test routine that returns a success/failure indication, then gives a corresponding semaphore. An additional task monitors these semaphores until it detects a failure or the completion of all test tasks, then announces the result of the test. -*/ + */ #include #include @@ -63,15 +63,15 @@ ksem_t counterSem = COUNTER_SEM; kpipe_t pipeId = PIPE_ID; -/******************************************************************************* -* -* RegressionTaskEntry - entry point for RegressionTask -* -* This routine signals "task done" or "task fail", based on the return code of -* RegressionTask. 
-* -* RETURNS: N/A -*/ +/** + * + * RegressionTaskEntry - entry point for RegressionTask + * + * This routine signals "task done" or "task fail", based on the return code of + * RegressionTask. + * + * RETURNS: N/A + */ void RegressionTaskEntry(void) { @@ -80,15 +80,15 @@ void RegressionTaskEntry(void) task_sem_give(resultSems[RegressionTask()]); } -/******************************************************************************* -* -* AlternateTaskEntry - entry point for AlternateTask -* -* This routine signals "task done" or "task fail", based on the return code of -* AlternateTask. -* -* RETURNS: N/A -*/ +/** + * + * AlternateTaskEntry - entry point for AlternateTask + * + * This routine signals "task done" or "task fail", based on the return code of + * AlternateTask. + * + * RETURNS: N/A + */ void AlternateTaskEntry(void) { @@ -97,15 +97,15 @@ void AlternateTaskEntry(void) task_sem_give(resultSems[AlternateTask()]); } -/******************************************************************************* -* -* MonitorTaskEntry - entry point for MonitorTask -* -* This routine keeps tabs on the progress of the tasks doing the actual testing -* and generates the final test case summary message. -* -* RETURNS: N/A -*/ +/** + * + * MonitorTaskEntry - entry point for MonitorTask + * + * This routine keeps tabs on the progress of the tasks doing the actual testing + * and generates the final test case summary message. + * + * RETURNS: N/A + */ void MonitorTaskEntry(void) { diff --git a/samples/microkernel/test/test_pipe/src/pipe.c b/samples/microkernel/test/test_pipe/src/pipe.c index 2840ea3a795..1d897dcd7b6 100644 --- a/samples/microkernel/test/test_pipe/src/pipe.c +++ b/samples/microkernel/test/test_pipe/src/pipe.c @@ -39,7 +39,7 @@ This modules tests the following target pipe routines: The following target pipe routine does not yet have a test case: task_pipe_put_async() -*/ + */ #include #include @@ -156,12 +156,12 @@ extern ksem_t counterSem; extern kpipe_t pipeId; -/******************************************************************************* -* -* microObjectsInit - initialize objects used in this microkernel test suite -* -* RETURNS: N/A -*/ +/** + * + * microObjectsInit - initialize objects used in this microkernel test suite + * + * RETURNS: N/A + */ void microObjectsInit(void) { @@ -172,15 +172,15 @@ void microObjectsInit(void) } } -/******************************************************************************* -* -* receiveBufferCheck - check the contents of the receive buffer -* -* \param buffer pointer to buffer to check -* \param size number of bytes to check -* -* RETURNS: on success, index of wrong character on failure -*/ +/** + * + * receiveBufferCheck - check the contents of the receive buffer + * + * \param buffer pointer to buffer to check + * \param size number of bytes to check + * + * RETURNS: on success, index of wrong character on failure + */ int receiveBufferCheck(char *buffer, int size) { @@ -195,17 +195,17 @@ int receiveBufferCheck(char *buffer, int size) return size; } -/******************************************************************************* -* -* pipePutHelperWork - helper routine to pipePutTest() -* -* \param singleItems testcase list (one item in the pipe) -* \param nSingles number of items in testcase -* \param manyItems testcase list (many items in the pipe) -* \param nMany number of items in testcase -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipePutHelperWork - helper routine to pipePutTest() + * + * \param singleItems 
testcase list (one item in the pipe) + * \param nSingles number of items in testcase + * \param manyItems testcase list (many items in the pipe) + * \param nMany number of items in testcase + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipePutHelperWork(SIZE_EXPECT *singleItems, int nSingles, SIZE_EXPECT *manyItems, int nMany) @@ -295,12 +295,12 @@ int pipePutHelperWork(SIZE_EXPECT *singleItems, int nSingles, return TC_PASS; } -/******************************************************************************* -* -* pipePutHelper - helper routine to pipePutTest() -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipePutHelper - helper routine to pipePutTest() + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipePutHelper(void) { @@ -330,19 +330,19 @@ int pipePutHelper(void) return TC_PASS; } -/******************************************************************************* -* -* pipePutTestWork - test task_pipe_put() -* -* This routine tests the task_pipe_put() API. -* -* \param singleItems testcase list (one item in the pipe) -* \param nSingles number of items in testcase -* \param manyItems testcase list (many items in the pipe) -* \param nMany number of items in testcase -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipePutTestWork - test task_pipe_put() + * + * This routine tests the task_pipe_put() API. + * + * \param singleItems testcase list (one item in the pipe) + * \param nSingles number of items in testcase + * \param manyItems testcase list (many items in the pipe) + * \param nMany number of items in testcase + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipePutTestWork(SIZE_EXPECT *singleItems, int nSingles, SIZE_EXPECT *manyItems, int nMany) @@ -419,14 +419,14 @@ int pipePutTestWork(SIZE_EXPECT *singleItems, int nSingles, return TC_PASS; } -/******************************************************************************* -* -* pipePutTest - test task_pipe_put() -* -* This routine tests the task_pipe_put() API. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipePutTest - test task_pipe_put() + * + * This routine tests the task_pipe_put() API. 
+ * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipePutTest(void) { @@ -456,12 +456,12 @@ int pipePutTest(void) return TC_PASS; } -/******************************************************************************* -* -* pipePutWaitHelper - help test task_pipe_put_wait() -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipePutWaitHelper - help test task_pipe_put_wait() + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipePutWaitHelper(void) { @@ -520,12 +520,12 @@ int pipePutWaitHelper(void) return TC_PASS; } -/******************************************************************************* -* -* pipePutWaitTest - test task_pipe_put_wait() -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipePutWaitTest - test task_pipe_put_wait() + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipePutWaitTest(void) { @@ -580,12 +580,12 @@ int pipePutWaitTest(void) return TC_PASS; } -/******************************************************************************* -* -* pipePutTimeoutHelper - test task_pipe_get_wait_timeout() -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipePutTimeoutHelper - test task_pipe_get_wait_timeout() + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipePutTimeoutHelper(void) { @@ -644,12 +644,12 @@ int pipePutTimeoutHelper(void) return TC_PASS; } -/******************************************************************************* -* -* pipePutTimeoutTest - test task_pipe_put_wait_timeout() -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipePutTimeoutTest - test task_pipe_put_wait_timeout() + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipePutTimeoutTest(void) { @@ -724,17 +724,17 @@ int pipePutTimeoutTest(void) return TC_PASS; } -/******************************************************************************* -* -* pipeGetTest - routine to test task_pipe_get() -* -* This routine tests the task_pipe_get() API. Some of this functionality -* has already been tested while testing task_pipe_put(). As a result, the -* only remaining functionality that needs to be checked are attempts to get -* data from an empty pipe. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipeGetTest - routine to test task_pipe_get() + * + * This routine tests the task_pipe_get() API. Some of this functionality + * has already been tested while testing task_pipe_put(). As a result, the + * only remaining functionality that needs to be checked are attempts to get + * data from an empty pipe. 
+ * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipeGetTest(void) { @@ -773,15 +773,15 @@ int pipeGetTest(void) return TC_PASS; } -/******************************************************************************* -* -* pipeGetWaitHelperWork - test task_pipe_get_wait() -* -* \param items testcase list for task_pipe_get_wait() -* \param nItems number of items in list -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipeGetWaitHelperWork - test task_pipe_get_wait() + * + * \param items testcase list for task_pipe_get_wait() + * \param nItems number of items in list + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipeGetWaitHelperWork(SIZE_EXPECT *items, int nItems) { @@ -810,12 +810,12 @@ int pipeGetWaitHelperWork(SIZE_EXPECT *items, int nItems) return TC_PASS; } -/******************************************************************************* -* -* pipeGetWaitHelper - test task_pipe_get_wait() -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipeGetWaitHelper - test task_pipe_get_wait() + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipeGetWaitHelper(void) { @@ -838,15 +838,15 @@ int pipeGetWaitHelper(void) return TC_PASS; } -/******************************************************************************* -* -* pipeGetWaitTestWork - test task_pipe_get_wait() -* -* \param items testcase list for task_pipe_get_wait() -* \param nItems number of items in list -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipeGetWaitTestWork - test task_pipe_get_wait() + * + * \param items testcase list for task_pipe_get_wait() + * \param nItems number of items in list + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipeGetWaitTestWork(SIZE_EXPECT *items, int nItems) { @@ -874,12 +874,12 @@ int pipeGetWaitTestWork(SIZE_EXPECT *items, int nItems) return TC_PASS; } -/******************************************************************************* -* -* pipeGetWaitTest - test task_pipe_get_wait() -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipeGetWaitTest - test task_pipe_get_wait() + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipeGetWaitTest(void) { @@ -910,12 +910,12 @@ int pipeGetWaitTest(void) return TC_PASS; } -/******************************************************************************* -* -* pipeGetTimeoutTest - test remaining task_pipe_get_wait_timeout() functionality -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * pipeGetTimeoutTest - test remaining task_pipe_get_wait_timeout() functionality + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int pipeGetTimeoutTest(void) { @@ -940,14 +940,14 @@ int pipeGetTimeoutTest(void) return TC_PASS; } -/******************************************************************************* -* -* AlternateTask - alternate task in the test suite -* -* This routine runs at a lower priority than RegressionTask(). -* -* RETURNS: TC_PASS or TC_FAIL -*/ +/** + * + * AlternateTask - alternate task in the test suite + * + * This routine runs at a lower priority than RegressionTask(). + * + * RETURNS: TC_PASS or TC_FAIL + */ int AlternateTask(void) { @@ -987,14 +987,14 @@ int AlternateTask(void) return TC_PASS; } -/******************************************************************************* -* -* RegressionTask - main task in the test suite -* -* This is the entry point to the pipe test suite. 
-* -* RETURNS: TC_PASS or TC_FAIL -*/ +/** + * + * RegressionTask - main task in the test suite + * + * This is the entry point to the pipe test suite. + * + * RETURNS: TC_PASS or TC_FAIL + */ int RegressionTask(void) { diff --git a/samples/microkernel/test/test_pool/src/pool.c b/samples/microkernel/test/test_pool/src/pool.c index 8538701f95d..c691d5fc55b 100644 --- a/samples/microkernel/test/test_pool/src/pool.c +++ b/samples/microkernel/test/test_pool/src/pool.c @@ -36,7 +36,7 @@ This modules tests the following memory pool routines: task_mem_pool_alloc(), task_mem_pool_alloc_wait(), task_mem_pool_alloc_wait_timeout(), task_mem_pool_free() -*/ + */ #include #include @@ -109,12 +109,12 @@ static TEST_CASE defrag[] = { {&blockList[9], POOL_ID, 1024, 0, RC_OK} }; -/******************************************************************************* -* -* blockCompare - compare the two blocks -* -* RETURNS: 0 if the same, non-zero if not the same -*/ +/** + * + * blockCompare - compare the two blocks + * + * RETURNS: 0 if the same, non-zero if not the same + */ int blockCompare(struct k_block *b1, struct k_block *b2) { @@ -133,12 +133,12 @@ int blockCompare(struct k_block *b1, struct k_block *b2) return diff; } -/******************************************************************************* -* -* poolBlockGetFunc - wrapper for task_mem_pool_alloc() -* -* RETURNS: task_mem_pool_alloc() return value -*/ +/** + * + * poolBlockGetFunc - wrapper for task_mem_pool_alloc() + * + * RETURNS: task_mem_pool_alloc() return value + */ int poolBlockGetFunc(struct k_block *block, kmemory_pool_t pool, int size, int32_t unused) @@ -148,12 +148,12 @@ int poolBlockGetFunc(struct k_block *block, kmemory_pool_t pool, int size, return task_mem_pool_alloc(block, pool, size); } -/******************************************************************************* -* -* poolBlockGetWFunc - wrapper for task_mem_pool_alloc_wait() -* -* RETURNS: task_mem_pool_alloc_wait() return value -*/ +/** + * + * poolBlockGetWFunc - wrapper for task_mem_pool_alloc_wait() + * + * RETURNS: task_mem_pool_alloc_wait() return value + */ int poolBlockGetWFunc(struct k_block *block, kmemory_pool_t pool, int size, int32_t unused) @@ -163,12 +163,12 @@ int poolBlockGetWFunc(struct k_block *block, kmemory_pool_t pool, int size, return task_mem_pool_alloc_wait(block, pool, size); } -/******************************************************************************* -* -* poolBlockGetWTFunc - wrapper for task_mem_pool_alloc_wait_timeout() -* -* RETURNS: task_mem_pool_alloc_wait_timeout() return value -*/ +/** + * + * poolBlockGetWTFunc - wrapper for task_mem_pool_alloc_wait_timeout() + * + * RETURNS: task_mem_pool_alloc_wait_timeout() return value + */ int poolBlockGetWTFunc(struct k_block *block, kmemory_pool_t pool, int size, int32_t timeout) @@ -176,12 +176,12 @@ int poolBlockGetWTFunc(struct k_block *block, kmemory_pool_t pool, return task_mem_pool_alloc_wait_timeout(block, pool, size, timeout); } -/******************************************************************************* -* -* freeBlocks - free any blocks allocated in the test set -* -* RETURNS: N/A -*/ +/** + * + * freeBlocks - free any blocks allocated in the test set + * + * RETURNS: N/A + */ void freeBlocks(TEST_CASE *tests, int nTests) { @@ -194,12 +194,12 @@ void freeBlocks(TEST_CASE *tests, int nTests) } } -/******************************************************************************* -* -* poolBlockGetWork - perform the work of getting blocks -* -* RETURNS: TC_PASS on success, 
TC_FAIL on failure -*/ +/** + * + * poolBlockGetWork - perform the work of getting blocks + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int poolBlockGetWork(char *string, poolBlockGetFunc_t func, TEST_CASE *tests, int nTests) @@ -221,14 +221,14 @@ int poolBlockGetWork(char *string, poolBlockGetFunc_t func, return TC_PASS; } -/******************************************************************************* -* -* poolBlockGetTest - test the task_mem_pool_alloc() API -* -* The pool is 4 kB in size. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * poolBlockGetTest - test the task_mem_pool_alloc() API + * + * The pool is 4 kB in size. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int poolBlockGetTest(void) { @@ -256,12 +256,12 @@ int poolBlockGetTest(void) return TC_PASS; } -/******************************************************************************* -* -* HelperTask - helper task to poolBlockGetTimeoutTest() -* -* RETURNS: N/A -*/ +/** + * + * HelperTask - helper task to poolBlockGetTimeoutTest() + * + * RETURNS: N/A + */ void HelperTask(void) { @@ -271,12 +271,12 @@ void HelperTask(void) task_mem_pool_free(&helperBlock); } -/******************************************************************************* -* -* poolBlockGetTimeoutTest - test task_mem_pool_alloc_wait_timeout() -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * poolBlockGetTimeoutTest - test task_mem_pool_alloc_wait_timeout() + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int poolBlockGetTimeoutTest(void) { @@ -324,12 +324,12 @@ int poolBlockGetTimeoutTest(void) return TC_PASS; } -/******************************************************************************* -* -* poolBlockGetWaitTest - -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * poolBlockGetWaitTest - + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int poolBlockGetWaitTest(void) { @@ -366,12 +366,12 @@ int poolBlockGetWaitTest(void) return TC_PASS; } -/******************************************************************************* -* -* DefragTask - task responsible for defragmenting the pool POOL_ID -* -* RETURNS: N/A -*/ +/** + * + * DefragTask - task responsible for defragmenting the pool POOL_ID + * + * RETURNS: N/A + */ void DefragTask(void) { @@ -382,12 +382,12 @@ void DefragTask(void) task_sem_give(REGRESS_SEM); /* DefragTask is finished */ } -/******************************************************************************* -* -* poolDefragTest - -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * poolDefragTest - + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int poolDefragTest(void) { @@ -431,14 +431,14 @@ int poolDefragTest(void) return TC_PASS; } -/******************************************************************************* -* -* AlternateTask - alternate task in the test suite -* -* This routine runs at a lower priority than RegressionTask(). -* -* RETURNS: N/A -*/ +/** + * + * AlternateTask - alternate task in the test suite + * + * This routine runs at a lower priority than RegressionTask(). + * + * RETURNS: N/A + */ void AlternateTask(void) { @@ -451,14 +451,14 @@ void AlternateTask(void) evidence = 2; } -/******************************************************************************* -* -* RegressionTask - main task in the test suite -* -* This is the entry point to the memory pool test suite. 
-* -* RETURNS: N/A -*/ +/** + * + * RegressionTask - main task in the test suite + * + * This is the entry point to the memory pool test suite. + * + * RETURNS: N/A + */ void RegressionTask(void) { diff --git a/samples/microkernel/test/test_rand32/src/test-rand32.c b/samples/microkernel/test/test_rand32/src/test-rand32.c index bd325594b3c..f38f9784a39 100644 --- a/samples/microkernel/test/test_rand32/src/test-rand32.c +++ b/samples/microkernel/test/test_rand32/src/test-rand32.c @@ -41,7 +41,7 @@ #define N_VALUES 10 -/******************************************************************************* +/** * * RegressionTaskEntry - regression test's entry point * diff --git a/samples/microkernel/test/test_sema/src/main.c b/samples/microkernel/test/test_sema/src/main.c index 2a497df639c..c0fe8abf7f8 100644 --- a/samples/microkernel/test/test_sema/src/main.c +++ b/samples/microkernel/test/test_sema/src/main.c @@ -40,7 +40,7 @@ Each test task entry point invokes a test routine that returns a success/failure indication, then gives a corresponding semaphore. An additional task monitors these semaphores until it detects a failure or the completion of all test tasks, then announces the result of the test. -*/ + */ #include #include @@ -91,15 +91,15 @@ ksem_t semList[] = { static vvfn _trigger_isrSemaSignal = (vvfn) sw_isr_trigger_0; -/******************************************************************************* -* -* RegressionTaskEntry - entry point for RegressionTask -* -* This routine signals "task done" or "task fail", based on the return code of -* RegressionTask. -* -* RETURNS: N/A -*/ +/** + * + * RegressionTaskEntry - entry point for RegressionTask + * + * This routine signals "task done" or "task fail", based on the return code of + * RegressionTask. + * + * RETURNS: N/A + */ void RegressionTaskEntry(void) { @@ -108,15 +108,15 @@ void RegressionTaskEntry(void) task_sem_give(resultSems[RegressionTask()]); } -/******************************************************************************* -* -* AlternateTaskEntry - entry point for AlternateTask -* -* This routine signals "task done" or "task fail", based on the return code of -* MsgRcvrTask. -* -* RETURNS: N/A -*/ +/** + * + * AlternateTaskEntry - entry point for AlternateTask + * + * This routine signals "task done" or "task fail", based on the return code of + * MsgRcvrTask. + * + * RETURNS: N/A + */ void AlternateTaskEntry(void) { @@ -125,15 +125,15 @@ void AlternateTaskEntry(void) task_sem_give(resultSems[AlternateTask()]); } -/******************************************************************************* -* -* HighPriTaskEntry - entry point for HighPriTask -* -* This routine signals "task done" or "task fail", based on the return code of -* HighPriTask. -* -* RETURNS: N/A -*/ +/** + * + * HighPriTaskEntry - entry point for HighPriTask + * + * This routine signals "task done" or "task fail", based on the return code of + * HighPriTask. + * + * RETURNS: N/A + */ void HighPriTaskEntry(void) { @@ -142,15 +142,15 @@ void HighPriTaskEntry(void) task_sem_give(resultSems[HighPriTask()]); } -/******************************************************************************* -* -* LowPriTaskEntry - entry point for LowPriTask -* -* This routine signals "task done" or "task fail", based on the return code of -* LowPriTask. -* -* RETURNS: N/A -*/ +/** + * + * LowPriTaskEntry - entry point for LowPriTask + * + * This routine signals "task done" or "task fail", based on the return code of + * LowPriTask. 
+ * + * RETURNS: N/A + */ void LowPriTaskEntry(void) { @@ -159,28 +159,28 @@ void LowPriTaskEntry(void) task_sem_give(resultSems[LowPriTask()]); } -/******************************************************************************* -* -* testIsrHandler - ISR that gives specified semaphore -* -* \param isrData pointer to semaphore to be given -* -* RETURNS: N/A -*/ +/** + * + * testIsrHandler - ISR that gives specified semaphore + * + * \param isrData pointer to semaphore to be given + * + * RETURNS: N/A + */ static void testIsrHandler(void *isrData) { isr_sem_give(*(ksem_t *)isrData, &CMD_PKT_SET(cmdPktSet)); } -/******************************************************************************* -* -* trigger_isrSemaSignal - generate interrupt that gives specified semaphore -* -* \param semaphore semaphore to be given -* -* RETURNS: N/A -*/ +/** + * + * trigger_isrSemaSignal - generate interrupt that gives specified semaphore + * + * \param semaphore semaphore to be given + * + * RETURNS: N/A + */ void trigger_isrSemaSignal(ksem_t semaphore) { @@ -188,27 +188,27 @@ void trigger_isrSemaSignal(ksem_t semaphore) _trigger_isrSemaSignal(); } -/******************************************************************************* -* -* releaseTestFiber - release the test fiber -* -* RETURNS: N/A -*/ +/** + * + * releaseTestFiber - release the test fiber + * + * RETURNS: N/A + */ void releaseTestFiber(void) { nano_task_sem_give(&fiberSem); } -/******************************************************************************* -* -* testInterruptsInit - initialize interrupt-related code -* -* Binds an ISR to the interrupt vector used to give semaphores from interrupt -* level. -* -* RETURNS: N/A -*/ +/** + * + * testInterruptsInit - initialize interrupt-related code + * + * Binds an ISR to the interrupt vector used to give semaphores from interrupt + * level. + * + * RETURNS: N/A + */ static void testInterruptsInit(void) { @@ -220,15 +220,15 @@ static void testInterruptsInit(void) (void) initIRQ(&i); } -/******************************************************************************* -* -* MonitorTaskEntry - entry point for MonitorTask -* -* This routine keeps tabs on the progress of the tasks doing the actual testing -* and generates the final test case summary message. -* -* RETURNS: N/A -*/ +/** + * + * MonitorTaskEntry - entry point for MonitorTask + * + * This routine keeps tabs on the progress of the tasks doing the actual testing + * and generates the final test case summary message. 
+ * + * RETURNS: N/A + */ void MonitorTaskEntry(void) { diff --git a/samples/microkernel/test/test_sema/src/sema.c b/samples/microkernel/test/test_sema/src/sema.c index ac9edb85574..ecf816f8940 100644 --- a/samples/microkernel/test/test_sema/src/sema.c +++ b/samples/microkernel/test/test_sema/src/sema.c @@ -39,7 +39,7 @@ This modules tests the following semaphore routines: task_sem_reset(), task_sem_give(), task_sem_count_get(), task_sem_take(), task_sem_take_wait(), task_sem_take_wait_timeout(), isr_sem_give(), fiber_sem_give() -*/ + */ #include #include @@ -70,12 +70,12 @@ extern ksem_t blockLpSem; extern ksem_t semList[]; -/******************************************************************************* -* -* simpleSemaTest - signal semaphore that has no waiting tasks from ISR -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * simpleSemaTest - signal semaphore that has no waiting tasks from ISR + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int simpleSemaTest(void) { @@ -163,12 +163,12 @@ int simpleSemaTest(void) return TC_PASS; } -/******************************************************************************* -* -* simpleSemaWaitTest - test the waiting of a semaphore -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * simpleSemaWaitTest - test the waiting of a semaphore + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int simpleSemaWaitTest(void) { @@ -221,12 +221,12 @@ int simpleSemaWaitTest(void) return TC_PASS; } -/******************************************************************************* -* -* simpleGroupTest - test for a group of semaphores -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * simpleGroupTest - test for a group of semaphores + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int simpleGroupTest(void) { @@ -301,15 +301,15 @@ int simpleGroupTest(void) return TC_PASS; } -/******************************************************************************* -* -* simpleGroupWaitTest - test a group of semaphores with waiting -* -* This routine tests the waiting feature on a group of semaphores. Note that -* timing out on a wait has already been tested so it need not be done again. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * simpleGroupWaitTest - test a group of semaphores with waiting + * + * This routine tests the waiting feature on a group of semaphores. Note that + * timing out on a wait has already been tested so it need not be done again. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int simpleGroupWaitTest(void) { @@ -364,20 +364,20 @@ int simpleGroupWaitTest(void) return TC_PASS; } -/******************************************************************************* -* -* simpleFiberSemTest - test semaphore signaling from fiber -* -* Routine starts a fiber and does the following tests: -* - fiber signals the semaphore N times, task checks that task_sem_count_get is N -* - task waits on a semaphore and fiber signals it -* - task waits on a semaphore group and fiber signals each of them once. 
Task -* checks which of the semaphores has been signaled -* -* See also: testFiber.c -* -* RETURNS: TC_PASS on success or TC_FAIL on failure -*/ +/** + * + * simpleFiberSemTest - test semaphore signaling from fiber + * + * Routine starts a fiber and does the following tests: + * - fiber signals the semaphore N times, task checks that task_sem_count_get is N + * - task waits on a semaphore and fiber signals it + * - task waits on a semaphore group and fiber signals each of them once. Task + * checks which of the semaphores has been signaled + * + * See also: testFiber.c + * + * RETURNS: TC_PASS on success or TC_FAIL on failure + */ static int simpleFiberSemTest(void) { int signalCount; @@ -419,12 +419,12 @@ static int simpleFiberSemTest(void) return TC_PASS; } -/******************************************************************************* -* -* HighPriTask - a high priority task -* -* RETURNS: TC_PASS or TC_FAIL -*/ +/** + * + * HighPriTask - a high priority task + * + * RETURNS: TC_PASS or TC_FAIL + */ int HighPriTask(void) { @@ -453,12 +453,12 @@ int HighPriTask(void) } -/******************************************************************************* -* -* LowPriTask - a low priority task -* -* RETURNS: TC_PASS or TC_FAIL -*/ +/** + * + * LowPriTask - a low priority task + * + * RETURNS: TC_PASS or TC_FAIL + */ int LowPriTask(void) { @@ -478,14 +478,14 @@ int LowPriTask(void) return TC_PASS; } -/******************************************************************************* -* -* AlternateTask - alternate task in the test suite -* -* This routine runs at a lower priority than RegressionTask(). -* -* RETURNS: TC_PASS or TC_FAIL -*/ +/** + * + * AlternateTask - alternate task in the test suite + * + * This routine runs at a lower priority than RegressionTask(). + * + * RETURNS: TC_PASS or TC_FAIL + */ int AlternateTask(void) { @@ -557,14 +557,14 @@ int AlternateTask(void) return TC_PASS; } -/******************************************************************************* -* -* RegressionTask - entry point to semaphore test suite -* -* This is the entry point to the semaphore test suite. -* -* RETURNS: TC_PASS or TC_FAIL -*/ +/** + * + * RegressionTask - entry point to semaphore test suite + * + * This is the entry point to the semaphore test suite. + * + * RETURNS: TC_PASS or TC_FAIL + */ int RegressionTask(void) { diff --git a/samples/microkernel/test/test_sema/src/test_fiber.c b/samples/microkernel/test/test_sema/src/test_fiber.c index 6966ba94fc1..2dd00da8c78 100644 --- a/samples/microkernel/test/test_sema/src/test_fiber.c +++ b/samples/microkernel/test/test_sema/src/test_fiber.c @@ -34,7 +34,7 @@ DESCRIPTION The module implements functions for the fiber that tests semaphore signaling -*/ + */ #include #include @@ -55,19 +55,19 @@ static char __stack fiberStack[FIBER_STACK_SIZE]; /* test fiber stack size */ /* array of command packets used by test fiber to signal semaphores */ static CMD_PKT_SET_INSTANCE(cmdPktSet, N_TESTS + 1) -/******************************************************************************* -* -* testFiberEntry - the test fiber entry function -* -* Fiber waits on the semaphore controlled by the test task -* It signals the semaphore, the testing task waits for, -* then it signals the semaphore for N_TASKS times, testing task -* checks this number. -* Then fiber signals each of the semaphores in the group. Test -* task checks this. 
-* -* RETURNS: N/A -*/ +/** + * + * testFiberEntry - the test fiber entry function + * + * Fiber waits on the semaphore controlled by the test task + * It signals the semaphore, the testing task waits for, + * then it signals the semaphore for N_TASKS times, testing task + * checks this number. + * Then fiber signals each of the semaphores in the group. Test + * task checks this. + * + * RETURNS: N/A + */ static void testFiberEntry(void) { int i; @@ -88,12 +88,12 @@ static void testFiberEntry(void) } } -/******************************************************************************* -* -* testFiberInit - initializes variables and starts the test fiber -* -* RETURNS: N/A -*/ +/** + * + * testFiberInit - initializes variables and starts the test fiber + * + * RETURNS: N/A + */ void testFiberInit(void) { diff --git a/samples/microkernel/test/test_sprintf/src/test_sprintf.c b/samples/microkernel/test/test_sprintf/src/test_sprintf.c index a992d0cd8bd..74b5a0ae43c 100644 --- a/samples/microkernel/test/test_sprintf/src/test_sprintf.c +++ b/samples/microkernel/test/test_sprintf/src/test_sprintf.c @@ -33,7 +33,7 @@ /* DESCRIPTION This module contains the code for testing sprintf() functionality. -*/ + */ #include #include @@ -73,12 +73,12 @@ typedef union { } raw_double_u; #ifdef CONFIG_FLOAT -/******************************************************************************* -* -* sprintfDoubleTest - test sprintf with doubles -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * sprintfDoubleTest - test sprintf with doubles + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int sprintfDoubleTest(void) { @@ -229,10 +229,10 @@ int sprintfDoubleTest(void) } #endif /* CONFIG_FLOAT */ -/******************************************************************************* -* -* tvsnprintf - a test wrapper for vsnprintf() -*/ +/** + * + * tvsnprintf - a test wrapper for vsnprintf() + */ int tvsnprintf(char *s, size_t len, const char *format, ...) { @@ -246,17 +246,17 @@ int tvsnprintf(char *s, size_t len, const char *format, ...) return r; } -/******************************************************************************* -* -* vsnprintfTest - test the vsprintf() routine -* -* This routine does not aim to test the same underlying functionality as -* sprintfTest(). Instead it tries to limit it to functionality specific to -* vsnprintf(). Instead of calling vsnprintf() directly, it invokes the wrapper -* routine tvsnprintf(). -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * vsnprintfTest - test the vsprintf() routine + * + * This routine does not aim to test the same underlying functionality as + * sprintfTest(). Instead it tries to limit it to functionality specific to + * vsnprintf(). Instead of calling vsnprintf() directly, it invokes the wrapper + * routine tvsnprintf(). + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int vsnprintfTest(void) { @@ -317,10 +317,10 @@ int vsnprintfTest(void) return status; } -/******************************************************************************* -* -* tvsprintf - a test wrapper for vsprintf() -*/ +/** + * + * tvsprintf - a test wrapper for vsprintf() + */ int tvsprintf(char *s, const char *format, ...) { @@ -334,16 +334,16 @@ int tvsprintf(char *s, const char *format, ...) return r; } -/******************************************************************************* -* -* vsprintfTest - test the vsprintf() routine -* -* This routine does not aim to test the same underlying functionality as -* sprintfTest(). 
Instead it tries to limit it to functionality specific to -* vsprintf(). -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * vsprintfTest - test the vsprintf() routine + * + * This routine does not aim to test the same underlying functionality as + * sprintfTest(). Instead it tries to limit it to functionality specific to + * vsprintf(). + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int vsprintfTest(void) { @@ -368,16 +368,16 @@ int vsprintfTest(void) return status; } -/******************************************************************************* -* -* snprintfTest - test the snprintf() routine -* -* This routine does not aim to test the same underlying functionality as -* sprintfTest(). Instead it tries to limit it to functionality specific to -* snprintf(). -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * snprintfTest - test the snprintf() routine + * + * This routine does not aim to test the same underlying functionality as + * sprintfTest(). Instead it tries to limit it to functionality specific to + * snprintf(). + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int snprintfTest(void) { @@ -438,12 +438,12 @@ int snprintfTest(void) return status; } -/******************************************************************************* -* -* sprintfMiscTest - test the sprintf() routine with miscellaneous specifiers -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * sprintfMiscTest - test the sprintf() routine with miscellaneous specifiers + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int sprintfMiscTest(void) { @@ -514,12 +514,12 @@ int sprintfMiscTest(void) return status; } -/******************************************************************************* -* -* sprintfIntegerTest - test the sprintf() routine with integers -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * sprintfIntegerTest - test the sprintf() routine with integers + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int sprintfIntegerTest(void) { @@ -667,12 +667,12 @@ int sprintfIntegerTest(void) return status; } -/******************************************************************************* -* -* sprintfStringTest - test sprintf with strings -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * sprintfStringTest - test sprintf with strings + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int sprintfStringTest(void) { @@ -713,12 +713,12 @@ int sprintfStringTest(void) return status; } -/******************************************************************************* -* -* RegressionTask - test entry point -* -* RETURNS: N/A -*/ +/** + * + * RegressionTask - test entry point + * + * RETURNS: N/A + */ void RegressionTask(void) { diff --git a/samples/microkernel/test/test_stackprot/src/stackprot.c b/samples/microkernel/test/test_stackprot/src/stackprot.c index 85f9d881928..2e471fc6aac 100644 --- a/samples/microkernel/test/test_stackprot/src/stackprot.c +++ b/samples/microkernel/test/test_stackprot/src/stackprot.c @@ -44,7 +44,7 @@ DESCRIPTION thus will not set tcRC to TC_FAIL. When this alternate task (fiber) terminates, control is returned back to the regression (main) task which prints out a short string couple times. 
-*/ + */ #include @@ -64,17 +64,17 @@ static int tcRC = TC_PASS; void check_input(const char *name, const char *input); -/******************************************************************************* -* -* printLoop -* -* This function calls check_input 6 times with the input name and a short -* string, which is printed properly by check_input. -* -* \param name task or fiber identification string -* -* RETURNS: N/A -*/ +/** + * + * printLoop + * + * This function calls check_input 6 times with the input name and a short + * string, which is printed properly by check_input. + * + * \param name task or fiber identification string + * + * RETURNS: N/A + */ void printLoop(const char *name) { @@ -85,21 +85,21 @@ void printLoop(const char *name) } } -/******************************************************************************* -* -* check_input -* -* This function copies the input string to a buffer of 16 characters and -* prints the name and buffer as a string. If the input string is longer -* than the buffer, an error condition is detected. -* -* When stack protection feature is enabled (see prj.conf file), the -* system error handler is invoked and reports a "Stack Check Fail" error. -* When stack protection feature is not enabled, the system crashes with -* error like: Trying to execute code outside RAM or ROM. -* -* RETURNS: N/A -*/ +/** + * + * check_input + * + * This function copies the input string to a buffer of 16 characters and + * prints the name and buffer as a string. If the input string is longer + * than the buffer, an error condition is detected. + * + * When stack protection feature is enabled (see prj.conf file), the + * system error handler is invoked and reports a "Stack Check Fail" error. + * When stack protection feature is not enabled, the system crashes with + * error like: Trying to execute code outside RAM or ROM. + * + * RETURNS: N/A + */ void check_input(const char *name, const char *input) { @@ -110,18 +110,18 @@ void check_input(const char *name, const char *input) TC_PRINT("%s: %s\n", name, buf); } -/******************************************************************************* -* -* Microkernel: AlternateTask -* Nanokernel: fiber1 -* -* This task/fiber passes a long string to check_input function. It terminates due -* to stack overflow and reports "Stack Check Fail" when stack protection -* feature is enabled. Hence it will not execute the printLoop function and will -* not set tcRC to TC_FAIL. Control is transferred back to the other task. -* -* RETURNS: N/A -*/ +/** + * + * Microkernel: AlternateTask + * Nanokernel: fiber1 + * + * This task/fiber passes a long string to check_input function. It terminates due + * to stack overflow and reports "Stack Check Fail" when stack protection + * feature is enabled. Hence it will not execute the printLoop function and will + * not set tcRC to TC_FAIL. Control is transferred back to the other task. + * + * RETURNS: N/A + */ #ifdef CONFIG_MICROKERNEL void AlternateTask(void) #else @@ -139,17 +139,17 @@ void fiber1(void) tcRC = TC_FAIL; } -/******************************************************************************* -* -* Microkernel: RegressionTask -* Nanokernel: main -* -* This is the entry point to the test stack protection feature. It calls -* printLoop to print a string and alternates execution with AlternateTask -* when the task goes to sleep in printLoop. 
-* -* RETURNS: N/A -*/ +/** + * + * Microkernel: RegressionTask + * Nanokernel: main + * + * This is the entry point to the test stack protection feature. It calls + * printLoop to print a string and alternates execution with AlternateTask + * when the task goes to sleep in printLoop. + * + * RETURNS: N/A + */ #ifdef CONFIG_MICROKERNEL void RegressionTask(void) diff --git a/samples/microkernel/test/test_static_idt/src/static_idt.c b/samples/microkernel/test/test_static_idt/src/static_idt.c index b0c1c61c6a5..37654247afa 100644 --- a/samples/microkernel/test/test_static_idt/src/static_idt.c +++ b/samples/microkernel/test/test_static_idt/src/static_idt.c @@ -33,7 +33,7 @@ /* DESCRIPTION Ensures interrupt and exception stubs are installed correctly. -*/ + */ #include @@ -75,39 +75,39 @@ static char __stack fiberStack[512]; #endif -/******************************************************************************* -* -* isr_handler - handler to perform various actions from within an ISR context -* -* This routine is the ISR handler for _trigger_isrHandler(). -* -* RETURNS: N/A -*/ +/** + * + * isr_handler - handler to perform various actions from within an ISR context + * + * This routine is the ISR handler for _trigger_isrHandler(). + * + * RETURNS: N/A + */ void isr_handler(void) { intHandlerExecuted++; } -/******************************************************************************* -* -* exc_divide_error_handler - -* -* This is the handler for the divde by zero exception. The source of this -* divide-by-zero error comes from the following line in main() ... -* error = error / excHandlerExecuted; -* Where excHandlerExecuted is zero. -* The disassembled code for it looks something like .... -* f7 fb idiv %ecx -* This handler is part of a test that is only interested in detecting the -* error so that we know the exception connect code is working. Therefore, -* a very quick and dirty approach is taken for dealing with the exception; -* we skip the offending instruction by adding 2 to the EIP. (If nothing is -* done, then control goes back to the offending instruction and an infinite -* loop of divide-by-zero errors would be created.) -* -* RETURNS: N/A -*/ +/** + * + * exc_divide_error_handler - + * + * This is the handler for the divde by zero exception. The source of this + * divide-by-zero error comes from the following line in main() ... + * error = error / excHandlerExecuted; + * Where excHandlerExecuted is zero. + * The disassembled code for it looks something like .... + * f7 fb idiv %ecx + * This handler is part of a test that is only interested in detecting the + * error so that we know the exception connect code is working. Therefore, + * a very quick and dirty approach is taken for dealing with the exception; + * we skip the offending instruction by adding 2 to the EIP. (If nothing is + * done, then control goes back to the offending instruction and an infinite + * loop of divide-by-zero errors would be created.) + * + * RETURNS: N/A + */ void exc_divide_error_handler(NANO_ESF *pEsf) { @@ -116,15 +116,15 @@ void exc_divide_error_handler(NANO_ESF *pEsf) } -/******************************************************************************* -* -* nanoIdtStubTest - check the IDT. -* -* This test examines the IDT and verifies that the static interrupt and -* exception stubs are installed at the correct place. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * nanoIdtStubTest - check the IDT. 
+ * + * This test examines the IDT and verifies that the static interrupt and + * exception stubs are installed at the correct place. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int nanoIdtStubTest(void) { @@ -172,12 +172,12 @@ int nanoIdtStubTest(void) return TC_PASS; } -/******************************************************************************* -* -* idtSpurTask/Fiber - task/fiber to test spurious handlers -* -* RETURNS: 0 -*/ +/** + * + * idtSpurTask/Fiber - task/fiber to test spurious handlers + * + * RETURNS: 0 + */ #ifdef CONFIG_MICROKERNEL void idtSpurTask(void) @@ -199,14 +199,14 @@ static void idtSpurFiber(int a1, int a2) } -/******************************************************************************* -* -* idtTestTask/main - entry point to static IDT tests -* -* This is the entry point to the static IDT tests. -* -* RETURNS: N/A -*/ +/** + * + * idtTestTask/main - entry point to static IDT tests + * + * This is the entry point to the static IDT tests. + * + * RETURNS: N/A + */ #ifdef CONFIG_MICROKERNEL void idtTestTask(void) diff --git a/samples/microkernel/test/test_static_idt/src/test_stubs.S b/samples/microkernel/test/test_static_idt/src/test_stubs.S index 4767f0d9100..cec817906ef 100644 --- a/samples/microkernel/test/test_static_idt/src/test_stubs.S +++ b/samples/microkernel/test/test_static_idt/src/test_stubs.S @@ -34,7 +34,7 @@ DESCRIPTION This module implements assembler exception and interrupt stubs for regression testing. -*/ + */ #define _ASMLANGUAGE diff --git a/samples/microkernel/test/test_task/src/task.c b/samples/microkernel/test/test_task/src/task.c index c4c2d770054..efd295d0c5b 100644 --- a/samples/microkernel/test/test_task/src/task.c +++ b/samples/microkernel/test/test_task/src/task.c @@ -36,7 +36,7 @@ This module tests the following task APIs: isr_task_id_get(), isr_task_priority_get(), task_id_get(), task_priority_get(), task_resume(), task_suspend(), task_priority_set(), task_sleep(), task_yield() -*/ + */ #include #include @@ -70,12 +70,12 @@ static int helperData; static volatile int mainTaskNotReady = 0; -/******************************************************************************* -* -* isr_task_command_handler - ISR handler to call isr_task_id_get() and isr_task_priority_get() -* -* RETURNS: N/A -*/ +/** + * + * isr_task_command_handler - ISR handler to call isr_task_id_get() and isr_task_priority_get() + * + * RETURNS: N/A + */ void isr_task_command_handler(void *data) { @@ -95,12 +95,12 @@ void isr_task_command_handler(void *data) pInfo->data = value; } -/******************************************************************************* -* -* isrAPIsTest - test isr_task_id_get() and isr_task_priority_get -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * isrAPIsTest - test isr_task_id_get() and isr_task_priority_get + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int isrAPIsTest(int taskId, int taskPrio) { @@ -123,12 +123,12 @@ int isrAPIsTest(int taskId, int taskPrio) return TC_PASS; } -/******************************************************************************* -* -* taskMacrosTest - test task_id_get() and task_priority_get() macros -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * taskMacrosTest - test task_id_get() and task_priority_get() macros + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int taskMacrosTest(int taskId, int taskPrio) { @@ -151,12 +151,12 @@ int taskMacrosTest(int taskId, int taskPrio) return TC_PASS; } 
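The pattern behind isrAPIsTest() above is: raise a software interrupt, let the handler record the ISR-safe query result in a shared parameter block, then compare it against the task-level value. A rough sketch follows; the isr_cmd_sketch structure and trigger_isr_sketch() are hypothetical stand-ins for the test's real support code.

/* Hypothetical parameter block and trigger; the test's real equivalents
 * are defined in this file and its support code.
 */
struct isr_cmd_sketch {
        int command;    /* 0 = query task id, 1 = query task priority */
        int data;       /* result written back by the ISR handler */
};

static struct isr_cmd_sketch isr_cmd;

extern void trigger_isr_sketch(void);   /* assumed software-interrupt trigger */

/* Handler invoked in ISR context; arg is the parameter registered when
 * the interrupt stub was connected (here: &isr_cmd).
 */
static void isr_cmd_handler_sketch(void *arg)
{
        struct isr_cmd_sketch *info = arg;

        /* Run the ISR-safe query the task asked for. */
        info->data = (info->command == 0) ? (int)isr_task_id_get()
                                          : (int)isr_task_priority_get();
}

static int isr_apis_sketch(int task_id, int task_prio)
{
        isr_cmd.command = 0;
        trigger_isr_sketch();            /* handler fills in isr_cmd.data */
        if (isr_cmd.data != task_id) {
                return TC_FAIL;          /* ISR view must match task view */
        }

        isr_cmd.command = 1;
        trigger_isr_sketch();
        return (isr_cmd.data == task_prio) ? TC_PASS : TC_FAIL;
}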
-/******************************************************************************* -* -* microObjectsInit - initialize objects used in this microkernel test suite -* -* RETURNS: N/A -*/ +/** + * + * microObjectsInit - initialize objects used in this microkernel test suite + * + * RETURNS: N/A + */ void microObjectsInit(void) { @@ -170,12 +170,12 @@ void microObjectsInit(void) TC_PRINT("Microkernel objects initialized\n"); } -/******************************************************************************* -* -* helperTaskSetPrioTest - helper task portion to test setting the priority -* -* RETURNS: N/A -*/ +/** + * + * helperTaskSetPrioTest - helper task portion to test setting the priority + * + * RETURNS: N/A + */ void helperTaskSetPrioTest(void) { @@ -192,12 +192,12 @@ void helperTaskSetPrioTest(void) task_sem_give(RT_SEM); } -/******************************************************************************* -* -* taskSetPrioTest - test the task_priority_set() API -* -* RETURNS: N/A -*/ +/** + * + * taskSetPrioTest - test the task_priority_set() API + * + * RETURNS: N/A + */ int taskSetPrioTest(void) { @@ -266,12 +266,12 @@ int taskSetPrioTest(void) return TC_PASS; } -/******************************************************************************* -* -* helperTaskSleepTest - helper task portion to test task_sleep() -* -* RETURNS: N/A -*/ +/** + * + * helperTaskSleepTest - helper task portion to test task_sleep() + * + * RETURNS: N/A + */ void helperTaskSleepTest(void) { @@ -287,12 +287,12 @@ void helperTaskSleepTest(void) task_sem_give(RT_SEM); } -/******************************************************************************* -* -* taskSleepTest - test task_sleep() -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * taskSleepTest - test task_sleep() + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int taskSleepTest(void) { @@ -318,12 +318,12 @@ int taskSleepTest(void) return TC_PASS; } -/******************************************************************************* -* -* helperTaskYieldTest - helper task portion of task_yield() test -* -* RETURNS: N/A -*/ +/** + * + * helperTaskYieldTest - helper task portion of task_yield() test + * + * RETURNS: N/A + */ void helperTaskYieldTest(void) { @@ -338,12 +338,12 @@ void helperTaskYieldTest(void) task_sem_give(RT_SEM); } -/******************************************************************************* -* -* taskYieldTest - test task_yield() -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * taskYieldTest - test task_yield() + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int taskYieldTest(void) { @@ -376,13 +376,13 @@ int taskYieldTest(void) return TC_PASS; } -/******************************************************************************* -* -* helperTaskSuspendTest - helper task portion of task_suspend() and -* task_resume() tests -* -* RETURNS: N/A -*/ +/** + * + * helperTaskSuspendTest - helper task portion of task_suspend() and + * task_resume() tests + * + * RETURNS: N/A + */ void helperTaskSuspendTest(void) { @@ -391,18 +391,18 @@ void helperTaskSuspendTest(void) task_sem_take_wait(HT_SEM); } -/******************************************************************************* -* -* taskSuspendTest - test task_suspend() and task_resume() -* -* This test suspends the helper task. Once it is suspended, the main task -* (RegressionTask) sleeps for one second. If the helper task is truly -* suspended, it will not execute and modify . 
Once confirmed, -* the helper task is resumed, and the main task sleeps once more. If the -* helper task has truly resumed, it will modify . -* -* RETURNS: TC_PASS on success or TC_FAIL on failure -*/ +/** + * + * taskSuspendTest - test task_suspend() and task_resume() + * + * This test suspends the helper task. Once it is suspended, the main task + * (RegressionTask) sleeps for one second. If the helper task is truly + * suspended, it will not execute and modify . Once confirmed, + * the helper task is resumed, and the main task sleeps once more. If the + * helper task has truly resumed, it will modify . + * + * RETURNS: TC_PASS on success or TC_FAIL on failure + */ int taskSuspendTest(void) { @@ -430,12 +430,12 @@ int taskSuspendTest(void) return TC_PASS; } -/******************************************************************************* -* -* HelperTask - helper task to test the task APIs -* -* RETURNS: N/A -*/ +/** + * + * HelperTask - helper task to test the task APIs + * + * RETURNS: N/A + */ void HelperTask(void) { @@ -466,12 +466,12 @@ void HelperTask(void) helperTaskSuspendTest(); } -/******************************************************************************* -* -* RegressionTask - main task to test the task APIs -* -* RETURNS: N/A -*/ +/** + * + * RegressionTask - main task to test the task APIs + * + * RETURNS: N/A + */ void RegressionTask(void) { diff --git a/samples/microkernel/test/test_task_irq/src/main.c b/samples/microkernel/test/test_task_irq/src/main.c index 1e4fda9f4f0..8dca86180b7 100644 --- a/samples/microkernel/test/test_task_irq/src/main.c +++ b/samples/microkernel/test/test_task_irq/src/main.c @@ -54,15 +54,15 @@ static ksem_t rdySem = SEM_RDY; #define NUM_OBJECTS 4 extern uint32_t irq_vectors[NUM_OBJECTS]; -/******************************************************************************* -* -* taskAMain - entry point for taskA -* -* This routine signals "task done" or "task fail", based on the return code of -* taskA. -* -* RETURNS: N/A -*/ +/** + * + * taskAMain - entry point for taskA + * + * This routine signals "task done" or "task fail", based on the return code of + * taskA. + * + * RETURNS: N/A + */ void taskAMain(void) { @@ -70,15 +70,15 @@ void taskAMain(void) task_sem_give(resultSems[taskA(rdySem)]); } -/******************************************************************************* -* -* taskBMain - entry point for taskB -* -* This routine signals "task done" or "task fail", based on the return code of -* taskB. -* -* RETURNS: N/A -*/ +/** + * + * taskBMain - entry point for taskB + * + * This routine signals "task done" or "task fail", based on the return code of + * taskB. + * + * RETURNS: N/A + */ void taskBMain(void) { @@ -86,16 +86,16 @@ void taskBMain(void) task_sem_give(resultSems[taskB(rdySem)]); } -/******************************************************************************* -* -* registerWait - wait for devices to be registered and generate SW ints -* -* This routine waits for the tasks to indicate the IRQ objects are allocated and -* then generates SW interrupts for all IRQs. Signals "task done" if all task -* indicated the IRQs are allocated or signals "task fail"if not. -* -* RETURNS: N/A -*/ +/** + * + * registerWait - wait for devices to be registered and generate SW ints + * + * This routine waits for the tasks to indicate the IRQ objects are allocated and + * then generates SW interrupts for all IRQs. Signals "task done" if all task + * indicated the IRQs are allocated or signals "task fail"if not. 
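The flow just described for registerWait() can be sketched as follows, assuming the allocating tasks publish their vectors in irq_vectors[] and using a simple task_sleep() poll in place of the real synchronization; the "task fail" timeout path is omitted here.

/* Sketch only: wait until taskA/taskB have filled every irq_vectors[]
 * slot, then raise a software interrupt for each allocated vector.
 */
static void register_wait_sketch(void)
{
        extern void raiseInt(uint8_t id);
        int i;

        /* Assumed synchronization: poll until no slot is INVALID_VECTOR. */
        for (i = 0; i < NUM_OBJECTS; i++) {
                while (irq_vectors[i] == INVALID_VECTOR) {
                        task_sleep(1);   /* let the allocating tasks run */
                }
        }

        /* Generate a SW interrupt for every allocated IRQ object. */
        for (i = 0; i < NUM_OBJECTS; i++) {
                raiseInt((uint8_t)irq_vectors[i]);
        }

        task_sem_give(resultSems[TC_PASS]);   /* signal "task done" */
}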
+ * + * RETURNS: N/A + */ void registerWait(void) { extern void raiseInt(uint8_t id); @@ -122,15 +122,15 @@ void registerWait(void) task_sem_give(resultSems[TC_PASS]); } -/******************************************************************************* -* -* MonitorTaskEntry - entry point for MonitorTask -* -* This routine keeps tabs on the progress of the tasks doing the actual testing -* and generates the final test case summary message. -* -* RETURNS: N/A -*/ +/** + * + * MonitorTaskEntry - entry point for MonitorTask + * + * This routine keeps tabs on the progress of the tasks doing the actual testing + * and generates the final test case summary message. + * + * RETURNS: N/A + */ void MonitorTaskEntry(void) { diff --git a/samples/microkernel/test/test_task_irq/src/raise_int.c b/samples/microkernel/test/test_task_irq/src/raise_int.c index b0451dc8d42..50302252270 100644 --- a/samples/microkernel/test/test_task_irq/src/raise_int.c +++ b/samples/microkernel/test/test_task_irq/src/raise_int.c @@ -1388,7 +1388,7 @@ static void (*intFPtr[256])(void) = { genInt248, genInt249, genInt250,genInt251, genInt252, genInt253, genInt254,genInt255}; -/******************************************************************************* +/** * * raiseInt - generate a software interrupt * @@ -1407,7 +1407,7 @@ void raiseInt(uint8_t id) #if defined(CONFIG_CPU_CORTEX_M3_M4) #include -/******************************************************************************* +/** * * raiseInt - generate a software interrupt * diff --git a/samples/microkernel/test/test_task_irq/src/test_device.c b/samples/microkernel/test/test_task_irq/src/test_device.c index 18b136f97e8..3d2ec35775b 100644 --- a/samples/microkernel/test/test_task_irq/src/test_device.c +++ b/samples/microkernel/test/test_task_irq/src/test_device.c @@ -38,7 +38,7 @@ Each function allocates 2 IRQ objects and then tests for an event associated with the IRQ. The taskA() function also attempts to allocate an IRQ that has already been allocated by another task. The taskB() function also exercises the task_irq_free() API. -*/ + */ #include #include @@ -68,16 +68,16 @@ exercises the task_irq_free() API. #define NUM_OBJECTS 4 uint32_t irq_vectors[NUM_OBJECTS] = {[0 ... (NUM_OBJECTS - 1)] = INVALID_VECTOR}; -/******************************************************************************* -* -* taskA - first of 2 tasks to allocate IRQ objects and check for events -* -* This task allocates 2 IRQ objects with unique IRQs and then tests for an -* interrupt associated with those IRQs. The function then attempts to allocate -* a device that has already been allocate from taskB. -* -* RETURNS: TC_PASS, TC_FAIL -*/ +/** + * + * taskA - first of 2 tasks to allocate IRQ objects and check for events + * + * This task allocates 2 IRQ objects with unique IRQs and then tests for an + * interrupt associated with those IRQs. The function then attempts to allocate + * a device that has already been allocate from taskB. + * + * RETURNS: TC_PASS, TC_FAIL + */ int taskA(ksem_t semRdy) { @@ -140,16 +140,16 @@ int taskA(ksem_t semRdy) return TC_PASS; } -/******************************************************************************* -* -* taskB - second of 2 tasks to allocate IRQ objects and check for events -* -* This task allocates 2 IRQ objects with unique IRQs and then tests for an -* interrupt associated with those IRQs. The function then frees an IRQ object -* using task_irq_free(). 
-* -* RETURNS: TC_PASS, TC_FAIL -*/ +/** + * + * taskB - second of 2 tasks to allocate IRQ objects and check for events + * + * This task allocates 2 IRQ objects with unique IRQs and then tests for an + * interrupt associated with those IRQs. The function then frees an IRQ object + * using task_irq_free(). + * + * RETURNS: TC_PASS, TC_FAIL + */ int taskB(ksem_t semRdy) { diff --git a/samples/microkernel/test/test_tickless/src/test_tickless.c b/samples/microkernel/test/test_tickless/src/test_tickless.c index 4b9c6161b6e..520d08a17f1 100644 --- a/samples/microkernel/test/test_tickless/src/test_tickless.c +++ b/samples/microkernel/test/test_tickless/src/test_tickless.c @@ -33,7 +33,7 @@ /* DESCRIPTION Unit test for tickless idle feature. -*/ + */ #include diff --git a/samples/microkernel/test/test_tickless/src/timestamps.c b/samples/microkernel/test/test_tickless/src/timestamps.c index 1ea9a202cd9..53330782e99 100644 --- a/samples/microkernel/test/test_tickless/src/timestamps.c +++ b/samples/microkernel/test/test_tickless/src/timestamps.c @@ -33,7 +33,7 @@ /* DESCRIPTION BSP-specific timestamp support for the tickless idle test. -*/ + */ #include #include @@ -71,16 +71,16 @@ BSP-specific timestamp support for the tickless idle test. #define _TIMESTAMP_MAX ((uint32_t)0x7FFFFFFF) #define _TIMESTAMP_EXT ((uint32_t)0x80000000) -/******************************************************************************* -* -* _TimestampOpen - timestamp initialization -* -* This routine initializes the timestamp timer. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _TimestampOpen - timestamp initialization + * + * This routine initializes the timestamp timer. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _TimestampOpen(void) { @@ -103,16 +103,16 @@ void _TimestampOpen(void) _TIMESTAMP_CTRL = 0x1; /* enable timer */ } -/******************************************************************************* -* -* _TimestampRead - timestamp timer read -* -* This routine returns the timestamp value. -* -* RETURNS: timestamp value -* -* \NOMANUAL -*/ +/** + * + * _TimestampRead - timestamp timer read + * + * This routine returns the timestamp value. + * + * RETURNS: timestamp value + * + * \NOMANUAL + */ uint32_t _TimestampRead(void) { @@ -139,16 +139,16 @@ uint32_t _TimestampRead(void) return timerVal; } -/******************************************************************************* -* -* _TimestampClose - timestamp release -* -* This routine releases the timestamp timer. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _TimestampClose - timestamp release + * + * This routine releases the timestamp timer. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _TimestampClose(void) { @@ -186,16 +186,16 @@ void _TimestampClose(void) #define _TIMESTAMP_RACCESS *((volatile uint32_t *)(_TIMESTAMP_ADDR + 0x800)) #define _TIMESTAMP_WACCESS *((volatile uint32_t *)(_TIMESTAMP_ADDR + 0x804)) -/******************************************************************************* -* -* _TimestampOpen - timestamp initialization -* -* This routine initializes the timestamp timer. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _TimestampOpen - timestamp initialization + * + * This routine initializes the timestamp timer. 
+ * + * RETURNS: N/A + * + * \NOMANUAL + */ void _TimestampOpen(void) { @@ -223,16 +223,16 @@ void _TimestampOpen(void) _TIMESTAMP_STATUS = 0x10; /* enable counter */ } -/******************************************************************************* -* -* _TimestampRead - timestamp timer read -* -* This routine returns the timestamp value. -* -* RETURNS: timestamp value -* -* \NOMANUAL -*/ +/** + * + * _TimestampRead - timestamp timer read + * + * This routine returns the timestamp value. + * + * RETURNS: timestamp value + * + * \NOMANUAL + */ uint32_t _TimestampRead(void) { @@ -260,16 +260,16 @@ uint32_t _TimestampRead(void) return prescale1; } -/******************************************************************************* -* -* _TimestampClose - timestamp release -* -* This routine releases the timestamp timer. -* -* RETURNS: N/A -* -* \NOMANUAL -*/ +/** + * + * _TimestampClose - timestamp release + * + * This routine releases the timestamp timer. + * + * RETURNS: N/A + * + * \NOMANUAL + */ void _TimestampClose(void) { diff --git a/samples/microkernel/test/test_timer/src/timer.c b/samples/microkernel/test/test_timer/src/timer.c index 24d7f0564e6..40d3bcfb65f 100644 --- a/samples/microkernel/test/test_timer/src/timer.c +++ b/samples/microkernel/test/test_timer/src/timer.c @@ -37,7 +37,7 @@ This module tests the following microkernel timer routines: task_timer_alloc(), task_timer_free() task_timer_start(), task_timer_restart(), task_timer_stop() task_tick_delta(), task_tick_get_32() -*/ + */ #include #include @@ -52,12 +52,12 @@ extern struct nano_lifo _k_timer_free; /* For white box testing only */ static ktimer_t pTimer[NTIMERS + 1]; -/******************************************************************************* -* -* testLowTimerStop - test that task_timer_stop() does stop a timer -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * testLowTimerStop - test that task_timer_stop() does stop a timer + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int testLowTimerStop(void) { @@ -79,12 +79,12 @@ int testLowTimerStop(void) return TC_PASS; } -/******************************************************************************* -* -* testLowTimerPeriodicity - test the periodic feature of a timer -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * testLowTimerPeriodicity - test the periodic feature of a timer + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int testLowTimerPeriodicity(void) { @@ -149,15 +149,15 @@ int testLowTimerPeriodicity(void) return TC_PASS; } -/******************************************************************************* -* -* testLowTimerDoesNotStart - test that the timer does not start -* -* This test checks that the timer does not start under a variety of -* circumstances. -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * testLowTimerDoesNotStart - test that the timer does not start + * + * This test checks that the timer does not start under a variety of + * circumstances. 
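For readers unfamiliar with the old microkernel timer API exercised by this module, a minimal one-shot lifecycle looks roughly like the sketch below. The routine names come from the module description; the task_timer_start() argument order and the TIMER_SEM semaphore are assumptions, not taken from this test.

/* Sketch: allocate a timer, fire it once after 10 ticks, then stop and
 * free it. TIMER_SEM is a hypothetical semaphore from the project file.
 */
static void timer_one_shot_sketch(void)
{
        ktimer_t timer = task_timer_alloc();

        /* duration = 10 ticks, period = 0 -> fire once, then give TIMER_SEM */
        task_timer_start(timer, 10, 0, TIMER_SEM);

        task_sem_take_wait(TIMER_SEM);   /* block until the timer expires */

        task_timer_stop(timer);          /* harmless for an expired one-shot */
        task_timer_free(timer);          /* return the timer to the free pool */
}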
+ * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int testLowTimerDoesNotStart(void) { @@ -188,12 +188,12 @@ int testLowTimerDoesNotStart(void) return TC_PASS; } -/******************************************************************************* -* -* testLowTimerOneShot - test the one shot feature of a timer -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * testLowTimerOneShot - test the one shot feature of a timer + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int testLowTimerOneShot(void) { @@ -236,20 +236,20 @@ int testLowTimerOneShot(void) return TC_PASS; } -/******************************************************************************* -* -* testLowTimerGet - test the task_timer_alloc() API -* -* This routine allocates all the timers in the system using task_timer_alloc(). -* It verifies that all the allocated timers have unique IDs before freeing -* them using task_timer_free(). -* -* This routine also does some partial testing of task_timer_free(). That is, -* it checks that timers that have been freed are available to be allocated -* again at a later time. -* -* RETURNS: TC_PASS on success, TC_FAIL otherwise -*/ +/** + * + * testLowTimerGet - test the task_timer_alloc() API + * + * This routine allocates all the timers in the system using task_timer_alloc(). + * It verifies that all the allocated timers have unique IDs before freeing + * them using task_timer_free(). + * + * This routine also does some partial testing of task_timer_free(). That is, + * it checks that timers that have been freed are available to be allocated + * again at a later time. + * + * RETURNS: TC_PASS on success, TC_FAIL otherwise + */ int testLowTimerGet(void) { @@ -317,12 +317,12 @@ static void test_nano_timers(int unused1, int unused2) /* on failure, don't give semaphore, main test will not obtain it */ } -/******************************************************************************* -* -* RegressionTaskEntry - regression test's entry point -* -* RETURNS: N/A -*/ +/** + * + * RegressionTaskEntry - regression test's entry point + * + * RETURNS: N/A + */ void RegressionTaskEntry(void) { diff --git a/samples/microkernel/test/test_xip/src/test.h b/samples/microkernel/test/test_xip/src/test.h index 9fea8cea5de..e04bb61a3f2 100644 --- a/samples/microkernel/test/test_xip/src/test.h +++ b/samples/microkernel/test/test_xip/src/test.h @@ -34,7 +34,7 @@ DESCRIPTION This header contains defines, externs etc. for the XIP regression test. -*/ + */ /* This test relies on these values being one larger than the one before */ diff --git a/samples/microkernel/test/test_xip/src/test_xip.c b/samples/microkernel/test/test_xip/src/test_xip.c index a4f0814e48f..570025f9973 100644 --- a/samples/microkernel/test/test_xip/src/test_xip.c +++ b/samples/microkernel/test/test_xip/src/test_xip.c @@ -36,7 +36,7 @@ This module tests that XIP performs as expected. If the first task is even activated that is a good indication that XIP is working. However, the test does do some some testing on global variables for completeness sake. -*/ + */ #include #include @@ -44,23 +44,23 @@ global variables for completeness sake. #if defined(CONFIG_NANOKERNEL) -/******************************************************************************* -* -* main - main task entry point -* -* Entry point for nanokernel only builds. -* -* RETURNS: N/A -*/ +/** + * + * main - main task entry point + * + * Entry point for nanokernel only builds. 
+ * + * RETURNS: N/A + */ void main(void) #else -/******************************************************************************* -* -* RegressionTaskEntry - regression test's entry point -* -* RETURNS: N/A -*/ +/** + * + * RegressionTaskEntry - regression test's entry point + * + * RETURNS: N/A + */ void RegressionTaskEntry(void) #endif diff --git a/samples/microkernel/test/test_xip/src/test_xip_helper.c b/samples/microkernel/test/test_xip/src/test_xip_helper.c index 83c8b151ce5..21c49111014 100644 --- a/samples/microkernel/test/test_xip/src/test_xip_helper.c +++ b/samples/microkernel/test/test_xip/src/test_xip_helper.c @@ -34,7 +34,7 @@ DESCRIPTION This module contains support code for the XIP regression test. -*/ + */ #include #include "test.h" diff --git a/samples/nanokernel/benchmark/footprint/src/nanokernel_footprint.c b/samples/nanokernel/benchmark/footprint/src/nanokernel_footprint.c index 9f12c5f48ef..2b594cbbb4a 100644 --- a/samples/nanokernel/benchmark/footprint/src/nanokernel_footprint.c +++ b/samples/nanokernel/benchmark/footprint/src/nanokernel_footprint.c @@ -103,7 +103,7 @@ volatile pfunc func_array[] = { #endif /* TEST_max */ }; -/******************************************************************************* +/** * * dummyIsr - dummy ISR * @@ -116,7 +116,7 @@ void dummyIsr(void *unused) } #ifdef TEST_reg -/******************************************************************************* +/** * * isrDummyIntStub - static interrupt stub that invokes dummy ISR * @@ -137,7 +137,7 @@ static void isrDummyIntStub(void *unused) } #endif /* TEST_reg */ -/******************************************************************************* +/** * * fiberEntry - trivial fiber * @@ -160,7 +160,7 @@ static void fiberEntry(int message, int arg1) #endif /* !TEST_min */ -/******************************************************************************* +/** * * main - mainline for background task * diff --git a/samples/nanokernel/benchmark/sys_kernel/src/lifo.c b/samples/nanokernel/benchmark/sys_kernel/src/lifo.c index 8033d978e9a..ab39d413476 100644 --- a/samples/nanokernel/benchmark/sys_kernel/src/lifo.c +++ b/samples/nanokernel/benchmark/sys_kernel/src/lifo.c @@ -37,7 +37,7 @@ struct nano_lifo nanoLifo2; static struct nano_fifo nanoFifo_sync; /* for synchronization */ -/******************************************************************************* +/** * * lifo_test_init - initialize LIFOs for the test * @@ -53,7 +53,7 @@ void lifo_test_init(void) } -/******************************************************************************* +/** * * lifo_fiber1 - lifo test context * @@ -93,7 +93,7 @@ void lifo_fiber1(int par1, int par2) } -/******************************************************************************* +/** * * lifo_fiber2 - lifo test context * @@ -125,7 +125,7 @@ void lifo_fiber2(int par1, int par2) nano_fiber_fifo_get_wait(&nanoFifo_sync); } -/******************************************************************************* +/** * * lifo_fiber3 - lifo test context * @@ -159,7 +159,7 @@ void lifo_fiber3(int par1, int par2) nano_fiber_fifo_get_wait(&nanoFifo_sync); } -/******************************************************************************* +/** * * lifo_test - the main test entry * diff --git a/samples/nanokernel/benchmark/sys_kernel/src/mwfifo.c b/samples/nanokernel/benchmark/sys_kernel/src/mwfifo.c index 11a70499149..799da022ec7 100644 --- a/samples/nanokernel/benchmark/sys_kernel/src/mwfifo.c +++ b/samples/nanokernel/benchmark/sys_kernel/src/mwfifo.c @@ -38,7 +38,7 @@ struct 
nano_fifo nanoFifo2; static struct nano_fifo nanoFifo_sync; /* for synchronization */ -/******************************************************************************* +/** * * fifo_test_init - initialize FIFOs for the test * @@ -54,7 +54,7 @@ void fifo_test_init(void) } -/******************************************************************************* +/** * * fifo_fiber1 - fifo test context * @@ -86,7 +86,7 @@ void fifo_fiber1(int par1, int par2) } -/******************************************************************************* +/** * * fifo_fiber2 - fifo test context * @@ -119,7 +119,7 @@ void fifo_fiber2(int par1, int par2) } -/******************************************************************************* +/** * * fifo_fiber3 - fifo test context * @@ -154,7 +154,7 @@ void fifo_fiber3(int par1, int par2) } -/******************************************************************************* +/** * * fifo_test - the main test entry * diff --git a/samples/nanokernel/benchmark/sys_kernel/src/sema.c b/samples/nanokernel/benchmark/sys_kernel/src/sema.c index 3eb7f54a171..12bf14d808a 100644 --- a/samples/nanokernel/benchmark/sys_kernel/src/sema.c +++ b/samples/nanokernel/benchmark/sys_kernel/src/sema.c @@ -35,7 +35,7 @@ struct nano_sem nanoSem1; struct nano_sem nanoSem2; -/******************************************************************************* +/** * * sema_test_init - initialize semaphores for the test * @@ -51,7 +51,7 @@ void sema_test_init(void) } -/******************************************************************************* +/** * * sema_fiber1 - semaphore test context * @@ -76,7 +76,7 @@ void sema_fiber1(int par1, int par2) } -/******************************************************************************* +/** * * sema_fiber2 - semaphore test context * @@ -100,7 +100,7 @@ void sema_fiber2(int par1, int par2) } } -/******************************************************************************* +/** * * sema_fiber3 - semaphore test context * @@ -127,7 +127,7 @@ void sema_fiber3(int par1, int par2) } -/******************************************************************************* +/** * * sema_test - the main test entry * diff --git a/samples/nanokernel/benchmark/sys_kernel/src/stack.c b/samples/nanokernel/benchmark/sys_kernel/src/stack.c index b3b8eac5005..0386bc80571 100644 --- a/samples/nanokernel/benchmark/sys_kernel/src/stack.c +++ b/samples/nanokernel/benchmark/sys_kernel/src/stack.c @@ -38,7 +38,7 @@ struct nano_stack nanoChannel2; uint32_t stack1[2]; uint32_t stack2[2]; -/******************************************************************************* +/** * * stack_test_init - initialize stacks for the test * @@ -54,7 +54,7 @@ void stack_test_init(void) } -/******************************************************************************* +/** * * stack_fiber1 - stack test context * @@ -90,7 +90,7 @@ void stack_fiber1(int par1, int par2) } -/******************************************************************************* +/** * * stack_fiber2 - stack test context * @@ -120,7 +120,7 @@ void stack_fiber2(int par1, int par2) } -/******************************************************************************* +/** * * stack_fiber2 - stack test context * @@ -153,7 +153,7 @@ void stack_fiber3(int par1, int par2) } -/******************************************************************************* +/** * * stack_test - the main test entry * diff --git a/samples/nanokernel/benchmark/sys_kernel/src/syskernel.c b/samples/nanokernel/benchmark/sys_kernel/src/syskernel.c index 
b65796c94ca..a17b0d30bb0 100644 --- a/samples/nanokernel/benchmark/sys_kernel/src/syskernel.c +++ b/samples/nanokernel/benchmark/sys_kernel/src/syskernel.c @@ -76,7 +76,7 @@ const char sz_case_timing_fmt[] = "%ld nSec"; /* time necessary to read the time */ uint32_t tm_off; -/******************************************************************************* +/** * * begin_test - get the time ticks before test starts * @@ -95,7 +95,7 @@ void begin_test(void) bench_test_start(); } -/******************************************************************************* +/** * * check_result - checks number of tests and calculate average time * @@ -138,7 +138,7 @@ int check_result(int i, uint32_t t) } -/******************************************************************************* +/** * * kbhit - check for a key press * @@ -153,7 +153,7 @@ int kbhit(void) } -/******************************************************************************* +/** * * init_output - prepares the test output * @@ -175,7 +175,7 @@ void init_output(int *continuously) } -/******************************************************************************* +/** * * output_close - close output for the test * @@ -188,7 +188,7 @@ void output_close(void) { } -/******************************************************************************* +/** * * SysKernelBench - perform all selected benchmarks * diff --git a/samples/nanokernel/test/test_arm_m3_irq_vector_table/src/main.c b/samples/nanokernel/test/test_arm_m3_irq_vector_table/src/main.c index e582d01dc2a..bc30b73e9b4 100644 --- a/samples/nanokernel/test/test_arm_m3_irq_vector_table/src/main.c +++ b/samples/nanokernel/test/test_arm_m3_irq_vector_table/src/main.c @@ -37,7 +37,7 @@ release a semaphore. The task then verifies it can obtain all three semaphores. The ISRs are installed at build time, directly in the vector table. -*/ + */ #if !defined(CONFIG_CPU_CORTEX_M3_M4) #error project can only run on Cortex-M3/M4 @@ -49,12 +49,12 @@ The ISRs are installed at build time, directly in the vector table. 
struct nano_sem sem[3]; -/******************************************************************************* -* -* isr0 - ISR for IRQ0 -* -* RETURNS: N/A -*/ +/** + * + * isr0 - ISR for IRQ0 + * + * RETURNS: N/A + */ void isr0(void) { @@ -63,12 +63,12 @@ void isr0(void) _IntExit(); } -/******************************************************************************* -* -* isr1 - ISR for IRQ1 -* -* RETURNS: N/A -*/ +/** + * + * isr1 - ISR for IRQ1 + * + * RETURNS: N/A + */ void isr1(void) { @@ -77,12 +77,12 @@ void isr1(void) _IntExit(); } -/******************************************************************************* -* -* isr2 - ISR for IRQ2 -* -* RETURNS: N/A -*/ +/** + * + * isr2 - ISR for IRQ2 + * + * RETURNS: N/A + */ void isr2(void) { @@ -91,12 +91,12 @@ void isr2(void) _IntExit(); } -/******************************************************************************* -* -* main - task entry point -* -* RETURNS: N/A -*/ +/** + * + * main - task entry point + * + * RETURNS: N/A + */ void main(void) { diff --git a/samples/nanokernel/test/test_context/src/context.c b/samples/nanokernel/test/test_context/src/context.c index 7ffb9f13bf3..8b1c6478a81 100644 --- a/samples/nanokernel/test/test_context/src/context.c +++ b/samples/nanokernel/test/test_context/src/context.c @@ -39,7 +39,7 @@ This module tests the following CPU and context related routines: irq_lock_inline(), irq_unlock_inline(), irq_connect(), nanoCpuExcConnect(), irq_enable(), irq_disable(), -*/ + */ #include #include @@ -117,15 +117,15 @@ static ISR_INFO isrInfo; static void (*_trigger_isrHandler)(void) = (vvfn)sw_isr_trigger_0; -/******************************************************************************* -* -* isr_handler - handler to perform various actions from within an ISR context -* -* This routine is the ISR handler for _trigger_isrHandler(). It performs -* the command requested in . -* -* RETURNS: N/A -*/ +/** + * + * isr_handler - handler to perform various actions from within an ISR context + * + * This routine is the ISR handler for _trigger_isrHandler(). It performs + * the command requested in . + * + * RETURNS: N/A + */ void isr_handler(void *data) { @@ -148,19 +148,19 @@ void isr_handler(void *data) /* Cortex-M3/M4 does not implement connecting non-IRQ exception handlers */ #if !defined(CONFIG_CPU_CORTEX_M3_M4) -/******************************************************************************* -* -* exc_divide_error_handler - divide by zero exception handler -* -* This handler is part of a test that is only interested in detecting the -* error so that we know the exception connect code is working. It simply -* adds 2 to the EIP to skip over the offending instruction: -* f7 f9 idiv %ecx -* thereby preventing the infinite loop of divide-by-zero errors which would -* arise if control simply returns to that instruction. -* -* RETURNS: N/A -*/ +/** + * + * exc_divide_error_handler - divide by zero exception handler + * + * This handler is part of a test that is only interested in detecting the + * error so that we know the exception connect code is working. It simply + * adds 2 to the EIP to skip over the offending instruction: + * f7 f9 idiv %ecx + * thereby preventing the infinite loop of divide-by-zero errors which would + * arise if control simply returns to that instruction. 
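The skip-the-instruction trick described above amounts to a two-line handler. A sketch, assuming the exception stack frame exposes the saved instruction pointer as an eip field (the real NANO_ESF layout is defined by the x86 port):

/* Sketch of the handler body: note that the fault happened, then advance
 * the saved instruction pointer past the 2-byte 'idiv %ecx' so execution
 * resumes after the faulting instruction instead of re-faulting forever.
 */
static volatile int div_fault_seen;   /* stand-in for the test's own flag */

void exc_divide_error_handler_sketch(NANO_ESF *pEsf)
{
        div_fault_seen = 1;
        pEsf->eip += 2;   /* skip over the offending instruction */
}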
+ * + * RETURNS: N/A + */ void exc_divide_error_handler(NANO_ESF *pEsf) { @@ -169,14 +169,14 @@ void exc_divide_error_handler(NANO_ESF *pEsf) } #endif -/******************************************************************************* -* -* initNanoObjects - initialize nanokernel objects -* -* This routine initializes the nanokernel objects used in this module's tests. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * initNanoObjects - initialize nanokernel objects + * + * This routine initializes the nanokernel objects used in this module's tests. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int initNanoObjects(void) { @@ -197,17 +197,17 @@ int initNanoObjects(void) return initIRQ(&i) < 0 ? TC_FAIL : TC_PASS; } -/******************************************************************************* -* -* nano_cpu_idleTest - test the nano_cpu_idle() routine -* -* This tests the nano_cpu_idle() routine. The first thing it does is align to -* a tick boundary. The only source of interrupts while the test is running is -* expected to be the tick clock timer which should wake the CPU. Thus after -* each call to nano_cpu_idle(), the tick count should be one higher. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * nano_cpu_idleTest - test the nano_cpu_idle() routine + * + * This tests the nano_cpu_idle() routine. The first thing it does is align to + * a tick boundary. The only source of interrupts while the test is running is + * expected to be the tick clock timer which should wake the CPU. Thus after + * each call to nano_cpu_idle(), the tick count should be one higher. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int nano_cpu_idleTest(void) { @@ -231,12 +231,12 @@ int nano_cpu_idleTest(void) return TC_PASS; } -/******************************************************************************* -* -* irq_lockWrapper - a wrapper for irq_lock() -* -* RETURNS: irq_lock() return value -*/ +/** + * + * irq_lockWrapper - a wrapper for irq_lock() + * + * RETURNS: irq_lock() return value + */ int irq_lockWrapper(int unused) { @@ -245,24 +245,24 @@ int irq_lockWrapper(int unused) return irq_lock(); } -/******************************************************************************* -* -* irq_unlockWrapper - a wrapper for irq_unlock() -* -* RETURNS: N/A -*/ +/** + * + * irq_unlockWrapper - a wrapper for irq_unlock() + * + * RETURNS: N/A + */ void irq_unlockWrapper(int imask) { irq_unlock(imask); } -/******************************************************************************* -* -* irq_lock_inlineWrapper - a wrapper for irq_lock_inline() -* -* RETURNS: irq_lock_inline() return value -*/ +/** + * + * irq_lock_inlineWrapper - a wrapper for irq_lock_inline() + * + * RETURNS: irq_lock_inline() return value + */ int irq_lock_inlineWrapper(int unused) { @@ -271,24 +271,24 @@ int irq_lock_inlineWrapper(int unused) return irq_lock_inline(); } -/******************************************************************************* -* -* irq_unlock_inlineWrapper - a wrapper for irq_unlock_inline() -* -* RETURNS: N/A -*/ +/** + * + * irq_unlock_inlineWrapper - a wrapper for irq_unlock_inline() + * + * RETURNS: N/A + */ void irq_unlock_inlineWrapper(int imask) { irq_unlock_inline(imask); } -/******************************************************************************* -* -* irq_disableWrapper - a wrapper for irq_disable() -* -* RETURNS: -*/ +/** + * + * irq_disableWrapper - a wrapper for irq_disable() + * + * RETURNS: + */ int irq_disableWrapper(int irq) { 
@@ -296,28 +296,28 @@ int irq_disableWrapper(int irq) return irq; } -/******************************************************************************* -* -* irq_enableWrapper - a wrapper for irq_enable() -* -* RETURNS: N/A -*/ +/** + * + * irq_enableWrapper - a wrapper for irq_enable() + * + * RETURNS: N/A + */ void irq_enableWrapper(int irq) { irq_enable(irq); } -/******************************************************************************* -* -* nanoCpuDisableInterruptsTest - test routines for disabling and enabling ints -* -* This routine tests the routines for disabling and enabling interrupts. These -* include irq_lock() and irq_unlock(), irq_lock_inline() and -* irq_unlock_inline(), irq_disable() and irq_enable(). -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * nanoCpuDisableInterruptsTest - test routines for disabling and enabling ints + * + * This routine tests the routines for disabling and enabling interrupts. These + * include irq_lock() and irq_unlock(), irq_lock_inline() and + * irq_unlock_inline(), irq_disable() and irq_enable(). + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int nanoCpuDisableInterruptsTest(disable_interrupt_func disableRtn, enable_interrupt_func enableRtn, int irq) @@ -374,16 +374,16 @@ int nanoCpuDisableInterruptsTest(disable_interrupt_func disableRtn, return (tick == nano_tick_get_32()) ? TC_FAIL : TC_PASS; } -/******************************************************************************* -* -* nanoCtxTaskTest - test the various nanoCtxXXX() routines from a task -* -* This routines tests the context_self_get() and context_type_get() routines from both -* a task and an ISR (that interrupted a task). Checking those routines with -* fibers are done elsewhere. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * nanoCtxTaskTest - test the various nanoCtxXXX() routines from a task + * + * This routines tests the context_self_get() and context_type_get() routines from both + * a task and an ISR (that interrupted a task). Checking those routines with + * fibers are done elsewhere. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int nanoCtxTaskTest(void) { @@ -418,22 +418,22 @@ int nanoCtxTaskTest(void) return TC_PASS; } -/******************************************************************************* -* -* nanoCtxFiberTest - test the various nanoCtxXXX() routines from a fiber -* -* This routines tests the context_self_get() and context_type_get() routines from both -* a fiber and an ISR (that interrupted a fiber). Checking those routines with -* tasks are done elsewhere. -* -* This routine may set to the following values: -* 1 - if fiber context ID matches that of the task -* 2 - if context ID taken during ISR does not match that of the fiber -* 3 - context_type_get() when called from an ISR is not NANO_TYPE_ISR -* 3 - context_type_get() when called from a fiber is not NANO_TYPE_FIBER -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * nanoCtxFiberTest - test the various nanoCtxXXX() routines from a fiber + * + * This routines tests the context_self_get() and context_type_get() routines from both + * a fiber and an ISR (that interrupted a fiber). Checking those routines with + * tasks are done elsewhere. 
+ * + * This routine may set to the following values: + * 1 - if fiber context ID matches that of the task + * 2 - if context ID taken during ISR does not match that of the fiber + * 3 - context_type_get() when called from an ISR is not NANO_TYPE_ISR + * 3 - context_type_get() when called from a fiber is not NANO_TYPE_FIBER + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int nanoCtxFiberTest(nano_context_id_t taskCtxId) { @@ -473,18 +473,18 @@ int nanoCtxFiberTest(nano_context_id_t taskCtxId) return TC_PASS; } -/******************************************************************************* -* -* fiberHelper - entry point to the fiber's helper -* -* This routine is the entry point to the fiber's helper fiber. It is used to -* help test the behaviour of the fiber_yield() routine. -* -* \param arg1 unused -* \param arg2 unused -* -* RETURNS: N/A -*/ +/** + * + * fiberHelper - entry point to the fiber's helper + * + * This routine is the entry point to the fiber's helper fiber. It is used to + * help test the behaviour of the fiber_yield() routine. + * + * \param arg1 unused + * \param arg2 unused + * + * RETURNS: N/A + */ static void fiberHelper(int arg1, int arg2) { @@ -510,23 +510,23 @@ static void fiberHelper(int arg1, int arg2) } -/******************************************************************************* -* -* fiber_yieldTest - test the fiber_yield() routine -* -* This routine tests the fiber_yield() routine. It starts another fiber -* (thus also testing fiber_fiber_start()) and checks that behaviour of -* fiber_yield() against the cases of there being a higher priority fiber, -* a lower priority fiber, and another fiber of equal priority. -* -* On error, it may set to one of the following values: -* 10 - helper fiber ran prematurely -* 11 - fiber_yield() did not yield to a higher priority fiber -* 12 - fiber_yield() did not yield to an equal prioirty fiber -* 13 - fiber_yield() yielded to a lower priority fiber -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * fiber_yieldTest - test the fiber_yield() routine + * + * This routine tests the fiber_yield() routine. It starts another fiber + * (thus also testing fiber_fiber_start()) and checks that behaviour of + * fiber_yield() against the cases of there being a higher priority fiber, + * a lower priority fiber, and another fiber of equal priority. + * + * On error, it may set to one of the following values: + * 10 - helper fiber ran prematurely + * 11 - fiber_yield() did not yield to a higher priority fiber + * 12 - fiber_yield() did not yield to an equal prioirty fiber + * 13 - fiber_yield() yielded to a lower priority fiber + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int fiber_yieldTest(void) { @@ -592,17 +592,17 @@ int fiber_yieldTest(void) return TC_PASS; } -/******************************************************************************* -* -* fiberEntry - entry point to fiber started by the task -* -* This routine is the entry point to the fiber started by the task. -* -* \param taskCtxId context ID of the spawning task -* \param arg1 unused -* -* RETURNS: N/A -*/ +/** + * + * fiberEntry - entry point to fiber started by the task + * + * This routine is the entry point to the fiber started by the task. 
+ * + * \param taskCtxId context ID of the spawning task + * \param arg1 unused + * + * RETURNS: N/A + */ static void fiberEntry(int taskCtxId, int arg1) { @@ -815,14 +815,14 @@ static int test_timeout(void) return TC_PASS; } -/******************************************************************************* -* -* main - entry point to timer tests -* -* This is the entry point to the CPU and context tests. -* -* RETURNS: N/A -*/ +/** + * + * main - entry point to timer tests + * + * This is the entry point to the CPU and context tests. + * + * RETURNS: N/A + */ void main(void) { diff --git a/samples/nanokernel/test/test_fifo/src/fifo.c b/samples/nanokernel/test/test_fifo/src/fifo.c index 3ad97642489..57cd93557fc 100644 --- a/samples/nanokernel/test/test_fifo/src/fifo.c +++ b/samples/nanokernel/test/test_fifo/src/fifo.c @@ -63,7 +63,7 @@ in ISR context. Scenario #4: Timeout scenarios with multiple FIFOs and fibers. -*/ + */ #include #include @@ -144,17 +144,17 @@ void testTaskFifoGetW(void); extern int test_fifo_timeout(void); -/******************************************************************************* -* -* isr_fifo_put - add an item to a FIFO -* -* This routine is the ISR handler for _trigger_nano_isr_fifo_put(). It adds -* an item to the FIFO in the context of an ISR. -* -* \param parameter pointer to ISR handler parameter -* -* RETURNS: N/A -*/ +/** + * + * isr_fifo_put - add an item to a FIFO + * + * This routine is the ISR handler for _trigger_nano_isr_fifo_put(). It adds + * an item to the FIFO in the context of an ISR. + * + * \param parameter pointer to ISR handler parameter + * + * RETURNS: N/A + */ void isr_fifo_put(void *parameter) { @@ -163,17 +163,17 @@ void isr_fifo_put(void *parameter) nano_isr_fifo_put(pInfo->channel, pInfo->data); } -/******************************************************************************* -* -* isr_fifo_get - get an item from a FIFO -* -* This routine is the ISR handler for _trigger_nano_isr_fifo_get(). It gets -* an item from the FIFO in the context of an ISR. -* -* \param parameter pointer to ISR handler parameter -* -* RETURNS: N/A -*/ +/** + * + * isr_fifo_get - get an item from a FIFO + * + * This routine is the ISR handler for _trigger_nano_isr_fifo_get(). It gets + * an item from the FIFO in the context of an ISR. + * + * \param parameter pointer to ISR handler parameter + * + * RETURNS: N/A + */ void isr_fifo_get(void *parameter) { @@ -183,12 +183,12 @@ void isr_fifo_get(void *parameter) } -/******************************************************************************* -* -* fiber1 - entry point for the first fiber -* -* RETURNS: N/A -*/ +/** + * + * fiber1 - entry point for the first fiber + * + * RETURNS: N/A + */ void fiber1(void) { @@ -252,15 +252,15 @@ void fiber1(void) } /* fiber1 */ -/******************************************************************************* -* -* testFiberFifoGetW - test the nano_fiber_fifo_get_wait() interface -* -* This function tests the fifo put and get wait interfaces in the fiber context. -* It gets data from nanoFifoObj2 queue and puts data to nanoFifoObj queue. -* -* RETURNS: N/A -*/ +/** + * + * testFiberFifoGetW - test the nano_fiber_fifo_get_wait() interface + * + * This function tests the fifo put and get wait interfaces in the fiber context. + * It gets data from nanoFifoObj2 queue and puts data to nanoFifoObj queue. 
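The fiber/task pairing described here, and mirrored by testTaskFifoGetW() further down, is easiest to see side by side. A sketch, assuming nano_fiber_fifo_put() and nano_task_fifo_put() as the obvious counterparts of the get-wait calls named in the comments, and using the two FIFOs the comments refer to:

/* Fiber side (sketch): consume from nanoFifoObj2, produce into nanoFifoObj. */
static void fiber_fifo_sketch(void)
{
        void *item = nano_fiber_fifo_get_wait(&nanoFifoObj2); /* blocks until the task puts */

        nano_fiber_fifo_put(&nanoFifoObj, item);              /* assumed put counterpart */
}

/* Task side (sketch): produce into nanoFifoObj2, wait for the reply. */
static void task_fifo_sketch(void *item)
{
        nano_task_fifo_put(&nanoFifoObj2, item);              /* assumed put counterpart */

        (void)nano_task_fifo_get_wait(&nanoFifoObj);          /* blocks until the fiber replies */
}

The blocking get-wait calls are what force the alternation between the fiber and the task that the test relies on.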
+ * + * RETURNS: N/A + */ void testFiberFifoGetW(void) { @@ -299,17 +299,17 @@ void testFiberFifoGetW(void) } /* testFiberFifoGetW */ -/******************************************************************************* -* -* testIsrFifoFromFiber - test ISR FIFO routines (triggered from fiber) -* -* This function tests the fifo put and get interfaces in the isr context. -* It is invoked from a fiber. -* -* We use nanoFifoObj queue to put and get data. -* -* RETURNS: N/A -*/ +/** + * + * testIsrFifoFromFiber - test ISR FIFO routines (triggered from fiber) + * + * This function tests the fifo put and get interfaces in the isr context. + * It is invoked from a fiber. + * + * We use nanoFifoObj queue to put and get data. + * + * RETURNS: N/A + */ void testIsrFifoFromFiber(void) { @@ -353,17 +353,17 @@ void testIsrFifoFromFiber(void) } /* testIsrFifoFromFiber */ -/******************************************************************************* -* -* testIsrFifoFromTask - test ISR FIFO routines (triggered from task) -* -* This function tests the fifo put and get interfaces in the isr context. -* It is invoked from a task. -* -* We use nanoFifoObj queue to put and get data. -* -* RETURNS: N/A -*/ +/** + * + * testIsrFifoFromTask - test ISR FIFO routines (triggered from task) + * + * This function tests the fifo put and get interfaces in the isr context. + * It is invoked from a task. + * + * We use nanoFifoObj queue to put and get data. + * + * RETURNS: N/A + */ void testIsrFifoFromTask(void) { @@ -416,12 +416,12 @@ void testIsrFifoFromTask(void) TC_END_RESULT(retCode); } /* testIsrFifoFromTask */ -/******************************************************************************* -* -* fiber2 - entry point for the second fiber -* -* RETURNS: N/A -*/ +/** + * + * fiber2 - entry point for the second fiber + * + * RETURNS: N/A + */ void fiber2(void) { @@ -472,12 +472,12 @@ void fiber2(void) TC_END_RESULT(retCode); } /* fiber2 */ -/******************************************************************************* -* -* fiber3 - entry point for the third fiber -* -* RETURNS: N/A -*/ +/** + * + * fiber3 - entry point for the third fiber + * + * RETURNS: N/A + */ void fiber3(void) { @@ -518,15 +518,15 @@ void fiber3(void) } -/******************************************************************************* -* -* testTaskFifoGetW - test the nano_task_fifo_get_wait() interface -* -* This is in the task context. It puts data to nanoFifoObj2 queue and gets -* data from nanoFifoObj queue. -* -* RETURNS: N/A -*/ +/** + * + * testTaskFifoGetW - test the nano_task_fifo_get_wait() interface + * + * This is in the task context. It puts data to nanoFifoObj2 queue and gets + * data from nanoFifoObj queue. + * + * RETURNS: N/A + */ void testTaskFifoGetW(void) { @@ -558,14 +558,14 @@ void testTaskFifoGetW(void) TC_END_RESULT(retCode); } /* testTaskFifoGetW */ -/******************************************************************************* -* -* initNanoObjects - initialize nanokernel objects -* -* This routine initializes the nanokernel objects used in the FIFO tests. -* -* RETURNS: N/A -*/ +/** + * + * initNanoObjects - initialize nanokernel objects + * + * This routine initializes the nanokernel objects used in the FIFO tests. + * + * RETURNS: N/A + */ void initNanoObjects(void) { @@ -588,14 +588,14 @@ void initNanoObjects(void) } /* initNanoObjects */ -/******************************************************************************* -* -* main - entry point to FIFO tests -* -* This is the entry point to the FIFO tests. 
-* -* RETURNS: N/A -*/ +/** + * + * main - entry point to FIFO tests + * + * This is the entry point to the FIFO tests. + * + * RETURNS: N/A + */ void main(void) { diff --git a/samples/nanokernel/test/test_lifo/src/lifo.c b/samples/nanokernel/test/test_lifo/src/lifo.c index 6d5cf514b70..100141ab51e 100644 --- a/samples/nanokernel/test/test_lifo/src/lifo.c +++ b/samples/nanokernel/test/test_lifo/src/lifo.c @@ -54,7 +54,7 @@ Scenario #4: Timeout scenarios with multiple LIFOs and fibers. These scenarios will be tested using a combinations of tasks, fibers and ISRs. -*/ + */ #include #include @@ -105,17 +105,17 @@ static void (*_trigger_nano_isr_lifo_get)(void) = (vvfn)sw_isr_trigger_1; static struct nano_lifo multi_waiters; static struct nano_sem reply_multi_waiters; -/******************************************************************************* -* -* isr_lifo_put - add an item to a LIFO -* -* This routine is the ISR handler for _trigger_nano_isr_lifo_put(). It adds -* an item to the LIFO in the context of an ISR. -* -* \param data pointer to ISR handler parameter -* -* RETURNS: N/A -*/ +/** + * + * isr_lifo_put - add an item to a LIFO + * + * This routine is the ISR handler for _trigger_nano_isr_lifo_put(). It adds + * an item to the LIFO in the context of an ISR. + * + * \param data pointer to ISR handler parameter + * + * RETURNS: N/A + */ void isr_lifo_put(void *data) { @@ -124,17 +124,17 @@ void isr_lifo_put(void *data) nano_isr_lifo_put(pInfo->channel, pInfo->data); } -/******************************************************************************* -* -* isr_lifo_get - get an item from a LIFO -* -* This routine is the ISR handler for _trigger_nano_isr_lifo_get(). It gets -* an item from the LIFO in the context of an ISR. -* -* \param data pointer to ISR handler parameter -* -* RETURNS: N/A -*/ +/** + * + * isr_lifo_get - get an item from a LIFO + * + * This routine is the ISR handler for _trigger_nano_isr_lifo_get(). It gets + * an item from the LIFO in the context of an ISR. + * + * \param data pointer to ISR handler parameter + * + * RETURNS: N/A + */ void isr_lifo_get(void *data) { @@ -143,16 +143,16 @@ void isr_lifo_get(void *data) pInfo->data = nano_isr_lifo_get(pInfo->channel); } -/******************************************************************************* -* -* fiberLifoWaitTest - fiber portion of test that waits on a LIFO -* -* This routine works with taskLifoWaitTest() to test the addition and removal -* of items to/from a LIFO. The cases covered will have a fiber or task waiting -* on an empty LIFO. -* -* RETURNS: 0 on success, -1 on failure -*/ +/** + * + * fiberLifoWaitTest - fiber portion of test that waits on a LIFO + * + * This routine works with taskLifoWaitTest() to test the addition and removal + * of items to/from a LIFO. The cases covered will have a fiber or task waiting + * on an empty LIFO. + * + * RETURNS: 0 on success, -1 on failure + */ int fiberLifoWaitTest(void) { @@ -204,15 +204,15 @@ int fiberLifoWaitTest(void) return 0; } -/******************************************************************************* -* -* fiberLifoNonWaitTest - fiber portion of test that does not wait on a LIFO -* -* This routine works with fiberLifoNonWaitTest() to test the addition and -* removal of items from a LIFO without having to wait. 
-* -* RETURNS: 0 on success, -1 on failure -*/ +/** + * + * fiberLifoNonWaitTest - fiber portion of test that does not wait on a LIFO + * + * This routine works with fiberLifoNonWaitTest() to test the addition and + * removal of items from a LIFO without having to wait. + * + * RETURNS: 0 on success, -1 on failure + */ int fiberLifoNonWaitTest(void) { @@ -282,18 +282,18 @@ errorReturn: return -1; } -/******************************************************************************* -* -* fiberEntry - entry point for the fiber portion of the LIFO tests -* -* NOTE: The fiber portion of the tests have higher priority than the task -* portion of the tests. -* -* \param arg1 unused -* \param arg2 unused -* -* RETURNS: N/A -*/ +/** + * + * fiberEntry - entry point for the fiber portion of the LIFO tests + * + * NOTE: The fiber portion of the tests have higher priority than the task + * portion of the tests. + * + * \param arg1 unused + * \param arg2 unused + * + * RETURNS: N/A + */ static void fiberEntry(int arg1, int arg2) { @@ -310,16 +310,16 @@ static void fiberEntry(int arg1, int arg2) } -/******************************************************************************* -* -* taskLifoWaitTest - task portion of test that waits on a LIFO -* -* This routine works with fiberLifoWaitTest() to test the addition and removal -* of items to/from a LIFO. The cases covered will have a fiber or task waiting -* on an empty LIFO. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * taskLifoWaitTest - task portion of test that waits on a LIFO + * + * This routine works with fiberLifoWaitTest() to test the addition and removal + * of items to/from a LIFO. The cases covered will have a fiber or task waiting + * on an empty LIFO. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int taskLifoWaitTest(void) { @@ -368,15 +368,15 @@ int taskLifoWaitTest(void) return TC_PASS; } -/******************************************************************************* -* -* taskLifoNonWaitTest - task portion of test that does not wait on a LIFO -* -* This routine works with fiberLifoNonWaitTest() to test the addition and -* removal of items from a LIFO without having to wait. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * taskLifoNonWaitTest - task portion of test that does not wait on a LIFO + * + * This routine works with fiberLifoNonWaitTest() to test the addition and + * removal of items from a LIFO without having to wait. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int taskLifoNonWaitTest(void) { @@ -446,14 +446,14 @@ int taskLifoNonWaitTest(void) return TC_PASS; } -/******************************************************************************* -* -* initNanoObjects - initialize nanokernel objects -* -* This routine initializes the nanokernel objects used in the LIFO tests. -* -* RETURNS: N/A -*/ +/** + * + * initNanoObjects - initialize nanokernel objects + * + * This routine initializes the nanokernel objects used in the LIFO tests. 
+ * + * RETURNS: N/A + */ void initNanoObjects(void) { @@ -492,12 +492,12 @@ static LIFO_ITEM multi_waiters_items[NUM_WAITERS] = { [0 ...(NUM_WAITERS-1)].data = 0xabad1dea, }; -/******************************************************************************* -* -* fiber_multi_waiters - fiber entry point for multiple-waiters test -* -* RETURNS: N/A -*/ +/** + * + * fiber_multi_waiters - fiber entry point for multiple-waiters test + * + * RETURNS: N/A + */ static void fiber_multi_waiters(int arg1, int arg2) { @@ -518,12 +518,12 @@ static void fiber_multi_waiters(int arg1, int arg2) nano_fiber_sem_give(&reply_multi_waiters); } -/******************************************************************************* -* -* do_test_multiple_waiters - task part of multiple-waiter test, repeatable -* -* RETURNS: N/A -*/ +/** + * + * do_test_multiple_waiters - task part of multiple-waiter test, repeatable + * + * RETURNS: N/A + */ static int do_test_multiple_waiters(void) { @@ -559,12 +559,12 @@ static int do_test_multiple_waiters(void) return TC_PASS; } -/******************************************************************************* -* -* test_multiple_waiters - entry point for multiple-waiters test -* -* RETURNS: N/A -*/ +/** + * + * test_multiple_waiters - entry point for multiple-waiters test + * + * RETURNS: N/A + */ static int test_multiple_waiters(void) { @@ -1049,14 +1049,14 @@ static int test_timeout(void) return TC_PASS; } -/******************************************************************************* -* -* main - entry point to LIFO tests -* -* This is the entry point to the LIFO tests. -* -* RETURNS: N/A -*/ +/** + * + * main - entry point to LIFO tests + * + * This is the entry point to the LIFO tests. + * + * RETURNS: N/A + */ void main(void) { diff --git a/samples/nanokernel/test/test_sema/src/sema.c b/samples/nanokernel/test/test_sema/src/sema.c index 660745722ea..121a7ea7fd2 100644 --- a/samples/nanokernel/test/test_sema/src/sema.c +++ b/samples/nanokernel/test/test_sema/src/sema.c @@ -52,7 +52,7 @@ Scenario #3: Scenario #4: Timeout scenarios with multiple semaphores and fibers. -*/ + */ #include #include @@ -95,17 +95,17 @@ static void (*_trigger_nano_isr_sem_take)(void) = (vvfn)sw_isr_trigger_1; static struct nano_sem multi_waiters; static struct nano_sem reply_multi_waiters; -/******************************************************************************* -* -* isr_sem_take - take a semaphore -* -* This routine is the ISR handler for _trigger_nano_isr_sem_take(). It takes a -* semaphore within the context of an ISR. -* -* \param data pointer to ISR handler parameter -* -* RETURNS: N/A -*/ +/** + * + * isr_sem_take - take a semaphore + * + * This routine is the ISR handler for _trigger_nano_isr_sem_take(). It takes a + * semaphore within the context of an ISR. + * + * \param data pointer to ISR handler parameter + * + * RETURNS: N/A + */ void isr_sem_take(void *data) { @@ -114,17 +114,17 @@ void isr_sem_take(void *data) pInfo->data = nano_isr_sem_take(pInfo->sem); } -/******************************************************************************* -* -* isr_sem_give - give a semaphore -* -* This routine is the ISR handler for _trigger_nano_isr_sem_take(). It gives a -* semaphore within the context of an ISR. -* -* \param data pointer to ISR handler parameter -* -* RETURNS: N/A -*/ +/** + * + * isr_sem_give - give a semaphore + * + * This routine is the ISR handler for _trigger_nano_isr_sem_take(). It gives a + * semaphore within the context of an ISR. 
+ * + * \param data pointer to ISR handler parameter + * + * RETURNS: N/A + */ void isr_sem_give(void *data) { @@ -134,15 +134,15 @@ void isr_sem_give(void *data) pInfo->data = 1; /* Indicate semaphore has been given */ } -/******************************************************************************* -* -* testSemFiberNoWait - give and take the semaphore in a fiber without blocking -* -* This test gives and takes the test semaphore in the context of a fiber -* without blocking on the semaphore. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * testSemFiberNoWait - give and take the semaphore in a fiber without blocking + * + * This test gives and takes the test semaphore in the context of a fiber + * without blocking on the semaphore. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int testSemFiberNoWait(void) { @@ -178,18 +178,18 @@ errorReturn: return TC_FAIL; } -/******************************************************************************* -* -* fiberEntry - entry point for the fiber portion of the semaphore tests -* -* NOTE: The fiber portion of the tests have higher priority than the task -* portion of the tests. -* -* \param arg1 unused -* \param arg2 unused -* -* RETURNS: N/A -*/ +/** + * + * fiberEntry - entry point for the fiber portion of the semaphore tests + * + * NOTE: The fiber portion of the tests have higher priority than the task + * portion of the tests. + * + * \param arg1 unused + * \param arg2 unused + * + * RETURNS: N/A + */ static void fiberEntry(int arg1, int arg2) { @@ -254,14 +254,14 @@ static void fiberEntry(int arg1, int arg2) } } -/******************************************************************************* -* -* initNanoObjects - initialize nanokernel objects -* -* This routine initializes the nanokernel objects used in the semaphore tests. -* -* RETURNS: N/A -*/ +/** + * + * initNanoObjects - initialize nanokernel objects + * + * This routine initializes the nanokernel objects used in the semaphore tests. + * + * RETURNS: N/A + */ void initNanoObjects(void) { @@ -280,15 +280,15 @@ void initNanoObjects(void) TC_PRINT("Nano objects initialized\n"); } -/******************************************************************************* -* -* testSemIsrNoWait - give and take the semaphore in an ISR without blocking -* -* This test gives and takes the test semaphore in the context of an ISR without -* blocking on the semaphore. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * testSemIsrNoWait - give and take the semaphore in an ISR without blocking + * + * This test gives and takes the test semaphore in the context of an ISR without + * blocking on the semaphore. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int testSemIsrNoWait(void) { @@ -327,15 +327,15 @@ errorReturn: return TC_FAIL; } -/******************************************************************************* -* -* testSemTaskNoWait - give and take the semaphore in a task without blocking -* -* This test gives and takes the test semaphore in the context of a task without -* blocking on the semaphore. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * testSemTaskNoWait - give and take the semaphore in a task without blocking + * + * This test gives and takes the test semaphore in the context of a task without + * blocking on the semaphore. 
+ * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int testSemTaskNoWait(void) { @@ -370,15 +370,15 @@ errorReturn: return TC_FAIL; } -/******************************************************************************* -* -* testSemWait - perform tests that wait on a semaphore -* -* This routine works with fiberEntry() to perform the tests that wait on -* a semaphore. -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * testSemWait - perform tests that wait on a semaphore + * + * This routine works with fiberEntry() to perform the tests that wait on + * a semaphore. + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int testSemWait(void) { @@ -428,12 +428,12 @@ int testSemWait(void) #define NUM_WAITERS 3 static char __stack fiber_multi_waiters_stacks[NUM_WAITERS][FIBER_STACKSIZE]; -/******************************************************************************* -* -* fiber_multi_waiters - fiber entry point for multiple-waiters test -* -* RETURNS: N/A -*/ +/** + * + * fiber_multi_waiters - fiber entry point for multiple-waiters test + * + * RETURNS: N/A + */ static void fiber_multi_waiters(int arg1, int arg2) { @@ -444,12 +444,12 @@ static void fiber_multi_waiters(int arg1, int arg2) nano_fiber_sem_give(&reply_multi_waiters); } -/******************************************************************************* -* -* do_test_multiple_waiters - task part of multiple-waiter test, repeatable -* -* RETURNS: N/A -*/ +/** + * + * do_test_multiple_waiters - task part of multiple-waiter test, repeatable + * + * RETURNS: N/A + */ static int do_test_multiple_waiters(void) { @@ -490,12 +490,12 @@ static int do_test_multiple_waiters(void) return TC_PASS; } -/******************************************************************************* -* -* test_multiple_waiters - entry point for multiple-waiters test -* -* RETURNS: N/A -*/ +/** + * + * test_multiple_waiters - entry point for multiple-waiters test + * + * RETURNS: N/A + */ static int test_multiple_waiters(void) { @@ -934,14 +934,14 @@ static int test_timeout(void) return TC_PASS; } -/******************************************************************************* -* -* main - entry point to semaphore tests -* -* This is the entry point to the semaphore tests. -* -* RETURNS: N/A -*/ +/** + * + * main - entry point to semaphore tests + * + * This is the entry point to the semaphore tests. + * + * RETURNS: N/A + */ void main(void) { diff --git a/samples/nanokernel/test/test_stack/src/stack.c b/samples/nanokernel/test/test_stack/src/stack.c index 802d33b02d8..cf5bc5b9922 100644 --- a/samples/nanokernel/test/test_stack/src/stack.c +++ b/samples/nanokernel/test/test_stack/src/stack.c @@ -58,7 +58,7 @@ is returned back to function testTaskStackPopW which also finished it's executio and returned to main. Finally function testIsrStackFromTask is run and it popped all data from queue1, push and pop one last item to the queue. All these are run in ISR context. -*/ + */ #include #include @@ -126,14 +126,14 @@ void testIsrStackFromFiber(void); void testIsrStackFromTask(void); -/******************************************************************************* -* -* initData -* -* Initialize myData and myIsrData arrays. -* -* RETURNS: none -*/ +/** + * + * initData + * + * Initialize myData and myIsrData arrays. 
+ * + * RETURNS: none + */ void initData(void) { @@ -143,17 +143,17 @@ void initData(void) } } /* initData */ -/******************************************************************************* -* -* isr_stack_push - add an item to a STACK -* -* This routine is the ISR handler for _trigger_nano_isr_stack_push(). It adds -* an item to the STACK in the context of an ISR. -* -* \param parameter pointer to ISR handler parameter -* -* RETURNS: N/A -*/ +/** + * + * isr_stack_push - add an item to a STACK + * + * This routine is the ISR handler for _trigger_nano_isr_stack_push(). It adds + * an item to the STACK in the context of an ISR. + * + * \param parameter pointer to ISR handler parameter + * + * RETURNS: N/A + */ void isr_stack_push(void *parameter) { @@ -163,18 +163,18 @@ void isr_stack_push(void *parameter) } /* isr_stack_push */ -/******************************************************************************* -* -* isr_stack_pop - get an item from a STACK -* -* This routine is the ISR handler for _trigger_nano_isr_stack_pop(). It gets -* an item from the STACK in the context of an ISR. If the queue is empty, -* it sets data to INVALID_DATA. -* -* \param parameter pointer to ISR handler parameter -* -* RETURNS: N/A -*/ +/** + * + * isr_stack_pop - get an item from a STACK + * + * This routine is the ISR handler for _trigger_nano_isr_stack_pop(). It gets + * an item from the STACK in the context of an ISR. If the queue is empty, + * it sets data to INVALID_DATA. + * + * \param parameter pointer to ISR handler parameter + * + * RETURNS: N/A + */ void isr_stack_pop(void *parameter) { @@ -188,16 +188,16 @@ void isr_stack_pop(void *parameter) } /* isr_stack_pop */ -/******************************************************************************* -* -* fiber1 -* -* This is the fiber started from the main task. Gets all items from -* the STACK queue and puts four items back to the STACK queue. Control is -* transferred back to the main task. -* -* RETURNS: N/A -*/ +/** + * + * fiber1 + * + * This is the fiber started from the main task. Gets all items from + * the STACK queue and puts four items back to the STACK queue. Control is + * transferred back to the main task. + * + * RETURNS: N/A + */ void fiber1(void) { @@ -236,15 +236,15 @@ void fiber1(void) -/******************************************************************************* -* -* testFiberStackPopW -* -* This function tests the stack push and pop wait interfaces in the fiber context. -* It gets data from nanoStackObj2 queue and puts data to nanoStackObj queue. -* -* RETURNS: N/A -*/ +/** + * + * testFiberStackPopW + * + * This function tests the stack push and pop wait interfaces in the fiber context. + * It gets data from nanoStackObj2 queue and puts data to nanoStackObj queue. + * + * RETURNS: N/A + */ void testFiberStackPopW(void) { @@ -281,17 +281,17 @@ void testFiberStackPopW(void) } /* testFiberStackPopW */ -/******************************************************************************* -* -* testIsrStackFromFiber -* -* This function tests the stack push and pop interfaces in the isr context. -* It is invoked from a fiber. -* -* We use nanoStackObj queue to push and pop data. -* -* RETURNS: N/A -*/ +/** + * + * testIsrStackFromFiber + * + * This function tests the stack push and pop interfaces in the isr context. + * It is invoked from a fiber. + * + * We use nanoStackObj queue to push and pop data. 
+ * + * RETURNS: N/A + */ void testIsrStackFromFiber(void) { @@ -337,17 +337,17 @@ void testIsrStackFromFiber(void) } /* testIsrStackFromFiber */ -/******************************************************************************* -* -* testIsrStackFromTask -* -* This function tests the stack push and pop interfaces in the isr context. -* It is invoked from a task. -* -* We use nanoStackObj queue to push and pop data. -* -* RETURNS: N/A -*/ +/** + * + * testIsrStackFromTask + * + * This function tests the stack push and pop interfaces in the isr context. + * It is invoked from a task. + * + * We use nanoStackObj queue to push and pop data. + * + * RETURNS: N/A + */ void testIsrStackFromTask(void) { @@ -395,14 +395,14 @@ void testIsrStackFromTask(void) TC_END_RESULT(retCode); } -/******************************************************************************* -* -* fiber2 -* -* This is the fiber started from the testTaskStackPopW function. -* -* RETURNS: N/A -*/ +/** + * + * fiber2 + * + * This is the fiber started from the testTaskStackPopW function. + * + * RETURNS: N/A + */ void fiber2(void) { @@ -414,15 +414,15 @@ void fiber2(void) } -/******************************************************************************* -* -* testTaskStackPopW -* -* This is in the task context. It puts data to nanoStackObj2 queue and gets -* data from nanoStackObj queue. -* -* RETURNS: N/A -*/ +/** + * + * testTaskStackPopW + * + * This is in the task context. It puts data to nanoStackObj2 queue and gets + * data from nanoStackObj queue. + * + * RETURNS: N/A + */ void testTaskStackPopW(void) { @@ -454,15 +454,15 @@ void testTaskStackPopW(void) TC_END_RESULT(retCode); } /* testTaskStackPopW */ -/******************************************************************************* -* -* fiber3 - a fiber to help test nano_task_stack_pop_wait() -* -* This fiber blocks for one second before pushing an item onto the stack. -* The main task, which was waiting for item from the stack then unblocks. -* -* RETURNS: N/A -*/ +/** + * + * fiber3 - a fiber to help test nano_task_stack_pop_wait() + * + * This fiber blocks for one second before pushing an item onto the stack. + * The main task, which was waiting for item from the stack then unblocks. + * + * RETURNS: N/A + */ void fiber3(void) { @@ -471,14 +471,14 @@ void fiber3(void) nano_fiber_stack_push(&nanoStackObj, myData[0]); } -/******************************************************************************* -* -* initNanoObjects - initialize nanokernel objects -* -* This routine initializes the nanokernel objects used in the STACK tests. -* -* RETURNS: N/A -*/ +/** + * + * initNanoObjects - initialize nanokernel objects + * + * This routine initializes the nanokernel objects used in the STACK tests. + * + * RETURNS: N/A + */ void initNanoObjects(void) { @@ -495,14 +495,14 @@ void initNanoObjects(void) nano_timer_init(&timer, timerData); } /* initNanoObjects */ -/******************************************************************************* -* -* main - entry point to STACK tests -* -* This is the entry point to the STACK tests. -* -* RETURNS: N/A -*/ +/** + * + * main - entry point to STACK tests + * + * This is the entry point to the STACK tests. 
+ * + * RETURNS: N/A + */ void main(void) { diff --git a/samples/nanokernel/test/test_timer/src/timer.c b/samples/nanokernel/test/test_timer/src/timer.c index 9fe6d7c86b5..5196b86e1dd 100644 --- a/samples/nanokernel/test/test_timer/src/timer.c +++ b/samples/nanokernel/test/test_timer/src/timer.c @@ -37,7 +37,7 @@ This module tests the following timer related routines: nano_fiber_timer_test(), nano_fiber_timer_wait(), nano_task_timer_start(), nano_task_timer_stop(), nano_task_timer_test(), nano_task_timer_wait(), nano_time_init(), nano_tick_get_32(), nano_cycle_get_32(), nano_tick_delta() -*/ + */ #include #include @@ -77,14 +77,14 @@ static int fiberDetectedError = 0; static char __stack fiberStack[FIBER_STACKSIZE]; static char __stack fiber2Stack[FIBER2_STACKSIZE]; -/******************************************************************************* -* -* initNanoObjects - initialize nanokernel objects -* -* This routine initializes the nanokernel objects used in the LIFO tests. -* -* RETURNS: N/A -*/ +/** + * + * initNanoObjects - initialize nanokernel objects + * + * This routine initializes the nanokernel objects used in the LIFO tests. + * + * RETURNS: N/A + */ void initNanoObjects(void) { @@ -96,29 +96,29 @@ void initNanoObjects(void) nano_sem_init(&wakeFiber); } -/******************************************************************************* -* -* basicTimerWait - basic checking of time spent waiting upon a timer -* -* This routine can be called from a task or a fiber to wait upon a timer. -* It will busy wait until the current tick ends, at which point it will -* start and then wait upon a timer. The length of time it spent waiting -* gets cross-checked with the nano_tick_get_32() and nanoTimeElapsed() APIs. -* All three are expected to match up, but a tolerance of one (1) tick is -* considered acceptable. -* -* This routine can be considered as testing nano_tick_get_32(), -* nanoTimeElapsed() and nanoXXXTimerGetW() successful expiration cases. -* -* \param startRtn routine to start the timer -* \param waitRtn routine to get and wait for the timer -* \param getRtn routine to get the timer (no waiting) -* \param pTimer pointer to the timer -* \param pTimerData pointer to the expected timer data -* \param ticks number of ticks to wait -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * basicTimerWait - basic checking of time spent waiting upon a timer + * + * This routine can be called from a task or a fiber to wait upon a timer. + * It will busy wait until the current tick ends, at which point it will + * start and then wait upon a timer. The length of time it spent waiting + * gets cross-checked with the nano_tick_get_32() and nanoTimeElapsed() APIs. + * All three are expected to match up, but a tolerance of one (1) tick is + * considered acceptable. + * + * This routine can be considered as testing nano_tick_get_32(), + * nanoTimeElapsed() and nanoXXXTimerGetW() successful expiration cases. 
+ * + * \param startRtn routine to start the timer + * \param waitRtn routine to get and wait for the timer + * \param getRtn routine to get the timer (no waiting) + * \param pTimer pointer to the timer + * \param pTimerData pointer to the expected timer data + * \param ticks number of ticks to wait + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int basicTimerWait(timer_start_func startRtn, timer_getw_func waitRtn, timer_get_func getRtn, struct nano_timer *pTimer, @@ -181,22 +181,22 @@ int basicTimerWait(timer_start_func startRtn, timer_getw_func waitRtn, return TC_PASS; } -/******************************************************************************* -* -* startTimers - start four timers -* -* This routine starts four timers. -* The first () is added to an empty list of timers. -* The second () is added to the end of the list of timers. -* The third () is added to the head of the list of timers. -* The fourth () is added to the middle of the list of timers. -* -* Four timers are used so that the various paths can be tested. -* -* \param startRtn routine to start the timers -* -* RETURNS: N/A -*/ +/** + * + * startTimers - start four timers + * + * This routine starts four timers. + * The first () is added to an empty list of timers. + * The second () is added to the end of the list of timers. + * The third () is added to the head of the list of timers. + * The fourth () is added to the middle of the list of timers. + * + * Four timers are used so that the various paths can be tested. + * + * \param startRtn routine to start the timers + * + * RETURNS: N/A + */ void startTimers(timer_start_func startRtn) { @@ -213,19 +213,19 @@ void startTimers(timer_start_func startRtn) startRtn(&midTimer, MID_TIMEOUT); } -/******************************************************************************* -* -* busyWaitTimers - busy wait while checking timers expire in the correct order -* -* This routine checks that the four timers created using startTimers() finish -* in the correct order. It busy waits on all four timers waiting until they -* expire. The timers are expected to expire in the following order: -* , , , -* -* \param getRtn timer get routine (fiber or task) -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * busyWaitTimers - busy wait while checking timers expire in the correct order + * + * This routine checks that the four timers created using startTimers() finish + * in the correct order. It busy waits on all four timers waiting until they + * expire. The timers are expected to expire in the following order: + * , , , + * + * \param getRtn timer get routine (fiber or task) + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int busyWaitTimers(timer_get_func getRtn) { @@ -281,21 +281,21 @@ int busyWaitTimers(timer_get_func getRtn) return (nano_tick_get_32() < ticks) ? TC_PASS : TC_FAIL; } -/******************************************************************************* -* -* stopTimers - stop the four timers and make sure they did not expire -* -* This routine stops the four started timers and then checks the timers for -* six seconds to make sure that they did not fire. The four timers will be -* stopped in the reverse order in which they were started. Doing so will -* exercise the code that removes timers from important locations in the list; -* these include the middle, the head, the tail, and the last item. 
-* -* \param stopRtn routine to stop timer (fiber or task) -* \param getRtn timer get routine (fiber or task) -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * stopTimers - stop the four timers and make sure they did not expire + * + * This routine stops the four started timers and then checks the timers for + * six seconds to make sure that they did not fire. The four timers will be + * stopped in the reverse order in which they were started. Doing so will + * exercise the code that removes timers from important locations in the list; + * these include the middle, the head, the tail, and the last item. + * + * \param stopRtn routine to stop timer (fiber or task) + * \param getRtn timer get routine (fiber or task) + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int stopTimers(timer_stop_func stopRtn, timer_get_func getRtn) { @@ -325,18 +325,18 @@ int stopTimers(timer_stop_func stopRtn, timer_get_func getRtn) return TC_PASS; } -/******************************************************************************* -* -* fiber2Entry - entry point for the second fiber -* -* The second fiber has a lower priority than the first, but is still given -* precedence over the task. -* -* \param arg1 unused -* \param arg2 unused -* -* RETURNS: N/A -*/ +/** + * + * fiber2Entry - entry point for the second fiber + * + * The second fiber has a lower priority than the first, but is still given + * precedence over the task. + * + * \param arg1 unused + * \param arg2 unused + * + * RETURNS: N/A + */ static void fiber2Entry(int arg1, int arg2) { @@ -346,18 +346,18 @@ static void fiber2Entry(int arg1, int arg2) nano_fiber_timer_stop(&timer); } -/******************************************************************************* -* -* fiberEntry - entry point for the fiber portion of the timer tests -* -* NOTE: The fiber portion of the tests have higher priority than the task -* portion of the tests. -* -* \param arg1 unused -* \param arg2 unused -* -* RETURNS: N/A -*/ +/** + * + * fiberEntry - entry point for the fiber portion of the timer tests + * + * NOTE: The fiber portion of the tests have higher priority than the task + * portion of the tests. + * + * \param arg1 unused + * \param arg2 unused + * + * RETURNS: N/A + */ static void fiberEntry(int arg1, int arg2) { @@ -427,12 +427,12 @@ static void fiberEntry(int arg1, int arg2) nano_fiber_sem_give(&wakeTask); } -/******************************************************************************* -* -* nano_cycle_get_32Test - test the nano_cycle_get_32() API -* -* RETURNS: TC_PASS on success, TC_FAIL on failure -*/ +/** + * + * nano_cycle_get_32Test - test the nano_cycle_get_32() API + * + * RETURNS: TC_PASS on success, TC_FAIL on failure + */ int nano_cycle_get_32Test(void) { @@ -454,14 +454,14 @@ int nano_cycle_get_32Test(void) return TC_PASS; } -/******************************************************************************* -* -* main - entry point to timer tests -* -* This is the entry point to the timer tests. -* -* RETURNS: N/A -*/ +/** + * + * main - entry point to timer tests + * + * This is the entry point to the timer tests. 
+ * + * RETURNS: N/A + */ void main(void) { diff --git a/samples/network/listener/src/listener.c b/samples/network/listener/src/listener.c index 42ade68806d..aeccee6a791 100644 --- a/samples/network/listener/src/listener.c +++ b/samples/network/listener/src/listener.c @@ -91,12 +91,12 @@ static struct net_context *get_context(const struct net_addr *addr) } /* -* -* \param taskname task identification string -* \param mySem task's own semaphore -* \param otherSem other task's semaphore -* -*/ + * + * \param taskname task identification string + * \param mySem task's own semaphore + * \param otherSem other task's semaphore + * + */ void helloLoop(const char *taskname, ksem_t mySem, ksem_t otherSem) { static struct in6_addr in6addr_my = TASK_IPADDR; /* aaaa::2 */ diff --git a/samples/network/test_15_4/src/network.c b/samples/network/test_15_4/src/network.c index 67fa6be692e..a19b4759e6f 100644 --- a/samples/network/test_15_4/src/network.c +++ b/samples/network/test_15_4/src/network.c @@ -168,12 +168,12 @@ static struct net_context *get_context(const struct net_addr *remote, #define TASK_IPADDR { { { 0xaa,0xaa,0,0,0,0,0,0,0,0,0,0,0,0,0,0x2 } } } /* -* -* \param taskname task identification string -* \param mySem task's own semaphore -* \param otherSem other task's semaphore -* -*/ + * + * \param taskname task identification string + * \param mySem task's own semaphore + * \param otherSem other task's semaphore + * + */ static void listen(const char *taskname, ksem_t mySem, ksem_t otherSem, struct net_context *ctx) { diff --git a/shared/include/nanokernel/x86/idtEnt.h b/shared/include/nanokernel/x86/idtEnt.h index 15a82ec7cb8..04fa8cf0849 100644 --- a/shared/include/nanokernel/x86/idtEnt.h +++ b/shared/include/nanokernel/x86/idtEnt.h @@ -34,7 +34,7 @@ DESCRIPTION This header file provides code for constructing an IA-32 interrupt descriptor. -*/ + */ #ifndef _IDTENT_H #define _IDTENT_H @@ -79,27 +79,27 @@ typedef struct idtEntry { unsigned short hiOffset; } __packed IDT_ENTRY; -/******************************************************************************* -* -* _IdtEntCreate - Create an IDT entry -* -* This routine creates an interrupt-gate descriptor at the location defined by -* . The entry is created such that is invoked when an -* interrupt vector is asserted. The argument specifies the privilege -* level for the interrupt-gate descriptor; (hardware) interrupts and exceptions -* should specify a level of 0, whereas handlers for user-mode software generated -* interrupts should specify 3. -* -* RETURNS: N/A -* -* INTERNAL -* This is a shared routine between the IA-32 nanokernel runtime code and the -* genIdt host tool code. It is done this way to keep the two sides in sync. -* -* The runtime passes a pointer directly to the IDT entry to update whereas the -* host side simply passes a pointer to a local variable. -* -*/ +/** + * + * _IdtEntCreate - Create an IDT entry + * + * This routine creates an interrupt-gate descriptor at the location defined by + * . The entry is created such that is invoked when an + * interrupt vector is asserted. The argument specifies the privilege + * level for the interrupt-gate descriptor; (hardware) interrupts and exceptions + * should specify a level of 0, whereas handlers for user-mode software generated + * interrupts should specify 3. + * + * RETURNS: N/A + * + * INTERNAL + * This is a shared routine between the IA-32 nanokernel runtime code and the + * genIdt host tool code. It is done this way to keep the two sides in sync. 
+ * + * The runtime passes a pointer directly to the IDT entry to update whereas the + * host side simply passes a pointer to a local variable. + * + */ static inline void _IdtEntCreate ( diff --git a/shared/include/nanokernel/x86/segselect.h b/shared/include/nanokernel/x86/segselect.h index 0f924e1f7b4..3e282b30c37 100644 --- a/shared/include/nanokernel/x86/segselect.h +++ b/shared/include/nanokernel/x86/segselect.h @@ -34,7 +34,7 @@ DESCRIPTION This header contains the IA-32 segment selector definitions. These are extracted into their own file so they can be shared with the host tools. -*/ + */ #ifndef _SEGSELECT_H #define _SEGSELECT_H