doc: Various corrections to doxygen info for Kernel APIs

Most kernel APIs are now ready for inclusion in the API guide.
The APIs largely follow a standard template to provide users
of the API guide with a consistent look-and-feel.

Change-Id: Ib682c31f912e19f5f6d8545d74c5f675b1741058
Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
Commit c98da84e69 by Allan Stephens, 2016-11-11 15:45:03 -05:00; committed by Anas Nashif.
6 changed files with 666 additions and 373 deletions


@ -60,17 +60,12 @@
/* SSE control/status register default value (used by assembler code) */
extern uint32_t _sse_mxcsr_default_value;
/**
*
* @brief Save a thread's floating point context information.
/*
* Save a thread's floating point context information.
*
* This routine saves the system's "live" floating point context into the
* specified thread control block. The SSE registers are saved only if the
* thread is actually using them.
*
* @param tcs Pointer to thread control block.
*
* @return N/A
*/
static void _FpCtxSave(struct tcs *tcs)
{
@ -83,16 +78,11 @@ static void _FpCtxSave(struct tcs *tcs)
_do_fp_regs_save(&tcs->arch.preempFloatReg);
}
/**
*
* @brief Initialize a thread's floating point context information.
/*
* Initialize a thread's floating point context information.
*
* This routine initializes the system's "live" floating point context.
* The SSE registers are initialized only if the thread is actually using them.
*
* @param tcs Pointer to thread control block.
*
* @return N/A
*/
static inline void _FpCtxInit(struct tcs *tcs)
{
@ -104,37 +94,9 @@ static inline void _FpCtxInit(struct tcs *tcs)
#endif
}
/**
/*
* Enable preservation of floating point context information.
*
* @brief Enable preservation of floating point context information.
*
* This routine informs the kernel that the specified thread (which may be
* the current thread) will be using the floating point registers.
* The @a options parameter indicates which floating point register sets
* will be used by the specified thread:
*
* a) K_FP_REGS indicates x87 FPU and MMX registers only
* b) K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
*
* Invoking this routine initializes the thread's floating point context info
* to that of an FPU that has been reset. The next time the thread is scheduled
* by _Swap() it will either inherit an FPU that is guaranteed to be in a "sane"
* state (if the most recent user of the FPU was cooperatively swapped out)
* or the thread's own floating point context will be loaded (if the most
* recent user of the FPU was pre-empted, or if this thread is the first user
* of the FPU). Thereafter, the kernel will protect the thread's FP context
* so that it is not altered during a preemptive context switch.
*
* @warning
* This routine should only be used to enable floating point support for a
* thread that does not currently have such support enabled already.
*
* @param tcs Pointer to thread control block.
* @param options Registers to be preserved (K_FP_REGS or K_SSE_REGS).
*
* @return N/A
*
* @internal
* The transition from "non-FP supporting" to "FP supporting" must be done
* atomically to avoid confusing the floating point logic used by _Swap(), so
* this routine locks interrupts to ensure that a context switch does not occur.
@ -232,21 +194,8 @@ void k_float_enable(struct tcs *tcs, unsigned int options)
}
/**
* Disable preservation of floating point context information.
*
* @brief Disable preservation of floating point context information.
*
* This routine informs the kernel that the specified thread (which may be
* the current thread) will no longer be using the floating point registers.
*
* @warning
* This routine should only be used to disable floating point support for
* a thread that currently has such support enabled.
*
* @param tcs Pointer to thread control block.
*
* @return N/A
*
* @internal
* The transition from "FP supporting" to "non-FP supporting" must be done
* atomically to avoid confusing the floating point logic used by _Swap(), so
* this routine locks interrupts to ensure that a context switch does not occur.
@ -276,9 +225,8 @@ void k_float_disable(struct tcs *tcs)
irq_unlock(imask);
}
/**
*
* @brief Handler for "device not available" exception.
/*
* Handler for "device not available" exception.
*
* This routine is registered to handle the "device not available" exception
* (vector = 7).
@ -286,10 +234,6 @@ void k_float_disable(struct tcs *tcs)
* The processor will generate this exception if any x87 FPU, MMX, or SSEx
* instruction is executed while CR0[TS]=1. The handler then enables the
* current thread to use all supported floating point registers.
*
* @param pEsf This value is not used.
*
* @return N/A
*/
void _FpNotAvailableExcHandler(NANO_ESF *pEsf)
{


@ -381,7 +381,7 @@ static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
/**
* The NANO_SOFT_IRQ macro must be used as the value for the @a irq parameter
* to NANO_CPU_INT_REGSITER when connecting to an interrupt that does not
* to NANO_CPU_INT_REGISTER when connecting to an interrupt that does not
* correspond to any IRQ line (such as spurious vector or SW IRQ)
*/
#define NANO_SOFT_IRQ ((unsigned int) (-1))
@ -397,10 +397,62 @@ extern void _arch_irq_enable(unsigned int irq);
*/
extern void _arch_irq_disable(unsigned int irq);
#ifdef CONFIG_FP_SHARING
extern void k_float_enable(k_tid_t thread_id, unsigned int options);
extern void k_float_disable(k_tid_t thread_id);
#endif /* CONFIG_FP_SHARING */
/**
* @defgroup float_apis Floating Point APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Enable preservation of floating point context information.
*
* This routine informs the kernel that the specified thread (which may be
* the current thread) will be using the floating point registers.
* The @a options parameter indicates which floating point register sets
* will be used by the specified thread:
*
* a) K_FP_REGS indicates x87 FPU and MMX registers only
* b) K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
*
* Invoking this routine initializes the thread's floating point context info
* to that of an FPU that has been reset. The next time the thread is scheduled
* by _Swap() it will either inherit an FPU that is guaranteed to be in a "sane"
* state (if the most recent user of the FPU was cooperatively swapped out)
* or the thread's own floating point context will be loaded (if the most
* recent user of the FPU was pre-empted, or if this thread is the first user
* of the FPU). Thereafter, the kernel will protect the thread's FP context
* so that it is not altered during a preemptive context switch.
*
* @warning
* This routine should only be used to enable floating point support for a
* thread that does not currently have such support enabled already.
*
* @param thread ID of thread.
* @param options Registers to be preserved (K_FP_REGS or K_SSE_REGS).
*
* @return N/A
*/
extern void k_float_enable(k_tid_t thread, unsigned int options);
/**
* @brief Disable preservation of floating point context information.
*
* This routine informs the kernel that the specified thread (which may be
* the current thread) will no longer be using the floating point registers.
*
* @warning
* This routine should only be used to disable floating point support for
* a thread that currently has such support enabled.
*
* @param thread ID of thread.
*
* @return N/A
*/
extern void k_float_disable(k_tid_t thread);
/**
* @}
*/
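A minimal usage sketch of the two routines declared above, assuming thread context; k_current_get() is the thread-ID routine declared in kernel.h, and the thread body is purely illustrative:

void fp_calculation_thread(void *p1, void *p2, void *p3)
{
        /* This thread uses the x87 FPU/MMX registers, but not SSE. */
        k_float_enable(k_current_get(), K_FP_REGS);

        /* ... perform floating point work ... */

        /* FP work is finished; stop preserving this thread's FP context. */
        k_float_disable(k_current_get());
}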
#include <stddef.h> /* for size_t */


@ -26,28 +26,26 @@ extern "C" {
typedef int atomic_t;
typedef atomic_t atomic_val_t;
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
/**
* @defgroup atomic_apis Atomic Services APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Atomic compare-and-set.
*
* @brief Atomic compare-and-set primitive
* This routine performs an atomic compare-and-set on @a target. If the current
* value of @a target equals @a old_value, @a target is set to @a new_value.
* If the current value of @a target does not equal @a old_value, @a target
* is left unchanged.
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* @param target address to be tested
* @param old_value value to compare against
* @param new_value value to compare against
* @return Returns 1 if <new_value> is written, 0 otherwise.
* @param target Address of atomic variable.
* @param old_value Original value to compare against.
* @param new_value New value to store.
* @return 1 if @a new_value is written, 0 otherwise.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value)
{
@ -55,104 +53,121 @@ static inline int atomic_cas(atomic_t *target, atomic_val_t old_value,
0, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST);
}
#else
extern int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value);
#endif
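atomic_cas() is typically used in a retry loop to build other lock-free operations. A sketch of that pattern, using atomic_get() from further down in this header; the bounded counter itself is only illustrative:

static int bounded_inc(atomic_t *counter, atomic_val_t limit)
{
        atomic_val_t old;

        do {
                old = atomic_get(counter);
                if (old >= limit) {
                        return 0;       /* already at the limit; no update made */
                }
        } while (!atomic_cas(counter, old, old + 1));

        return 1;                       /* counter was atomically incremented */
}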
/**
*
* @brief Atomic addition primitive
* @brief Atomic addition.
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
* This routine performs an atomic addition on @a target.
*
* @param target memory location to add to
* @param value the value to add
* @param target Address of atomic variable.
* @param value Value to add.
*
* @return The previous value from <target>
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_add(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic subtraction primitive
* @brief Atomic subtraction.
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
* This routine performs an atomic subtraction on @a target.
*
* @param target the memory location to subtract from
* @param value the value to subtract
* @param target Address of atomic variable.
* @param value Value to subtract.
*
* @return The previous value from <target>
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_sub(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic increment primitive
* @brief Atomic increment.
*
* @param target memory location to increment
* This routine performs an atomic increment by 1 on @a target.
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
* @param target Address of atomic variable.
*
* @return The value from <target> before the increment
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_inc(atomic_t *target)
{
return atomic_add(target, 1);
}
#else
extern atomic_val_t atomic_inc(atomic_t *target);
#endif
/**
*
* @brief Atomic decrement primitive
* @brief Atomic decrement.
*
* @param target memory location to decrement
* This routine performs an atomic decrement by 1 on @a target.
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
* @param target Address of atomic variable.
*
* @return The value from <target> prior to the decrement
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_dec(atomic_t *target)
{
return atomic_sub(target, 1);
}
#else
extern atomic_val_t atomic_dec(atomic_t *target);
#endif
/**
*
* @brief Atomic get primitive
* @brief Atomic get.
*
* @param target memory location to read from
* This routine performs an atomic read on @a target.
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
* @param target Address of atomic variable.
*
* @return The value read from <target>
* @return Value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_get(const atomic_t *target)
{
return __atomic_load_n(target, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_get(const atomic_t *target);
#endif
/**
*
* @brief Atomic get-and-set primitive
* @brief Atomic get-and-set.
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
* This routine atomically sets @a target to @a value and returns
* the previous value of @a target.
*
* @param target the memory location to write to
* @param value the value to write
* @param target Address of atomic variable.
* @param value Value to write to @a target.
*
* @return The previous value from <target>
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
/* This builtin, as described by Intel, is not a traditional
@ -161,236 +176,253 @@ static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
*/
return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic clear primitive
* @brief Atomic clear.
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
* This routine atomically sets @a target to zero and returns its previous
* value. (Hence, it is equivalent to atomic_set(target, 0).)
*
* @param target the memory location to write
* @param target Address of atomic variable.
*
* @return The previous value from <target>
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_clear(atomic_t *target)
{
return atomic_set(target, 0);
}
#else
extern atomic_val_t atomic_clear(atomic_t *target);
#endif
/**
*
* @brief Atomic bitwise inclusive OR primitive
* @brief Atomic bitwise inclusive OR.
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
* This routine atomically sets @a target to the bitwise inclusive OR of
* @a target and @a value.
*
* @param target the memory location to be modified
* @param value the value to OR
* @param target Address of atomic variable.
* @param value Value to OR.
*
* @return The previous value from <target>
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_or(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic bitwise exclusive OR (XOR) primitive
* @brief Atomic bitwise exclusive OR (XOR).
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
* This routine atomically sets @a target to the bitwise exclusive OR (XOR) of
* @a target and @a value.
*
* @param target the memory location to be modified
* @param value the value to XOR
* @param target Address of atomic variable.
* @param value Value to XOR.
*
* @return The previous value from <target>
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_xor(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic bitwise AND primitive
* @brief Atomic bitwise AND.
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
* This routine atomically sets @a target to the bitwise AND of @a target
* and @a value.
*
* @param target the memory location to be modified
* @param value the value to AND
* @param target Address of atomic variable.
* @param value Value to AND.
*
* @return The previous value from <target>
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_and(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic bitwise NAND primitive
* @brief Atomic bitwise NAND.
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
* This routine atomically sets @a target to the bitwise NAND of @a target
* and @a value. (This operation is equivalent to target = ~(target & value).)
*
* The operation here is equivalent to *target = ~(tmp & value)
* @param target Address of atomic variable.
* @param value Value to NAND.
*
* @param target the memory location to be modified
* @param value the value to NAND
*
* @return The previous value from <target>
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_nand(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
extern atomic_val_t atomic_dec(atomic_t *target);
extern atomic_val_t atomic_inc(atomic_t *target);
extern atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
extern atomic_val_t atomic_clear(atomic_t *target);
extern atomic_val_t atomic_get(const atomic_t *target);
extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
extern int atomic_cas(atomic_t *target, atomic_val_t oldValue,
atomic_val_t newValue);
#endif /* CONFIG_ATOMIC_OPERATIONS_BUILTIN */
#endif
/**
* @brief Initialize an atomic variable.
*
* This macro can be used to initialize an atomic variable. For example,
* @code atomic_t my_var = ATOMIC_INIT(75); @endcode
*
* @param i Value to assign to atomic variable.
*/
#define ATOMIC_INIT(i) (i)
/**
* @cond INTERNAL_HIDDEN
*/
#define ATOMIC_BITS (sizeof(atomic_val_t) * 8)
#define ATOMIC_MASK(bit) (1 << ((bit) & (ATOMIC_BITS - 1)))
#define ATOMIC_ELEM(addr, bit) ((addr) + ((bit) / ATOMIC_BITS))
/** @def ATOMIC_DEFINE
* @brief Helper to declare an atomic_t array.
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @brief Define an array of atomic variables.
*
* A helper to define an atomic_t array based on the number of needed
* bits, e.g. any bit count of 32 or less will produce a single-element
* array.
* This macro defines an array of atomic variables containing at least
* @a num_bits bits.
*
* @param name Name of atomic_t array.
* @param num_bits Maximum number of bits needed.
* @note
* If used from file scope, the bits of the array are initialized to zero;
* if used from within a function, the bits are left uninitialized.
*
* @return n/a
* @param name Name of array of atomic variables.
* @param num_bits Number of bits needed.
*/
#define ATOMIC_DEFINE(name, num_bits) \
atomic_t name[1 + ((num_bits) - 1) / ATOMIC_BITS]
/** @brief Test whether a bit is set
/**
* @brief Atomically test a bit.
*
* Test whether bit number bit is set or not.
* This routine tests whether bit number @a bit of @a target is set or not.
* The target may be a single atomic variable or an array of them.
*
* Also works for an array of multiple atomic_t variables, in which
* case the bit number may go beyond the number of bits in a single
* atomic_t variable.
* @param target Address of atomic variable or array.
* @param bit Bit number (starting from 0).
*
* @param addr base address to start counting from
* @param bit bit number counted from the base address
*
* @return 1 if the bit was set, 0 if it wasn't
* @return 1 if the bit was set, 0 if it wasn't.
*/
static inline int atomic_test_bit(const atomic_t *addr, int bit)
static inline int atomic_test_bit(const atomic_t *target, int bit)
{
atomic_val_t val = atomic_get(ATOMIC_ELEM(addr, bit));
atomic_val_t val = atomic_get(ATOMIC_ELEM(target, bit));
return (1 & (val >> (bit & (ATOMIC_BITS - 1))));
}
/** @brief Clear a bit and return its old value
/**
* @brief Atomically test and clear a bit.
*
* Atomically clear a bit and return its old value.
* Atomically clear bit number @a bit of @a target and return its old value.
* The target may be a single atomic variable or an array of them.
*
* Also works for an array of multiple atomic_t variables, in which
* case the bit number may go beyond the number of bits in a single
* atomic_t variable.
* @param target Address of atomic variable or array.
* @param bit Bit number (starting from 0).
*
* @param addr base address to start counting from
* @param bit bit number counted from the base address
*
* @return 1 if the bit was set, 0 if it wasn't
* @return 1 if the bit was set, 0 if it wasn't.
*/
static inline int atomic_test_and_clear_bit(atomic_t *addr, int bit)
static inline int atomic_test_and_clear_bit(atomic_t *target, int bit)
{
atomic_val_t mask = ATOMIC_MASK(bit);
atomic_val_t old;
old = atomic_and(ATOMIC_ELEM(addr, bit), ~mask);
old = atomic_and(ATOMIC_ELEM(target, bit), ~mask);
return (old & mask) != 0;
}
/** @brief Set a bit and return its old value
/**
* @brief Atomically set a bit.
*
* Atomically set a bit and return its old value.
* Atomically set bit number @a bit of @a target and return its old value.
* The target may be a single atomic variable or an array of them.
*
* Also works for an array of multiple atomic_t variables, in which
* case the bit number may go beyond the number of bits in a single
* atomic_t variable.
* @param target Address of atomic variable or array.
* @param bit Bit number (starting from 0).
*
* @param addr base address to start counting from
* @param bit bit number counted from the base address
*
* @return 1 if the bit was set, 0 if it wasn't
* @return 1 if the bit was set, 0 if it wasn't.
*/
static inline int atomic_test_and_set_bit(atomic_t *addr, int bit)
static inline int atomic_test_and_set_bit(atomic_t *target, int bit)
{
atomic_val_t mask = ATOMIC_MASK(bit);
atomic_val_t old;
old = atomic_or(ATOMIC_ELEM(addr, bit), mask);
old = atomic_or(ATOMIC_ELEM(target, bit), mask);
return (old & mask) != 0;
}
/** @brief Clear a bit
/**
* @brief Atomically clear a bit.
*
* Atomically clear a bit.
* Atomically clear bit number @a bit of @a target.
* The target may be a single atomic variable or an array of them.
*
* Also works for an array of multiple atomic_t variables, in which
* case the bit number may go beyond the number of bits in a single
* atomic_t variable.
* @param target Address of atomic variable or array.
* @param bit Bit number (starting from 0).
*
* @param addr base address to start counting from
* @param bit bit number counted from the base address
* @return N/A
*/
static inline void atomic_clear_bit(atomic_t *addr, int bit)
static inline void atomic_clear_bit(atomic_t *target, int bit)
{
atomic_val_t mask = ATOMIC_MASK(bit);
atomic_and(ATOMIC_ELEM(addr, bit), ~mask);
atomic_and(ATOMIC_ELEM(target, bit), ~mask);
}
/** @brief Set a bit
/**
* @brief Atomically set a bit.
*
* Atomically set a bit.
* Atomically set bit number @a bit of @a target.
* The target may be a single atomic variable or an array of them.
*
* Also works for an array of multiple atomic_t variables, in which
* case the bit number may go beyond the number of bits in a single
* atomic_t variable.
* @param target Address of atomic variable or array.
* @param bit Bit number (starting from 0).
*
* @param addr base address to start counting from
* @param bit bit number counted from the base address
* @return N/A
*/
static inline void atomic_set_bit(atomic_t *addr, int bit)
static inline void atomic_set_bit(atomic_t *target, int bit)
{
atomic_val_t mask = ATOMIC_MASK(bit);
atomic_or(ATOMIC_ELEM(addr, bit), mask);
atomic_or(ATOMIC_ELEM(target, bit), mask);
}
/**
* @}
*/
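The bit operations above combine naturally with ATOMIC_DEFINE(); a brief sketch with purely illustrative flag names:

/* 40 flags span two 32-bit atomic variables; at file scope they start zeroed. */
ATOMIC_DEFINE(device_flags, 40);

void mark_device_busy(int dev_id)
{
        atomic_set_bit(device_flags, dev_id);
}

int claim_device(int dev_id)
{
        /* Returns 1 if the device was already claimed, 0 if this call claimed it. */
        return atomic_test_and_set_bit(device_flags, dev_id);
}

void release_device(int dev_id)
{
        atomic_clear_bit(device_flags, dev_id);
}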
#ifdef __cplusplus
}
#endif


@ -30,6 +30,13 @@
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup isr_apis Interrupt Service Routine APIs
* @ingroup kernel_apis
* @{
*/
/**
* Configure a static interrupt.
*
@ -47,78 +54,91 @@ extern "C" {
_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p)
/**
* @brief Disable all interrupts on the CPU (inline)
* @brief Lock interrupts.
*
* This routine disables interrupts. It can be called from either interrupt,
* task or fiber level. This routine returns an architecture-dependent
* lock-out key representing the "interrupt disable state" prior to the call;
* this key can be passed to irq_unlock() to re-enable interrupts.
* This routine disables all interrupts on the CPU. It returns an unsigned
* integer "lock-out key", which is an architecture-dependent indicator of
* whether interrupts were locked prior to the call. The lock-out key must be
* passed to irq_unlock() to re-enable interrupts.
*
* The lock-out key should only be used as the argument to the irq_unlock()
* API. It should never be used to manually re-enable interrupts or to inspect
* or manipulate the contents of the source register.
* This routine can be called recursively, as long as the caller keeps track
* of each lock-out key that is generated. Interrupts are re-enabled by
* passing each of the keys to irq_unlock() in the reverse order they were
* acquired. (That is, each call to irq_lock() must be balanced by
* a corresponding call to irq_unlock().)
*
* This function can be called recursively: it will return a key to return the
* state of interrupt locking to the previous level.
* @note
* This routine can be called by ISRs or by threads. If it is called by a
* thread, the interrupt lock is thread-specific; this means that interrupts
* remain disabled only while the thread is running. If the thread performs an
* operation that allows another thread to run (for example, giving a semaphore
* or sleeping for N milliseconds), the interrupt lock no longer applies and
* interrupts may be re-enabled while other processing occurs. When the thread
* once again becomes the current thread, the kernel re-establishes its
* interrupt lock; this ensures the thread won't be interrupted until it has
* explicitly released the interrupt lock it established.
*
* WARNINGS
* Invoking a kernel routine with interrupts locked may result in
* interrupts being re-enabled for an unspecified period of time. If the
* called routine blocks, interrupts will be re-enabled while another
* thread executes, or while the system is idle.
*
* The "interrupt disable state" is an attribute of a thread. Thus, if a
* fiber or task disables interrupts and subsequently invokes a kernel
* routine that causes the calling thread to block, the interrupt
* disable state will be restored when the thread is later rescheduled
* for execution.
*
* @return An architecture-dependent unsigned int lock-out key representing the
* "interrupt disable state" prior to the call.
* @warning
* The lock-out key should never be used to manually re-enable interrupts
* or to inspect or manipulate the contents of the CPU's interrupt bits.
*
* @return Lock-out key.
*/
#define irq_lock() _arch_irq_lock()
/**
* @brief Unlock interrupts.
*
* @brief Enable all interrupts on the CPU (inline)
* This routine reverses the effect of a previous call to irq_lock() using
* the associated lock-out key. The caller must call the routine once for
* each time it called irq_lock(), supplying the keys in the reverse order
* they were acquired, before interrupts are enabled.
*
* This routine re-enables interrupts on the CPU. The @a key parameter
* is an architecture-dependent lock-out key that is returned by a previous
* invocation of irq_lock().
* @note Can be called by ISRs.
*
* This routine can be called from either interrupt, task or fiber level
*
* @param key architecture-dependent lock-out key
* @param key Lock-out key generated by irq_lock().
*
* @return N/A
*/
#define irq_unlock(key) _arch_irq_unlock(key)
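A sketch of the balanced lock/unlock pattern described above; the shared variable is illustrative:

static volatile int shared_counter;     /* illustrative data shared with an ISR */

void bump_shared_counter(void)
{
        unsigned int key = irq_lock();

        /* Interrupts are locked; the update cannot be interrupted on this CPU. */
        shared_counter++;

        /* Hand the same key back; never re-enable interrupts manually. */
        irq_unlock(key);
}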
/**
* @brief Enable a specific IRQ
* @brief Enable an IRQ.
*
* This routine enables interrupts from source @a irq.
*
* @param irq IRQ line.
*
* @param irq IRQ line
* @return N/A
*/
#define irq_enable(irq) _arch_irq_enable(irq)
/**
* @brief Disable a specific IRQ
* @brief Disable an IRQ.
*
* This routine disables interrupts from source @a irq.
*
* @param irq IRQ line.
*
* @param irq IRQ line
* @return N/A
*/
#define irq_disable(irq) _arch_irq_disable(irq)
/**
* @brief Return IRQ enable state
* @brief Get IRQ enable state.
*
* This routine indicates if interrupts from source @a irq are enabled.
*
* @param irq IRQ line.
*
* @param irq IRQ line
* @return interrupt enable state, true or false
*/
#define irq_is_enabled(irq) _arch_irq_is_enabled(irq)
/**
* @}
*/
#ifdef __cplusplus
}
#endif


@ -96,8 +96,6 @@ struct k_timer;
typedef struct k_thread *k_tid_t;
/* threads/scheduler/execution contexts */
enum execution_context_types {
K_ISR = 0,
K_COOP_THREAD,
@ -106,6 +104,12 @@ enum execution_context_types {
typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);
/**
* @defgroup thread_apis Thread APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Spawn a thread.
*
@ -133,14 +137,14 @@ typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);
* @return ID of new thread.
*/
extern k_tid_t k_thread_spawn(char *stack, unsigned stack_size,
void (*entry)(void *, void *, void*),
void (*entry)(void *, void *, void *),
void *p1, void *p2, void *p3,
int32_t prio, uint32_t options, int32_t delay);
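A spawning sketch with an illustrative entry function; the stack declaration is deliberately simplified, and real code follows the kernel's stack-definition conventions:

#define WORKER_STACK_SIZE 512
static char worker_stack[WORKER_STACK_SIZE];    /* simplified for illustration */

static void worker_entry(void *p1, void *p2, void *p3)
{
        /* ... thread body ... */
}

void start_worker(void)
{
        k_tid_t tid = k_thread_spawn(worker_stack, WORKER_STACK_SIZE,
                                     worker_entry, NULL, NULL, NULL,
                                     5, 0, 0);  /* priority 5, no options, no delay */

        /* tid can later be passed to k_thread_cancel(), k_thread_abort(), etc. */
}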
/**
* @brief Put the current thread to sleep.
*
* This routine puts the currently thread to sleep for @a duration
* This routine puts the current thread to sleep for @a duration
* milliseconds.
*
* @param duration Number of milliseconds to sleep.
@ -155,9 +159,6 @@ extern void k_sleep(int32_t duration);
* This routine causes the current thread to execute a "do nothing" loop for
* @a usec_to_wait microseconds.
*
* @warning This routine utilizes the system clock, so it must not be invoked
* until the system clock is operational or while interrupts are locked.
*
* @return N/A
*/
extern void k_busy_wait(uint32_t usec_to_wait);
@ -207,7 +208,7 @@ extern k_tid_t k_current_get(void);
extern int k_thread_cancel(k_tid_t thread);
/**
* @brief Abort thread.
* @brief Abort a thread.
*
* This routine permanently stops execution of @a thread. The thread is taken
* off all kernel queues it is part of (i.e. the ready queue, the timeout
@ -222,6 +223,10 @@ extern int k_thread_cancel(k_tid_t thread);
*/
extern void k_thread_abort(k_tid_t thread);
/**
* @cond INTERNAL_HIDDEN
*/
#ifdef CONFIG_SYS_CLOCK_EXISTS
#define _THREAD_TIMEOUT_INIT(obj) \
(obj).nano_timeout = { \
@ -274,6 +279,10 @@ struct _static_thread_data {
.init_groups = (groups), \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @brief Statically define and initialize a thread.
*
@ -412,9 +421,23 @@ extern void k_thread_resume(k_tid_t thread);
*/
extern void k_sched_time_slice_set(int32_t slice, int prio);
/**
* @} end defgroup thread_apis
*/
/**
* @addtogroup isr_apis
* @{
*/
/**
* @brief Determine if code is running at interrupt level.
*
* This routine allows the caller to customize its actions, depending on
* whether it is a thread or an ISR.
*
* @note Can be called by ISRs.
*
* @return 0 if invoked by a thread.
* @return Non-zero if invoked by an ISR.
*/
@ -423,44 +446,56 @@ extern int k_is_in_isr(void);
/**
* @brief Determine if code is running in a preemptible thread.
*
* Returns a 'true' value if these conditions are all met:
* This routine allows the caller to customize its actions, depending on
* whether it can be preempted by another thread. The routine returns a 'true'
* value if all of the following conditions are met:
*
* - the code is not running in an ISR
* - the thread's priority is in the preemptible range
* - the thread has not locked the scheduler
* - The code is running in a thread, not in an ISR.
* - The thread's priority is in the preemptible range.
* - The thread has not locked the scheduler.
*
* @return 0 if invoked by either an ISR or a cooperative thread.
* @note Can be called by ISRs.
*
* @return 0 if invoked by an ISR or by a cooperative thread.
* @return Non-zero if invoked by a preemptible thread.
*/
extern int k_is_preempt_thread(void);
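A sketch of the typical use of these context queries, picking a timeout that is legal in the caller's context; K_FOREVER is the kernel's "wait as long as necessary" timeout value:

int32_t pick_timeout(void)
{
        if (k_is_in_isr()) {
                return K_NO_WAIT;       /* an ISR must never block */
        }

        return K_FOREVER;               /* a thread may wait indefinitely */
}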
/*
* @brief Lock the scheduler
/**
* @} end addtogroup isr_apis
*/
/**
* @addtogroup thread_apis
* @{
*/
/**
* @brief Lock the scheduler.
*
* Prevent another thread from preempting the current thread.
* This routine prevents the current thread from being preempted by another
* thread by instructing the scheduler to treat it as a cooperative thread.
* If the thread subsequently performs an operation that makes it unready,
* it will be context switched out in the normal manner. When the thread
* again becomes the current thread, its non-preemptible status is maintained.
*
* @note If the thread does an operation that causes it to pend, it will still
* be context switched out.
* This routine can be called recursively.
*
* @note Similar to irq_lock, the scheduler lock state is tracked per-thread.
*
* This should be chosen over irq_lock when possible, basically when the data
* protected by it is not accessible from ISRs. However, the associated
* k_sched_unlock() is heavier to use than irq_unlock, so if the amount of
* processing is really small, irq_lock might be a better choice.
*
* Can be called recursively.
* @note k_sched_lock() and k_sched_unlock() should normally be used
* when the operation being performed can be safely interrupted by ISRs.
* However, if the amount of processing involved is very small, better
* performance may be obtained by using irq_lock() and irq_unlock().
*
* @return N/A
*/
extern void k_sched_lock(void);
/*
* @brief Unlock the scheduler
/**
* @brief Unlock the scheduler.
*
* Re-enable scheduling previously disabled by k_sched_lock(). Must be called
* an equal amount of times k_sched_lock() was called. Threads are rescheduled
* upon exit.
* This routine reverses the effect of a previous call to k_sched_lock().
* A thread must call the routine once for each time it called k_sched_lock()
* before the thread becomes preemptible.
*
* @return N/A
*/
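A sketch of balanced scheduler locking around an update that must not be preempted by another thread (ISRs can still run); k_sched_unlock() is the routine documented directly above, and the shared variables are illustrative:

static int total_count;                 /* illustrative shared data */
static int total_bytes;

void update_totals(int packets, int bytes)
{
        k_sched_lock();

        total_count += packets;
        total_bytes += bytes;

        k_sched_unlock();
}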
@ -490,6 +525,10 @@ extern void k_thread_custom_data_set(void *value);
*/
extern void *k_thread_custom_data_get(void);
/**
* @} end addtogroup thread_apis
*/
/**
* kernel timing
*/
@ -502,7 +541,9 @@ extern void *k_thread_custom_data_get(void);
#define K_MINUTES(m) K_SECONDS((m) * 60)
#define K_HOURS(h) K_MINUTES((h) * 60)
/* private internal time manipulation (users should never play with ticks) */
/**
* @cond INTERNAL_HIDDEN
*/
/* added tick needed to account for tick in progress */
#define _TICK_ALIGN 1
@ -517,7 +558,6 @@ static int64_t __ticks_to_ms(int64_t ticks)
#endif
}
/* timeouts */
struct _timeout;
@ -531,8 +571,13 @@ struct _timeout {
_timeout_func_t func;
};
/**
* INTERNAL_HIDDEN @endcond
*/
/* timers */
/**
* @cond INTERNAL_HIDDEN
*/
struct k_timer {
/*
@ -577,6 +622,16 @@ struct k_timer {
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup timer_apis Timer APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Statically define and initialize a timer.
*
@ -687,8 +742,15 @@ extern uint32_t k_timer_status_sync(struct k_timer *timer);
*/
extern int32_t k_timer_remaining_get(struct k_timer *timer);
/**
* @} end defgroup timer_apis
*/
/* kernel clocks */
/**
* @defgroup clock_apis Kernel Clock APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Get system uptime.
@ -757,10 +819,12 @@ extern uint32_t k_uptime_delta_32(int64_t *reftime);
extern uint32_t k_cycle_get_32(void);
/**
* data transfers (basic)
* @} end defgroup clock_apis
*/
/* fifos */
/**
* @cond INTERNAL_HIDDEN
*/
struct k_fifo {
_wait_q_t wait_q;
@ -769,6 +833,23 @@ struct k_fifo {
_DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_fifo);
};
#define K_FIFO_INITIALIZER(obj) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup fifo_apis Fifo APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Initialize a fifo.
*
@ -848,13 +929,6 @@ extern void k_fifo_put_slist(struct k_fifo *fifo, sys_slist_t *list);
*/
extern void *k_fifo_get(struct k_fifo *fifo, int32_t timeout);
#define K_FIFO_INITIALIZER(obj) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* @brief Statically define and initialize a fifo.
*
@ -869,7 +943,13 @@ extern void *k_fifo_get(struct k_fifo *fifo, int32_t timeout);
__in_section(_k_fifo, static, name) = \
K_FIFO_INITIALIZER(name)
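A fifo sketch; k_fifo_put() is assumed to be the fifo add routine paired with k_fifo_get(), K_FIFO_DEFINE() is the static definition macro documented above, and the first word of each queued item is reserved for the fifo's own use:

struct work_item {
        void *fifo_reserved;            /* first word reserved for the fifo */
        int payload;
};

K_FIFO_DEFINE(work_fifo);

void producer(struct work_item *item)
{
        k_fifo_put(&work_fifo, item);
}

int consumer(void)
{
        struct work_item *item = k_fifo_get(&work_fifo, K_FOREVER);

        return item->payload;
}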
/* lifos */
/**
* @} end defgroup fifo_apis
*/
/**
* @cond INTERNAL_HIDDEN
*/
struct k_lifo {
_wait_q_t wait_q;
@ -878,6 +958,23 @@ struct k_lifo {
_DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_lifo);
};
#define K_LIFO_INITIALIZER(obj) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.list = NULL, \
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup lifo_apis Lifo APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Initialize a lifo.
*
@ -922,13 +1019,6 @@ extern void k_lifo_put(struct k_lifo *lifo, void *data);
*/
extern void *k_lifo_get(struct k_lifo *lifo, int32_t timeout);
#define K_LIFO_INITIALIZER(obj) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.list = NULL, \
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* @brief Statically define and initialize a lifo.
*
@ -943,7 +1033,13 @@ extern void *k_lifo_get(struct k_lifo *lifo, int32_t timeout);
__in_section(_k_lifo, static, name) = \
K_LIFO_INITIALIZER(name)
/* stacks */
/**
* @} end defgroup lifo_apis
*/
/**
* @cond INTERNAL_HIDDEN
*/
struct k_stack {
_wait_q_t wait_q;
@ -952,6 +1048,25 @@ struct k_stack {
_DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_stack);
};
#define K_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.base = stack_buffer, \
.next = stack_buffer, \
.top = stack_buffer + stack_num_entries, \
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup stack_apis Stack APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Initialize a stack.
*
@ -999,15 +1114,6 @@ extern void k_stack_push(struct k_stack *stack, uint32_t data);
*/
extern int k_stack_pop(struct k_stack *stack, uint32_t *data, int32_t timeout);
#define K_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.base = stack_buffer, \
.next = stack_buffer, \
.top = stack_buffer + stack_num_entries, \
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* @brief Statically define and initialize a stack
*
@ -1027,7 +1133,13 @@ extern int k_stack_pop(struct k_stack *stack, uint32_t *data, int32_t timeout);
stack_num_entries)
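A sketch of a stack used as a pool of free buffer addresses; K_STACK_DEFINE() is assumed to take the stack name and entry count, per the static definition macro documented above:

K_STACK_DEFINE(free_bufs, 8);

void release_buffer(uint32_t buf_addr)
{
        k_stack_push(&free_bufs, buf_addr);
}

int claim_buffer(uint32_t *buf_addr)
{
        /* Returns 0 on success; fails if no entry appears within the timeout. */
        return k_stack_pop(&free_bufs, buf_addr, K_NO_WAIT);
}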
/**
* workqueues
* @} end defgroup stack_apis
*/
/**
* @defgroup workqueue_apis Workqueue Thread APIs
* @ingroup kernel_apis
* @{
*/
struct k_work;
@ -1228,10 +1340,12 @@ static inline int k_delayed_work_submit(struct k_delayed_work *work,
#endif /* CONFIG_SYS_CLOCK_EXISTS */
/**
* synchronization
* @} end defgroup workqueue_apis
*/
/* mutexes */
/**
* @cond INTERNAL_HIDDEN
*/
struct k_mutex {
_wait_q_t wait_q;
@ -1263,6 +1377,16 @@ struct k_mutex {
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup mutex_apis Mutex APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Statically define and initialize a mutex.
*
@ -1326,7 +1450,13 @@ extern int k_mutex_lock(struct k_mutex *mutex, int32_t timeout);
*/
extern void k_mutex_unlock(struct k_mutex *mutex);
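A mutex sketch protecting an illustrative shared setting; K_MUTEX_DEFINE() is assumed to be the static definition macro documented above:

K_MUTEX_DEFINE(config_mutex);

static int config_value;                /* illustrative shared data */

void set_config(int value)
{
        k_mutex_lock(&config_mutex, K_FOREVER);
        config_value = value;
        k_mutex_unlock(&config_mutex);
}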
/* semaphores */
/**
* @} end defgroup mutex_apis
*/
/**
* @cond INTERNAL_HIDDEN
*/
struct k_sem {
_wait_q_t wait_q;
@ -1336,6 +1466,24 @@ struct k_sem {
_DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_sem);
};
#define K_SEM_INITIALIZER(obj, initial_count, count_limit) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.count = initial_count, \
.limit = count_limit, \
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup semaphore_apis Semaphore APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Initialize a semaphore.
*
@ -1409,14 +1557,6 @@ static inline unsigned int k_sem_count_get(struct k_sem *sem)
return sem->count;
}
#define K_SEM_INITIALIZER(obj, initial_count, count_limit) \
{ \
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
.count = initial_count, \
.limit = count_limit, \
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* @brief Statically define and initialize a semaphore.
*
@ -1433,13 +1573,29 @@ static inline unsigned int k_sem_count_get(struct k_sem *sem)
__in_section(_k_sem, static, name) = \
K_SEM_INITIALIZER(name, initial_count, count_limit)
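A semaphore sketch in which an ISR signals a waiting thread; K_SEM_DEFINE() is the static definition macro documented above, and k_sem_give()/k_sem_take() are assumed to be the kernel's semaphore give/take routines:

K_SEM_DEFINE(rx_ready_sem, 0, 1);       /* initial count 0, maximum count 1 */

void rx_isr(void *arg)
{
        k_sem_give(&rx_ready_sem);      /* safe to call from an ISR */
}

void rx_thread(void *p1, void *p2, void *p3)
{
        while (1) {
                k_sem_take(&rx_ready_sem, K_FOREVER);
                /* ... process the data announced by the ISR ... */
        }
}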
/* alerts */
/**
* @} end defgroup semaphore_apis
*/
/**
* @defgroup alert_apis Alert APIs
* @ingroup kernel_apis
* @{
*/
typedef int (*k_alert_handler_t)(struct k_alert *);
/**
* @} end defgroup alert_apis
*/
/**
* @cond INTERNAL_HIDDEN
*/
#define K_ALERT_DEFAULT NULL
#define K_ALERT_IGNORE ((void *)(-1))
typedef int (*k_alert_handler_t)(struct k_alert *);
struct k_alert {
k_alert_handler_t handler;
atomic_t send_count;
@ -1460,6 +1616,15 @@ extern void _alert_deliver(struct k_work *work);
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @addtogroup alert_apis
* @{
*/
/**
* @brief Statically define and initialize an alert.
*
@ -1531,10 +1696,12 @@ extern int k_alert_recv(struct k_alert *alert, int32_t timeout);
extern void k_alert_send(struct k_alert *alert);
/**
* data transfers (complex)
* @} end addtogroup alert_apis
*/
/* message queues */
/**
* @cond INTERNAL_HIDDEN
*/
struct k_msgq {
_wait_q_t wait_q;
@ -1562,6 +1729,16 @@ struct k_msgq {
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup msgq_apis Message Queue APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Statically define and initialize a message queue.
*
@ -1634,7 +1811,7 @@ extern int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout);
* This routine receives a message from message queue @a q in a "first in,
* first out" manner.
*
* @note Can be called by ISRs.
* @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT.
*
* @param q Address of the message queue.
* @param data Address of area to hold the received message.
@ -1689,6 +1866,16 @@ static inline uint32_t k_msgq_num_used_get(struct k_msgq *q)
return q->used_msgs;
}
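A message queue sketch illustrating the ISR note above; K_MSGQ_DEFINE() is assumed to take the queue name, message size, maximum message count, and alignment, and k_msgq_get() is assumed to be the receive routine documented above:

struct sensor_msg {
        uint32_t reading;
};

K_MSGQ_DEFINE(sensor_q, sizeof(struct sensor_msg), 10, 4);

void sensor_isr(void *arg)
{
        struct sensor_msg msg = { .reading = 42 };      /* illustrative value */

        /* From an ISR the timeout must be K_NO_WAIT. */
        k_msgq_put(&sensor_q, &msg, K_NO_WAIT);
}

void sensor_thread(void *p1, void *p2, void *p3)
{
        struct sensor_msg msg;

        while (1) {
                k_msgq_get(&sensor_q, &msg, K_FOREVER);
                /* ... process msg.reading ... */
        }
}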
/**
* @} end defgroup msgq_apis
*/
/**
* @defgroup mem_pool_apis Memory Pool APIs
* @ingroup kernel_apis
* @{
*/
struct k_mem_block {
struct k_mem_pool *pool_id;
void *addr_in_pool;
@ -1696,7 +1883,15 @@ struct k_mem_block {
size_t req_size;
};
/* mailboxes */
/**
* @} end defgroup mem_pool_apis
*/
/**
* @defgroup mailbox_apis Mailbox APIs
* @ingroup kernel_apis
* @{
*/
struct k_mbox_msg {
/** internal use only - needed for legacy API support */
@ -1723,6 +1918,10 @@ struct k_mbox_msg {
#endif
};
/**
* @cond INTERNAL_HIDDEN
*/
struct k_mbox {
_wait_q_t tx_msg_queue;
_wait_q_t rx_msg_queue;
@ -1737,6 +1936,10 @@ struct k_mbox {
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @brief Statically define and initialize a mailbox.
*
@ -1784,7 +1987,6 @@ extern void k_mbox_init(struct k_mbox *mbox);
extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
int32_t timeout);
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/**
* @brief Send a mailbox message in an asynchronous manner.
*
@ -1802,7 +2004,6 @@ extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
*/
extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
struct k_sem *sem);
#endif
/**
* @brief Receive a mailbox message.
@ -1876,7 +2077,13 @@ extern int k_mbox_data_block_get(struct k_mbox_msg *rx_msg,
struct k_mem_pool *pool,
struct k_mem_block *block, int32_t timeout);
/* pipes */
/**
* @} end defgroup mailbox_apis
*/
/**
* @cond INTERNAL_HIDDEN
*/
struct k_pipe {
unsigned char *buffer; /* Pipe buffer: may be NULL */
@ -1905,6 +2112,16 @@ struct k_pipe {
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup pipe_apis Pipe APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Statically define and initialize a pipe.
*
@ -1986,7 +2203,6 @@ extern int k_pipe_get(struct k_pipe *pipe, void *data,
size_t bytes_to_read, size_t *bytes_read,
size_t min_xfer, int32_t timeout);
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
/**
* @brief Write memory block to a pipe.
*
@ -2003,13 +2219,14 @@ extern int k_pipe_get(struct k_pipe *pipe, void *data,
*/
extern void k_pipe_block_put(struct k_pipe *pipe, struct k_mem_block *block,
size_t size, struct k_sem *sem);
#endif
/**
* memory management
* @} end defgroup pipe_apis
*/
/* memory slabs */
/**
* @cond INTERNAL_HIDDEN
*/
struct k_mem_slab {
_wait_q_t wait_q;
@ -2034,6 +2251,16 @@ struct k_mem_slab {
_DEBUG_TRACING_KERNEL_OBJECTS_INIT \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup mem_slab_apis Memory Slab APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Statically define and initialize a memory slab.
*
@ -2144,7 +2371,13 @@ static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
return slab->num_blocks - slab->num_used;
}
/* memory pools */
/**
* @} end defgroup mem_slab_apis
*/
/**
* @cond INTERNAL_HIDDEN
*/
/*
* Memory pool requires a buffer and two arrays of structures for the
@ -2197,10 +2430,7 @@ struct k_mem_pool {
/*
* Static memory pool initialization
*/
/**
* @cond internal
* Make Doxygen skip assembler macros
*/
/*
* Use .altmacro to be able to recalculate values and pass them as string
* arguments when calling assembler macros recursively
@ -2344,14 +2574,16 @@ static void __attribute__ ((used)) __k_mem_pool_quad_block_size_define(void)
}
/**
* @endcond
* End of assembler macros that Doxygen has to skip
* INTERNAL_HIDDEN @endcond
*/
/**
* @brief Define a memory pool
*
* This defines and initializes a memory pool.
* @addtogroup mem_pool_apis
* @{
*/
/**
* @brief Statically define and initialize a memory pool.
*
* The memory pool's buffer contains @a n_max blocks that are @a max_size bytes
* long. The memory pool allows blocks to be repeatedly partitioned into
@ -2425,6 +2657,16 @@ extern void k_mem_pool_free(struct k_mem_block *block);
*/
extern void k_mem_pool_defrag(struct k_mem_pool *pool);
/**
* @} end addtogroup mem_pool_apis
*/
/**
* @defgroup heap_apis Heap Memory Pool APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Allocate memory from heap.
*
@ -2449,6 +2691,10 @@ extern void *k_malloc(size_t size);
*/
extern void k_free(void *ptr);
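A sketch of heap allocation with the mandatory NULL check; the helper name is illustrative:

void *make_scratch_buffer(size_t size)
{
        void *buf = k_malloc(size);

        if (buf == NULL) {
                return NULL;            /* heap memory pool exhausted */
        }

        /* ... caller uses the buffer and must eventually call k_free(buf) ... */
        return buf;
}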
/**
* @} end defgroup heap_apis
*/
/*
* legacy.h must be before arch/cpu.h to allow the ioapic/loapic drivers to
* hook into the device subsystem, which itself uses nanokernel semaphores,


@ -29,13 +29,6 @@
extern "C" {
#endif
/**
* @brief Ring Buffer APIs
* @defgroup nanokernel_ringbuffer Ring Bufer
* @ingroup nanokernel_services
* @{
*/
#define SIZE32_OF(x) (sizeof((x))/sizeof(uint32_t))
/**
@ -55,6 +48,12 @@ struct ring_buf {
#endif
};
/**
* @defgroup ring_buffer_apis Ring Buffer APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Declare a power-of-two sized ring buffer
*