unified: Add object tracing support for kernel objects
Defines an object tracing list for each kernel object type that supports
object tracing, and ensures that both statically and dynamically defined
objects are added to the appropriate list. Each static kernel object is
grouped together with the other static objects of the same type, and the
initialization function for each kernel type is revised (or created, if
needed) so that each static object is added to the object tracing list
for its associated type.

Note 1: Threads are handled a bit differently than other kernel object
types. A statically defined thread is added to the thread list when the
thread is started, not when the kernel initializes. Also, a thread is
removed from the thread list when it terminates or aborts, unlike other
types of kernel objects, which are never removed from an object tracing
list. (Such support would require the creation of APIs to "uninitialize"
the kernel object.)

Note 2: The list head variables for all kernel object types are now
explicitly defined. However, the list head variable for the ring buffer
type continues to be implicitly defined for the time being, since it
isn't considered a core kernel object type.

Change-Id: Ie24d41023e05b3598dc6b344e6871a9692bba02d
Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
This commit is contained in:
parent 3f5c74c922
commit e7d2cc216d

17 changed files with 440 additions and 142 deletions
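The same three-step pattern repeats for every object type in the diff below: the *_DEFINE() macro places each static object in a dedicated linker section, the linker script is assumed to publish start/end symbols bracketing that section, and a SYS_INIT() hook walks the section at boot and threads each object onto its type's tracing list. A minimal sketch for timers, assembled from pieces of this commit (my_timer is an illustrative name; the linker-symbol convention is an assumption of this sketch):

/* K_TIMER_DEFINE(my_timer) expands to a placement in the _k_timer section */
struct k_timer my_timer
	__in_section(_k_timer, static, my_timer) =
	K_TIMER_INITIALIZER(my_timer);

/* the linker script is assumed to bracket that section with these symbols */
extern struct k_timer _k_timer_list_start[];
extern struct k_timer _k_timer_list_end[];

/* boot-time hook: add every statically defined timer to the tracing list */
static int init_timer_module(struct device *dev)
{
	ARG_UNUSED(dev);

	struct k_timer *timer;

	for (timer = _k_timer_list_start; timer < _k_timer_list_end; timer++) {
		SYS_TRACING_OBJ_INIT(k_timer, timer);
	}
	return 0;
}
SYS_INIT(init_timer_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);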
@@ -287,7 +287,7 @@ struct _static_thread_data {
 			 prio, options, delay) \
 	char __noinit __stack _k_thread_obj_##name[stack_size]; \
 	struct _static_thread_data _k_thread_data_##name __aligned(4) \
-		__in_section(_k_task_list, private, task) = \
+		__in_section(_static_thread_data, static, name) = \
 	_THREAD_INITIALIZER(_k_thread_obj_##name, stack_size, \
 			    entry, p1, p2, p3, prio, options, delay, \
 			    NULL, 0); \
@@ -501,7 +501,9 @@ struct k_timer {
  * @param name Name of the timer variable.
  */
 #define K_TIMER_DEFINE(name) \
-	struct k_timer name = K_TIMER_INITIALIZER(name)
+	struct k_timer name \
+		__in_section(_k_timer, static, name) = \
+		K_TIMER_INITIALIZER(name)
 
 /**
  * @brief Initialize a timer.
@@ -774,7 +776,9 @@ extern void *k_fifo_get(struct k_fifo *fifo, int32_t timeout);
  * @param name Name of the FIFO variable.
  */
 #define K_FIFO_DEFINE(name) \
-	struct k_fifo name = K_FIFO_INITIALIZER(name)
+	struct k_fifo name \
+		__in_section(_k_fifo, static, name) = \
+		K_FIFO_INITIALIZER(name)
 
 /* lifos */
 
@@ -852,7 +856,9 @@ extern void *k_lifo_get(struct k_lifo *lifo, int32_t timeout);
  * @param name Name of the LIFO variable.
  */
 #define K_LIFO_DEFINE(name) \
-	struct k_lifo name = K_LIFO_INITIALIZER(name)
+	struct k_lifo name \
+		__in_section(_k_lifo, static, name) = \
+		K_LIFO_INITIALIZER(name)
 
 /* stacks */
 
@@ -891,7 +897,8 @@ extern int k_stack_pop(struct k_stack *stack, uint32_t *data, int32_t timeout);
 #define K_STACK_DEFINE(name, stack_num_entries) \
 	uint32_t __noinit \
 		_k_stack_buf_##name[stack_num_entries]; \
-	struct k_stack name = \
+	struct k_stack name \
+		__in_section(_k_stack, static, name) = \
 		K_STACK_INITIALIZER(name, _k_stack_buf_##name, \
 				    stack_num_entries)
 
@@ -1146,7 +1153,9 @@ struct k_mutex {
  * @param name Name of the mutex object variable.
  */
 #define K_MUTEX_DEFINE(name) \
-	struct k_mutex name = K_MUTEX_INITIALIZER(name)
+	struct k_mutex name \
+		__in_section(_k_mutex, static, name) = \
+		K_MUTEX_INITIALIZER(name)
 
 /**
  * @brief Initialize a mutex
@@ -1370,7 +1379,8 @@ extern void k_sem_group_reset(struct k_sem *sem_array[]);
  * @param count_limit Highest value the count can take during operation.
  */
 #define K_SEM_DEFINE(name, initial_count, count_limit) \
-	struct k_sem name = \
+	struct k_sem name \
+		__in_section(_k_sem, static, name) = \
 		K_SEM_INITIALIZER(name, initial_count, count_limit)
 
 /* alerts */
 
@@ -1415,7 +1425,7 @@ extern void _alert_deliver(struct k_work *work);
  */
 #define K_ALERT_DEFINE(name, alert_handler, max_num_pending_alerts) \
 	struct k_alert name \
-		__in_section(_k_event_list, alert, name) = \
+		__in_section(_k_alert, static, name) = \
 		K_ALERT_INITIALIZER(name, alert_handler, \
 				    max_num_pending_alerts)
 
@@ -1524,7 +1534,8 @@ struct k_msgq {
 #define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
 	static char __noinit __aligned(q_align) \
 		_k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
-	struct k_msgq q_name = \
+	struct k_msgq q_name \
+		__in_section(_k_msgq, static, q_name) = \
 		K_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
 				   q_msg_size, q_max_msgs)
 
@@ -1677,7 +1688,8 @@ struct k_mbox {
  * @param name Name of the mailbox
  */
 #define K_MBOX_DEFINE(name) \
-	struct k_mbox name = \
+	struct k_mbox name \
+		__in_section(_k_mbox, static, name) = \
 		K_MBOX_INITIALIZER(name) \
 
 /**
@@ -1842,7 +1854,8 @@ struct k_pipe {
 #define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
 	static unsigned char __noinit __aligned(pipe_align) \
 		_k_pipe_buf_##name[pipe_buffer_size]; \
-	struct k_pipe name = \
+	struct k_pipe name \
+		__in_section(_k_pipe, static, name) = \
 		K_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
 
 /**
@@ -1980,7 +1993,7 @@ struct k_mem_slab {
 	char __noinit __aligned(slab_align) \
 		_k_mem_slab_buf_##name[(slab_num_blocks) * (slab_block_size)]; \
 	struct k_mem_slab name \
-		__in_section(_k_mem_map_ptr, private, mem_slab) = \
+		__in_section(_k_mem_slab, static, name) = \
 		K_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
 				       slab_block_size, slab_num_blocks)
 
@@ -2194,7 +2207,7 @@ __asm__(".macro _build_block_set n_max, name\n\t"
  * _build_block_set
  */
 __asm__(".macro _build_mem_pool name, min_size, max_size, n_max\n\t"
-	".pushsection ._k_memory_pool,\"aw\","
+	".pushsection ._k_mem_pool.static.\\name,\"aw\","
 	_SECTION_TYPE_SIGN "progbits\n\t"
 	".globl \\name\n\t"
 	"\\name:\n\t"

@@ -84,7 +84,7 @@ typedef int nano_context_type_t;
 			abort, prio, groups) \
 	char __noinit __stack _k_thread_obj_##name[stack_size]; \
 	struct _static_thread_data _k_thread_data_##name __aligned(4) \
-		__in_section(_k_task_list, private, task) = \
+		__in_section(_static_thread_data, static, name) = \
 	_THREAD_INITIALIZER(_k_thread_obj_##name, stack_size, \
 			    entry, p1, p2, p3, prio, 0, K_FOREVER, \
 			    abort, groups)
@@ -107,7 +107,7 @@ typedef int nano_context_type_t;
 	extern void entry(void); \
 	char __noinit __stack _k_thread_obj_##name[stack_size]; \
 	struct _static_thread_data _k_thread_data_##name __aligned(4) \
-		__in_section(_k_task_list, private, task) = \
+		__in_section(_static_thread_data, static, name) = \
 	_THREAD_INITIALIZER(_k_thread_obj_##name, stack_size, \
 			    entry, NULL, NULL, NULL, \
 			    priority, 0, K_FOREVER, \

@@ -16,13 +16,16 @@
 
 /**
  * @file
- * @brief Kernel object tracing support.
+ * @brief APIs used when examining the objects in a debug tracing list.
  */
 
 #ifndef _OBJECT_TRACING_H_
 #define _OBJECT_TRACING_H_
 
 #ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
 
+#if !defined(CONFIG_KERNEL_V2)
+
 #include <nanokernel.h>
 extern struct nano_fifo *_trace_list_nano_fifo;
 extern struct nano_lifo *_trace_list_nano_lifo;
@@ -31,8 +34,6 @@ extern struct nano_timer *_trace_list_nano_timer;
 extern struct nano_stack *_trace_list_nano_stack;
-extern struct ring_buf *_trace_list_sys_ring_buf;
 
 
-#if !defined(CONFIG_KERNEL_V2)
 #ifdef CONFIG_MICROKERNEL
 #include <microkernel/base_api.h>
 #include <micro_private_types.h>
@@ -47,6 +48,25 @@ extern struct _k_event_struct *_trace_list_micro_event;
 extern struct k_timer *_trace_list_micro_timer;
 extern struct k_task *_trace_list_micro_task;
 #endif /*CONFIG_MICROKERNEL*/
 
+#else
+
+#include <kernel.h>
+extern struct k_timer *_trace_list_k_timer;
+extern struct k_mem_slab *_trace_list_k_mem_slab;
+extern struct k_mem_pool *_trace_list_k_mem_pool;
+extern struct k_sem *_trace_list_k_sem;
+extern struct k_mutex *_trace_list_k_mutex;
+extern struct k_alert *_trace_list_k_alert;
+extern struct k_fifo *_trace_list_k_fifo;
+extern struct k_lifo *_trace_list_k_lifo;
+extern struct k_stack *_trace_list_k_stack;
+extern struct k_msgq *_trace_list_k_msgq;
+extern struct k_mbox *_trace_list_k_mbox;
+extern struct k_pipe *_trace_list_k_pipe;
+
+extern struct ring_buf *_trace_list_sys_ring_buf;
+
+#endif /*CONFIG_KERNEL_V2*/
+
 /**

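With the per-type list heads declared above, a debug tool can walk a tracing list through the __next link that each traced object carries (the link field is visible in the old INIT_KERNEL_TRACING macro in the mutex code further down). A sketch, assuming that field; dump_timers() is an illustrative name, not part of this commit:

/* illustrative: visit every timer currently on the tracing list */
void dump_timers(void)
{
	struct k_timer *timer;

	for (timer = _trace_list_k_timer; timer != NULL; timer = timer->__next) {
		/* examine *timer here */
	}
}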
@@ -16,7 +16,7 @@
 
 /**
  * @file
- * @brief Kernel object tracing common structures.
+ * @brief APIs used to add or remove an object in a debug tracing list.
  */
 
 #ifndef _OBJECT_TRACING_COMMON_H_
@@ -113,9 +113,16 @@
 	} \
 	while (0)
 
+#if !defined(CONFIG_KERNEL_V2)
+
 /*
- * Lists for object tracing.
+ * Define list variables for all object types.
+ *
+ * This is ugly, since these list variables are redefined by every .c file
+ * that drags in this include file (explicitly or implicitly). Fortunately,
+ * the linker doesn't seem to mind seeing these duplicate definitions ...
 */
 
 #include <nanokernel.h>
 struct nano_fifo *_trace_list_nano_fifo;
 struct nano_lifo *_trace_list_nano_lifo;
@@ -124,7 +131,6 @@ struct nano_timer *_trace_list_nano_timer;
 struct nano_stack *_trace_list_nano_stack;
-struct ring_buf *_trace_list_sys_ring_buf;
 
-#if !defined(CONFIG_KERNEL_V2)
 #ifdef CONFIG_MICROKERNEL
 #include <microkernel/base_api.h>
 struct _k_mbox_struct *_trace_list_micro_mbox;
@@ -138,6 +144,21 @@ struct _k_event_struct *_trace_list_micro_event;
 struct k_timer *_trace_list_micro_timer;
 struct k_task *_trace_list_micro_task;
 #endif /*CONFIG_MICROKERNEL*/
 
+#else
+
+/*
+ * Define list variables for object types that don't do it in a .c file.
+ *
+ * This is ugly, since these list variables are redefined by every .c file
+ * that drags in this include file (explicitly or implicitly). Fortunately,
+ * the linker doesn't seem to mind seeing these duplicate definitions ...
+ */
+
+struct ring_buf;
+
+struct ring_buf *_trace_list_sys_ring_buf;
+
+#endif /*CONFIG_KERNEL_V2*/
 
 

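The SYS_TRACING_OBJ_INIT() macro that the new init functions call is defined earlier in this header, outside the hunks shown. The behavior those callers rely on is a simple prepend onto the type's list head; a plausible shape, not the verbatim definition:

/* plausible sketch: prepend obj onto the _trace_list_<name> singly linked list */
#define SYS_TRACING_OBJ_INIT(name, obj) \
	do { \
		(obj)->__next = _trace_list_##name; \
		_trace_list_##name = obj; \
	} while (0)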
@@ -23,9 +23,36 @@
 #include <nano_private.h>
 #include <misc/debug/object_tracing_common.h>
 #include <atomic.h>
+#include <init.h>
 #include <toolchain.h>
 #include <sections.h>
 
+extern struct k_alert _k_alert_list_start[];
+extern struct k_alert _k_alert_list_end[];
+
+struct k_alert *_trace_list_k_alert;
+
+#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
+
+/*
+ * Complete initialization of statically defined alerts.
+ */
+static int init_alert_module(struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	struct k_alert *alert;
+
+	for (alert = _k_alert_list_start; alert < _k_alert_list_end; alert++) {
+		SYS_TRACING_OBJ_INIT(k_alert, alert);
+	}
+	return 0;
+}
+
+SYS_INIT(init_alert_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
+
 void _alert_deliver(struct k_work *work)
 {
 	struct k_alert *alert = CONTAINER_OF(work, struct k_alert, work_item);
@@ -55,7 +82,7 @@ void k_alert_init(struct k_alert *alert, k_alert_handler_t handler,
 	alert->send_count = ATOMIC_INIT(0);
 	alert->work_item = my_work_item;
 	k_sem_init(&alert->sem, 0, max_num_pending_alerts);
-	SYS_TRACING_OBJ_INIT(micro_event, alert);
+	SYS_TRACING_OBJ_INIT(k_alert, alert);
 }
 
 void k_alert_send(struct k_alert *alert)

@@ -29,6 +29,33 @@
 #include <wait_q.h>
 #include <ksched.h>
 #include <misc/slist.h>
+#include <init.h>
 
+extern struct k_fifo _k_fifo_list_start[];
+extern struct k_fifo _k_fifo_list_end[];
+
+struct k_fifo *_trace_list_k_fifo;
+
+#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
+
+/*
+ * Complete initialization of statically defined fifos.
+ */
+static int init_fifo_module(struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	struct k_fifo *fifo;
+
+	for (fifo = _k_fifo_list_start; fifo < _k_fifo_list_end; fifo++) {
+		SYS_TRACING_OBJ_INIT(k_fifo, fifo);
+	}
+	return 0;
+}
+
+SYS_INIT(init_fifo_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
+
 void k_fifo_init(struct k_fifo *fifo)
 {

@@ -26,6 +26,33 @@
 #include <sections.h>
 #include <wait_q.h>
 #include <ksched.h>
+#include <init.h>
 
+extern struct k_lifo _k_lifo_list_start[];
+extern struct k_lifo _k_lifo_list_end[];
+
+struct k_lifo *_trace_list_k_lifo;
+
+#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
+
+/*
+ * Complete initialization of statically defined lifos.
+ */
+static int init_lifo_module(struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	struct k_lifo *lifo;
+
+	for (lifo = _k_lifo_list_start; lifo < _k_lifo_list_end; lifo++) {
+		SYS_TRACING_OBJ_INIT(k_lifo, lifo);
+	}
+	return 0;
+}
+
+SYS_INIT(init_lifo_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
+
 void k_lifo_init(struct k_lifo *lifo)
 {

@@ -43,63 +43,78 @@ static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS];
 /* stack of unused asynchronous message descriptors */
 K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS);
 
-/**
- * @brief Create pool of asynchronous message descriptors.
- *
- * A dummy thread requires minimal initialization, since it never actually
- * gets to execute. The K_DUMMY flag is sufficient to distinguish a dummy
- * thread from a real one. The threads are *not* added to the kernel's list of
- * known threads.
- *
- * Once initialized, the address of each descriptor is added to a stack
- * that governs access to them.
- *
- * @return N/A
+/* allocate an asynchronous message descriptor */
+static inline void _mbox_async_alloc(struct k_mbox_async **async)
+{
+	k_stack_pop(&async_msg_free, (uint32_t *)async, K_FOREVER);
+}
+
+/* free an asynchronous message descriptor */
+static inline void _mbox_async_free(struct k_mbox_async *async)
+{
+	k_stack_push(&async_msg_free, (uint32_t)async);
+}
+
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
+
+extern struct k_mbox _k_mbox_list_start[];
+extern struct k_mbox _k_mbox_list_end[];
+
+struct k_mbox *_trace_list_k_mbox;
+
+#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) || \
+	defined(CONFIG_DEBUG_TRACING_KERNEL_OBJECTS)
+
+/*
+ * Do run-time initialization of mailbox object subsystem.
 */
 static int init_mbox_module(struct device *dev)
 {
 	ARG_UNUSED(dev);
 
+#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
+	/*
+	 * Create pool of asynchronous message descriptors.
+	 *
+	 * A dummy thread requires minimal initialization, since it never gets
+	 * to execute. The K_DUMMY flag is sufficient to distinguish a dummy
+	 * thread from a real one. The threads are *not* added to the kernel's
+	 * list of known threads.
+	 *
+	 * Once initialized, the address of each descriptor is added to a stack
+	 * that governs access to them.
+	 */
+
 	int i;
 
 	for (i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) {
 		async_msg[i].thread.flags = K_DUMMY;
 		k_stack_push(&async_msg_free, (uint32_t)&async_msg[i]);
 	}
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
+
+	/* Complete initialization of statically defined mailboxes. */
+
+#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
+	struct k_mbox *mbox;
+
+	for (mbox = _k_mbox_list_start; mbox < _k_mbox_list_end; mbox++) {
+		SYS_TRACING_OBJ_INIT(k_mbox, mbox);
+	}
+#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
+
 	return 0;
 }
 
 SYS_INIT(init_mbox_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 
-/**
- * @brief Allocate an asynchronous message descriptor.
- *
- * @param async Address of area to hold the descriptor pointer.
- *
- * @return N/A.
- */
-static inline void _mbox_async_alloc(struct k_mbox_async **async)
-{
-	k_stack_pop(&async_msg_free, (uint32_t *)async, K_FOREVER);
-}
-
-/**
- * @brief Free an asynchronous message descriptor.
- *
- * @param Descriptor pointer.
- */
-static inline void _mbox_async_free(struct k_mbox_async *async)
-{
-	k_stack_push(&async_msg_free, (uint32_t)async);
-}
-
-#endif
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS or CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
 
 void k_mbox_init(struct k_mbox *mbox_ptr)
 {
 	sys_dlist_init(&mbox_ptr->tx_msg_queue);
 	sys_dlist_init(&mbox_ptr->rx_msg_queue);
-	SYS_TRACING_OBJ_INIT(mbox, mbox_ptr);
+	SYS_TRACING_OBJ_INIT(k_mbox, mbox_ptr);
 }
 
 /**

@@ -30,8 +30,10 @@
 #define _QUAD_BLOCK_AVAILABLE 0x0F
 #define _QUAD_BLOCK_ALLOCATED 0x0
 
-extern struct k_mem_pool _k_mem_pool_start[];
-extern struct k_mem_pool _k_mem_pool_end[];
+extern struct k_mem_pool _k_mem_pool_list_start[];
+extern struct k_mem_pool _k_mem_pool_list_end[];
+
+struct k_mem_pool *_trace_list_k_mem_pool;
 
 static void init_one_memory_pool(struct k_mem_pool *pool);
 
@@ -50,8 +52,8 @@ static int init_static_pools(struct device *unused)
 
 	/* perform initialization for each memory pool */
 
-	for (pool = _k_mem_pool_start;
-	     pool < _k_mem_pool_end;
+	for (pool = _k_mem_pool_list_start;
+	     pool < _k_mem_pool_list_end;
 	     pool++) {
 		init_one_memory_pool(pool);
 	}
@@ -104,7 +106,7 @@ static void init_one_memory_pool(struct k_mem_pool *pool)
 	 * first quad-block has a NULL memory pointer
 	 */
 	sys_dlist_init(&pool->wait_q);
-	SYS_TRACING_OBJ_INIT(memory_pool, pool);
+	SYS_TRACING_OBJ_INIT(k_mem_pool, pool);
 }
 
 /**

@@ -24,8 +24,10 @@
 #include <ksched.h>
 #include <init.h>
 
-extern struct k_mem_slab _k_mem_map_ptr_start[];
-extern struct k_mem_slab _k_mem_map_ptr_end[];
+extern struct k_mem_slab _k_mem_slab_list_start[];
+extern struct k_mem_slab _k_mem_slab_list_end[];
+
+struct k_mem_slab *_trace_list_k_mem_slab;
 
 /**
  * @brief Initialize kernel memory slab subsystem.
@@ -63,12 +65,17 @@ static int init_mem_slab_module(struct device *dev)
 
 	struct k_mem_slab *slab;
 
-	for (slab = _k_mem_map_ptr_start; slab < _k_mem_map_ptr_end; slab++) {
+	for (slab = _k_mem_slab_list_start;
+	     slab < _k_mem_slab_list_end;
+	     slab++) {
 		create_free_list(slab);
+		SYS_TRACING_OBJ_INIT(k_mem_slab, slab);
 	}
 	return 0;
 }
 
+SYS_INIT(init_mem_slab_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
 void k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
 		     size_t block_size, uint32_t num_blocks)
 {
@@ -78,7 +85,7 @@ void k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
 	slab->num_used = 0;
 	create_free_list(slab);
 	sys_dlist_init(&slab->wait_q);
-	SYS_TRACING_OBJ_INIT(micro_mem_map, slab);
+	SYS_TRACING_OBJ_INIT(k_mem_slab, slab);
 }
 
 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, int32_t timeout)
@@ -132,5 +139,3 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 
 	irq_unlock(key);
 }
-
-SYS_INIT(init_mem_slab_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

@@ -28,6 +28,33 @@
 #include <string.h>
 #include <wait_q.h>
 #include <misc/dlist.h>
+#include <init.h>
 
+extern struct k_msgq _k_msgq_list_start[];
+extern struct k_msgq _k_msgq_list_end[];
+
+struct k_msgq *_trace_list_k_msgq;
+
+#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
+
+/*
+ * Complete initialization of statically defined message queues.
+ */
+static int init_msgq_module(struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	struct k_msgq *msgq;
+
+	for (msgq = _k_msgq_list_start; msgq < _k_msgq_list_end; msgq++) {
+		SYS_TRACING_OBJ_INIT(k_msgq, msgq);
+	}
+	return 0;
+}
+
+SYS_INIT(init_msgq_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
+
 void k_msgq_init(struct k_msgq *q, char *buffer,
 		 size_t msg_size, uint32_t max_msgs)
@@ -40,7 +67,7 @@ void k_msgq_init(struct k_msgq *q, char *buffer,
 	q->write_ptr = buffer;
 	q->used_msgs = 0;
 	sys_dlist_init(&q->wait_q);
-	SYS_TRACING_OBJ_INIT(msgq, q);
+	SYS_TRACING_OBJ_INIT(k_msgq, q);
 }
 
 int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout)

@@ -42,7 +42,9 @@
 #include <sections.h>
 #include <wait_q.h>
 #include <misc/dlist.h>
+#include <misc/debug/object_tracing_common.h>
 #include <errno.h>
+#include <init.h>
 
 #ifdef CONFIG_OBJECT_MONITOR
 #define RECORD_STATE_CHANGE(mutex) \
@@ -63,13 +65,31 @@
 #define INIT_OBJECT_MONITOR(mutex) do { } while ((0))
 #endif
 
+extern struct k_mutex _k_mutex_list_start[];
+extern struct k_mutex _k_mutex_list_end[];
+
+struct k_mutex *_trace_list_k_mutex;
+
 #ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
-#define INIT_KERNEL_TRACING(mutex) do { \
-	mutex->__next = NULL; \
-	} while ((0))
-#else
-#define INIT_KERNEL_TRACING(mutex) do { } while ((0))
-#endif
+
+/*
+ * Complete initialization of statically defined mutexes.
+ */
+static int init_mutex_module(struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	struct k_mutex *mutex;
+
+	for (mutex = _k_mutex_list_start; mutex < _k_mutex_list_end; mutex++) {
+		SYS_TRACING_OBJ_INIT(k_mutex, mutex);
+	}
+	return 0;
+}
+
+SYS_INIT(init_mutex_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
 
 void k_mutex_init(struct k_mutex *mutex)
 {
@@ -81,8 +101,8 @@ void k_mutex_init(struct k_mutex *mutex)
 
 	sys_dlist_init(&mutex->wait_q);
 
+	SYS_TRACING_OBJ_INIT(k_mutex, mutex);
 	INIT_OBJECT_MONITOR(mutex);
-	INIT_KERNEL_TRACING(mutex);
 }
 
 static int new_prio_for_inheritance(int target, int limit)

@@ -44,40 +44,96 @@ struct k_pipe_async {
 	struct k_pipe_desc desc; /* Pipe message descriptor */
 };
 
+extern struct k_pipe _k_pipe_list_start[];
+extern struct k_pipe _k_pipe_list_end[];
+
+struct k_pipe *_trace_list_k_pipe;
+
 #if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
 
 /* Array of asynchronous message descriptors */
 static struct k_pipe_async __noinit async_msg[CONFIG_NUM_PIPE_ASYNC_MSGS];
 
 /* stack of unused asynchronous message descriptors */
 K_STACK_DEFINE(pipe_async_msgs, CONFIG_NUM_PIPE_ASYNC_MSGS);
 
-/**
- * @brief Create pool of asynchronous pipe message descriptors
- *
- * A dummy thread requires minimal initialization since it never gets to
- * execute. The K_DUMMY flag is sufficient to distinguish a dummy thread
- * from a real one. The dummy threads are *not* added to the kernel's list of
- * known threads.
- *
- * Once initialized, the address of each descriptor is added to a stack that
- * governs access to them.
- *
- * @return N/A
+/* Allocate an asynchronous message descriptor */
+static void _pipe_async_alloc(struct k_pipe_async **async)
+{
+	k_stack_pop(&pipe_async_msgs, (uint32_t *)async, K_FOREVER);
+}
+
+/* Free an asynchronous message descriptor */
+static void _pipe_async_free(struct k_pipe_async *async)
+{
+	k_stack_push(&pipe_async_msgs, (uint32_t)async);
+}
+
+/* Finish an asynchronous operation */
+static void _pipe_async_finish(struct k_pipe_async *async_desc)
+{
+	/*
+	 * An asynchronous operation is finished with the scheduler locked
+	 * to prevent the called routines from scheduling a new thread.
+	 */
+
+	k_mem_pool_free(async_desc->desc.block);
+
+	if (async_desc->desc.sem != NULL) {
+		k_sem_give(async_desc->desc.sem);
+	}
+
+	_pipe_async_free(async_desc);
+}
+#endif /* CONFIG_NUM_PIPE_ASYNC_MSGS > 0 */
+
+#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0) || \
+	defined(CONFIG_DEBUG_TRACING_KERNEL_OBJECTS)
+
+/*
+ * Do run-time initialization of pipe object subsystem.
 */
 static int init_pipes_module(struct device *dev)
 {
 	ARG_UNUSED(dev);
 
+#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
+	/*
+	 * Create pool of asynchronous pipe message descriptors.
+	 *
+	 * A dummy thread requires minimal initialization, since it never gets
+	 * to execute. The K_DUMMY flag is sufficient to distinguish a dummy
+	 * thread from a real one. The threads are *not* added to the kernel's
+	 * list of known threads.
+	 *
+	 * Once initialized, the address of each descriptor is added to a stack
+	 * that governs access to them.
+	 */
+
 	for (int i = 0; i < CONFIG_NUM_PIPE_ASYNC_MSGS; i++) {
 		async_msg[i].thread.flags = K_DUMMY;
 		async_msg[i].thread.swap_data = &async_msg[i].desc;
 		k_stack_push(&pipe_async_msgs, (uint32_t)&async_msg[i]);
 	}
+#endif /* CONFIG_NUM_PIPE_ASYNC_MSGS > 0 */
+
+	/* Complete initialization of statically defined mailboxes. */
+
+#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
+	struct k_pipe *pipe;
+
+	for (pipe = _k_pipe_list_start; pipe < _k_pipe_list_end; pipe++) {
+		SYS_TRACING_OBJ_INIT(k_pipe, pipe);
+	}
+#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
+
 	return 0;
 }
 
 SYS_INIT(init_pipes_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 
+#endif /* CONFIG_NUM_PIPE_ASYNC_MSGS or CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
+
 void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
 {
 	pipe->buffer = buffer;
@@ -87,54 +143,9 @@ void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
 	pipe->write_index = 0;
 	sys_dlist_init(&pipe->wait_q.writers);
 	sys_dlist_init(&pipe->wait_q.readers);
-	SYS_TRACING_OBJ_INIT(pipe, pipe);
+	SYS_TRACING_OBJ_INIT(k_pipe, pipe);
 }
 
-/**
- * @brief Allocate an asynchronous message descriptor
- *
- * @param async Address of area to hold the descriptor pointer
- *
- * @return N/A
- */
-static void _pipe_async_alloc(struct k_pipe_async **async)
-{
-	k_stack_pop(&pipe_async_msgs, (uint32_t *)async, K_FOREVER);
-}
-
-/**
- * @brief Free an asynchronous message descriptor
- *
- * @param async Descriptor pointer
- *
- * @return N/A
- */
-static void _pipe_async_free(struct k_pipe_async *async)
-{
-	k_stack_push(&pipe_async_msgs, (uint32_t)async);
-}
-
-/**
- * @brief Finish an asynchronous operation
- *
- * The asynchronous operation is finished with the scheduler locked to prevent
- * the called routines from scheduling a new thread.
- *
- * @return N/A
- */
-
-static void _pipe_async_finish(struct k_pipe_async *async_desc)
-{
-	k_mem_pool_free(async_desc->desc.block);
-
-	if (async_desc->desc.sem != NULL) {
-		k_sem_give(async_desc->desc.sem);
-	}
-
-	_pipe_async_free(async_desc);
-}
-#endif
 
 /**
  * @brief Copy bytes from @a src to @a dest
  *

@@ -34,6 +34,7 @@
 #include <wait_q.h>
 #include <misc/dlist.h>
 #include <ksched.h>
+#include <init.h>
 
 #ifdef CONFIG_SEMAPHORE_GROUPS
 struct _sem_desc {
@@ -48,6 +49,32 @@ struct _sem_thread {
 };
 #endif
 
+extern struct k_sem _k_sem_list_start[];
+extern struct k_sem _k_sem_list_end[];
+
+struct k_sem *_trace_list_k_sem;
+
+#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
+
+/*
+ * Complete initialization of statically defined semaphores.
+ */
+static int init_sem_module(struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	struct k_sem *sem;
+
+	for (sem = _k_sem_list_start; sem < _k_sem_list_end; sem++) {
+		SYS_TRACING_OBJ_INIT(k_sem, sem);
+	}
+	return 0;
+}
+
+SYS_INIT(init_sem_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
+
 void k_sem_init(struct k_sem *sem, unsigned int initial_count,
 		unsigned int limit)
 {
@@ -56,7 +83,7 @@ void k_sem_init(struct k_sem *sem, unsigned int initial_count,
 	sem->count = initial_count;
 	sem->limit = limit;
 	sys_dlist_init(&sem->wait_q);
-	SYS_TRACING_OBJ_INIT(nano_sem, sem);
+	SYS_TRACING_OBJ_INIT(k_sem, sem);
 }
 
 #ifdef CONFIG_SEMAPHORE_GROUPS

@@ -26,6 +26,33 @@
 #include <ksched.h>
 #include <wait_q.h>
 #include <misc/__assert.h>
+#include <init.h>
 
+extern struct k_stack _k_stack_list_start[];
+extern struct k_stack _k_stack_list_end[];
+
+struct k_stack *_trace_list_k_stack;
+
+#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
+
+/*
+ * Complete initialization of statically defined stacks.
+ */
+static int init_stack_module(struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	struct k_stack *stack;
+
+	for (stack = _k_stack_list_start; stack < _k_stack_list_end; stack++) {
+		SYS_TRACING_OBJ_INIT(k_stack, stack);
+	}
+	return 0;
+}
+
+SYS_INIT(init_stack_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
+
 void k_stack_init(struct k_stack *stack, uint32_t *buffer, int num_entries)
 {

@@ -33,12 +33,14 @@
 #include <ksched.h>
 #include <wait_q.h>
 
-extern struct _static_thread_data _k_task_list_start[];
-extern struct _static_thread_data _k_task_list_end[];
+extern struct _static_thread_data _static_thread_data_list_start[];
+extern struct _static_thread_data _static_thread_data_list_end[];
 
-#define _FOREACH_STATIC_THREAD(thread_data) \
-	for (struct _static_thread_data *thread_data = _k_task_list_start; \
-	     thread_data < _k_task_list_end; thread_data++)
+#define _FOREACH_STATIC_THREAD(thread_data) \
+	for (struct _static_thread_data *thread_data = \
+	     _static_thread_data_list_start; \
+	     thread_data < _static_thread_data_list_end; \
+	     thread_data++)
 
 
 /* Legacy API */

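The reworked iterator hides the renamed section symbols behind the same macro interface; per Note 1 above, static threads are only added to the thread tracing list when they are started. An illustrative use of the macro (count_static_threads() is not part of this commit):

/* illustrative: count the _static_thread_data entries in the section */
static int count_static_threads(void)
{
	int count = 0;

	_FOREACH_STATIC_THREAD(thread_data) {
		count++;
	}
	return count;
}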
@@ -16,8 +16,35 @@
 
 #include <kernel.h>
 #include <misc/debug/object_tracing_common.h>
+#include <init.h>
 #include <wait_q.h>
 
+extern struct k_timer _k_timer_list_start[];
+extern struct k_timer _k_timer_list_end[];
+
+struct k_timer *_trace_list_k_timer;
+
+#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
+
+/*
+ * Complete initialization of statically defined timers.
+ */
+static int init_timer_module(struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	struct k_timer *timer;
+
+	for (timer = _k_timer_list_start; timer < _k_timer_list_end; timer++) {
+		SYS_TRACING_OBJ_INIT(k_timer, timer);
+	}
+	return 0;
+}
+
+SYS_INIT(init_timer_module, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
+
 /**
  * @brief Handle expiration of a kernel timer object.
  *
@@ -71,7 +98,7 @@ void k_timer_init(struct k_timer *timer,
 
 	sys_dlist_init(&timer->wait_q);
 	_init_timeout(&timer->timeout, timer_expiration_handler);
-	SYS_TRACING_OBJ_INIT(micro_timer, timer);
+	SYS_TRACING_OBJ_INIT(k_timer, timer);
 
 	timer->_legacy_data = NULL;
 }