tracing: add empty test macros
Add support for the new tracing macros in the test backend. Move the header from the sample into the subsystem and make it available for general testing with any application.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
parent 7a646b3f8e
commit 270be80caa
5 changed files with 895 additions and 170 deletions
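The change follows a two-part pattern: in the relocated tracing_test.h, each kernel-side sys_port_trace_* hook is either defined to nothing (the "empty test macros" of the title) or forwarded to a plain sys_trace_* function, and the new tracing_string_format_test.c implements those functions by emitting printf-style records through TRACING_STRING(). A minimal sketch of that pattern, using a hypothetical k_foo hook rather than any hook actually present in this commit:

/* Illustrative only -- "struct k_foo" and sys_trace_k_foo_bar() are
 * hypothetical names, not part of this commit.
 *
 * In tracing_test.h: forward the kernel hook to a function so the call
 * can be observed by a test.
 */
#define sys_port_trace_k_foo_bar(foo) sys_trace_k_foo_bar(foo)
void sys_trace_k_foo_bar(struct k_foo *foo);

/* In tracing_string_format_test.c: emit a string record through the
 * tracing format layer.
 */
void sys_trace_k_foo_bar(struct k_foo *foo)
{
        TRACING_STRING("%s: %p\n", __func__, foo);
}

Hooks that carry no useful information for the test backend expand to nothing, so the kernel still builds with tracing enabled but emits no record for them.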
@@ -10,9 +10,3 @@ find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(tracing_tests)

target_sources(app PRIVATE src/main.c)

target_sources_ifdef(
  CONFIG_TRACING_TEST
  app PRIVATE
  src/tracing_string_format_test.c
)
@@ -1,130 +0,0 @@
/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr.h>
#include <tracing_test.h>
#include <tracing/tracing_format.h>

void sys_trace_thread_switched_out(void)
{
        struct k_thread *thread;

        thread = k_current_get();
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_thread_switched_in(void)
{
        struct k_thread *thread;

        thread = k_current_get();
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_thread_priority_set(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_thread_create(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_thread_abort(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_thread_suspend(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_thread_resume(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_thread_ready(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_thread_pend(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_thread_info(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_thread_name_set(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_isr_enter(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_isr_exit(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_isr_exit_to_scheduler(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_idle(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_void(unsigned int id)
{
        TRACING_STRING("%s: %d\n", __func__, id);
}

void sys_trace_end_call(unsigned int id)
{
        TRACING_STRING("%s: %d\n", __func__, id);
}

void sys_trace_semaphore_init(struct k_sem *sem)
{
        TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_semaphore_take(struct k_sem *sem)
{
        TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_semaphore_give(struct k_sem *sem)
{
        TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_mutex_init(struct k_mutex *mutex)
{
        TRACING_STRING("%s: %p\n", __func__, mutex);
}

void sys_trace_mutex_lock(struct k_mutex *mutex)
{
        TRACING_STRING("%s: %p\n", __func__, mutex);
}

void sys_trace_mutex_unlock(struct k_mutex *mutex)
{
        TRACING_STRING("%s: %p\n", __func__, mutex);
}
@@ -50,3 +50,7 @@ zephyr_include_directories_ifdef(CONFIG_TRACING include)
add_subdirectory_ifdef(CONFIG_TRACING_CTF ctf)

add_subdirectory_ifdef(CONFIG_SEGGER_SYSTEMVIEW sysview)
zephyr_sources_ifdef(
  CONFIG_TRACING_TEST
  tracing_string_format_test.c
)
@@ -1,44 +1,651 @@
/*
 * Copyright (c) 2019 Intel Corporation
 * Copyright (c) 2021 Intel corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _TRACE_TEST_H
#define _TRACE_TEST_H

#ifndef ZEPHYR_TRACE_TEST_H
#define ZEPHYR_TRACE_TEST_H
#include <kernel.h>
#include <kernel_structs.h>
#include <init.h>

#ifdef __cplusplus
extern "C" {
#endif
#define sys_port_trace_k_thread_foreach_enter() sys_trace_k_thread_foreach_enter(user_cb, user_data)
#define sys_port_trace_k_thread_foreach_exit() sys_trace_k_thread_foreach_exit(user_cb, user_data)
#define sys_port_trace_k_thread_foreach_unlocked_enter() \
        sys_trace_k_thread_foreach_unlocked_enter(user_cb, user_data)
#define sys_port_trace_k_thread_foreach_unlocked_exit() \
        sys_trace_k_thread_foreach_unlocked_exit(user_cb, user_data)
#define sys_port_trace_k_thread_create(new_thread) \
        sys_trace_k_thread_create(new_thread, stack_size, prio)
#define sys_port_trace_k_thread_user_mode_enter() \
        sys_trace_k_thread_user_mode_enter(entry, p1, p2, p3)
#define sys_port_trace_k_thread_heap_assign(thread, heap) \
        sys_trace_k_thread_heap_assign(thread, heap)
#define sys_port_trace_k_thread_join_enter(thread, timeout) \
        sys_trace_k_thread_join_blocking(thread, timeout)
#define sys_port_trace_k_thread_join_blocking(thread, timeout) \
        sys_trace_k_thread_join_blocking(thread, timeout)
#define sys_port_trace_k_thread_join_exit(thread, timeout, ret) \
        sys_trace_k_thread_join_exit(thread, timeout, ret)
#define sys_port_trace_k_thread_sleep_enter(timeout) sys_trace_k_thread_sleep_enter(timeout)
#define sys_port_trace_k_thread_sleep_exit(timeout, ret) sys_trace_k_thread_sleep_exit(timeout, ret)
#define sys_port_trace_k_thread_msleep_enter(ms) sys_trace_k_thread_msleep_enter(ms)
#define sys_port_trace_k_thread_msleep_exit(ms, ret) sys_trace_k_thread_msleep_exit(ms, ret)
#define sys_port_trace_k_thread_usleep_enter(us) sys_trace_k_thread_usleep_enter(us)
#define sys_port_trace_k_thread_usleep_exit(us, ret) sys_trace_k_thread_usleep_exit(us, ret)
#define sys_port_trace_k_thread_busy_wait_enter(usec_to_wait)
#define sys_port_trace_k_thread_busy_wait_exit(usec_to_wait)
#define sys_port_trace_k_thread_yield() sys_trace_k_thread_yield()
#define sys_port_trace_k_thread_wakeup(thread) sys_trace_k_thread_wakeup(thread)
#define sys_port_trace_k_thread_start(thread) sys_trace_k_thread_start(thread)
#define sys_port_trace_k_thread_abort(thread) sys_trace_k_thread_abort(thread)
#define sys_port_trace_k_thread_priority_set(thread) sys_trace_k_thread_priority_set(thread)
#define sys_port_trace_k_thread_suspend_enter(thread) sys_trace_k_thread_suspend(thread)
#define sys_port_trace_k_thread_suspend_exit(thread)
#define sys_port_trace_k_thread_resume_enter(thread) sys_trace_k_thread_resume(thread)

void sys_trace_thread_switched_out(void);
void sys_trace_thread_switched_in(void);
void sys_trace_thread_priority_set(struct k_thread *thread);
void sys_trace_thread_create(struct k_thread *thread);
void sys_trace_thread_abort(struct k_thread *thread);
void sys_trace_thread_suspend(struct k_thread *thread);
void sys_trace_thread_resume(struct k_thread *thread);
void sys_trace_thread_ready(struct k_thread *thread);
void sys_trace_thread_pend(struct k_thread *thread);
void sys_trace_thread_info(struct k_thread *thread);
void sys_trace_thread_name_set(struct k_thread *thread);
#define sys_port_trace_k_thread_sched_lock(...) sys_trace_k_thread_sched_lock()

#define sys_port_trace_k_thread_sched_unlock(...) sys_trace_k_thread_sched_unlock()

#define sys_port_trace_k_thread_name_set(thread, ret) sys_trace_k_thread_name_set(thread, ret)

#define sys_port_trace_k_thread_switched_out() sys_trace_k_thread_switched_out()

#define sys_port_trace_k_thread_switched_in() sys_trace_k_thread_switched_in()

#define sys_port_trace_k_thread_info(thread) sys_trace_k_thread_info(thread)

#define sys_port_trace_k_thread_sched_wakeup(thread) sys_trace_k_thread_sched_wakeup(thread)
#define sys_port_trace_k_thread_sched_abort(thread) sys_trace_k_thread_sched_abort(thread)
#define sys_port_trace_k_thread_sched_priority_set(thread, prio) \
        sys_trace_k_thread_sched_set_priority(thread, prio)
#define sys_port_trace_k_thread_sched_ready(thread) sys_trace_k_thread_sched_ready(thread)
#define sys_port_trace_k_thread_sched_pend(thread) sys_trace_k_thread_sched_pend(thread)
#define sys_port_trace_k_thread_sched_resume(thread) sys_trace_k_thread_sched_resume(thread)
#define sys_port_trace_k_thread_sched_suspend(thread) sys_trace_k_thread_sched_suspend(thread)

#define sys_port_trace_k_work_init(work)
#define sys_port_trace_k_work_submit_to_queue_enter(queue, work)
#define sys_port_trace_k_work_submit_to_queue_exit(queue, work, ret)
#define sys_port_trace_k_work_submit_enter(work)
#define sys_port_trace_k_work_submit_exit(work, ret)
#define sys_port_trace_k_work_flush_enter(work)
#define sys_port_trace_k_work_flush_blocking(work, timeout)
#define sys_port_trace_k_work_flush_exit(work, ret)
#define sys_port_trace_k_work_cancel_enter(work)
#define sys_port_trace_k_work_cancel_exit(work, ret)
#define sys_port_trace_k_work_cancel_sync_enter(work, sync)
#define sys_port_trace_k_work_cancel_sync_blocking(work, sync)
#define sys_port_trace_k_work_cancel_sync_exit(work, sync, ret)

#define sys_port_trace_k_work_queue_start_enter(queue)
#define sys_port_trace_k_work_queue_start_exit(queue)
#define sys_port_trace_k_work_queue_drain_enter(queue)
#define sys_port_trace_k_work_queue_drain_exit(queue, ret)
#define sys_port_trace_k_work_queue_unplug_enter(queue)
#define sys_port_trace_k_work_queue_unplug_exit(queue, ret)

#define sys_port_trace_k_work_delayable_init(dwork)
#define sys_port_trace_k_work_schedule_for_queue_enter(queue, dwork, delay)
#define sys_port_trace_k_work_schedule_for_queue_exit(queue, dwork, delay, ret)
#define sys_port_trace_k_work_schedule_enter(dwork, delay)
#define sys_port_trace_k_work_schedule_exit(dwork, delay, ret)
#define sys_port_trace_k_work_reschedule_for_queue_enter(queue, dwork, delay)
#define sys_port_trace_k_work_reschedule_for_queue_exit(queue, dwork, delay, ret)
#define sys_port_trace_k_work_reschedule_enter(dwork, delay)
#define sys_port_trace_k_work_reschedule_exit(dwork, delay, ret)
#define sys_port_trace_k_work_flush_delayable_enter(dwork, sync)
#define sys_port_trace_k_work_flush_delayable_exit(dwork, sync, ret)
#define sys_port_trace_k_work_cancel_delayable_enter(dwork)
#define sys_port_trace_k_work_cancel_delayable_exit(dwork, ret)
#define sys_port_trace_k_work_cancel_delayable_sync_enter(dwork, sync)
#define sys_port_trace_k_work_cancel_delayable_sync_exit(dwork, sync, ret)

#define sys_port_trace_k_work_poll_init_enter(work)
#define sys_port_trace_k_work_poll_init_exit(work)
#define sys_port_trace_k_work_poll_submit_to_queue_enter(work_q, work, timeout)
#define sys_port_trace_k_work_poll_submit_to_queue_blocking(work_q, work, timeout)
#define sys_port_trace_k_work_poll_submit_to_queue_exit(work_q, work, timeout, ret)
#define sys_port_trace_k_work_poll_submit_enter(work, timeout)
#define sys_port_trace_k_work_poll_submit_exit(work, timeout, ret)
#define sys_port_trace_k_work_poll_cancel_enter(work)
#define sys_port_trace_k_work_poll_cancel_exit(work, ret)

#define sys_port_trace_k_poll_api_event_init(event)
#define sys_port_trace_k_poll_api_poll_enter(events)
#define sys_port_trace_k_poll_api_poll_exit(events, ret)
#define sys_port_trace_k_poll_api_signal_init(signal)
#define sys_port_trace_k_poll_api_signal_reset(signal)
#define sys_port_trace_k_poll_api_signal_check(signal)
#define sys_port_trace_k_poll_api_signal_raise(signal, ret)

#define sys_port_trace_k_sem_init(sem, ret) sys_trace_k_sem_init(sem, ret)
#define sys_port_trace_k_sem_give_enter(sem) sys_trace_k_sem_give_enter(sem)
#define sys_port_trace_k_sem_give_exit(sem)
#define sys_port_trace_k_sem_take_enter(sem, timeout) sys_trace_k_sem_take_enter(sem, timeout)
#define sys_port_trace_k_sem_take_blocking(sem, timeout) sys_trace_k_sem_take_blocking(sem, timeout)
#define sys_port_trace_k_sem_take_exit(sem, timeout, ret) \
        sys_trace_k_sem_take_exit(sem, timeout, ret)
#define sys_port_trace_k_sem_reset(sem) sys_trace_k_sem_reset(sem)

#define sys_port_trace_k_mutex_init(mutex, ret) sys_trace_k_mutex_init(mutex, ret)
#define sys_port_trace_k_mutex_lock_enter(mutex, timeout) \
        sys_trace_k_mutex_lock_enter(mutex, timeout)
#define sys_port_trace_k_mutex_lock_blocking(mutex, timeout) \
        sys_trace_k_mutex_lock_blocking(mutex, timeout)
#define sys_port_trace_k_mutex_lock_exit(mutex, timeout, ret) \
        sys_trace_k_mutex_lock_exit(mutex, timeout, ret)
#define sys_port_trace_k_mutex_unlock_enter(mutex) sys_trace_k_mutex_unlock_enter(mutex)
#define sys_port_trace_k_mutex_unlock_exit(mutex, ret) sys_trace_k_mutex_unlock_exit(mutex, ret)

#define sys_port_trace_k_condvar_init(condvar, ret) sys_trace_k_condvar_init(condvar, ret)
#define sys_port_trace_k_condvar_signal_enter(condvar) sys_trace_k_condvar_signal_enter(condvar)
#define sys_port_trace_k_condvar_signal_blocking(condvar, timeout) \
        sys_trace_k_condvar_signal_blocking(condvar)
#define sys_port_trace_k_condvar_signal_exit(condvar, ret) \
        sys_trace_k_condvar_signal_exit(condvar, ret)
#define sys_port_trace_k_condvar_broadcast_enter(condvar) \
        sys_trace_k_condvar_broadcast_enter(condvar)
#define sys_port_trace_k_condvar_broadcast_exit(condvar, ret) \
        sys_trace_k_condvar_broadcast_exit(condvar, ret)
#define sys_port_trace_k_condvar_wait_enter(condvar) \
        sys_trace_k_condvar_wait_enter(condvar, mutex, timeout)
#define sys_port_trace_k_condvar_wait_exit(condvar, ret) \
        sys_trace_k_condvar_wait_exit(condvar, mutex, timeout, ret)

#define sys_port_trace_k_queue_init(queue) sys_trace_k_queue_init(queue)
#define sys_port_trace_k_queue_cancel_wait(queue) sys_trace_k_queue_cancel_wait(queue)
#define sys_port_trace_k_queue_queue_insert_enter(queue, alloc) \
        sys_trace_k_queue_queue_insert_enter(queue, alloc, data);
#define sys_port_trace_k_queue_queue_insert_blocking(queue, alloc, timeout) \
        sys_trace_k_queue_queue_insert_enter(queue, alloc, data);
#define sys_port_trace_k_queue_queue_insert_exit(queue, alloc, ret) \
        sys_trace_k_queue_queue_insert_exit(queue, alloc, data, ret);
#define sys_port_trace_k_queue_append_enter(queue) sys_trace_k_queue_append_enter(queue, data)
#define sys_port_trace_k_queue_append_exit(queue) sys_trace_k_queue_append_exit(queue, data)
#define sys_port_trace_k_queue_alloc_append_enter(queue) \
        sys_trace_k_queue_alloc_append_enter(queue, data)
#define sys_port_trace_k_queue_alloc_append_exit(queue, ret) \
        sys_trace_k_queue_alloc_append_exit(queue, data, ret)
#define sys_port_trace_k_queue_prepend_enter(queue) sys_trace_k_queue_prepend_enter(queue, data)
#define sys_port_trace_k_queue_prepend_exit(queue) sys_trace_k_queue_prepend_exit(queue, data)
#define sys_port_trace_k_queue_alloc_prepend_enter(queue) \
        sys_trace_k_queue_alloc_prepend_enter(queue, data)
#define sys_port_trace_k_queue_alloc_prepend_exit(queue, ret) \
        sys_trace_k_queue_alloc_prepend_exit(queue, data, ret)
#define sys_port_trace_k_queue_insert_enter(queue) sys_trace_k_queue_insert_enter(queue, prev, data)
#define sys_port_trace_k_queue_insert_blocking(queue, timeout) \
        sys_trace_k_queue_insert_blocking(queue, prev, data)
#define sys_port_trace_k_queue_insert_exit(queue) sys_trace_k_queue_insert_exit(queue, prev, data)
#define sys_port_trace_k_queue_append_list_enter(queue)
#define sys_port_trace_k_queue_append_list_exit(queue, ret) \
        sys_trace_k_queue_append_list_exit(queue, head, tail, ret)
#define sys_port_trace_k_queue_merge_slist_enter(queue) \
        sys_trace_k_queue_merge_slist_enter(queue, list)
#define sys_port_trace_k_queue_merge_slist_exit(queue, ret) \
        sys_trace_k_queue_merge_slist_exit(queue, list, ret)
#define sys_port_trace_k_queue_get_enter(queue, timeout)
#define sys_port_trace_k_queue_get_blocking(queue, timeout) \
        sys_trace_k_queue_get_blocking(queue, timeout)
#define sys_port_trace_k_queue_get_exit(queue, timeout, ret) \
        sys_trace_k_queue_get_exit(queue, timeout, ret)
#define sys_port_trace_k_queue_remove_enter(queue) sys_trace_k_queue_remove_enter(queue, data)
#define sys_port_trace_k_queue_remove_exit(queue, ret) \
        sys_trace_k_queue_remove_exit(queue, data, ret)
#define sys_port_trace_k_queue_unique_append_enter(queue) \
        sys_trace_k_queue_unique_append_enter(queue, data)
#define sys_port_trace_k_queue_unique_append_exit(queue, ret) \
        sys_trace_k_queue_unique_append_exit(queue, data, ret)
#define sys_port_trace_k_queue_peek_head(queue, ret) sys_trace_k_queue_peek_head(queue, ret)
#define sys_port_trace_k_queue_peek_tail(queue, ret) sys_trace_k_queue_peek_tail(queue, ret)

/* FIFO */

#define sys_port_trace_k_fifo_init_enter(fifo) sys_trace_k_fifo_init_enter(fifo)

#define sys_port_trace_k_fifo_init_exit(fifo) sys_trace_k_fifo_init_exit(fifo)

#define sys_port_trace_k_fifo_cancel_wait_enter(fifo) sys_trace_k_fifo_cancel_wait_enter(fifo)

#define sys_port_trace_k_fifo_cancel_wait_exit(fifo) sys_trace_k_fifo_cancel_wait_exit(fifo)

#define sys_port_trace_k_fifo_put_enter(fifo, data) sys_trace_k_fifo_put_enter(fifo, data)

#define sys_port_trace_k_fifo_put_exit(fifo, data) sys_trace_k_fifo_put_exit(fifo, data)

#define sys_port_trace_k_fifo_alloc_put_enter(fifo, data) \
        sys_trace_k_fifo_alloc_put_enter(fifo, data)

#define sys_port_trace_k_fifo_alloc_put_exit(fifo, data, ret) \
        sys_trace_k_fifo_alloc_put_exit(fifo, data, ret)

#define sys_port_trace_k_fifo_put_list_enter(fifo, head, tail) \
        sys_trace_k_fifo_put_list_enter(fifo, head, tail)

#define sys_port_trace_k_fifo_put_list_exit(fifo, head, tail) \
        sys_trace_k_fifo_put_list_exit(fifo, head, tail)

#define sys_port_trace_k_fifo_put_slist_enter(fifo, list) \
        sys_trace_k_fifo_put_slist_enter(fifo, list)

#define sys_port_trace_k_fifo_put_slist_exit(fifo, list) sys_trace_k_fifo_put_slist_exit(fifo, list)

#define sys_port_trace_k_fifo_get_enter(fifo, timeout) sys_trace_k_fifo_get_enter(fifo, timeout)

#define sys_port_trace_k_fifo_get_exit(fifo, timeout, ret) \
        sys_trace_k_fifo_get_exit(fifo, timeout, ret)

#define sys_port_trace_k_fifo_peek_head_enter(fifo) sys_trace_k_fifo_peek_head_enter(fifo)

#define sys_port_trace_k_fifo_peek_head_exit(fifo, ret) sys_trace_k_fifo_peek_head_exit(fifo, ret)

#define sys_port_trace_k_fifo_peek_tail_enter(fifo) sys_trace_k_fifo_peek_tail_enter(fifo)

#define sys_port_trace_k_fifo_peek_tail_exit(fifo, ret) sys_trace_k_fifo_peek_tail_exit(fifo, ret)

/* LIFO */
#define sys_port_trace_k_lifo_init_enter(lifo) sys_trace_k_lifo_init_enter(lifo)

#define sys_port_trace_k_lifo_init_exit(lifo) sys_trace_k_lifo_init_exit(lifo)

#define sys_port_trace_k_lifo_put_enter(lifo, data) sys_trace_k_lifo_put_enter(lifo, data)

#define sys_port_trace_k_lifo_put_exit(lifo, data) sys_trace_k_lifo_put_exit(lifo, data)

#define sys_port_trace_k_lifo_alloc_put_enter(lifo, data) \
        sys_trace_k_lifo_alloc_put_enter(lifo, data)

#define sys_port_trace_k_lifo_alloc_put_exit(lifo, data, ret) \
        sys_trace_k_lifo_alloc_put_exit(lifo, data, ret)

#define sys_port_trace_k_lifo_get_enter(lifo, timeout) sys_trace_k_lifo_get_enter(lifo, timeout)

#define sys_port_trace_k_lifo_get_exit(lifo, timeout, ret) \
        sys_trace_k_lifo_get_exit(lifo, timeout, ret)

/* Stack */
#define sys_port_trace_k_stack_init(stack) sys_trace_k_stack_init(stack, buffer, num_entries)

#define sys_port_trace_k_stack_alloc_init_enter(stack) \
        sys_trace_k_stack_alloc_init_enter(stack, num_entries)

#define sys_port_trace_k_stack_alloc_init_exit(stack, ret) \
        sys_trace_k_stack_alloc_init_exit(stack, num_entries, ret)

#define sys_port_trace_k_stack_cleanup_enter(stack) sys_trace_k_stack_cleanup_enter(stack)

#define sys_port_trace_k_stack_cleanup_exit(stack, ret) sys_trace_k_stack_cleanup_exit(stack, ret)

#define sys_port_trace_k_stack_push_enter(stack) sys_trace_k_stack_push_enter(stack, data)

#define sys_port_trace_k_stack_push_exit(stack, ret) sys_trace_k_stack_push_exit(stack, data, ret)

#define sys_port_trace_k_stack_pop_enter(stack, timeout)

#define sys_port_trace_k_stack_pop_blocking(stack, timeout) \
        sys_trace_k_stack_pop_blocking(stack, data, timeout)

#define sys_port_trace_k_stack_pop_exit(stack, timeout, ret) \
        sys_trace_k_stack_pop_exit(stack, data, timeout, ret)

/* Message Queue */
#define sys_port_trace_k_msgq_init(msgq) sys_trace_k_msgq_init(msgq)

#define sys_port_trace_k_msgq_alloc_init_enter(msgq) \
        sys_trace_k_msgq_alloc_init_enter(msgq, msg_size, max_msgs)

#define sys_port_trace_k_msgq_alloc_init_exit(msgq, ret) \
        sys_trace_k_msgq_alloc_init_exit(msgq, msg_size, max_msgs, ret)

#define sys_port_trace_k_msgq_cleanup_enter(msgq) sys_trace_k_msgq_cleanup_enter(msgq)

#define sys_port_trace_k_msgq_cleanup_exit(msgq, ret) sys_trace_k_msgq_cleanup_exit(msgq, ret)

#define sys_port_trace_k_msgq_put_enter(msgq, timeout) \
        sys_trace_k_msgq_put_enter(msgq, data, timeout)

#define sys_port_trace_k_msgq_put_blocking(msgq, timeout) \
        sys_trace_k_msgq_put_blocking(msgq, data, timeout)
#define sys_port_trace_k_msgq_put_exit(msgq, timeout, ret) \
        sys_trace_k_msgq_put_exit(msgq, data, timeout, ret)
#define sys_port_trace_k_msgq_get_enter(msgq, timeout) \
        sys_trace_k_msgq_get_enter(msgq, data, timeout)
#define sys_port_trace_k_msgq_get_blocking(msgq, timeout) \
        sys_trace_k_msgq_get_blocking(msgq, data, timeout)
#define sys_port_trace_k_msgq_get_exit(msgq, timeout, ret) \
        sys_trace_k_msgq_get_exit(msgq, data, timeout, ret)
#define sys_port_trace_k_msgq_peek(msgq, ret) sys_trace_k_msgq_peek(msgq, data, ret)
#define sys_port_trace_k_msgq_purge(msgq) sys_trace_k_msgq_purge(msgq)

#define sys_port_trace_k_mbox_init(mbox) sys_trace_k_mbox_init(mbox)
#define sys_port_trace_k_mbox_message_put_enter(mbox, timeout) \
        sys_trace_k_mbox_message_put_enter(mbox, tx_msg, timeout)
#define sys_port_trace_k_mbox_message_put_blocking(mbox, timeout) \
        sys_trace_k_mbox_message_put_blocking(mbox, tx_msg, timeout)
#define sys_port_trace_k_mbox_message_put_exit(mbox, timeout, ret) \
        sys_trace_k_mbox_message_put_exit(mbox, tx_msg, timeout, ret)
#define sys_port_trace_k_mbox_put_enter(mbox, timeout) \
        sys_trace_k_mbox_put_enter(mbox, tx_msg, timeout)
#define sys_port_trace_k_mbox_put_exit(mbox, timeout, ret) \
        sys_trace_k_mbox_put_exit(mbox, tx_msg, timeout, ret)
#define sys_port_trace_k_mbox_async_put_enter(mbox, sem) sys_trace_k_mbox_async_put_enter(mbox, sem)
#define sys_port_trace_k_mbox_async_put_exit(mbox, sem) sys_trace_k_mbox_async_put_exit(mbox, sem)
#define sys_port_trace_k_mbox_get_enter(mbox, timeout) \
        sys_trace_k_mbox_get_enter(mbox, rx_msg, buffer, timeout)
#define sys_port_trace_k_mbox_get_blocking(mbox, timeout) \
        sys_trace_k_mbox_get_blocking(mbox, rx_msg, buffer, timeout)
#define sys_port_trace_k_mbox_get_exit(mbox, timeout, ret) \
        sys_trace_k_mbox_get_exit(mbox, rx_msg, buffer, timeout, ret)
#define sys_port_trace_k_mbox_data_get(rx_msg) sys_trace_k_mbox_data_get(mbox, rx_msg, buffer)

#define sys_port_trace_k_pipe_init(pipe) sys_trace_k_pipe_init(pipe, buffer, size)
#define sys_port_trace_k_pipe_cleanup_enter(pipe) sys_trace_k_pipe_cleanup_enter(pipe)
#define sys_port_trace_k_pipe_cleanup_exit(pipe, ret) sys_trace_k_pipe_cleanup_exit(pipe, ret)
#define sys_port_trace_k_pipe_alloc_init_enter(pipe) sys_trace_k_pipe_alloc_init_enter(pipe, size)
#define sys_port_trace_k_pipe_alloc_init_exit(pipe, ret) \
        sys_trace_k_pipe_alloc_init_exit(pipe, size, ret)
#define sys_port_trace_k_pipe_put_enter(pipe, timeout) \
        sys_trace_k_pipe_put_enter(pipe, data, bytes_to_write, bytes_written, min_xfer, timeout)
#define sys_port_trace_k_pipe_put_blocking(pipe, timeout) \
        sys_trace_k_pipe_put_blocking(pipe, data, bytes_to_write, bytes_written, min_xfer, timeout)
#define sys_port_trace_k_pipe_put_exit(pipe, timeout, ret) \
        sys_trace_k_pipe_put_exit(pipe, data, bytes_to_write, bytes_written, min_xfer, timeout, ret)
#define sys_port_trace_k_pipe_get_enter(pipe, timeout) \
        sys_trace_k_pipe_get_enter(pipe, data, bytes_to_read, bytes_read, min_xfer, timeout)
#define sys_port_trace_k_pipe_get_blocking(pipe, timeout) \
        sys_trace_k_pipe_get_blocking(pipe, data, bytes_to_read, bytes_read, min_xfer, timeout)
#define sys_port_trace_k_pipe_get_exit(pipe, timeout, ret) \
        sys_trace_k_pipe_get_exit(pipe, data, bytes_to_read, bytes_read, min_xfer, timeout, ret)
#define sys_port_trace_k_pipe_block_put_enter(pipe, sem) \
        sys_trace_k_pipe_block_put_enter(pipe, block, bytes_to_write, sem)
#define sys_port_trace_k_pipe_block_put_exit(pipe, sem) \
        sys_trace_k_pipe_block_put_exit(pipe, block, bytes_to_write, sem)

#define sys_port_trace_k_heap_init(h) sys_trace_k_heap_init(h, mem, bytes)
#define sys_port_trace_k_heap_aligned_alloc_enter(h, timeout) \
        sys_trace_k_heap_aligned_alloc_enter(h, bytes, timeout)
#define sys_port_trace_k_heap_aligned_alloc_blocking(h, timeout) \
        sys_trace_k_heap_aligned_alloc_blocking(h, bytes, timeout)
#define sys_port_trace_k_heap_aligned_alloc_exit(h, timeout, ret) \
        sys_trace_k_heap_aligned_alloc_exit(h, bytes, timeout, ret)
#define sys_port_trace_k_heap_alloc_enter(h, timeout) \
        sys_trace_k_heap_alloc_enter(h, bytes, timeout)
#define sys_port_trace_k_heap_alloc_exit(h, timeout, ret) \
        sys_trace_k_heap_alloc_exit(h, bytes, timeout, ret)
#define sys_port_trace_k_heap_free(h) sys_trace_k_heap_free(h, mem)
#define sys_port_trace_k_heap_sys_k_aligned_alloc_enter(heap) \
        sys_trace_k_heap_sys_k_aligned_alloc_enter(heap, align, size)
#define sys_port_trace_k_heap_sys_k_aligned_alloc_exit(heap, ret) \
        sys_trace_k_heap_sys_k_aligned_alloc_exit(heap, align, size, ret)
#define sys_port_trace_k_heap_sys_k_malloc_enter(heap) \
        sys_trace_k_heap_sys_k_malloc_enter(heap, size)
#define sys_port_trace_k_heap_sys_k_malloc_exit(heap, ret) \
        sys_trace_k_heap_sys_k_malloc_exit(heap, size, ret)
#define sys_port_trace_k_heap_sys_k_free_enter(heap) sys_trace_k_heap_sys_k_free_enter(heap)
#define sys_port_trace_k_heap_sys_k_free_exit(heap) sys_trace_k_heap_sys_k_free_exit(heap)
#define sys_port_trace_k_heap_sys_k_calloc_enter(heap) \
        sys_trace_k_heap_sys_k_calloc_enter(heap, nmemb, size)
#define sys_port_trace_k_heap_sys_k_calloc_exit(heap, ret) \
        sys_trace_k_heap_sys_k_calloc_exit(heap, nmemb, size, ret)

#define sys_port_trace_k_mem_slab_init(slab, rc) \
        sys_trace_k_mem_slab_init(slab, buffer, block_size, num_blocks, rc)
#define sys_port_trace_k_mem_slab_alloc_enter(slab, timeout) \
        sys_trace_k_mem_slab_alloc_enter(slab, mem, timeout)
#define sys_port_trace_k_mem_slab_alloc_blocking(slab, timeout) \
        sys_trace_k_mem_slab_alloc_blocking(slab, mem, timeout)
#define sys_port_trace_k_mem_slab_alloc_exit(slab, timeout, ret) \
        sys_trace_k_mem_slab_alloc_exit(slab, mem, timeout, ret)
#define sys_port_trace_k_mem_slab_free_enter(slab)
#define sys_port_trace_k_mem_slab_free_exit(slab) sys_trace_k_mem_slab_free_exit(slab, mem)

#define sys_port_trace_k_timer_init(timer) sys_trace_k_timer_init(timer, expiry_fn, stop_fn)
#define sys_port_trace_k_timer_start(timer) sys_trace_k_timer_start(timer, duration, period)
#define sys_port_trace_k_timer_stop(timer) sys_trace_k_timer_stop(timer)
#define sys_port_trace_k_timer_status_sync_enter(timer)
#define sys_port_trace_k_timer_status_sync_blocking(timer, timeout) \
        sys_trace_k_timer_status_sync_blocking(timer)
#define sys_port_trace_k_timer_status_sync_exit(timer, result) \
        sys_trace_k_timer_status_sync_exit(timer, result)
#define sys_port_trace_k_thread_abort_exit(thread) sys_trace_k_thread_abort_exit(thread)

#define sys_port_trace_k_thread_abort_enter(thread) sys_trace_k_thread_abort_enter(thread)

#define sys_port_trace_k_thread_resume_exit(thread) sys_trace_k_thread_resume_exit(thread)

#define sys_port_trace_syscall_enter() sys_trace_syscall_enter()
#define sys_port_trace_syscall_exit() sys_trace_syscall_exit()

void sys_trace_syscall_enter(void);
void sys_trace_syscall_exit(void);
void sys_trace_idle(void);
void sys_trace_isr_enter(void);
void sys_trace_isr_exit(void);
void sys_trace_isr_exit_to_scheduler(void);
void sys_trace_idle(void);
void sys_trace_void(unsigned int id);
void sys_trace_end_call(unsigned int id);
void sys_trace_semaphore_init(struct k_sem *sem);
void sys_trace_semaphore_take(struct k_sem *sem);
void sys_trace_semaphore_give(struct k_sem *sem);
void sys_trace_mutex_init(struct k_mutex *mutex);
void sys_trace_mutex_lock(struct k_mutex *mutex);
void sys_trace_mutex_unlock(struct k_mutex *mutex);
#ifdef __cplusplus
}
#endif

#endif
void sys_trace_k_thread_abort_exit(struct k_thread *thread);
void sys_trace_k_thread_abort_enter(struct k_thread *thread);
void sys_trace_k_thread_resume_exit(struct k_thread *thread);
void sys_trace_k_thread_sched_wakeup(struct k_thread *thread);
void sys_trace_k_thread_sched_abort(struct k_thread *thread);
void sys_trace_k_thread_sched_set_priority(struct k_thread *thread, int prio);
void sys_trace_k_thread_sched_ready(struct k_thread *thread);
void sys_trace_k_thread_sched_pend(struct k_thread *thread);
void sys_trace_k_thread_sched_resume(struct k_thread *thread);
void sys_trace_k_thread_sched_suspend(struct k_thread *thread);

void sys_trace_k_thread_foreach_enter(k_thread_user_cb_t user_cb, void *user_data);
void sys_trace_k_thread_foreach_exit(k_thread_user_cb_t user_cb, void *user_data);
void sys_trace_k_thread_foreach_unlocked_enter(k_thread_user_cb_t user_cb, void *user_data);
void sys_trace_k_thread_foreach_unlocked_exit(k_thread_user_cb_t user_cb, void *user_data);
void sys_trace_k_thread_create(struct k_thread *new_thread, size_t stack_size, int prio);
void sys_trace_k_thread_user_mode_enter(k_thread_entry_t entry, void *p1, void *p2, void *p3);
void sys_trace_k_thread_heap_assign(struct k_thread *thread, struct k_heap *heap);
void sys_trace_k_thread_join_blocking(struct k_thread *thread, k_timeout_t timeout);
void sys_trace_k_thread_join_exit(struct k_thread *thread, k_timeout_t timeout, int ret);
void sys_trace_k_thread_sleep_enter(k_timeout_t timeout);
void sys_trace_k_thread_sleep_exit(k_timeout_t timeout, int ret);
void sys_trace_k_thread_msleep_enter(int32_t ms);
void sys_trace_k_thread_msleep_exit(int32_t ms, int ret);
void sys_trace_k_thread_usleep_enter(int32_t us);
void sys_trace_k_thread_usleep_exit(int32_t us, int ret);

void sys_trace_k_thread_yield(void);
void sys_trace_k_thread_wakeup(struct k_thread *thread);
void sys_trace_k_thread_abort(struct k_thread *thread);
void sys_trace_k_thread_start(struct k_thread *thread);
void sys_trace_k_thread_priority_set(struct k_thread *thread);
void sys_trace_k_thread_suspend(struct k_thread *thread);
void sys_trace_k_thread_resume(struct k_thread *thread);
void sys_trace_k_thread_sched_lock(void);
void sys_trace_k_thread_sched_unlock(void);
void sys_trace_k_thread_name_set(struct k_thread *thread, int ret);
void sys_trace_k_thread_switched_out(void);
void sys_trace_k_thread_switched_in(void);
void sys_trace_k_thread_ready(struct k_thread *thread);
void sys_trace_k_thread_pend(struct k_thread *thread);
void sys_trace_k_thread_info(struct k_thread *thread);

void sys_trace_k_sem_init(struct k_sem *sem, int ret);
void sys_trace_k_sem_give_enter(struct k_sem *sem);
void sys_trace_k_sem_take_enter(struct k_sem *sem, k_timeout_t timeout);
void sys_trace_k_sem_take_blocking(struct k_sem *sem, k_timeout_t timeout);
void sys_trace_k_sem_take_exit(struct k_sem *sem, k_timeout_t timeout, int ret);
void sys_trace_k_sem_reset(struct k_sem *sem);

void sys_trace_k_mutex_init(struct k_mutex *mutex, int ret);
void sys_trace_k_mutex_lock_enter(struct k_mutex *mutex, k_timeout_t timeout);
void sys_trace_k_mutex_lock_blocking(struct k_mutex *mutex, k_timeout_t timeout);
void sys_trace_k_mutex_lock_exit(struct k_mutex *mutex, k_timeout_t timeout, int ret);
void sys_trace_k_mutex_unlock_enter(struct k_mutex *mutex);
void sys_trace_k_mutex_unlock_exit(struct k_mutex *mutex, int ret);

void sys_trace_k_condvar_init(struct k_condvar *condvar, int ret);
void sys_trace_k_condvar_signal_enter(struct k_condvar *condvar);
void sys_trace_k_condvar_signal_blocking(struct k_condvar *condvar);
void sys_trace_k_condvar_signal_exit(struct k_condvar *condvar, int ret);
void sys_trace_k_condvar_broadcast_enter(struct k_condvar *condvar);
void sys_trace_k_condvar_broadcast_exit(struct k_condvar *condvar, int ret);
void sys_trace_k_condvar_wait_enter(struct k_condvar *condvar, struct k_mutex *mutex,
        k_timeout_t timeout);
void sys_trace_k_condvar_wait_exit(struct k_condvar *condvar, struct k_mutex *mutex,
        k_timeout_t timeout, int ret);

void sys_trace_k_queue_init(struct k_queue *queue);
void sys_trace_k_queue_cancel_wait(struct k_queue *queue);
void sys_trace_k_queue_queue_insert_enter(struct k_queue *queue, bool alloc, void *data);
void sys_trace_k_queue_queue_insert_blocking(struct k_queue *queue, bool alloc, void *data);
void sys_trace_k_queue_queue_insert_exit(struct k_queue *queue, bool alloc, void *data, int ret);
void sys_trace_k_queue_append_enter(struct k_queue *queue, void *data);
void sys_trace_k_queue_append_exit(struct k_queue *queue, void *data);
void sys_trace_k_queue_alloc_append_enter(struct k_queue *queue, void *data);
void sys_trace_k_queue_alloc_append_exit(struct k_queue *queue, void *data, int ret);
void sys_trace_k_queue_prepend_enter(struct k_queue *queue, void *data);
void sys_trace_k_queue_prepend_exit(struct k_queue *queue, void *data);
void sys_trace_k_queue_alloc_prepend_enter(struct k_queue *queue, void *data);
void sys_trace_k_queue_alloc_prepend_exit(struct k_queue *queue, void *data, int ret);
void sys_trace_k_queue_insert_enter(struct k_queue *queue, void *prev, void *data);
void sys_trace_k_queue_insert_exit(struct k_queue *queue, void *prev, void *data);
void sys_trace_k_queue_append_list_exit(struct k_queue *queue, void *head, void *tail, int ret);
void sys_trace_k_queue_merge_slist_enter(struct k_queue *queue, sys_slist_t *list);
void sys_trace_k_queue_merge_slist_exit(struct k_queue *queue, sys_slist_t *list, int ret);
void sys_trace_k_queue_get_blocking(struct k_queue *queue, k_timeout_t timeout);
void sys_trace_k_queue_get_exit(struct k_queue *queue, k_timeout_t timeout, void *ret);
void sys_trace_k_queue_remove_enter(struct k_queue *queue, void *data);
void sys_trace_k_queue_remove_exit(struct k_queue *queue, void *data, bool ret);
void sys_trace_k_queue_unique_append_enter(struct k_queue *queue, void *data);
void sys_trace_k_queue_unique_append_exit(struct k_queue *queue, void *data, bool ret);
void sys_trace_k_queue_peek_head(struct k_queue *queue, void *ret);
void sys_trace_k_queue_peek_tail(struct k_queue *queue, void *ret);

void sys_trace_k_fifo_init_enter(struct k_fifo *fifo);
void sys_trace_k_fifo_init_exit(struct k_fifo *fifo);
void sys_trace_k_fifo_cancel_wait_enter(struct k_fifo *fifo);
void sys_trace_k_fifo_cancel_wait_exit(struct k_fifo *fifo);
void sys_trace_k_fifo_put_enter(struct k_fifo *fifo, void *data);
void sys_trace_k_fifo_put_exit(struct k_fifo *fifo, void *data);
void sys_trace_k_fifo_alloc_put_enter(struct k_fifo *fifo, void *data);
void sys_trace_k_fifo_alloc_put_exit(struct k_fifo *fifo, void *data, int ret);
void sys_trace_k_fifo_put_list_enter(struct k_fifo *fifo, void *head, void *tail);
void sys_trace_k_fifo_put_list_exit(struct k_fifo *fifo, void *head, void *tail);
void sys_trace_k_fifo_put_slist_enter(struct k_fifo *fifo, sys_slist_t *list);
void sys_trace_k_fifo_put_slist_exit(struct k_fifo *fifo, sys_slist_t *list);
void sys_trace_k_fifo_get_enter(struct k_fifo *fifo, k_timeout_t timeout);
void sys_trace_k_fifo_get_exit(struct k_fifo *fifo, k_timeout_t timeout, void *ret);
void sys_trace_k_fifo_peek_head_enter(struct k_fifo *fifo);
void sys_trace_k_fifo_peek_head_exit(struct k_fifo *fifo, void *ret);
void sys_trace_k_fifo_peek_tail_enter(struct k_fifo *fifo);
void sys_trace_k_fifo_peek_tail_exit(struct k_fifo *fifo, void *ret);

void sys_trace_k_lifo_init_enter(struct k_lifo *lifo);
void sys_trace_k_lifo_init_exit(struct k_lifo *lifo);
void sys_trace_k_lifo_put_enter(struct k_lifo *lifo, void *data);
void sys_trace_k_lifo_put_exit(struct k_lifo *lifo, void *data);
void sys_trace_k_lifo_alloc_put_enter(struct k_lifo *lifo, void *data);
void sys_trace_k_lifo_alloc_put_exit(struct k_lifo *lifo, void *data, int ret);
void sys_trace_k_lifo_get_enter(struct k_lifo *lifo, k_timeout_t timeout);
void sys_trace_k_lifo_get_exit(struct k_lifo *lifo, k_timeout_t timeout, void *ret);

void sys_trace_k_stack_init(struct k_stack *stack, stack_data_t *buffer, uint32_t num_entries);
void sys_trace_k_stack_alloc_init_enter(struct k_stack *stack, uint32_t num_entries);
void sys_trace_k_stack_alloc_init_exit(struct k_stack *stack, uint32_t num_entries, int ret);
void sys_trace_k_stack_cleanup_enter(struct k_stack *stack);
void sys_trace_k_stack_cleanup_exit(struct k_stack *stack, int ret);
void sys_trace_k_stack_push_enter(struct k_stack *stack, stack_data_t data);
void sys_trace_k_stack_push_exit(struct k_stack *stack, stack_data_t data, int ret);
void sys_trace_k_stack_pop_blocking(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout);
void sys_trace_k_stack_pop_exit(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout,
        int ret);

void sys_trace_k_mbox_init(struct k_mbox *mbox);
void sys_trace_k_mbox_message_put_enter(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
        k_timeout_t timeout);
void sys_trace_k_mbox_message_put_blocking(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
        k_timeout_t timeout);
void sys_trace_k_mbox_message_put_exit(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
        k_timeout_t timeout, int ret);
void sys_trace_k_mbox_put_enter(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
        k_timeout_t timeout);
void sys_trace_k_mbox_put_exit(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout,
        int ret);
void sys_trace_k_mbox_async_put_enter(struct k_mbox *mbox, struct k_sem *sem);
void sys_trace_k_mbox_async_put_exit(struct k_mbox *mbox, struct k_sem *sem);
void sys_trace_k_mbox_get_enter(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
        k_timeout_t timeout);
void sys_trace_k_mbox_get_blocking(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
        k_timeout_t timeout);
void sys_trace_k_mbox_get_exit(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
        k_timeout_t timeout, int ret);
void sys_trace_k_mbox_data_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer);

void sys_trace_k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
void sys_trace_k_pipe_cleanup_enter(struct k_pipe *pipe);
void sys_trace_k_pipe_cleanup_exit(struct k_pipe *pipe, int ret);
void sys_trace_k_pipe_alloc_init_enter(struct k_pipe *pipe, size_t size);
void sys_trace_k_pipe_alloc_init_exit(struct k_pipe *pipe, size_t size, int ret);
void sys_trace_k_pipe_put_enter(struct k_pipe *pipe, void *data, size_t bytes_to_write,
        size_t *bytes_written, size_t min_xfer, k_timeout_t timeout);
void sys_trace_k_pipe_put_blocking(struct k_pipe *pipe, void *data, size_t bytes_to_write,
        size_t *bytes_written, size_t min_xfer, k_timeout_t timeout);
void sys_trace_k_pipe_put_exit(struct k_pipe *pipe, void *data, size_t bytes_to_write,
        size_t *bytes_written, size_t min_xfer, k_timeout_t timeout,
        int ret);
void sys_trace_k_pipe_get_enter(struct k_pipe *pipe, void *data, size_t bytes_to_read,
        size_t *bytes_read, size_t min_xfer, k_timeout_t timeout);
void sys_trace_k_pipe_get_blocking(struct k_pipe *pipe, void *data, size_t bytes_to_read,
        size_t *bytes_read, size_t min_xfer, k_timeout_t timeout);
void sys_trace_k_pipe_get_exit(struct k_pipe *pipe, void *data, size_t bytes_to_read,
        size_t *bytes_read, size_t min_xfer, k_timeout_t timeout, int ret);
void sys_trace_k_pipe_block_put_enter(struct k_pipe *pipe, struct k_mem_block *block, size_t size,
        struct k_sem *sem);
void sys_trace_k_pipe_block_put_exit(struct k_pipe *pipe, struct k_mem_block *block, size_t size,
        struct k_sem *sem);

void sys_trace_k_msgq_init(struct k_msgq *msgq);
void sys_trace_k_msgq_alloc_init_enter(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs);
void sys_trace_k_msgq_alloc_init_exit(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs,
        int ret);
void sys_trace_k_msgq_cleanup_enter(struct k_msgq *msgq);
void sys_trace_k_msgq_cleanup_exit(struct k_msgq *msgq, int ret);
void sys_trace_k_msgq_put_enter(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
void sys_trace_k_msgq_put_blocking(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
void sys_trace_k_msgq_put_exit(struct k_msgq *msgq, const void *data, k_timeout_t timeout, int ret);
void sys_trace_k_msgq_get_enter(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
void sys_trace_k_msgq_get_blocking(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
void sys_trace_k_msgq_get_exit(struct k_msgq *msgq, const void *data, k_timeout_t timeout, int ret);
void sys_trace_k_msgq_peek(struct k_msgq *msgq, void *data, int ret);
void sys_trace_k_msgq_purge(struct k_msgq *msgq);

void sys_trace_k_heap_init(struct k_heap *h, void *mem, size_t bytes);
void sys_trace_k_heap_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout);
void sys_trace_k_heap_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout, void *ret);
void sys_trace_k_heap_aligned_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout);
void sys_trace_k_heap_aligned_alloc_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout);
void sys_trace_k_heap_aligned_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout,
        void *ret);
void sys_trace_k_heap_free(struct k_heap *h, void *mem);
void sys_trace_k_heap_sys_k_aligned_alloc_enter(struct k_heap *h, size_t align, size_t size);
void sys_trace_k_heap_sys_k_aligned_alloc_exit(struct k_heap *h, size_t align, size_t size,
        void *ret);
void sys_trace_k_heap_sys_k_malloc_enter(struct k_heap *h, size_t size);
void sys_trace_k_heap_sys_k_malloc_exit(struct k_heap *h, size_t size, void *ret);
void sys_trace_k_heap_sys_k_free_enter(struct k_heap *h);
void sys_trace_k_heap_sys_k_free_exit(struct k_heap *h);
void sys_trace_k_heap_sys_k_calloc_enter(struct k_heap *h, size_t nmemb, size_t size);
void sys_trace_k_heap_sys_k_calloc_exit(struct k_heap *h, size_t nmemb, size_t size, void *ret);

void sys_trace_k_mem_slab_init(struct k_mem_slab *slab, void *buffer, size_t block_size,
        uint32_t num_blocks, int ret);
void sys_trace_k_mem_slab_alloc_enter(struct k_mem_slab *slab, void **mem, k_timeout_t timeout);
void sys_trace_k_mem_slab_alloc_blocking(struct k_mem_slab *slab, void **mem, k_timeout_t timeout);
void sys_trace_k_mem_slab_alloc_exit(struct k_mem_slab *slab, void **mem, k_timeout_t timeout,
        int ret);
void sys_trace_k_mem_slab_free_exit(struct k_mem_slab *slab, void **mem);

void sys_trace_k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn,
        k_timer_expiry_t stop_fn);
void sys_trace_k_timer_start(struct k_timer *timer, k_timeout_t duration, k_timeout_t period);
void sys_trace_k_timer_stop(struct k_timer *timer);
void sys_trace_k_timer_status_sync_blocking(struct k_timer *timer);
void sys_trace_k_timer_status_sync_exit(struct k_timer *timer, uint32_t result);

#endif /* ZEPHYR_TRACE_TEST_H */
subsys/tracing/tracing_string_format_test.c (new file, 250 lines)
@@ -0,0 +1,250 @@
/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr.h>
#include <tracing_test.h>
#include <tracing/tracing_format.h>

void sys_trace_k_thread_switched_out(void)
{
        struct k_thread *thread;

        thread = k_current_get();
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_switched_in(void)
{
        struct k_thread *thread;

        thread = k_current_get();
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_priority_set(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_create(struct k_thread *thread, size_t stack_size,
        int prio)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_start(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_abort(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_suspend(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_resume(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_ready(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_ready(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_pend(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_abort(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_resume(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_suspend(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sleep_enter(k_timeout_t timeout)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_sleep_exit(k_timeout_t timeout, int ret)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_busy_wait_enter(uint32_t usec_to_wait)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_busy_wait_exit(uint32_t usec_to_wait)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_abort_enter(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_abort_exit(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_yield(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_thread_yield(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_wakeup(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_pend(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_info(struct k_thread *thread)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_name_set(struct k_thread *thread, int ret)
{
        TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_lock(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_port_trace_k_thread_sched_unlock(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_isr_enter(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_isr_exit(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_isr_exit_to_scheduler(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_idle(void)
{
        TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_sem_init(struct k_sem *sem, int ret)
{
        TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_k_sem_give_enter(struct k_sem *sem)
{
        TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_k_sem_take_enter(struct k_sem *sem, k_timeout_t timeout)
{
        TRACING_STRING("%s: %p, timeout: %d\n", __func__, sem, timeout);
}

void sys_trace_k_sem_take_exit(struct k_sem *sem, k_timeout_t timeout, int ret)
{
        TRACING_STRING("%s: %p, timeout: %d\n", __func__, sem, timeout);
}

void sys_trace_k_sem_take_blocking(struct k_sem *sem, k_timeout_t timeout)
{
        TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_k_mutex_init(struct k_mutex *mutex, int ret)
{
        TRACING_STRING("%s: %p, returns %d\n", __func__, mutex, ret);
}

void sys_trace_k_mutex_lock_enter(struct k_mutex *mutex, k_timeout_t timeout)
{
        TRACING_STRING("%s: %p, timeout: %d\n", __func__, mutex, timeout);
}

void sys_trace_k_mutex_lock_exit(struct k_mutex *mutex, k_timeout_t timeout, int ret)
{
        TRACING_STRING("%s: %p, timeout: %d, returns: %d\n", __func__, mutex, timeout, ret);
}

void sys_trace_k_mutex_lock_blocking(struct k_mutex *mutex, k_timeout_t timeout)
{
        TRACING_STRING("%s: %p, timeout: %d\n", __func__, mutex, timeout);
}

void sys_trace_k_mutex_unlock_enter(struct k_mutex *mutex)
{
        TRACING_STRING("%s: %p\n", __func__, mutex);
}

void sys_trace_k_mutex_unlock_exit(struct k_mutex *mutex, int ret)
{
        TRACING_STRING("%s: %p, return: %d\n", __func__, mutex, ret);
}

void sys_trace_k_thread_sched_set_priority(struct k_thread *thread, int prio)
{
        TRACING_STRING("%s: %p, priority: %d\n", __func__, thread, prio);
}

void sys_trace_k_timer_start(struct k_timer *timer, k_timeout_t duration,
        k_timeout_t period)
{
        TRACING_STRING("%s: %p, duration: %d, period: %d\n", __func__, timer, duration, period);
}

void sys_trace_k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn,
        k_timer_expiry_t stop_fn)
{
        TRACING_STRING("%s: %p\n", __func__, timer);
}
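As a usage sketch, not taken from the commit: with the test backend compiled in, ordinary kernel calls from any application or test image route through the hooks above, so a semaphore round-trip produces sys_trace_k_sem_* string records that a test can check. The kernel APIs and Kconfig symbols below are standard Zephyr; the snippet itself is only illustrative.

/* Illustrative application code; assumes CONFIG_TRACING=y and
 * CONFIG_TRACING_TEST=y so the string-format test backend is built in.
 */
#include <zephyr.h>

K_SEM_DEFINE(demo_sem, 0, 1);

void demo(void)
{
        /* Routed through sys_port_trace_k_sem_give_enter() ->
         * sys_trace_k_sem_give_enter(), which emits the function name
         * and the semaphore address.
         */
        k_sem_give(&demo_sem);

        /* Routed through the k_sem_take enter/exit hooks. */
        k_sem_take(&demo_sem, K_NO_WAIT);
}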