tracing: support generic tracing hooks

Define a generic interface and hooks for tracing, replacing
kernel_event_logger and the existing ad-hoc tracing facilities with a
common API.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
commit b6304e66f6 by Anas Nashif, 2018-07-04 08:03:03 -05:00
17 changed files with 206 additions and 25 deletions
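
The hooks are deliberately thin: by default every sys_trace_*() is an empty macro (see include/tracing.h below), so instrumented call sites compile away to nothing, and a tracing backend supplies real definitions in their place. The sketch below shows that shape; the my_recorder_* names are illustrative placeholders, not part of this commit, and in this commit the defaults are unconditional empty #defines, so a backend would substitute its own copies of these definitions.

    /* Hypothetical backend glue: provide real definitions for a few
     * of the hooks in place of the empty #defines in include/tracing.h.
     */
    #define sys_trace_isr_enter()      my_recorder_isr_enter()
    #define sys_trace_isr_exit()       my_recorder_isr_exit()
    #define sys_trace_thread_create(t) my_recorder_thread_create(t)

    void my_recorder_isr_enter(void);
    void my_recorder_isr_exit(void);
    void my_recorder_thread_create(struct k_thread *t);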


@@ -113,9 +113,9 @@ SECTION_FUNC(TEXT, _NanoIdleValClear)
  */
 SECTION_FUNC(TEXT, k_cpu_idle)
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
+#ifdef CONFIG_TRACING
     push {lr}
-    bl _sys_k_event_logger_enter_sleep
+    bl sys_trace_idle
     pop {r0}
     mov lr, r0
 #endif
@@ -157,9 +157,9 @@ SECTION_FUNC(TEXT, k_cpu_idle)
  */
 SECTION_FUNC(TEXT, k_cpu_atomic_idle)
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
+#ifdef CONFIG_TRACING
     push {lr}
-    bl _sys_k_event_logger_enter_sleep
+    bl sys_trace_idle
     pop {r1}
     mov lr, r1
 #endif


@@ -23,7 +23,7 @@
 #include <sw_isr_table.h>
 #include <irq.h>
 #include <kernel_structs.h>
-#include <logging/kernel_event_logger.h>
+#include <tracing.h>

 extern void __reserved(void);

@@ -137,7 +137,6 @@ void _irq_spurious(void *unused)
  * arch/cpu.h and kernel_structs.h; the inline functions typically need to
  * perform operations on _kernel. For now, leave as regular functions, a
  * future iteration will resolve this.
- * We have a similar issue with the k_event_logger functions.
  *
  * See https://github.com/zephyrproject-rtos/zephyr/issues/3056
  */

@@ -178,14 +177,10 @@ void _arch_isr_direct_pm(void)
 }
 #endif

-#if defined(CONFIG_KERNEL_EVENT_LOGGER_SLEEP) || \
-	defined(CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT)
 void _arch_isr_direct_header(void)
 {
-	_sys_k_event_logger_interrupt();
-	_sys_k_event_logger_exit_sleep();
+	sys_trace_isr_enter();
 }
-#endif

 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
 /**


@@ -47,12 +47,8 @@ SECTION_FUNC(TEXT, _isr_wrapper)
     bl read_timer_start_of_isr
 #endif

-#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
-    bl _sys_k_event_logger_interrupt
-#endif
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
-    bl _sys_k_event_logger_exit_sleep
+#ifdef CONFIG_TRACING
+    bl sys_trace_isr_enter
 #endif

 #ifdef CONFIG_SYS_POWER_MANAGEMENT


@@ -43,10 +43,10 @@ GDATA(_kernel)
 SECTION_FUNC(TEXT, __pendsv)

-#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
+#ifdef CONFIG_TRACING
     /* Register the context switch */
     push {lr}
-    bl _sys_k_event_logger_context_switch
+    bl sys_trace_thread_switched_in
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
     pop {r0}
     mov lr, r0

@@ -63,6 +63,7 @@ void _arch_irq_direct_pm(void)
 void _arch_isr_direct_header(void)
 {
 	_int_latency_start();
+	sys_trace_isr_enter();
 	_sys_k_event_logger_interrupt();
 	_sys_k_event_logger_exit_sleep();

@@ -76,6 +77,7 @@ void _arch_isr_direct_footer(int swap)
 {
 	_irq_controller_eoi();
 	_int_latency_stop();
+	sys_trace_isr_exit();
 	--_kernel.nested;

 	/* Call swap if all the following is true:


@@ -224,6 +224,7 @@ void _timer_int_handler(void *unused)
 	extern void _sys_k_event_logger_interrupt(void);
 	_sys_k_event_logger_interrupt();
 #endif
+	sys_trace_isr_enter();

 #ifdef CONFIG_SYS_POWER_MANAGEMENT
 	s32_t numIdleTicks;


@@ -472,6 +472,7 @@ void rtc1_nrf5_isr(void *arg)
 	extern void read_timer_start_of_tick_handler(void);
 	read_timer_start_of_tick_handler();
 #endif
+	sys_trace_isr_enter();

 #ifdef CONFIG_TICKLESS_KERNEL
 	if (!expected_sys_ticks) {

@@ -500,6 +501,7 @@ void rtc1_nrf5_isr(void *arg)
 	extern void read_timer_end_of_tick_handler(void);
 	read_timer_end_of_tick_handler();
 #endif
+	sys_trace_isr_exit();
 }


@@ -107,22 +107,25 @@ extern void _arch_isr_direct_pm(void);
 #define _ARCH_ISR_DIRECT_PM() do { } while (0)
 #endif

-#if defined(CONFIG_KERNEL_EVENT_LOGGER_SLEEP) || \
-	defined(CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT)
 #define _ARCH_ISR_DIRECT_HEADER() _arch_isr_direct_header()
 extern void _arch_isr_direct_header(void);
-#else
-#define _ARCH_ISR_DIRECT_HEADER() do { } while (0)
-#endif

 #define _ARCH_ISR_DIRECT_FOOTER(swap) _arch_isr_direct_footer(swap)

 /* arch/arm/core/exc_exit.S */
 extern void _IntExit(void);

+#ifdef CONFIG_TRACING
+extern void sys_trace_isr_exit_to_scheduler(void);
+#endif
+
 static inline void _arch_isr_direct_footer(int maybe_swap)
 {
 	if (maybe_swap) {
+#ifdef CONFIG_TRACING
+		sys_trace_isr_exit_to_scheduler();
+#endif
 		_IntExit();
 	}
 }
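
With this hunk the ARM direct-ISR path is instrumented unconditionally: _arch_isr_direct_header() emits sys_trace_isr_enter() on entry, and when the handler requests a reschedule the footer emits sys_trace_isr_exit_to_scheduler() just before _IntExit(). A quick illustration of where the hooks fire, using Zephyr's ISR_DIRECT_DECLARE (my_direct_isr and do_work are placeholder names):

    #include <irq.h>

    /* The generated wrapper runs ISR_DIRECT_HEADER() before the body
     * and ISR_DIRECT_FOOTER() after it; with CONFIG_TRACING enabled
     * these expand to the sys_trace_* hooks shown in the hunk above.
     */
    ISR_DIRECT_DECLARE(my_direct_isr)
    {
            do_work();      /* placeholder device handling */
            return 1;       /* non-zero: reschedule on the way out */
    }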


@@ -4967,6 +4967,7 @@ inline void *operator new[](size_t size, void *ptr)
 #endif /* defined(CONFIG_CPLUSPLUS) && defined(__cplusplus) */

+#include <tracing.h>
 #include <syscalls/kernel.h>

 #endif /* !_ASMLANGUAGE */

include/tracing.h (new file, +121 lines)

@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+#ifndef _KERNEL_TRACE_H
+#define _KERNEL_TRACE_H
+
+#include <kernel.h>
+
+/* Below IDs are used with systemview, not final to the zephyr tracing API */
+#define SYS_TRACE_ID_OFFSET (32u)
+
+#define SYS_TRACE_ID_MUTEX_INIT (1u + SYS_TRACE_ID_OFFSET)
+#define SYS_TRACE_ID_MUTEX_UNLOCK (2u + SYS_TRACE_ID_OFFSET)
+#define SYS_TRACE_ID_MUTEX_LOCK (3u + SYS_TRACE_ID_OFFSET)
+#define SYS_TRACE_ID_SEMA_INIT (4u + SYS_TRACE_ID_OFFSET)
+#define SYS_TRACE_ID_SEMA_GIVE (5u + SYS_TRACE_ID_OFFSET)
+#define SYS_TRACE_ID_SEMA_TAKE (6u + SYS_TRACE_ID_OFFSET)
+
+#if CONFIG_TRACING
+void z_sys_trace_idle(void);
+void z_sys_trace_isr_enter(void);
+void z_sys_trace_isr_exit_to_scheduler(void);
+void z_sys_trace_thread_switched_in(void);
+#endif
+
+/**
+ * @brief Called before a thread has been selected to run
+ */
+#define sys_trace_thread_switched_out()
+
+/**
+ * @brief Called after a thread has been selected to run
+ */
+#define sys_trace_thread_switched_in()
+
+/**
+ * @brief Called when setting priority of a thread
+ */
+#define sys_trace_thread_priority_set(thread)
+
+/**
+ * @brief Called when a thread is being created
+ */
+#define sys_trace_thread_create(thread)
+
+/**
+ * @brief Called when a thread is being aborted
+ */
+#define sys_trace_thread_abort(thread)
+
+/**
+ * @brief Called when a thread is being suspended
+ * @param thread Thread structure
+ */
+#define sys_trace_thread_suspend(thread)
+
+/**
+ * @brief Called when a thread is being resumed from suspension
+ * @param thread Thread structure
+ */
+#define sys_trace_thread_resume(thread)
+
+/**
+ * @brief Called when a thread is ready to run
+ * @param thread Thread structure
+ */
+#define sys_trace_thread_ready(thread)
+
+/**
+ * @brief Called when a thread is pending
+ * @param thread Thread structure
+ */
+#define sys_trace_thread_pend(thread)
+
+/**
+ * @brief Provide information about specific thread
+ * @param thread Thread structure
+ */
+#define sys_trace_thread_info(thread)
+
+/**
+ * @brief Called when entering an ISR
+ */
+#define sys_trace_isr_enter()
+
+/**
+ * @brief Called when exiting an ISR
+ */
+#define sys_trace_isr_exit()
+
+/**
+ * @brief Called when exiting an ISR and switching to scheduler
+ */
+#define sys_trace_isr_exit_to_scheduler()
+
+/**
+ * @brief Can be called with any id signifying a new call
+ * @param id ID of the operation that was started
+ */
+#define sys_trace_void(id)
+
+/**
+ * @brief Can be called with any id signifying ending a call
+ * @param id ID of the operation that was completed
+ */
+#define sys_trace_end_call(id)
+
+#define z_sys_trace_idle()
+#define z_sys_trace_isr_enter()
+#define z_sys_trace_isr_exit_to_scheduler()
+#define z_sys_trace_thread_switched_in()
+
+#endif
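
All of the macros above are no-ops by default; only the z_sys_trace_* prototypes under CONFIG_TRACING bind to real code. A minimal sketch of a backend providing two of them, assuming CONFIG_TRACING=y (the counters are illustrative only; note that the ARM assembly stubs in this commit branch to the unprefixed names, so a complete backend must also export symbols matching those call sites):

    #include <kernel.h>

    static volatile u32_t idle_count;
    static volatile u32_t isr_count;

    void z_sys_trace_idle(void)
    {
            idle_count++;   /* CPU is about to enter idle */
    }

    void z_sys_trace_isr_enter(void)
    {
            isr_count++;    /* interrupt handling has started */
    }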


@@ -12,6 +12,7 @@
 #ifdef CONFIG_KERNEL_EVENT_LOGGER
 #include <logging/kernel_event_logger.h>
 #endif /* CONFIG_KERNEL_EVENT_LOGGER */
+#include <tracing.h>

 #ifdef CONFIG_MULTITHREADING
 #define _VALID_PRIO(prio, entry_point) \

@@ -225,6 +226,8 @@ static inline void _ready_thread(struct k_thread *thread)
 		_add_thread_to_ready_q(thread);
 	}

+	sys_trace_thread_ready(thread);
+
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_THREAD
 	_sys_k_event_logger_thread_ready(thread);
 #endif


@@ -33,6 +33,10 @@ extern void _sys_k_event_logger_context_switch(void);
 void _smp_reacquire_global_lock(struct k_thread *thread);
 void _smp_release_global_lock(struct k_thread *thread);

+#ifdef CONFIG_TRACING
+extern void sys_trace_thread_switched_out(void);
+#endif
+
 /* context switching and scheduling-related routines */
 #ifdef CONFIG_USE_SWITCH

@@ -56,6 +60,10 @@ static inline unsigned int _Swap(unsigned int key)
 	_check_stack_sentinel();
 	_update_time_slice_before_swap();

+#ifdef CONFIG_TRACING
+	sys_trace_thread_switched_out();
+#endif
+
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	_sys_k_event_logger_context_switch();
 #endif

@@ -80,6 +88,10 @@ static inline unsigned int _Swap(unsigned int key)
 		ret = _current->swap_retval;
 	}

+#ifdef CONFIG_TRACING
+	sys_trace_thread_switched_in();
+#endif
+
 	irq_unlock(key);

 	return ret;

@@ -91,10 +103,19 @@ extern unsigned int __swap(unsigned int key);
 static inline unsigned int _Swap(unsigned int key)
 {
+	unsigned int ret;
+
 	_check_stack_sentinel();
 	_update_time_slice_before_swap();
-	return __swap(key);
+#ifdef CONFIG_TRACING
+	sys_trace_thread_switched_out();
+#endif
+	ret = __swap(key);
+#ifdef CONFIG_TRACING
+	sys_trace_thread_switched_in();
+#endif
+
+	return ret;
 }
 #endif
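
Both variants of _Swap() now bracket the low-level switch with switched_out/switched_in, so a backend can attribute CPU time to individual threads and measure switch overhead. A sketch, assuming CONFIG_TRACING=y and using k_cycle_get_32() for timestamps:

    #include <kernel.h>

    static u32_t out_stamp;

    void sys_trace_thread_switched_out(void)
    {
            out_stamp = k_cycle_get_32();   /* outgoing thread stops here */
    }

    void sys_trace_thread_switched_in(void)
    {
            /* cycles between the two hooks approximate context-switch
             * overhead; switched_in to the next switched_out is the
             * incoming thread's run time
             */
            u32_t delta = k_cycle_get_32() - out_stamp;
            ARG_UNUSED(delta);
    }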


@@ -31,6 +31,7 @@
 #include <kswap.h>
 #include <entropy.h>
 #include <logging/log_ctrl.h>
+#include <tracing.h>

 /* kernel build timestamp items */
 #define BUILD_TIMESTAMP "BUILD: " __DATE__ " " __TIME__

@@ -334,12 +335,16 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
 			 MAIN_STACK_SIZE, bg_thread_main,
 			 NULL, NULL, NULL,
 			 CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL);
+
+	sys_trace_thread_create(_main_thread);
+
 	_mark_thread_as_started(_main_thread);
 	_ready_thread(_main_thread);

 #ifdef CONFIG_MULTITHREADING
 	init_idle_thread(_idle_thread, _idle_stack);
 	_kernel.cpus[0].idle_thread = _idle_thread;
+	sys_trace_thread_create(_idle_thread);
 #endif

 #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1


@@ -36,6 +36,7 @@
 #include <errno.h>
 #include <init.h>
 #include <syscall_handler.h>
+#include <tracing.h>

 #define RECORD_STATE_CHANGE(mutex) do { } while ((0))
 #define RECORD_CONFLICT(mutex) do { } while ((0))

@@ -72,13 +73,17 @@ void _impl_k_mutex_init(struct k_mutex *mutex)
 	mutex->owner = NULL;
 	mutex->lock_count = 0;

+	sys_trace_void(SYS_TRACE_ID_MUTEX_INIT);
+
 	/* initialized upon first use */
 	/* mutex->owner_orig_prio = 0; */

 	_waitq_init(&mutex->wait_q);

 	SYS_TRACING_OBJ_INIT(k_mutex, mutex);
 	_k_object_init(mutex);
+
+	sys_trace_end_call(SYS_TRACE_ID_MUTEX_INIT);
 }

@@ -118,6 +123,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 	int new_prio;
 	unsigned int key;

+	sys_trace_void(SYS_TRACE_ID_MUTEX_LOCK);
 	_sched_lock();

 	if (likely(mutex->lock_count == 0 || mutex->owner == _current)) {

@@ -136,6 +142,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 			      mutex->owner_orig_prio);

 		k_sched_unlock();
+		sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
 		return 0;
 	}

@@ -144,6 +151,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 	if (unlikely(timeout == K_NO_WAIT)) {
 		k_sched_unlock();
+		sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
 		return -EBUSY;
 	}

@@ -167,6 +175,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 	if (got_mutex == 0) {
 		k_sched_unlock();
+		sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
 		return 0;
 	}

@@ -188,6 +197,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 	k_sched_unlock();
+	sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
 	return -EAGAIN;
 }

@@ -206,6 +216,7 @@ void _impl_k_mutex_unlock(struct k_mutex *mutex)
 	__ASSERT(mutex->lock_count > 0, "");
 	__ASSERT(mutex->owner == _current, "");

+	sys_trace_void(SYS_TRACE_ID_MUTEX_UNLOCK);
 	_sched_lock();

 	RECORD_STATE_CHANGE();
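
The sys_trace_void()/sys_trace_end_call() pairing used throughout this file is not limited to kernel objects; any operation can be bracketed with an ID above SYS_TRACE_ID_OFFSET. For example (MY_TRACE_ID_FLUSH and flush_buffers are placeholders, and 7u is simply the next unused slot after the IDs defined in tracing.h):

    #include <tracing.h>

    #define MY_TRACE_ID_FLUSH (7u + SYS_TRACE_ID_OFFSET)

    void flush_buffers(void)
    {
            sys_trace_void(MY_TRACE_ID_FLUSH);      /* call started */
            /* ... the actual work ... */
            sys_trace_end_call(MY_TRACE_ID_FLUSH);  /* call completed */
    }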


@@ -371,6 +371,7 @@ void _thread_priority_set(struct k_thread *thread, int prio)
 			thread->base.prio = prio;
 		}
 	}
+	sys_trace_thread_priority_set(thread);

 	if (need_sched) {
 		_reschedule(irq_lock());


@@ -27,6 +27,7 @@
 #include <ksched.h>
 #include <init.h>
 #include <syscall_handler.h>
+#include <tracing.h>

 extern struct k_sem _k_sem_list_start[];
 extern struct k_sem _k_sem_list_end[];

@@ -60,6 +61,7 @@ void _impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
 	__ASSERT(limit != 0, "limit cannot be zero");
 	__ASSERT(initial_count <= limit, "count cannot be greater than limit");

+	sys_trace_void(SYS_TRACE_ID_SEMA_INIT);
 	sem->count = initial_count;
 	sem->limit = limit;
 	_waitq_init(&sem->wait_q);

@@ -70,6 +72,7 @@ void _impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
 	SYS_TRACING_OBJ_INIT(k_sem, sem);

 	_k_object_init(sem);
+	sys_trace_end_call(SYS_TRACE_ID_SEMA_INIT);
 }

 #ifdef CONFIG_USERSPACE

@@ -134,7 +137,9 @@ void _impl_k_sem_give(struct k_sem *sem)
 {
 	unsigned int key = irq_lock();

+	sys_trace_void(SYS_TRACE_ID_SEMA_GIVE);
 	do_sem_give(sem);
+	sys_trace_end_call(SYS_TRACE_ID_SEMA_GIVE);
 	_reschedule(key);
 }

@@ -146,19 +151,24 @@ int _impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 {
 	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

+	sys_trace_void(SYS_TRACE_ID_SEMA_TAKE);
 	unsigned int key = irq_lock();

 	if (likely(sem->count > 0)) {
 		sem->count--;
 		irq_unlock(key);
+		sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
 		return 0;
 	}

 	if (timeout == K_NO_WAIT) {
 		irq_unlock(key);
+		sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
 		return -EBUSY;
 	}

+	sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
+
 	return _pend_current_thread(key, &sem->wait_q, timeout);
 }


@@ -27,6 +27,7 @@
 #include <kernel_internal.h>
 #include <kswap.h>
 #include <init.h>
+#include <tracing.h>

 extern struct _static_thread_data _static_thread_data_list_start[];
 extern struct _static_thread_data _static_thread_data_list_end[];

@@ -358,6 +359,7 @@ void _setup_new_thread(struct k_thread *new_thread,
 	new_thread->base.prio_deadline = 0;
 #endif
 	new_thread->resource_pool = _current->resource_pool;
+	sys_trace_thread_create(new_thread);
 }

 #ifdef CONFIG_MULTITHREADING

@@ -368,12 +370,14 @@ k_tid_t _impl_k_thread_create(struct k_thread *new_thread,
 			      int prio, u32_t options, s32_t delay)
 {
 	__ASSERT(!_is_in_isr(), "Threads may not be created in ISRs");
+
 	_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
 			  prio, options);

 	if (delay != K_FOREVER) {
 		schedule_new_thread(new_thread, delay);
 	}
+
 	return new_thread;
 }

@@ -503,6 +507,8 @@ void _impl_k_thread_suspend(struct k_thread *thread)
 	_k_thread_single_suspend(thread);

+	sys_trace_thread_suspend(thread);
+
 	if (thread == _current) {
 		_Swap(key);
 	} else {

@@ -526,6 +532,7 @@ void _impl_k_thread_resume(struct k_thread *thread)
 	_k_thread_single_resume(thread);

+	sys_trace_thread_resume(thread);
 	_reschedule(key);
 }

@@ -551,6 +558,8 @@ void _k_thread_single_abort(struct k_thread *thread)
 	}

 	thread->base.thread_state |= _THREAD_DEAD;
+
+	sys_trace_thread_abort(thread);

 #ifdef CONFIG_KERNEL_EVENT_LOGGER_THREAD
 	_sys_k_event_logger_thread_exit(thread);
 #endif