kernel: overhaul unused stack measurement
The existing stack_analyze APIs had some problems: 1. Not properly namespaced 2. Accepted the stack object as a parameter, yet the stack object does not contain the necessary information to get the associated buffer region, the thread object is needed for this 3. Caused a crash on certain platforms that do not allow inspection of unused stack space for the currently running thread 4. No user mode access 5. Separately passed in thread name We deprecate these functions and add a new API k_thread_stack_space_get() which addresses all of these issues. A helper API log_stack_usage() also added which resembles STACK_ANALYZE() in functionality. Fixes: #17852 Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent
f02fdd2d7b
commit
efc5fe07a2
16 changed files with 229 additions and 46 deletions
|
@ -243,6 +243,14 @@ config STACK_GROWS_UP
|
|||
Select this option if the architecture has upward growing thread
|
||||
stacks. This is not common.
|
||||
|
||||
config NO_UNUSED_STACK_INSPECTION
|
||||
bool
|
||||
help
|
||||
Selected if the architecture will generate a fault if unused stack
|
||||
memory is examined, which is the region between the current stack
|
||||
pointer and the deepest available address in the current stack
|
||||
region.
|
||||
|
||||
config MAX_THREAD_BYTES
|
||||
int "Bytes to use when tracking object thread permissions"
|
||||
default 2
|
||||
|
|
|
@ -145,6 +145,7 @@ config ARC_CONNECT
|
|||
|
||||
config ARC_STACK_CHECKING
|
||||
bool
|
||||
select NO_UNUSED_STACK_INSPECTION
|
||||
help
|
||||
Use ARC STACK_CHECKING to do stack protection
|
||||
|
||||
|
|
|
@ -362,8 +362,8 @@ static void ack_timeout(struct k_work *work)
|
|||
h5_send(NULL, HCI_3WIRE_ACK_PKT, 0);
|
||||
|
||||
/* Analyze stacks */
|
||||
STACK_ANALYZE("tx_stack", tx_stack);
|
||||
STACK_ANALYZE("rx_stack", rx_stack);
|
||||
log_stack_usage(&tx_thread_data);
|
||||
log_stack_usage(&rx_thread_data);
|
||||
}
|
||||
|
||||
static void h5_process_complete_packet(u8_t *hdr)
|
||||
|
@ -713,6 +713,7 @@ static void h5_init(void)
|
|||
(k_thread_entry_t)tx_thread, NULL, NULL, NULL,
|
||||
K_PRIO_COOP(CONFIG_BT_HCI_TX_PRIO),
|
||||
0, K_NO_WAIT);
|
||||
k_thread_name_set(&tx_thread_data, "tx_thread");
|
||||
|
||||
k_fifo_init(&h5.rx_queue);
|
||||
k_thread_create(&rx_thread_data, rx_stack,
|
||||
|
@ -720,6 +721,7 @@ static void h5_init(void)
|
|||
(k_thread_entry_t)rx_thread, NULL, NULL, NULL,
|
||||
K_PRIO_COOP(CONFIG_BT_RX_PRIO),
|
||||
0, K_NO_WAIT);
|
||||
k_thread_name_set(&rx_thread_data, "rx_thread");
|
||||
|
||||
/* Unack queue */
|
||||
k_fifo_init(&h5.unack_queue);
|
||||
|
|
|
@ -60,6 +60,7 @@ static inline size_t stack_unused_space_get(const char *stack, size_t size)
|
|||
return unused;
|
||||
}
|
||||
|
||||
__deprecated
|
||||
static inline void stack_analyze(const char *name, const char *stack,
|
||||
unsigned int size)
|
||||
{
|
||||
|
@ -82,11 +83,33 @@ static inline void stack_analyze(const char *name, const char *stack,
|
|||
* @param name Name of the stack
|
||||
* @param sym The symbol of the stack
|
||||
*/
|
||||
#define STACK_ANALYZE(name, sym) \
|
||||
#define STACK_ANALYZE(name, sym) DEPRECATED_MACRO \
|
||||
do { \
|
||||
stack_analyze(name, \
|
||||
Z_THREAD_STACK_BUFFER(sym), \
|
||||
K_THREAD_STACK_SIZEOF(sym)); \
|
||||
} while (false)
|
||||
|
||||
/**
 * @brief Log stack usage information for a thread
 *
 * Helper resembling the deprecated STACK_ANALYZE() macro: reports the
 * unused/used stack space of @a thread through the logging subsystem.
 * Compiles to a no-op unless both CONFIG_INIT_STACKS and
 * CONFIG_THREAD_STACK_INFO are enabled, since measurement requires a
 * pre-filled stack buffer and per-thread stack bounds information.
 *
 * @param thread Thread to report on
 */
static inline void log_stack_usage(const struct k_thread *thread)
{
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
	size_t unused, size = thread->stack_info.size;

	LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

	/* Silently skip reporting if measurement fails (e.g. -ENOTSUP on
	 * hardware forbidding inspection of the running thread's stack).
	 */
	if (k_thread_stack_space_get(thread, &unused) == 0) {
		unsigned int pcnt = ((size - unused) * 100U) / size;
		const char *tname;

		/* Fall back to a placeholder when the thread is unnamed */
		tname = k_thread_name_get((k_tid_t)thread);
		if (tname == NULL) {
			tname = "unknown";
		}

		LOG_INF("%p (%s):\tunused %zu\tusage %zu / %zu (%u %%)",
			thread, tname, unused, size - unused, size,
			pcnt);
	}
#endif
}
|
||||
#endif /* ZEPHYR_INCLUDE_DEBUG_STACK_H_ */
|
||||
|
|
|
@ -834,6 +834,31 @@ static inline void k_thread_resource_pool_assign(struct k_thread *thread,
|
|||
thread->resource_pool = pool;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
|
||||
/**
|
||||
* @brief Obtain stack usage information for the specified thread
|
||||
*
|
||||
* User threads will need to have permission on the target thread object.
|
||||
*
|
||||
* Some hardware may prevent inspection of a stack buffer currently in use.
|
||||
* If this API is called from supervisor mode, on the currently running thread,
|
||||
* on a platform which selects CONFIG_NO_UNUSED_STACK_INSPECTION, an error
|
||||
* will be generated.
|
||||
*
|
||||
* @param thread Thread to inspect stack information
|
||||
* @param unused_ptr Output parameter, filled in with the unused stack space
|
||||
* of the target thread in bytes.
|
||||
* @return 0 on success
|
||||
* @return -EBADF Bad thread object (user mode only)
|
||||
* @return -EPERM No permissions on thread object (user mode only)
|
||||
* @return -ENOTSUP Forbidden by hardware policy
|
||||
* @return -EINVAL Thread is uninitialized or exited (user mode only)
|
||||
* @return -EFAULT Bad memory address for unused_ptr (user mode only)
|
||||
*/
|
||||
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
|
||||
size_t *unused_ptr);
|
||||
#endif
|
||||
|
||||
#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
|
||||
/**
|
||||
* @brief Assign the system heap as a thread's resource pool
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include <string.h>
|
||||
#include <stdbool.h>
|
||||
#include <irq_offload.h>
|
||||
#include <sys/check.h>
|
||||
|
||||
static struct k_spinlock lock;
|
||||
|
||||
|
@ -882,3 +883,90 @@ void irq_offload(irq_offload_routine_t routine, void *parameter)
|
|||
k_sem_give(&offload_sem);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
|
||||
#ifdef CONFIG_STACK_GROWS_UP
|
||||
#error "Unsupported configuration for stack analysis"
|
||||
#endif
|
||||
|
||||
/*
 * Measure unused stack space by scanning the stack buffer from its
 * deepest address upward for the 0xAA fill pattern (present when
 * CONFIG_INIT_STACKS is enabled).  Downward-growing stacks only; see
 * the CONFIG_STACK_GROWS_UP #error guard above.
 */
int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	const u8_t *start = (u8_t *)thread->stack_info.start;
	size_t size = thread->stack_info.size;
	size_t unused = 0;
	const u8_t *checked_stack = start;
	/* Take the address of any local variable as a shallow bound for the
	 * stack pointer. Addresses above it are guaranteed to be
	 * accessible.
	 */
	const u8_t *stack_pointer = (const u8_t *)&start;

	/* If we are currently running on the stack being analyzed, some
	 * memory management hardware will generate an exception if we
	 * read unused stack memory.
	 *
	 * This never happens when invoked from user mode, as user mode
	 * will always run this function on the privilege elevation stack.
	 */
	if ((stack_pointer > start) && (stack_pointer <= (start + size)) &&
	    IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
		/* TODO: We could add an arch_ API call to temporarily
		 * disable the stack checking in the CPU, but this would
		 * need to be properly managed wrt context switches/interrupts
		 */
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		/* First 4 bytes of the stack buffer reserved for the
		 * sentinel value, it won't be 0xAAAAAAAA for thread
		 * stacks.
		 *
		 * FIXME: thread->stack_info.start ought to reflect
		 * this!
		 */
		checked_stack += 4;
		size -= 4;
	}

	/* Count consecutive fill bytes; the first non-0xAA byte marks the
	 * high-water mark of stack usage.
	 */
	for (size_t i = 0; i < size; i++) {
		if ((checked_stack[i]) == 0xaaU) {
			unused++;
		} else {
			break;
		}
	}

	*unused_ptr = unused;

	return 0;
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/*
 * Syscall verification handler: validate the thread object and the
 * user-supplied output pointer before invoking the kernel-mode
 * implementation.  Gives user threads access to stack measurement
 * (one of the goals of this API over the deprecated stack_analyze).
 */
int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	size_t unused;
	int ret;

	/* Caller must have permission on a valid thread object;
	 * yields -EBADF/-EPERM/-EINVAL as documented on the syscall.
	 */
	ret = Z_SYSCALL_OBJ(thread, K_OBJ_THREAD);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_impl_k_thread_stack_space_get(thread, &unused);
	CHECKIF(ret != 0) {
		return ret;
	}

	/* Copy the result out via a checked user-memory write; a bad
	 * unused_ptr surfaces as an error instead of a kernel fault.
	 */
	ret = z_user_to_copy(unused_ptr, &unused, sizeof(size_t));
	CHECKIF(ret != 0) {
		return ret;
	}

	return 0;
}
|
||||
#include <syscalls/k_thread_stack_space_get_mrsh.c>
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */
|
||||
|
|
|
@ -362,17 +362,19 @@ uint32_t osThreadGetStackSize(osThreadId_t thread_id)
|
|||
/**
 * @brief Get available (unused) stack space of a thread, in bytes.
 *
 * CMSIS-RTOS2 wrapper over k_thread_stack_space_get().  NOTE(review):
 * the extracted span mixed pre- and post-change lines of the diff; this
 * is the post-change form, which replaces the direct
 * stack_unused_space_get() call with the new kernel API.
 *
 * @param thread_id CMSIS thread handle (a struct cv2_thread pointer)
 * @return Unused stack space in bytes, or 0 if it cannot be determined
 */
uint32_t osThreadGetStackSpace(osThreadId_t thread_id)
{
	struct cv2_thread *tid = (struct cv2_thread *)thread_id;
	size_t unused;
	int ret;

	__ASSERT(tid, "");
	__ASSERT(is_cmsis_rtos_v2_thread(tid), "");
	__ASSERT(!k_is_in_isr(), "");

	/* CMSIS-RTOS2 has no error return here; report 0 on failure
	 * (e.g. -ENOTSUP from hardware inspection restrictions).
	 */
	ret = k_thread_stack_space_get(&tid->z_thread, &unused);
	if (ret != 0) {
		unused = 0;
	}

	return (uint32_t)unused;
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -259,7 +259,7 @@ static void bt_tx_thread(void *p1, void *p2, void *p3)
|
|||
net_buf_unref(buf);
|
||||
}
|
||||
|
||||
STACK_ANALYZE("tx_stack", bt_tx_thread_stack);
|
||||
log_stack_usage(&bt_tx_thread_data);
|
||||
|
||||
/* Make sure other threads get a chance to run */
|
||||
k_yield();
|
||||
|
@ -312,6 +312,7 @@ void main(void)
|
|||
K_THREAD_STACK_SIZEOF(bt_tx_thread_stack),
|
||||
bt_tx_thread, NULL, NULL, NULL, K_PRIO_COOP(7),
|
||||
0, K_NO_WAIT);
|
||||
k_thread_name_set(&bt_tx_thread_data, "bt_tx_thread");
|
||||
|
||||
/* Send a vendor event to announce that the slave is initialized */
|
||||
buf = net_buf_alloc(&cmd_tx_pool, K_FOREVER);
|
||||
|
|
|
@ -62,7 +62,7 @@ static K_THREAD_STACK_DEFINE(prio_recv_thread_stack,
|
|||
struct k_thread recv_thread_data;
|
||||
static K_THREAD_STACK_DEFINE(recv_thread_stack, CONFIG_BT_RX_STACK_SIZE);
|
||||
|
||||
#if defined(CONFIG_INIT_STACKS)
|
||||
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
|
||||
static u32_t prio_ts;
|
||||
static u32_t rx_ts;
|
||||
#endif
|
||||
|
@ -154,10 +154,9 @@ static void prio_recv_thread(void *p1, void *p2, void *p3)
|
|||
/* Now, ULL mayfly has something to give to us */
|
||||
BT_DBG("sem taken");
|
||||
|
||||
#if defined(CONFIG_INIT_STACKS)
|
||||
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
|
||||
if (k_uptime_get_32() - prio_ts > K_SECONDS(5)) {
|
||||
STACK_ANALYZE("prio recv thread stack",
|
||||
prio_recv_thread_stack);
|
||||
log_stack_usage(&prio_recv_thread_data);
|
||||
prio_ts = k_uptime_get_32();
|
||||
}
|
||||
#endif
|
||||
|
@ -399,9 +398,9 @@ static void recv_thread(void *p1, void *p2, void *p3)
|
|||
|
||||
k_yield();
|
||||
|
||||
#if defined(CONFIG_INIT_STACKS)
|
||||
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
|
||||
if (k_uptime_get_32() - rx_ts > K_SECONDS(5)) {
|
||||
STACK_ANALYZE("recv thread stack", recv_thread_stack);
|
||||
log_stack_usage(&recv_thread_data);
|
||||
rx_ts = k_uptime_get_32();
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -1022,9 +1022,9 @@ static void hci_disconn_complete(struct net_buf *buf)
|
|||
|
||||
/* Check stacks usage */
|
||||
#if !defined(CONFIG_BT_RECV_IS_RX_THREAD)
|
||||
STACK_ANALYZE("rx stack", rx_thread_stack);
|
||||
log_stack_usage(&rx_thread_data);
|
||||
#endif
|
||||
STACK_ANALYZE("tx stack", tx_thread_stack);
|
||||
log_stack_usage(&tx_thread_data);
|
||||
|
||||
bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
|
||||
conn->handle = 0U;
|
||||
|
|
|
@ -217,7 +217,7 @@ static void ecc_thread(void *p1, void *p2, void *p3)
|
|||
__ASSERT(0, "Unhandled ECC command");
|
||||
}
|
||||
|
||||
STACK_ANALYZE("ecc stack", ecc_thread_stack);
|
||||
log_stack_usage(&ecc_thread_data);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -244,7 +244,7 @@ static void rfcomm_dlc_destroy(struct bt_rfcomm_dlc *dlc)
|
|||
dlc->state = BT_RFCOMM_STATE_IDLE;
|
||||
dlc->session = NULL;
|
||||
|
||||
STACK_ANALYZE("dlc stack", dlc->stack);
|
||||
log_stack_usage(&dlc->tx_thread);
|
||||
|
||||
if (dlc->ops && dlc->ops->disconnected) {
|
||||
dlc->ops->disconnected(dlc);
|
||||
|
|
|
@ -150,10 +150,9 @@ static inline void adv_send(struct net_buf *buf)
|
|||
|
||||
/* k_thread_foreach() callback: log stack usage of every thread.
 * NOTE(review): the extracted span mixed pre- and post-change diff
 * lines; this is the post-change form using log_stack_usage() instead
 * of the deprecated stack_analyze(), so user_data is no longer needed.
 */
static void adv_stack_dump(const struct k_thread *thread, void *user_data)
{
	ARG_UNUSED(user_data);

	log_stack_usage(thread);
}
|
||||
|
||||
static void adv_thread(void *p1, void *p2, void *p3)
|
||||
|
@ -190,8 +189,8 @@ static void adv_thread(void *p1, void *p2, void *p3)
|
|||
net_buf_unref(buf);
|
||||
}
|
||||
|
||||
STACK_ANALYZE("adv stack", adv_thread_stack);
|
||||
k_thread_foreach(adv_stack_dump, "BT_MESH");
|
||||
log_stack_usage(&adv_thread_data);
|
||||
k_thread_foreach(adv_stack_dump, NULL);
|
||||
|
||||
/* Give other threads a chance to run */
|
||||
k_yield();
|
||||
|
|
|
@ -50,20 +50,17 @@ static int cmd_kernel_cycles(const struct shell *shell,
|
|||
return 0;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_MONITOR) \
|
||||
&& defined(CONFIG_THREAD_STACK_INFO)
|
||||
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO) && \
|
||||
defined(CONFIG_THREAD_MONITOR)
|
||||
static void shell_tdata_dump(const struct k_thread *cthread, void *user_data)
|
||||
{
|
||||
struct k_thread *thread = (struct k_thread *)cthread;
|
||||
const struct shell *shell = (const struct shell *)user_data;
|
||||
unsigned int pcnt, unused = 0U;
|
||||
unsigned int size = thread->stack_info.size;
|
||||
unsigned int pcnt;
|
||||
size_t unused;
|
||||
size_t size = thread->stack_info.size;
|
||||
const char *tname;
|
||||
|
||||
unused = stack_unused_space_get((char *)thread->stack_info.start, size);
|
||||
|
||||
/* Calculate the real size reserved for the stack */
|
||||
pcnt = ((size - unused) * 100U) / size;
|
||||
int ret;
|
||||
|
||||
tname = k_thread_name_get(thread);
|
||||
|
||||
|
@ -76,8 +73,20 @@ static void shell_tdata_dump(const struct k_thread *cthread, void *user_data)
|
|||
thread->base.prio,
|
||||
thread->base.timeout.dticks);
|
||||
shell_print(shell, "\tstate: %s", k_thread_state_str(thread));
|
||||
shell_print(shell, "\tstack size %u, unused %u, usage %u / %u (%u %%)\n",
|
||||
size, unused, size - unused, size, pcnt);
|
||||
|
||||
ret = k_thread_stack_space_get(thread, &unused);
|
||||
if (ret) {
|
||||
shell_print(shell,
|
||||
"Unable to determine unused stack size (%d)\n",
|
||||
ret);
|
||||
} else {
|
||||
/* Calculate the real size reserved for the stack */
|
||||
pcnt = ((size - unused) * 100U) / size;
|
||||
|
||||
shell_print(shell,
|
||||
"\tstack size %zu, unused %zu, usage %zu / %zu (%u %%)\n",
|
||||
size, unused, size - unused, size, pcnt);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
@ -95,12 +104,22 @@ static int cmd_kernel_threads(const struct shell *shell,
|
|||
|
||||
static void shell_stack_dump(const struct k_thread *thread, void *user_data)
|
||||
{
|
||||
unsigned int pcnt, unused = 0U;
|
||||
unsigned int size = thread->stack_info.size;
|
||||
const struct shell *shell = (const struct shell *)user_data;
|
||||
unsigned int pcnt;
|
||||
size_t unused;
|
||||
size_t size = thread->stack_info.size;
|
||||
const char *tname;
|
||||
int ret;
|
||||
|
||||
ret = k_thread_stack_space_get(thread, &unused);
|
||||
if (ret) {
|
||||
shell_print(shell,
|
||||
"Unable to determine unused stack size (%d)\n",
|
||||
ret);
|
||||
return;
|
||||
}
|
||||
|
||||
tname = k_thread_name_get((struct k_thread *)thread);
|
||||
unused = stack_unused_space_get((char *)thread->stack_info.start, size);
|
||||
|
||||
/* Calculate the real size reserved for the stack */
|
||||
pcnt = ((size - unused) * 100U) / size;
|
||||
|
@ -153,8 +172,8 @@ SHELL_STATIC_SUBCMD_SET_CREATE(sub_kernel,
|
|||
#if defined(CONFIG_REBOOT)
|
||||
SHELL_CMD(reboot, &sub_kernel_reboot, "Reboot.", NULL),
|
||||
#endif
|
||||
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_MONITOR) \
|
||||
&& defined(CONFIG_THREAD_STACK_INFO)
|
||||
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO) && \
|
||||
defined(CONFIG_THREAD_MONITOR)
|
||||
SHELL_CMD(stacks, NULL, "List threads stack usage.", cmd_kernel_stacks),
|
||||
SHELL_CMD(threads, NULL, "List kernel threads.", cmd_kernel_threads),
|
||||
#endif
|
||||
|
|
|
@ -931,10 +931,11 @@ static inline int z_vrfy_check_perms(void *addr, size_t size, int write)
|
|||
|
||||
void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
|
||||
{
|
||||
size_t stack_size;
|
||||
size_t stack_size, unused;
|
||||
u8_t val;
|
||||
char *stack_start, *stack_ptr, *stack_end, *obj_start, *obj_end;
|
||||
volatile char *pos;
|
||||
int ret, expected;
|
||||
|
||||
expect_fault = false;
|
||||
|
||||
|
@ -1016,6 +1017,18 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
|
|||
stack_size);
|
||||
}
|
||||
|
||||
ret = k_thread_stack_space_get(k_current_get(), &unused);
|
||||
if (!arch_is_user_context() &&
|
||||
IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
|
||||
expected = -ENOTSUP;
|
||||
} else {
|
||||
expected = 0;
|
||||
}
|
||||
|
||||
zassert_equal(ret, expected, "unexpected return value %d", ret);
|
||||
if (ret == 0) {
|
||||
printk("self-reported unused stack space: %zu\n", unused);
|
||||
}
|
||||
|
||||
k_sem_give(&uthread_end_sem);
|
||||
}
|
||||
|
@ -1035,14 +1048,18 @@ void stest_thread_entry(void *p1, void *p2, void *p3)
|
|||
void stest_thread_launch(void *stack_obj, size_t obj_size, u32_t flags,
|
||||
bool drop)
|
||||
{
|
||||
int ret;
|
||||
size_t unused;
|
||||
|
||||
k_thread_create(&uthread_thread, stack_obj, STEST_STACKSIZE,
|
||||
stest_thread_entry, stack_obj, (void *)obj_size,
|
||||
(void *)drop,
|
||||
-1, flags, K_NO_WAIT);
|
||||
k_sem_take(&uthread_end_sem, K_FOREVER);
|
||||
|
||||
stack_analyze("test_thread", (char *)uthread_thread.stack_info.start,
|
||||
uthread_thread.stack_info.size);
|
||||
ret = k_thread_stack_space_get(&uthread_thread, &unused);
|
||||
zassert_equal(ret, 0, "failed to calculate unused stack space\n");
|
||||
printk("target thread unused stack space: %zu\n", unused);
|
||||
}
|
||||
|
||||
void scenario_entry(void *stack_obj, size_t obj_size)
|
||||
|
|
|
@ -19,8 +19,7 @@ static struct k_sem sync_sema;
|
|||
/**TESTPOINT: stack analyze*/
|
||||
/* k_thread_foreach() test callback: log per-thread stack usage.
 * NOTE(review): the extracted span mixed pre- and post-change diff
 * lines; this is the post-change form, which drops the deprecated
 * stack_analyze() call in favor of log_stack_usage().
 */
static void tdata_dump_callback(const struct k_thread *thread, void *user_data)
{
	log_stack_usage(thread);
}
|
||||
|
||||
/*
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue