2016-11-04 08:09:17 -04:00
|
|
|
/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
|
|
|
|
|
2024-01-24 17:35:04 +08:00
|
|
|
#include <zephyr/version.h>
|
2024-02-05 22:21:50 +01:00
|
|
|
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/sys/printk.h>
|
|
|
|
#include <zephyr/shell/shell.h>
|
|
|
|
#include <zephyr/init.h>
|
|
|
|
#include <zephyr/sys/reboot.h>
|
|
|
|
#include <zephyr/debug/stack.h>
|
2018-04-22 22:41:36 +02:00
|
|
|
#include <string.h>
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/device.h>
|
|
|
|
#include <zephyr/drivers/timer/system_timer.h>
|
|
|
|
#include <zephyr/kernel.h>
|
2022-01-28 15:49:05 +01:00
|
|
|
#include <kernel_internal.h>
|
2022-05-25 09:17:47 +02:00
|
|
|
#include <stdlib.h>
|
kernel: Introduce a way to specify minimum system heap size
There are several subsystems and boards which require a relatively large
system heap (used by k_malloc()) to function properly. This became even
more notable with the recent introduction of the ACPICA library, which
causes ACPI-using boards to require a system heap of up to several
megabytes in size.
Until now, subsystems and boards have tried to solve this by having
Kconfig overlays which modify the default value of HEAP_MEM_POOL_SIZE.
This works ok, except when applications start explicitly setting values
in their prj.conf files:
$ git grep CONFIG_HEAP_MEM_POOL_SIZE= tests samples|wc -l
157
The vast majority of values set by current sample or test applications
is much too small for subsystems like ACPI, which results in the
application not being able to run on such boards.
To solve this situation, we introduce support for subsystems to specify
their own custom system heap size requirement. Subsystems do
this by defining Kconfig options with the prefix HEAP_MEM_POOL_ADD_SIZE_.
The final value of the system heap is the sum of the custom
minimum requirements, or the value existing HEAP_MEM_POOL_SIZE option,
whichever is greater.
We also introduce a new HEAP_MEM_POOL_IGNORE_MIN Kconfig option which
applications can use to force a lower value than what subsystems have
specficied, however this behavior is disabled by default.
Whenever the minimum is greater than the requested value a CMake warning
will be issued in the build output.
This patch ends up modifying several places outside of kernel code,
since the presence of the system heap is no longer detected using a
non-zero CONFIG_HEAP_MEM_POOL_SIZE value, rather it's now detected using
a new K_HEAP_MEM_POOL_SIZE value that's evaluated at build.
Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
2023-11-29 11:22:39 +02:00
|
|
|
#if defined(CONFIG_SYS_HEAP_RUNTIME_STATS) && (K_HEAP_MEM_POOL_SIZE > 0)
|
2023-02-13 09:27:58 +02:00
|
|
|
#include <zephyr/sys/sys_heap.h>
|
|
|
|
#endif
|
2022-06-08 14:49:43 -07:00
|
|
|
#if defined(CONFIG_LOG_RUNTIME_FILTERING)
|
|
|
|
#include <zephyr/logging/log_ctrl.h>
|
|
|
|
#endif
|
shell: kernel: add command to unwind a thread
Add a shell command to unwind a thread using its thread id.
uart:~$ kernel threads
Scheduler: 11 since last call
Threads:
*0x80017138 shell_uart
options: 0x0, priority: 14 timeout: 0
state: queued, entry: 0x800029ac
stack size 3072, unused 1316, usage 1756 / 3072 (57 %)
0x80017ca8 sysworkq
options: 0x1, priority: -1 timeout: 0
state: pending, entry: 0x80006842
stack size 1024, unused 644, usage 380 / 1024 (37 %)
0x800177e0 idle
options: 0x1, priority: 15 timeout: 0
state: , entry: 0x800065ae
stack size 512, unused 180, usage 332 / 512 (64 %)
0x80017950 main
options: 0x1, priority: 0 timeout: 13
state: suspended, entry: 0x80006326
stack size 4096, unused 3604, usage 492 / 4096 (12 %)
uart:~$ kernel unwind 0x80017ca8
Unwinding 0x80017ca8 sysworkq
ra: 0x80007114 [z_swap+0x58]
ra: 0x80007ae8 [z_sched_wait+0x10]
ra: 0x8000689a [work_queue_main+0x58]
ra: 0x800006de [z_thread_entry+0x2e]
Signed-off-by: Yong Cong Sin <ycsin@meta.com>
2024-06-11 14:51:02 +08:00
|
|
|
#include <zephyr/debug/symtab.h>
|
2016-11-04 08:09:17 -04:00
|
|
|
|
2022-06-29 15:37:58 -05:00
|
|
|
#if defined(CONFIG_THREAD_MAX_NAME_LEN)
|
|
|
|
#define THREAD_MAX_NAM_LEN CONFIG_THREAD_MAX_NAME_LEN
|
|
|
|
#else
|
|
|
|
#define THREAD_MAX_NAM_LEN 10
|
|
|
|
#endif
|
|
|
|
|
2023-04-13 18:59:37 +02:00
|
|
|
static int cmd_kernel_version(const struct shell *sh,
|
2018-08-09 15:03:58 +02:00
|
|
|
size_t argc, char **argv)
|
2016-11-04 08:09:17 -04:00
|
|
|
{
|
2016-12-21 00:11:41 -06:00
|
|
|
ARG_UNUSED(argc);
|
|
|
|
ARG_UNUSED(argv);
|
|
|
|
|
2024-02-05 22:21:50 +01:00
|
|
|
shell_print(sh, "Zephyr version %s", KERNEL_VERSION_STRING);
|
2018-10-01 22:08:59 +02:00
|
|
|
return 0;
|
2016-11-04 08:09:17 -04:00
|
|
|
}
|
|
|
|
|
2023-09-13 10:16:55 +02:00
|
|
|
#define MINUTES_FACTOR (MSEC_PER_SEC * SEC_PER_MIN)
|
|
|
|
#define HOURS_FACTOR (MINUTES_FACTOR * MIN_PER_HOUR)
|
|
|
|
#define DAYS_FACTOR (HOURS_FACTOR * HOUR_PER_DAY)
|
|
|
|
|
|
|
|
static int cmd_kernel_uptime(const struct shell *sh, size_t argc, char **argv)
|
2016-11-04 08:09:17 -04:00
|
|
|
{
|
2016-12-21 00:11:41 -06:00
|
|
|
ARG_UNUSED(argc);
|
|
|
|
ARG_UNUSED(argv);
|
|
|
|
|
2023-09-13 10:16:55 +02:00
|
|
|
int64_t milliseconds = k_uptime_get();
|
|
|
|
int64_t days;
|
|
|
|
int64_t hours;
|
|
|
|
int64_t minutes;
|
|
|
|
int64_t seconds;
|
|
|
|
|
|
|
|
if (argc == 1) {
|
|
|
|
shell_print(sh, "Uptime: %llu ms", milliseconds);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No need to enable the getopt and getopt_long for just one option. */
|
|
|
|
if (strcmp("-p", argv[1]) && strcmp("--pretty", argv[1]) != 0) {
|
|
|
|
shell_error(sh, "Usupported option: %s", argv[1]);
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
days = milliseconds / DAYS_FACTOR;
|
|
|
|
milliseconds %= DAYS_FACTOR;
|
|
|
|
hours = milliseconds / HOURS_FACTOR;
|
|
|
|
milliseconds %= HOURS_FACTOR;
|
|
|
|
minutes = milliseconds / MINUTES_FACTOR;
|
|
|
|
milliseconds %= MINUTES_FACTOR;
|
|
|
|
seconds = milliseconds / MSEC_PER_SEC;
|
|
|
|
milliseconds = milliseconds % MSEC_PER_SEC;
|
|
|
|
|
|
|
|
shell_print(sh,
|
|
|
|
"uptime: %llu days, %llu hours, %llu minutes, %llu seconds, %llu milliseconds",
|
|
|
|
days, hours, minutes, seconds, milliseconds);
|
|
|
|
|
2018-10-01 22:08:59 +02:00
|
|
|
return 0;
|
2016-11-04 08:09:17 -04:00
|
|
|
}
|
|
|
|
|
2023-04-13 18:59:37 +02:00
|
|
|
static int cmd_kernel_cycles(const struct shell *sh,
|
2018-08-09 15:03:58 +02:00
|
|
|
size_t argc, char **argv)
|
2016-11-04 08:09:17 -04:00
|
|
|
{
|
2016-12-21 00:11:41 -06:00
|
|
|
ARG_UNUSED(argc);
|
|
|
|
ARG_UNUSED(argv);
|
|
|
|
|
2023-04-13 18:59:37 +02:00
|
|
|
shell_print(sh, "cycles: %u hw cycles", k_cycle_get_32());
|
2018-10-01 22:08:59 +02:00
|
|
|
return 0;
|
2016-11-04 08:09:17 -04:00
|
|
|
}
|
|
|
|
|
2020-02-05 10:41:58 -08:00
|
|
|
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO) && \
|
|
|
|
defined(CONFIG_THREAD_MONITOR)
|
2019-07-31 12:43:54 +03:00
|
|
|
/*
 * k_thread_foreach() callback: print a detailed report for one thread.
 *
 * Output includes the thread pointer and name, scheduler options,
 * priority and timeout, state, entry point, and — configuration
 * permitting — CPU mask, runtime cycle statistics and stack usage.
 * @a user_data carries the shell instance to print on.
 */
static void shell_tdata_dump(const struct k_thread *cthread, void *user_data)
{
	struct k_thread *thread = (struct k_thread *)cthread;
	const struct shell *sh = (const struct shell *)user_data;
	unsigned int pcnt;
	size_t unused;
	size_t size = thread->stack_info.size;
	const char *tname;
	int ret;
	char state_str[32];

#ifdef CONFIG_THREAD_RUNTIME_STATS
	k_thread_runtime_stats_t rt_stats_thread;
	k_thread_runtime_stats_t rt_stats_all;
#endif

	tname = k_thread_name_get(thread);

	/* "*" marks the thread currently executing this command. */
	shell_print(sh, "%s%p %-10s",
		    (thread == k_current_get()) ? "*" : " ",
		    thread,
		    tname ? tname : "NA");
	/* Cannot use lld as it's less portable. */
	shell_print(sh, "\toptions: 0x%x, priority: %d timeout: %" PRId64,
		    thread->base.user_options,
		    thread->base.prio,
		    (int64_t)thread->base.timeout.dticks);
	shell_print(sh, "\tstate: %s, entry: %p",
		    k_thread_state_str(thread, state_str, sizeof(state_str)),
		    thread->entry.pEntry);

#ifdef CONFIG_SCHED_CPU_MASK
	shell_print(sh, "\tcpu_mask: 0x%x", thread->base.cpu_mask);
#endif /* CONFIG_SCHED_CPU_MASK */

#ifdef CONFIG_THREAD_RUNTIME_STATS
	/* ret counts stats-query failures; stats are printed only when
	 * both the per-thread and the aggregate query succeed.
	 */
	ret = 0;

	if (k_thread_runtime_stats_get(thread, &rt_stats_thread) != 0) {
		ret++;
	}

	if (k_thread_runtime_stats_all_get(&rt_stats_all) != 0) {
		ret++;
	}

	if (ret == 0) {
		pcnt = (rt_stats_thread.execution_cycles * 100U) /
			rt_stats_all.execution_cycles;

		/*
		 * z_prf() does not support %llu by default unless
		 * CONFIG_MINIMAL_LIBC_LL_PRINTF=y. So do conditional
		 * compilation to avoid blindly enabling this kconfig
		 * so it won't increase RAM/ROM usage too much on 32-bit
		 * targets.
		 */
		shell_print(sh, "\tTotal execution cycles: %u (%u %%)",
			    (uint32_t)rt_stats_thread.execution_cycles,
			    pcnt);
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		shell_print(sh, "\tCurrent execution cycles: %u",
			    (uint32_t)rt_stats_thread.current_cycles);
		shell_print(sh, "\tPeak execution cycles: %u",
			    (uint32_t)rt_stats_thread.peak_cycles);
		shell_print(sh, "\tAverage execution cycles: %u",
			    (uint32_t)rt_stats_thread.average_cycles);
#endif
	} else {
		/* Stats unavailable; print placeholders so the layout is stable. */
		shell_print(sh, "\tTotal execution cycles: ? (? %%)");
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		shell_print(sh, "\tCurrent execution cycles: ?");
		shell_print(sh, "\tPeak execution cycles: ?");
		shell_print(sh, "\tAverage execution cycles: ?");
#endif
	}
#endif

	ret = k_thread_stack_space_get(thread, &unused);
	if (ret) {
		shell_print(sh,
			    "Unable to determine unused stack size (%d)\n",
			    ret);
	} else {
		/* Calculate the real size reserved for the stack */
		pcnt = ((size - unused) * 100U) / size;

		shell_print(sh,
			    "\tstack size %zu, unused %zu, usage %zu / %zu (%u %%)\n",
			    size, unused, size - unused, size, pcnt);
	}
}
|
|
|
|
|
2023-04-13 18:59:37 +02:00
|
|
|
static int cmd_kernel_threads(const struct shell *sh,
|
2018-08-09 15:03:58 +02:00
|
|
|
size_t argc, char **argv)
|
2016-12-24 07:10:20 -05:00
|
|
|
{
|
|
|
|
ARG_UNUSED(argc);
|
|
|
|
ARG_UNUSED(argv);
|
|
|
|
|
2023-04-13 18:59:37 +02:00
|
|
|
shell_print(sh, "Scheduler: %u since last call", sys_clock_elapsed());
|
|
|
|
shell_print(sh, "Threads:");
|
2023-06-14 11:33:14 -04:00
|
|
|
|
2023-12-19 16:42:34 +01:00
|
|
|
/*
|
|
|
|
* Use the unlocked version as the callback itself might call
|
|
|
|
* arch_irq_unlock.
|
|
|
|
*/
|
2023-06-14 11:33:14 -04:00
|
|
|
k_thread_foreach_unlocked(shell_tdata_dump, (void *)sh);
|
2023-12-19 16:42:34 +01:00
|
|
|
|
2018-10-01 22:08:59 +02:00
|
|
|
return 0;
|
2016-12-24 07:10:20 -05:00
|
|
|
}
|
2016-11-04 08:09:17 -04:00
|
|
|
|
shell: kernel: add command to unwind a thread
Add a shell command to unwind a thread using its thread id.
uart:~$ kernel threads
Scheduler: 11 since last call
Threads:
*0x80017138 shell_uart
options: 0x0, priority: 14 timeout: 0
state: queued, entry: 0x800029ac
stack size 3072, unused 1316, usage 1756 / 3072 (57 %)
0x80017ca8 sysworkq
options: 0x1, priority: -1 timeout: 0
state: pending, entry: 0x80006842
stack size 1024, unused 644, usage 380 / 1024 (37 %)
0x800177e0 idle
options: 0x1, priority: 15 timeout: 0
state: , entry: 0x800065ae
stack size 512, unused 180, usage 332 / 512 (64 %)
0x80017950 main
options: 0x1, priority: 0 timeout: 13
state: suspended, entry: 0x80006326
stack size 4096, unused 3604, usage 492 / 4096 (12 %)
uart:~$ kernel unwind 0x80017ca8
Unwinding 0x80017ca8 sysworkq
ra: 0x80007114 [z_swap+0x58]
ra: 0x80007ae8 [z_sched_wait+0x10]
ra: 0x8000689a [work_queue_main+0x58]
ra: 0x800006de [z_thread_entry+0x2e]
Signed-off-by: Yong Cong Sin <ycsin@meta.com>
2024-06-11 14:51:02 +08:00
|
|
|
#if defined(CONFIG_ARCH_HAS_STACKWALK)
|
|
|
|
|
|
|
|
static bool print_trace_address(void *arg, unsigned long ra)
|
|
|
|
{
|
|
|
|
const struct shell *sh = arg;
|
|
|
|
#ifdef CONFIG_SYMTAB
|
|
|
|
uint32_t offset = 0;
|
|
|
|
const char *name = symtab_find_symbol_name(ra, &offset);
|
|
|
|
|
|
|
|
shell_print(sh, "ra: %p [%s+0x%x]", (void *)ra, name, offset);
|
|
|
|
#else
|
|
|
|
shell_print(sh, "ra: %p", (void *)ra);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Result record for validating a user-supplied thread id against the
 * live thread list (see is_valid_thread()).
 */
struct unwind_entry {
	/* Candidate thread pointer parsed from the command line. */
	const struct k_thread *const thread;
	/* Set to true by the callback when the candidate is found. */
	bool valid;
};
|
|
|
|
|
|
|
|
static void is_valid_thread(const struct k_thread *cthread, void *user_data)
|
|
|
|
{
|
|
|
|
struct unwind_entry *entry = user_data;
|
|
|
|
|
|
|
|
if (cthread == entry->thread) {
|
|
|
|
entry->valid = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Shell command handler: "kernel unwind [thread_id]".
 *
 * Walks and prints the call stack of the given thread via
 * print_trace_address().  Without an argument the current thread is
 * unwound; with an argument, the hex thread id is validated against the
 * live thread list before unwinding, and -EINVAL is returned when it
 * does not name an existing thread.
 */
static int cmd_kernel_unwind(const struct shell *sh, size_t argc, char **argv)
{
	struct k_thread *thread;

	if (argc == 1) {
		thread = _current;
	} else {
		/* argv[1] is the thread address printed by "kernel threads",
		 * parsed here as a base-16 value.
		 */
		thread = UINT_TO_POINTER(strtoll(argv[1], NULL, 16));
		struct unwind_entry entry = {
			.thread = thread,
			.valid = false,
		};

		/* Unlocked variant: the callback may call arch_irq_unlock. */
		k_thread_foreach_unlocked(is_valid_thread, &entry);

		if (!entry.valid) {
			shell_error(sh, "Invalid thread id %p", (void *)thread);
			return -EINVAL;
		}
	}
	shell_print(sh, "Unwinding %p %s", (void *)thread, thread->name);

	arch_stack_walk(print_trace_address, (void *)sh, thread, NULL);

	return 0;
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_ARCH_HAS_STACKWALK */
|
|
|
|
|
2018-05-09 10:23:43 +05:30
|
|
|
static void shell_stack_dump(const struct k_thread *thread, void *user_data)
|
|
|
|
{
|
2023-04-13 18:59:37 +02:00
|
|
|
const struct shell *sh = (const struct shell *)user_data;
|
2020-02-05 10:41:58 -08:00
|
|
|
unsigned int pcnt;
|
|
|
|
size_t unused;
|
|
|
|
size_t size = thread->stack_info.size;
|
2018-08-12 14:04:16 -05:00
|
|
|
const char *tname;
|
2020-02-05 10:41:58 -08:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = k_thread_stack_space_get(thread, &unused);
|
|
|
|
if (ret) {
|
2023-04-13 18:59:37 +02:00
|
|
|
shell_print(sh,
|
2020-02-05 10:41:58 -08:00
|
|
|
"Unable to determine unused stack size (%d)\n",
|
|
|
|
ret);
|
|
|
|
return;
|
|
|
|
}
|
2018-08-09 15:03:58 +02:00
|
|
|
|
2018-08-12 14:04:16 -05:00
|
|
|
tname = k_thread_name_get((struct k_thread *)thread);
|
2018-08-09 15:03:58 +02:00
|
|
|
|
|
|
|
/* Calculate the real size reserved for the stack */
|
2019-03-26 19:57:45 -06:00
|
|
|
pcnt = ((size - unused) * 100U) / size;
|
2018-08-09 15:03:58 +02:00
|
|
|
|
2022-06-29 15:37:58 -05:00
|
|
|
shell_print(
|
|
|
|
(const struct shell *)user_data, "%p %-" STRINGIFY(THREAD_MAX_NAM_LEN) "s "
|
2022-11-10 14:11:07 +01:00
|
|
|
"(real size %4zu):\tunused %4zu\tusage %4zu / %4zu (%2u %%)",
|
2022-06-29 15:37:58 -05:00
|
|
|
thread, tname ? tname : "NA", size, unused, size - unused, size, pcnt);
|
2018-05-09 10:23:43 +05:30
|
|
|
}
|
|
|
|
|
2022-10-12 10:55:36 -05:00
|
|
|
K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
|
2022-06-16 20:44:46 +09:00
|
|
|
CONFIG_ISR_STACK_SIZE);
|
2020-03-13 10:14:43 -07:00
|
|
|
|
2023-04-13 18:59:37 +02:00
|
|
|
static int cmd_kernel_stacks(const struct shell *sh,
|
2018-10-01 22:08:59 +02:00
|
|
|
size_t argc, char **argv)
|
2017-01-10 08:41:12 -05:00
|
|
|
{
|
2018-08-09 15:03:58 +02:00
|
|
|
ARG_UNUSED(argc);
|
|
|
|
ARG_UNUSED(argv);
|
2022-06-29 15:37:58 -05:00
|
|
|
char pad[THREAD_MAX_NAM_LEN] = { 0 };
|
|
|
|
|
|
|
|
memset(pad, ' ', MAX((THREAD_MAX_NAM_LEN - strlen("IRQ 00")), 1));
|
|
|
|
|
2023-12-19 16:42:34 +01:00
|
|
|
/*
|
|
|
|
* Use the unlocked version as the callback itself might call
|
|
|
|
* arch_irq_unlock.
|
|
|
|
*/
|
2023-06-14 11:33:14 -04:00
|
|
|
k_thread_foreach_unlocked(shell_stack_dump, (void *)sh);
|
2020-03-13 10:14:43 -07:00
|
|
|
|
|
|
|
/* Placeholder logic for interrupt stack until we have better
|
2020-03-12 15:37:29 -07:00
|
|
|
* kernel support, including dumping arch-specific exception-related
|
|
|
|
* stack buffers.
|
2020-03-13 10:14:43 -07:00
|
|
|
*/
|
2022-10-18 09:45:13 -05:00
|
|
|
unsigned int num_cpus = arch_num_cpus();
|
|
|
|
|
|
|
|
for (int i = 0; i < num_cpus; i++) {
|
2022-01-28 15:49:05 +01:00
|
|
|
size_t unused;
|
2024-03-22 12:56:12 -07:00
|
|
|
const uint8_t *buf = K_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]);
|
2022-01-28 15:49:05 +01:00
|
|
|
size_t size = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[i]);
|
|
|
|
int err = z_stack_space_get(buf, size, &unused);
|
|
|
|
|
|
|
|
(void)err;
|
|
|
|
__ASSERT_NO_MSG(err == 0);
|
2020-03-13 10:14:43 -07:00
|
|
|
|
2023-04-13 18:59:37 +02:00
|
|
|
shell_print(sh,
|
2022-11-10 14:11:07 +01:00
|
|
|
"%p IRQ %02d %s(real size %4zu):\tunused %4zu\tusage %4zu / %4zu (%2zu %%)",
|
2022-06-29 15:37:58 -05:00
|
|
|
&z_interrupt_stacks[i], i, pad, size, unused, size - unused, size,
|
|
|
|
((size - unused) * 100U) / size);
|
2020-03-12 15:37:29 -07:00
|
|
|
}
|
2020-03-13 10:14:43 -07:00
|
|
|
|
2018-10-01 22:08:59 +02:00
|
|
|
return 0;
|
2017-01-10 08:41:12 -05:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
kernel: Introduce a way to specify minimum system heap size
There are several subsystems and boards which require a relatively large
system heap (used by k_malloc()) to function properly. This became even
more notable with the recent introduction of the ACPICA library, which
causes ACPI-using boards to require a system heap of up to several
megabytes in size.
Until now, subsystems and boards have tried to solve this by having
Kconfig overlays which modify the default value of HEAP_MEM_POOL_SIZE.
This works ok, except when applications start explicitly setting values
in their prj.conf files:
$ git grep CONFIG_HEAP_MEM_POOL_SIZE= tests samples|wc -l
157
The vast majority of values set by current sample or test applications
is much too small for subsystems like ACPI, which results in the
application not being able to run on such boards.
To solve this situation, we introduce support for subsystems to specify
their own custom system heap size requirement. Subsystems do
this by defining Kconfig options with the prefix HEAP_MEM_POOL_ADD_SIZE_.
The final value of the system heap is the sum of the custom
minimum requirements, or the value existing HEAP_MEM_POOL_SIZE option,
whichever is greater.
We also introduce a new HEAP_MEM_POOL_IGNORE_MIN Kconfig option which
applications can use to force a lower value than what subsystems have
specficied, however this behavior is disabled by default.
Whenever the minimum is greater than the requested value a CMake warning
will be issued in the build output.
This patch ends up modifying several places outside of kernel code,
since the presence of the system heap is no longer detected using a
non-zero CONFIG_HEAP_MEM_POOL_SIZE value, rather it's now detected using
a new K_HEAP_MEM_POOL_SIZE value that's evaluated at build.
Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
2023-11-29 11:22:39 +02:00
|
|
|
#if defined(CONFIG_SYS_HEAP_RUNTIME_STATS) && (K_HEAP_MEM_POOL_SIZE > 0)
|
2023-02-13 09:27:58 +02:00
|
|
|
extern struct sys_heap _system_heap;
|
|
|
|
|
|
|
|
static int cmd_kernel_heap(const struct shell *sh,
|
|
|
|
size_t argc, char **argv)
|
|
|
|
{
|
|
|
|
ARG_UNUSED(argc);
|
|
|
|
ARG_UNUSED(argv);
|
|
|
|
|
|
|
|
int err;
|
|
|
|
struct sys_memory_stats stats;
|
|
|
|
|
|
|
|
err = sys_heap_runtime_stats_get(&_system_heap, &stats);
|
|
|
|
if (err) {
|
|
|
|
shell_error(sh, "Failed to read kernel system heap statistics (err %d)", err);
|
|
|
|
return -ENOEXEC;
|
|
|
|
}
|
|
|
|
|
|
|
|
shell_print(sh, "free: %zu", stats.free_bytes);
|
|
|
|
shell_print(sh, "allocated: %zu", stats.allocated_bytes);
|
|
|
|
shell_print(sh, "max. allocated: %zu", stats.max_allocated_bytes);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2022-05-25 09:17:47 +02:00
|
|
|
static int cmd_kernel_sleep(const struct shell *sh,
|
|
|
|
size_t argc, char **argv)
|
|
|
|
{
|
|
|
|
ARG_UNUSED(sh);
|
|
|
|
ARG_UNUSED(argc);
|
|
|
|
|
|
|
|
uint32_t ms;
|
|
|
|
int err = 0;
|
|
|
|
|
2022-08-03 08:29:18 +02:00
|
|
|
ms = shell_strtoul(argv[1], 10, &err);
|
2022-05-25 09:17:47 +02:00
|
|
|
|
|
|
|
if (!err) {
|
|
|
|
k_msleep(ms);
|
|
|
|
} else {
|
|
|
|
shell_error(sh, "Unable to parse input (err %d)", err);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-06-08 14:49:43 -07:00
|
|
|
#if defined(CONFIG_LOG_RUNTIME_FILTERING)
|
|
|
|
static int cmd_kernel_log_level_set(const struct shell *sh,
|
|
|
|
size_t argc, char **argv)
|
|
|
|
{
|
|
|
|
ARG_UNUSED(argc);
|
|
|
|
ARG_UNUSED(argv);
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
uint8_t severity = shell_strtoul(argv[2], 10, &err);
|
|
|
|
|
|
|
|
if (err) {
|
|
|
|
shell_error(sh, "Unable to parse log severity (err %d)", err);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (severity > LOG_LEVEL_DBG) {
|
|
|
|
shell_error(sh, "Invalid log level: %d", severity);
|
|
|
|
shell_help(sh);
|
|
|
|
return SHELL_CMD_HELP_PRINTED;
|
|
|
|
}
|
|
|
|
|
|
|
|
int source_id = log_source_id_get(argv[1]);
|
|
|
|
|
|
|
|
/* log_filter_set() takes an int16_t for the source ID */
|
|
|
|
if (source_id < 0) {
|
|
|
|
shell_error(sh, "Unable to find log source: %s", argv[1]);
|
|
|
|
}
|
|
|
|
|
|
|
|
log_filter_set(NULL, 0, (int16_t)source_id, severity);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2018-04-22 22:41:36 +02:00
|
|
|
#if defined(CONFIG_REBOOT)
|
2023-04-13 18:59:37 +02:00
|
|
|
static int cmd_kernel_reboot_warm(const struct shell *sh,
|
2018-10-01 22:08:59 +02:00
|
|
|
size_t argc, char **argv)
|
2018-04-22 22:41:36 +02:00
|
|
|
{
|
2018-08-09 15:03:58 +02:00
|
|
|
ARG_UNUSED(argc);
|
|
|
|
ARG_UNUSED(argv);
|
2021-05-15 09:29:07 +10:00
|
|
|
#if (CONFIG_KERNEL_SHELL_REBOOT_DELAY > 0)
|
|
|
|
k_sleep(K_MSEC(CONFIG_KERNEL_SHELL_REBOOT_DELAY));
|
|
|
|
#endif
|
2018-08-09 15:03:58 +02:00
|
|
|
sys_reboot(SYS_REBOOT_WARM);
|
2018-10-01 22:08:59 +02:00
|
|
|
return 0;
|
2018-04-22 22:41:36 +02:00
|
|
|
}
|
2018-08-09 15:03:58 +02:00
|
|
|
|
2023-04-13 18:59:37 +02:00
|
|
|
static int cmd_kernel_reboot_cold(const struct shell *sh,
|
2018-10-01 22:08:59 +02:00
|
|
|
size_t argc, char **argv)
|
2018-08-09 15:03:58 +02:00
|
|
|
{
|
|
|
|
ARG_UNUSED(argc);
|
|
|
|
ARG_UNUSED(argv);
|
2021-05-15 09:29:07 +10:00
|
|
|
#if (CONFIG_KERNEL_SHELL_REBOOT_DELAY > 0)
|
|
|
|
k_sleep(K_MSEC(CONFIG_KERNEL_SHELL_REBOOT_DELAY));
|
|
|
|
#endif
|
2018-08-09 15:03:58 +02:00
|
|
|
sys_reboot(SYS_REBOOT_COLD);
|
2018-10-01 22:08:59 +02:00
|
|
|
return 0;
|
2018-08-09 15:03:58 +02:00
|
|
|
}
|
|
|
|
|
2019-02-13 14:53:29 +01:00
|
|
|
/* Subcommand table for "kernel reboot". */
SHELL_STATIC_SUBCMD_SET_CREATE(sub_kernel_reboot,
	SHELL_CMD(cold, NULL, "Cold reboot.", cmd_kernel_reboot_cold),
	SHELL_CMD(warm, NULL, "Warm reboot.", cmd_kernel_reboot_warm),
	SHELL_SUBCMD_SET_END /* Array terminated. */
);
|
2018-04-22 22:41:36 +02:00
|
|
|
#endif
|
|
|
|
|
2019-02-13 14:53:29 +01:00
|
|
|
/* Subcommand table for the top-level "kernel" command.  Entries are
 * conditionally compiled to match the handlers defined above.
 */
SHELL_STATIC_SUBCMD_SET_CREATE(sub_kernel,
	SHELL_CMD(cycles, NULL, "Kernel cycles.", cmd_kernel_cycles),
#if defined(CONFIG_REBOOT)
	SHELL_CMD(reboot, &sub_kernel_reboot, "Reboot.", NULL),
#endif
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO) && \
	defined(CONFIG_THREAD_MONITOR)
	SHELL_CMD(stacks, NULL, "List threads stack usage.", cmd_kernel_stacks),
	SHELL_CMD(threads, NULL, "List kernel threads.", cmd_kernel_threads),
#if defined(CONFIG_ARCH_HAS_STACKWALK)
	SHELL_CMD_ARG(unwind, NULL, "Unwind a thread.", cmd_kernel_unwind, 1, 1),
#endif /* CONFIG_ARCH_HAS_STACKWALK */
#endif
#if defined(CONFIG_SYS_HEAP_RUNTIME_STATS) && (K_HEAP_MEM_POOL_SIZE > 0)
	SHELL_CMD(heap, NULL, "System heap usage statistics.", cmd_kernel_heap),
#endif
	SHELL_CMD_ARG(uptime, NULL, "Kernel uptime. Can be called with the -p or --pretty options",
		      cmd_kernel_uptime, 1, 1),
	SHELL_CMD(version, NULL, "Kernel version.", cmd_kernel_version),
	SHELL_CMD_ARG(sleep, NULL, "ms", cmd_kernel_sleep, 2, 0),
#if defined(CONFIG_LOG_RUNTIME_FILTERING)
	SHELL_CMD_ARG(log-level, NULL, "<module name> <severity (0-4)>",
		      cmd_kernel_log_level_set, 3, 0),
#endif
	SHELL_SUBCMD_SET_END /* Array terminated. */
);
|
2016-11-04 08:09:17 -04:00
|
|
|
|
2018-08-09 15:03:58 +02:00
|
|
|
/* Register the top-level "kernel" command group with the shell. */
SHELL_CMD_REGISTER(kernel, &sub_kernel, "Kernel commands", NULL);
|