/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <sys/printk.h>
#include <shell/shell.h>
#include <init.h>
#include <debug/object_tracing.h>
#include <power/reboot.h>
#include <debug/stack.h>
#include <string.h>
#include <device.h>
#include <drivers/timer/system_timer.h>
#include <kernel.h>
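
/* Shell "kernel" command group: reports the kernel version, uptime and
 * hardware cycle counter and, when the relevant Kconfig options are
 * enabled, per-thread and interrupt stack usage plus warm/cold reboot.
 */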

static int cmd_kernel_version(const struct shell *shell,
			      size_t argc, char **argv)
{
	uint32_t version = sys_kernel_version_get();

	ARG_UNUSED(argc);
	ARG_UNUSED(argv);

	shell_print(shell, "Zephyr version %d.%d.%d",
		    SYS_KERNEL_VER_MAJOR(version),
		    SYS_KERNEL_VER_MINOR(version),
		    SYS_KERNEL_VER_PATCHLEVEL(version));
	return 0;
}

static int cmd_kernel_uptime(const struct shell *shell,
			     size_t argc, char **argv)
{
	ARG_UNUSED(argc);
	ARG_UNUSED(argv);

	shell_print(shell, "Uptime: %u ms", k_uptime_get_32());
	return 0;
}

static int cmd_kernel_cycles(const struct shell *shell,
			     size_t argc, char **argv)
{
	ARG_UNUSED(argc);
	ARG_UNUSED(argv);

	shell_print(shell, "cycles: %u hw cycles", k_cycle_get_32());
	return 0;
}

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO) && \
	defined(CONFIG_THREAD_MONITOR)
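
/* k_thread_foreach() callback: prints one thread's identity, scheduling
 * state, optional runtime statistics and stack usage to the shell passed
 * in via user_data.
 */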
static void shell_tdata_dump(const struct k_thread *cthread, void *user_data)
{
	struct k_thread *thread = (struct k_thread *)cthread;
	const struct shell *shell = (const struct shell *)user_data;
	unsigned int pcnt;
	size_t unused;
	size_t size = thread->stack_info.size;
	const char *tname;
	int ret;

#ifdef CONFIG_THREAD_RUNTIME_STATS
	k_thread_runtime_stats_t rt_stats_thread;
	k_thread_runtime_stats_t rt_stats_all;
#endif

	tname = k_thread_name_get(thread);

	shell_print(shell, "%s%p %-10s",
		    (thread == k_current_get()) ? "*" : " ",
		    thread,
		    tname ? tname : "NA");
	shell_print(shell, "\toptions: 0x%x, priority: %d timeout: %d",
		    thread->base.user_options,
		    thread->base.prio,
		    thread->base.timeout.dticks);
	shell_print(shell, "\tstate: %s", k_thread_state_str(thread));

#ifdef CONFIG_THREAD_RUNTIME_STATS
	ret = 0;

	if (k_thread_runtime_stats_get(thread, &rt_stats_thread) != 0) {
		ret++;
	}

	if (k_thread_runtime_stats_all_get(&rt_stats_all) != 0) {
		ret++;
	}

	if (ret == 0) {
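		/* Thread's share of the CPU cycles consumed by all threads */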
		pcnt = (rt_stats_thread.execution_cycles * 100U) /
		       rt_stats_all.execution_cycles;

		/*
		 * z_prf() does not support %llu by default unless
		 * CONFIG_MINIMAL_LIBC_LL_PRINTF=y, so use conditional
		 * compilation here rather than enabling that option
		 * blindly and increasing RAM/ROM usage on 32-bit targets.
		 */
#ifdef CONFIG_64BIT
		shell_print(shell, "\tTotal execution cycles: %llu (%u %%)",
			    rt_stats_thread.execution_cycles,
			    pcnt);
#else
		shell_print(shell, "\tTotal execution cycles: %lu (%u %%)",
			    (unsigned long)rt_stats_thread.execution_cycles,
			    pcnt);
#endif
	} else {
		shell_print(shell, "\tTotal execution cycles: ? (? %%)");
	}
#endif

	ret = k_thread_stack_space_get(thread, &unused);
	if (ret) {
		shell_print(shell,
			    "Unable to determine unused stack size (%d)\n",
			    ret);
	} else {
		/* Calculate the real size reserved for the stack */
		pcnt = ((size - unused) * 100U) / size;

		shell_print(shell,
			    "\tstack size %zu, unused %zu, usage %zu / %zu (%u %%)\n",
			    size, unused, size - unused, size, pcnt);
	}
}

static int cmd_kernel_threads(const struct shell *shell,
			      size_t argc, char **argv)
{
	ARG_UNUSED(argc);
	ARG_UNUSED(argv);

	shell_print(shell, "Scheduler: %u since last call", z_clock_elapsed());
	shell_print(shell, "Threads:");
	k_thread_foreach(shell_tdata_dump, (void *)shell);
	return 0;
}
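
/* k_thread_foreach() callback: prints a single thread's stack size and
 * high-water usage to the shell passed in via user_data.
 */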
static void shell_stack_dump(const struct k_thread *thread, void *user_data)
{
	const struct shell *shell = (const struct shell *)user_data;
	unsigned int pcnt;
	size_t unused;
	size_t size = thread->stack_info.size;
	const char *tname;
	int ret;

	ret = k_thread_stack_space_get(thread, &unused);
	if (ret) {
		shell_print(shell,
			    "Unable to determine unused stack size (%d)\n",
			    ret);
		return;
	}

	tname = k_thread_name_get((struct k_thread *)thread);

	/* Calculate the real size reserved for the stack */
	pcnt = ((size - unused) * 100U) / size;

	shell_print(shell,
		    "%p %-10s (real size %zu):\tunused %zu\tusage %zu / %zu (%u %%)",
		    thread,
		    tname ? tname : "NA",
		    size, unused, size - unused, size, pcnt);
}
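
/* Interrupt stacks are defined by the kernel; they are declared here so
 * that their usage can be reported alongside the thread stacks.
 */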
extern K_KERNEL_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);

static int cmd_kernel_stacks(const struct shell *shell,
			     size_t argc, char **argv)
{
	uint8_t *buf;
	size_t size, unused;

	ARG_UNUSED(argc);
	ARG_UNUSED(argv);
	k_thread_foreach(shell_stack_dump, (void *)shell);

	/* Placeholder logic for interrupt stack until we have better
	 * kernel support, including dumping arch-specific exception-related
	 * stack buffers.
	 */
	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
		buf = Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]);
		size = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[i]);

		unused = 0;
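		/* CONFIG_INIT_STACKS fills stack buffers with 0xAA; bytes at
		 * the low end of a descending stack that still hold that
		 * pattern were never written, so counting them gives the
		 * unused size.
		 */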
		for (size_t j = 0; j < size; j++) {
			if (buf[j] == 0xAAU) {
				unused++;
			} else {
				break;
			}
		}

		shell_print(shell,
			    "%p IRQ %02d (real size %zu):\tunused %zu\tusage %zu / %zu (%zu %%)",
			    &z_interrupt_stacks[i], i, size, unused,
			    size - unused, size,
			    ((size - unused) * 100U) / size);
	}

	return 0;
}
#endif

#if defined(CONFIG_REBOOT)
static int cmd_kernel_reboot_warm(const struct shell *shell,
				  size_t argc, char **argv)
{
	ARG_UNUSED(argc);
	ARG_UNUSED(argv);
	sys_reboot(SYS_REBOOT_WARM);
	return 0;
}

static int cmd_kernel_reboot_cold(const struct shell *shell,
				  size_t argc, char **argv)
{
	ARG_UNUSED(argc);
	ARG_UNUSED(argv);
	sys_reboot(SYS_REBOOT_COLD);
	return 0;
}

SHELL_STATIC_SUBCMD_SET_CREATE(sub_kernel_reboot,
	SHELL_CMD(cold, NULL, "Cold reboot.", cmd_kernel_reboot_cold),
	SHELL_CMD(warm, NULL, "Warm reboot.", cmd_kernel_reboot_warm),
	SHELL_SUBCMD_SET_END /* Array terminated. */
);
#endif

SHELL_STATIC_SUBCMD_SET_CREATE(sub_kernel,
	SHELL_CMD(cycles, NULL, "Kernel cycles.", cmd_kernel_cycles),
#if defined(CONFIG_REBOOT)
	SHELL_CMD(reboot, &sub_kernel_reboot, "Reboot.", NULL),
#endif
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO) && \
	defined(CONFIG_THREAD_MONITOR)
	SHELL_CMD(stacks, NULL, "List threads stack usage.", cmd_kernel_stacks),
	SHELL_CMD(threads, NULL, "List kernel threads.", cmd_kernel_threads),
#endif
	SHELL_CMD(uptime, NULL, "Kernel uptime.", cmd_kernel_uptime),
	SHELL_CMD(version, NULL, "Kernel version.", cmd_kernel_version),
	SHELL_SUBCMD_SET_END /* Array terminated. */
);

SHELL_CMD_REGISTER(kernel, &sub_kernel, "Kernel commands", NULL);
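
/*
 * Illustrative shell session (exact values depend on the build and on
 * runtime state; the "threads"/"stacks" and "reboot" subcommands only
 * appear when the corresponding Kconfig options are enabled):
 *
 *   uart:~$ kernel version
 *   Zephyr version 2.4.0
 *   uart:~$ kernel uptime
 *   Uptime: 423210 ms
 *   uart:~$ kernel cycles
 *   cycles: 1383581432 hw cycles
 *   uart:~$ kernel reboot cold
 */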