debug: add stack sentinel feature
This places a sentinel value at the lowest 4 bytes of a stack memory region and checks it at various intervals, including when servicing interrupts or context switching. This is implemented on all arches except ARC, which supports stack bounds checking directly in hardware.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent 9175d20ceb
commit 5dcb279df8

19 changed files with 162 additions and 19 deletions
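For orientation, the mechanism reduces to a few lines of plain C. The sketch below is illustrative only and not part of the diff; the array and helper names are made up, while STACK_SENTINEL and _check_stack_sentinel() in the hunks that follow are the identifiers this commit actually adds.

#include <stdint.h>

#define SENTINEL_VALUE 0xF0F0F0F0u

/* A descending stack: the CPU pushes toward lower addresses, so an
 * overflow reaches stack_area[0] first.
 */
static uint32_t stack_area[256];

static void sentinel_init(void)
{
	/* write the magic value into the lowest 4 bytes of the stack area */
	stack_area[0] = SENTINEL_VALUE;
}

static int sentinel_ok(void)
{
	/* if the word was overwritten, the stack overflowed at some point */
	return stack_area[0] == SENTINEL_VALUE;
}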
@@ -103,4 +103,15 @@ _ExcExitWithGdbStub:
 _EXIT_EXC:
 #endif /* CONFIG_PREEMPT_ENABLED */
+
+#ifdef CONFIG_STACK_SENTINEL
+	push {lr}
+	bl _check_stack_sentinel
+#if defined(CONFIG_ARMV6_M)
+	pop {r0}
+	mov lr, r0
+#else
+	pop {lr}
+#endif /* CONFIG_ARMV6_M */
+#endif /* CONFIG_STACK_SENTINEL */
 
 	bx lr

@@ -50,7 +50,7 @@ void _NanoFatalErrorHandler(unsigned int reason,
 		printk("***** Invalid Exit Software Error! *****\n");
 		break;
 
-#if defined(CONFIG_STACK_CANARIES)
+#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL)
 	case _NANO_ERR_STACK_CHK_FAIL:
 		printk("***** Stack Check Fail! *****\n");
 		break;

@@ -47,13 +47,24 @@ GDATA(_kernel)
 
 SECTION_FUNC(TEXT, __pendsv)
 
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
+#if defined (CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH) || \
+	defined(CONFIG_STACK_SENTINEL)
 	/* Register the context switch */
 	push {lr}
+
+#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	bl _sys_k_event_logger_context_switch
-	pop {r0}
-	mov lr, r0
-#endif
+#endif
+#ifdef CONFIG_STACK_SENTINEL
+	bl _check_stack_sentinel
+#endif
+#if defined(CONFIG_ARMV6_M)
+	pop {r0}
+	mov lr, r0
+#else
+	pop {lr}
+#endif /* CONFIG_ARMV6_M */
+#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH || CONFIG_STACK_SENTINEL */
 
 	/* load _kernel into r1 and current k_thread into r2 */
 	ldr r1, =_kernel

@@ -73,6 +73,11 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
 		printk("***** Kernel Panic! *****\n");
 		break;
 
+#ifdef CONFIG_STACK_SENTINEL
+	case _NANO_ERR_STACK_CHK_FAIL:
+		printk("***** Stack overflow *****\n");
+		break;
+#endif
 	default:
 		printk("**** Unknown Fatal Error %u! ****\n", reason);
 		break;

@@ -18,6 +18,7 @@
 #include <misc/printk.h>
 #include <sw_isr_table.h>
 #include <logging/kernel_event_logger.h>
+#include <ksched.h>
 
 void _irq_spurious(void *unused)
 {

@@ -91,5 +92,8 @@ void _enter_irq(u32_t ipending)
 	}
 
 	_kernel.nested--;
+#ifdef CONFIG_STACK_SENTINEL
+	_check_stack_sentinel();
+#endif
 }
 

@@ -59,15 +59,16 @@ SECTION_FUNC(exception.other, __swap)
 
 #if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	call _sys_k_event_logger_context_switch
-	/* Restore caller-saved r10. We could have stuck its value
-	 * onto the stack, but less instructions to just use immediates
-	 */
-	movhi r10, %hi(_kernel)
-	ori r10, r10, %lo(_kernel)
-#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
+#endif
+#ifdef CONFIG_STACK_SENTINEL
+	call _check_stack_sentinel
+#endif
+#if defined(CONFIG_STACK_SENTINEL) || \
+	defined (CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH)
+	/* restore caller-saved r10 */
+	movhi r10, %hi(_kernel)
+	ori r10, r10, %lo(_kernel)
+#endif
 
 	/* get cached thread to run */
 	ldw r2, _kernel_offset_to_ready_q_cache(r10)

@@ -69,7 +69,7 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
 		printk("***** Invalid Exit Software Error! *****\n");
 		break;
 
-#if defined(CONFIG_STACK_CANARIES)
+#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL)
 	case _NANO_ERR_STACK_CHK_FAIL:
 		printk("***** Stack Check Fail! *****\n");
 		break;

@@ -270,6 +270,11 @@ on_thread_stack:
 	addi t2, t2, -1
 	sw t2, _kernel_offset_to_nested(t1)
 
+#ifdef CONFIG_STACK_SENTINEL
+	call _check_stack_sentinel
+	la t1, _kernel
+#endif
+
 	/* Restore thread stack pointer */
 	lw t0, 0x00(sp)
 	addi sp, t0, 0

@@ -304,7 +309,9 @@ reschedule:
 #if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	call _sys_k_event_logger_context_switch
 #endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
 
+#ifdef CONFIG_STACK_SENTINEL
+	call _check_stack_sentinel
+#endif
 	/* Get reference to _kernel */
 	la t0, _kernel

@@ -367,7 +374,6 @@ no_reschedule:
 	/* Restore context at SOC level */
 	jal ra, __soc_restore_context
 #endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
-
 	/* Restore caller-saved registers from thread stack */
 	lw ra, __NANO_ESF_ra_OFFSET(sp)
 	lw gp, __NANO_ESF_gp_OFFSET(sp)

@@ -68,7 +68,7 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
 		printk("***** Invalid Exit Software Error! *****\n");
 		break;
 
-#if defined(CONFIG_STACK_CANARIES)
+#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL)
 	case _NANO_ERR_STACK_CHK_FAIL:
 		printk("***** Stack Check Fail! *****\n");
 		break;

@@ -397,7 +397,9 @@ noReschedule:
 
 	popl %esp	/* pop thread stack pointer */
 
-
+#ifdef CONFIG_STACK_SENTINEL
+	call _check_stack_sentinel
+#endif
 	/* fall through to 'nestedInterrupt' */
 
 

@@ -27,6 +27,9 @@
 
 /* externs */
 GDATA(_k_neg_eagain)
+#ifdef CONFIG_STACK_SENTINEL
+GTEXT(_check_stack_sentinel)
+#endif
 
 /**
  *

@@ -139,6 +142,9 @@ SECTION_FUNC(TEXT, __swap)
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	/* Register the context switch */
 	call _sys_k_event_logger_context_switch
 #endif
+#ifdef CONFIG_STACK_SENTINEL
+	call _check_stack_sentinel
+#endif
 	movl _kernel_offset_to_ready_q_cache(%edi), %eax

@@ -55,7 +55,7 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
 	case _NANO_ERR_INVALID_TASK_EXIT:
 		printk("***** Invalid Exit Software Error! *****\n");
 		break;
-#if defined(CONFIG_STACK_CANARIES)
+#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL)
 	case _NANO_ERR_STACK_CHK_FAIL:
 		printk("***** Stack Check Fail! *****\n");
 		break;

@@ -82,6 +82,13 @@ __swap:
 #else
 	call4 _sys_k_event_logger_context_switch
 #endif
 #endif
+#ifdef CONFIG_STACK_SENTINEL
+#ifdef __XTENSA_CALL0_ABI__
+	call0 _check_stack_sentinel
+#else
+	call4 _check_stack_sentinel
+#endif
+#endif
 	/* _thread := _kernel.ready_q.cache */
 	l32i a3, a2, KERNEL_OFFSET(ready_q_cache)

@@ -68,6 +68,13 @@ _zxt_dispatch:
 	/* Restore CPENABLE from task's co-processor save area. */
 	l16ui a3, a3, THREAD_OFFSET(cpEnable)	/* a3 := cp_state->cpenable */
 	wsr a3, CPENABLE
 #endif
+#ifdef CONFIG_STACK_SENTINEL
+#ifdef __XTENSA_CALL0_ABI__
+	call0 _check_stack_sentinel
+#else
+	call4 _check_stack_sentinel
+#endif
+#endif
 	/*
 	 * Interrupt stack frame.

@@ -45,6 +45,10 @@
 
 /* end - states */
 
+#ifdef CONFIG_STACK_SENTINEL
+/* Magic value in lowest bytes of the stack */
+#define STACK_SENTINEL 0xF0F0F0F0
+#endif
 
 /* lowest value of _thread_base.preempt at which a thread is non-preemptible */
 #define _NON_PREEMPT_THRESHOLD 0x0080

@@ -154,6 +158,13 @@ static ALWAYS_INLINE void _new_thread_init(struct k_thread *thread,
 #ifdef CONFIG_INIT_STACKS
 	memset(pStack, 0xaa, stackSize);
 #endif
+#ifdef CONFIG_STACK_SENTINEL
+	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
+	 * We periodically check that it's still present and kill the thread
+	 * if it isn't.
+	 */
+	*((u32_t *)pStack) = STACK_SENTINEL;
+#endif /* CONFIG_STACK_SENTINEL */
 	/* Initialize various struct k_thread members */
 	_init_thread_base(&thread->base, prio, _THREAD_PRESTART, options);

@@ -31,6 +31,9 @@ extern void _update_time_slice_before_swap(void);
 extern s32_t _ms_to_ticks(s32_t ms);
 #endif
 extern void idle(void *, void *, void *);
+#ifdef CONFIG_STACK_SENTINEL
+extern void _check_stack_sentinel(void);
+#endif
 
 /* find which one is the next thread to run */
 /* must be called with interrupts locked */

@@ -297,6 +297,9 @@ void k_yield(void)
 
 	if (_current == _get_next_ready_thread()) {
 		irq_unlock(key);
+#ifdef CONFIG_STACK_SENTINEL
+		_check_stack_sentinel();
+#endif
 	} else {
 		_Swap(key);
 	}

@@ -133,6 +133,44 @@ void _thread_monitor_exit(struct k_thread *thread)
 }
 #endif /* CONFIG_THREAD_MONITOR */
 
+#ifdef CONFIG_STACK_SENTINEL
+/* Check that the stack sentinel is still present
+ *
+ * The stack sentinel feature writes a magic value to the lowest 4 bytes of
+ * the thread's stack when the thread is initialized. This value gets checked
+ * in a few places:
+ *
+ * 1) In k_yield() if the current thread is not swapped out
+ * 2) In the interrupt code, after the interrupt has been serviced, and
+ *    a decision *not* to call _Swap() has been made.
+ * 3) In _Swap(), check the sentinel in the outgoing thread
+ * 4) When a thread returns from its entry function to cooperatively terminate
+ *
+ * Items 2 and 3 require support in arch/ code.
+ *
+ * If the check fails, the thread will be terminated appropriately through
+ * the system fatal error handler.
+ */
+void _check_stack_sentinel(void)
+{
+	u32_t *stack;
+
+	if (_is_thread_prevented_from_running(_current)) {
+		/* Filter out threads that are dummy threads or already
+		 * marked for termination (_THREAD_DEAD)
+		 */
+		return;
+	}
+
+	stack = (u32_t *)_current->stack_info.start;
+	if (*stack != STACK_SENTINEL) {
+		/* Restore it so further checks don't trigger this same error */
+		*stack = STACK_SENTINEL;
+		_k_except_reason(_NANO_ERR_STACK_CHK_FAIL);
+	}
+}
+#endif
+
 /*
  * Common thread entry point function (used by all threads)
  *

@@ -148,6 +186,9 @@ FUNC_NORETURN void _thread_entry(void (*entry)(void *, void *, void *),
 {
 	entry(p1, p2, p3);
 
+#ifdef CONFIG_STACK_SENTINEL
+	_check_stack_sentinel();
+#endif
 #ifdef CONFIG_MULTITHREADING
 	if (_is_thread_essential()) {
 		_k_except_reason(_NANO_ERR_INVALID_TASK_EXIT);

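A usage sketch, not part of this commit: with CONFIG_STACK_SENTINEL enabled, a thread that overruns its stack (here via unbounded recursion) overwrites the sentinel word, and the next check point reached, be it interrupt exit, a context switch, k_yield(), or return from the entry function, reports _NANO_ERR_STACK_CHK_FAIL. The helper and entry-point names below are hypothetical.

#include <kernel.h>

/* Hypothetical helper: consumes stack frame by frame until the region,
 * and with it the sentinel word at its lowest address, is overwritten.
 */
static int blow_stack(int depth)
{
	volatile char pad[64];		/* burn 64 bytes per call */

	pad[0] = (char)depth;
	return blow_stack(depth + 1) + pad[0];
}

/* Hypothetical thread entry point demonstrating detection */
void overflow_thread(void *p1, void *p2, void *p3)
{
	blow_stack(0);	/* overruns the stack region */
	k_yield();	/* one of the four check points; in practice an
			 * interrupt or context switch catches it earlier
			 */
}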
@@ -23,6 +23,31 @@ config STACK_USAGE
	  Generate an extra file that specifies the maximum amount of stack used,
	  on a per-function basis.
 
+config STACK_SENTINEL
+	bool "Enable stack sentinel"
+	select THREAD_STACK_INFO
+	default n
+	help
+	  Store a magic value at the lowest addresses of a thread's stack.
+	  Periodically check that this value is still present and kill the
+	  thread gracefully if it isn't. This is currently checked in four
+	  places:
+
+	  1) Upon any context switch for the outgoing thread
+	  2) Any hardware interrupt that doesn't context switch, the check is
+	     performed for the interrupted thread
+	  3) When a thread returns from its entry point
+	  4) When a thread calls k_yield() but doesn't context switch
+
+	  This feature doesn't prevent corruption and the system may be
+	  in an unusable state. However, given the bizarre behavior associated
+	  with stack overflows, knowledge that this is happening is very
+	  useful.
+
+	  This feature is intended for those systems which lack hardware support
+	  for stack overflow protection, or have insufficient system resources
+	  to use that hardware support.
+
 config PRINTK
 	bool
 	prompt "Send printk() to console"
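A minimal configuration sketch, assuming the conventional Zephyr application config file: enabling the new symbol is sufficient, since THREAD_STACK_INFO is selected automatically by the Kconfig entry above.

# prj.conf
CONFIG_STACK_SENTINEL=y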