stack_sentinel: change cooperative check

One of the stack sentinel policies was to check the sentinel
any time a cooperative context switch is done (i.e., any time
_Swap is called).

Previously, this was done by adding a hook that calls
_check_stack_sentinel in every arch's __swap function.

The check now lives in the inline _Swap() wrapper instead, which
is cleaner: the hook sits in one inline C function rather than
being implemented in several different assembly dialects.

The check upon interrupt is now made unconditionally, rather than
only when a decision not to call __swap has been made, since the
check in _Swap() now covers only cooperative context switches.
The interrupt is always serviced first.

Issue: ZEP-2244
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2017-06-07 09:33:16 -07:00; committed by Maureen Helm
commit ae1a75b82e
9 changed files with 27 additions and 48 deletions
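
The consolidated hook is easiest to see in C. The following sketch
restates the kernel header change from the diffs below, with
explanatory comments added; it is a summary of this commit, not new
code:

/* Cooperative swaps now funnel through _Swap(), which runs the C
 * hooks once and then calls the arch-specific __swap.
 */
extern unsigned int __swap(unsigned int key);

#ifdef CONFIG_TIMESLICING
extern void _update_time_slice_before_swap(void);
#endif

#ifdef CONFIG_STACK_SENTINEL
extern void _check_stack_sentinel(void);
#endif

static inline unsigned int _Swap(unsigned int key)
{
#ifdef CONFIG_STACK_SENTINEL
	/* Verify the outgoing thread's stack before switching away */
	_check_stack_sentinel();
#endif
#ifdef CONFIG_TIMESLICING
	_update_time_slice_before_swap();
#endif
	return __swap(key);	/* arch-specific context switch */
}

Cooperative callers such as k_yield() keep calling _Swap() unchanged
and pick up the sentinel check automatically; interrupt-driven
preemption reaches __swap directly from the interrupt stubs,
bypassing this inline, which is why the interrupt path now runs the
check itself, unconditionally.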


@@ -42,24 +42,17 @@ GDATA(_kernel)
 SECTION_FUNC(TEXT, __pendsv)
-#if defined (CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH) || \
-	defined(CONFIG_STACK_SENTINEL)
+#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	/* Register the context switch */
 	push {lr}
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	bl _sys_k_event_logger_context_switch
-#endif
-#ifdef CONFIG_STACK_SENTINEL
-	bl _check_stack_sentinel
-#endif
 #if defined(CONFIG_ARMV6_M)
 	pop {r0}
 	mov lr, r0
 #else
 	pop {lr}
 #endif /* CONFIG_ARMV6_M */
-#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH || CONFIG_STACK_SENTINEL */
+#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
 	/* load _kernel into r1 and current k_thread into r2 */
 	ldr r1, =_kernel


@@ -59,12 +59,6 @@ SECTION_FUNC(exception.other, __swap)
 #if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	call _sys_k_event_logger_context_switch
-#endif
-#ifdef CONFIG_STACK_SENTINEL
-	call _check_stack_sentinel
-#endif
-#if defined(CONFIG_STACK_SENTINEL) || \
-	defined (CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH)
 	/* restore caller-saved r10 */
 	movhi r10, %hi(_kernel)
 	ori r10, r10, %lo(_kernel)


@@ -274,15 +274,15 @@ on_thread_stack:
 	addi t2, t2, -1
 	sw t2, _kernel_offset_to_nested(t1)
-	/* Restore thread stack pointer */
-	lw t0, 0x00(sp)
-	addi sp, t0, 0
 #ifdef CONFIG_STACK_SENTINEL
 	call _check_stack_sentinel
 	la t1, _kernel
 #endif
+	/* Restore thread stack pointer */
+	lw t0, 0x00(sp)
+	addi sp, t0, 0
 #ifdef CONFIG_PREEMPT_ENABLED
 	/*
 	 * Check if we need to perform a reschedule

@@ -316,9 +316,6 @@ reschedule:
 #if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	call _sys_k_event_logger_context_switch
 #endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
-#ifdef CONFIG_STACK_SENTINEL
-	call _check_stack_sentinel
-#endif
 	/* Get reference to _kernel */
 	la t0, _kernel


@@ -342,6 +342,9 @@ alreadyOnIntStack:
 #if defined(CONFIG_TIMESLICING)
 	call _update_time_slice_before_swap
+#endif
+#ifdef CONFIG_STACK_SENTINEL
+	call _check_stack_sentinel
 #endif
 	pushfl			/* push KERNEL_LOCK_KEY argument */
 #ifdef CONFIG_X86_IAMCU

@@ -398,8 +401,9 @@ noReschedule:
 	popl %esp		/* pop thread stack pointer */
 #ifdef CONFIG_STACK_SENTINEL
 	call _check_stack_sentinel
 #endif
 	/* fall through to 'nestedInterrupt' */


@@ -27,9 +27,6 @@
 /* externs */
 GDATA(_k_neg_eagain)
-#ifdef CONFIG_STACK_SENTINEL
-GTEXT(_check_stack_sentinel)
-#endif

 /**
  *

@@ -142,9 +139,6 @@ SECTION_FUNC(TEXT, __swap)
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	/* Register the context switch */
 	call _sys_k_event_logger_context_switch
-#endif
-#ifdef CONFIG_STACK_SENTINEL
-	call _check_stack_sentinel
 #endif
 	movl _kernel_offset_to_ready_q_cache(%edi), %eax


@@ -82,13 +82,6 @@ __swap:
 #else
 	call4 _sys_k_event_logger_context_switch
 #endif
-#endif
-#ifdef CONFIG_STACK_SENTINEL
-#ifdef __XTENSA_CALL0_ABI__
-	call0 _check_stack_sentinel
-#else
-	call4 _check_stack_sentinel
-#endif
 #endif
 	/* _thread := _kernel.ready_q.cache */
 	l32i a3, a2, KERNEL_OFFSET(ready_q_cache)


@@ -31,9 +31,6 @@ extern void _update_time_slice_before_swap(void);
 extern s32_t _ms_to_ticks(s32_t ms);
 #endif
 extern void idle(void *, void *, void *);
-#ifdef CONFIG_STACK_SENTINEL
-extern void _check_stack_sentinel(void);
-#endif

 /* find which one is the next thread to run */
 /* must be called with interrupts locked */


@@ -52,19 +52,27 @@ extern void _new_thread(struct k_thread *thread, char *pStack, size_t stackSize,
 extern unsigned int __swap(unsigned int key);

-#if defined(CONFIG_TIMESLICING)
+#ifdef CONFIG_TIMESLICING
 extern void _update_time_slice_before_swap(void);
+#endif

-static inline unsigned int _time_slice_swap(unsigned int key)
+#ifdef CONFIG_STACK_SENTINEL
+extern void _check_stack_sentinel(void);
+#endif
+
+static inline unsigned int _Swap(unsigned int key)
 {
+#ifdef CONFIG_STACK_SENTINEL
+	_check_stack_sentinel();
+#endif
+#ifdef CONFIG_TIMESLICING
 	_update_time_slice_before_swap();
+#endif
 	return __swap(key);
 }

-#define _Swap(x) _time_slice_swap(x)
-#else
-#define _Swap(x) __swap(x)
-#endif

 /* set and clear essential fiber/task flag */
 extern void _thread_essential_set(void);


@@ -141,12 +141,11 @@ void _thread_monitor_exit(struct k_thread *thread)
  * in a few places:
  *
  * 1) In k_yield() if the current thread is not swapped out
- * 2) In the interrupt code, after the interrupt has been serviced, and
- *    a decision *not* to call _Swap() has been made.
+ * 2) After servicing a non-nested interrupt
  * 3) In _Swap(), check the sentinel in the outgoing thread
  * 4) When a thread returns from its entry function to cooperatively terminate
  *
- * Items 2 and 3 require support in arch/ code.
+ * Item 2 requires support in arch/ code.
  *
  * If the check fails, the thread will be terminated appropriately through
  * the system fatal error handler.
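
For reference, the check itself can be sketched as follows. This is
an illustration of the mechanism only, not the kernel's verbatim
implementation; the sentinel value, _current, stack_info.start, and
_k_except_reason are assumptions here:

/* Illustrative sketch: assume a magic word is planted at the lowest
 * address of each thread stack when the thread is created, and
 * verify it is still intact at each check point.
 */
#define STACK_SENTINEL 0xF0F0F0F0	/* assumed magic value */

void _check_stack_sentinel(void)
{
	u32_t *sentinel = (u32_t *)_current->stack_info.start;

	if (*sentinel != STACK_SENTINEL) {
		/* Rewrite the magic word so the error path does not
		 * re-trigger this same check, then report the overflow
		 * through the system fatal error handler.
		 */
		*sentinel = STACK_SENTINEL;
		_k_except_reason(_NANO_ERR_STACK_CHK_FAIL);
	}
}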