diff --git a/arch/x86/core/intel64/thread.c b/arch/x86/core/intel64/thread.c
index 451f34b50ca..d0e44503948 100644
--- a/arch/x86/core/intel64/thread.c
+++ b/arch/x86/core/intel64/thread.c
@@ -12,6 +12,14 @@
 
 extern void x86_sse_init(struct k_thread *); /* in locore.S */
 
+/* FIXME: This exists to make space for a "return address" at the top
+ * of the stack. Obviously this is unused at runtime, but is required
+ * for alignment: stacks at runtime should be 16-byte aligned, and a
+ * CALL will therefore push a return address that leaves the stack
+ * misaligned. Effectively we're wasting 8 bytes here to undo (!) the
+ * alignment that the upper level code already tried to do for us. We
+ * should clean this up.
+ */
 struct x86_initial_frame {
 	/* zeroed return address for ABI */
 	uint64_t rip;
diff --git a/arch/x86/core/userspace.c b/arch/x86/core/userspace.c
index 98027ff50fa..43d02af23ec 100644
--- a/arch/x86/core/userspace.c
+++ b/arch/x86/core/userspace.c
@@ -87,7 +87,7 @@ void *z_x86_userspace_prepare_thread(struct k_thread *thread)
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 					void *p1, void *p2, void *p3)
 {
-	uint32_t stack_end;
+	size_t stack_end;
 
 	/* Transition will reset stack pointer to initial, discarding
 	 * any old context since this is a one-way operation
@@ -96,6 +96,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 			  _current->stack_info.size -
 			  _current->stack_info.delta);
 
+#ifdef CONFIG_X86_64
+	/* x86_64 SysV ABI requires 16 byte stack alignment, which
+	 * means that on entry to a C function (which follows a CALL
+	 * that pushes 8 bytes) the stack must be MISALIGNED by
+	 * exactly 8 bytes.
+	 */
+	stack_end -= 8;
+#endif
+
 	z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end,
 			      _current->stack_info.start);
 	CODE_UNREACHABLE;
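
Not part of the patch, but for reviewers: a minimal, self-contained sketch of the alignment math that the new `stack_end -= 8` relies on. The x86_64 SysV ABI wants RSP 16-byte aligned at the point of a CALL, so on entry to the callee RSP % 16 == 8; handing a thread a perfectly 16-byte aligned stack top therefore looks wrong to the callee's prologue. The function and variable names below are hypothetical and do not come from the Zephyr tree.

/* Illustration only: back a 16-byte aligned stack top off by 8 bytes,
 * so the entry point sees the same misalignment a CALL would produce.
 */
#include <stdint.h>
#include <stdio.h>

static uintptr_t initial_user_rsp(uintptr_t stack_start, size_t stack_size)
{
	/* 16-byte aligned top of the stack region... */
	uintptr_t top = (stack_start + stack_size) & ~(uintptr_t)15;

	/* ...minus the 8 bytes a pushed return address would occupy */
	return top - 8;
}

int main(void)
{
	static uint8_t stack[4096] __attribute__((aligned(16)));
	uintptr_t rsp = initial_user_rsp((uintptr_t)stack, sizeof(stack));

	printf("rsp %% 16 = %lu\n", (unsigned long)(rsp % 16)); /* prints 8 */
	return 0;
}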