x86: add system call functions for 64-bit

Nothing too fancy here, we try as much as possible to
use the same register layout as the C calling convention.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2019-11-19 18:08:21 -08:00 committed by Anas Nashif
commit c71e66e2a5
3 changed files with 192 additions and 2 deletions

View file

@ -5,7 +5,7 @@
*/
#include <generated_dts_board.h>
#include <arch/cpu.h>
#include <kernel.h>
#include <sys/util.h>
/* Super-primitive 8250/16550 serial output-only driver, 115200 8n1 */

View file

@ -9,8 +9,12 @@
#ifndef ZEPHYR_INCLUDE_ARCH_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_SYSCALL_H_
#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
#if defined(CONFIG_X86)
#if defined(CONFIG_X86_64)
#include <arch/x86/intel64/syscall.h>
#else
#include <arch/x86/ia32/syscall.h>
#endif
#elif defined(CONFIG_ARM)
#include <arch/arm/aarch32/syscall.h>
#elif defined(CONFIG_ARC)

View file

@ -0,0 +1,186 @@
/*
* Copyright (c) 2019 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * @file
 * @brief x86 (INTEL64) specific syscall header
 *
 * This header contains the x86 specific syscall interface. It is
 * included by the syscall interface architecture-abstraction header
 * (include/arch/syscall.h)
 */
#ifndef ZEPHYR_INCLUDE_ARCH_X86_INTEL64_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_X86_INTEL64_SYSCALL_H_
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* x86_64 System V calling convention:
* First six arguments passed in via RDI, RSI, RDX, RCX, R8, R9
* We'll use RAX for the call_id, and the return value
*
* Arrange registers so that they are in-place as much as possible when
* doing the system call. Because RCX gets overwritten by the CPU, put arg 4
* in r10 instead.
*
* SYSCALL instruction stores return address in RCX and RFLAGS in R11. RIP is
* loaded from LSTAR MSR, RFLAGS is masked with the complement of the SFMASK
* (IA32_FMASK) MSR. CS and SS are loaded from values derived from bits 47:32
* of STAR MSR (+0 for CS, +8 for SS)
*
* SYSRET loads RIP from RCX and RFLAGS from r11. CS and SS are set with
* values derived from STAR MSR bits 63:48 (+16 for CS, +8 for SS)
*
* The kernel is in charge of not clobbering across the system call
* the remaining registers: RBX, RBP, R12-R15, SIMD/FPU, and any unused
* argument registers.
*/
/**
 * @brief Invoke a system call with 6 arguments
 *
 * Pins call_id to RAX and the arguments to the System V argument
 * registers (with R10 substituted for RCX, which SYSCALL overwrites
 * with the return RIP), then executes SYSCALL. The kernel's return
 * value is read back from RAX.
 *
 * @param arg1 First system call argument (RDI)
 * @param arg2 Second system call argument (RSI)
 * @param arg3 Third system call argument (RDX)
 * @param arg4 Fourth system call argument (R10, not RCX)
 * @param arg5 Fifth system call argument (R8)
 * @param arg6 Sixth system call argument (R9)
 * @param call_id System call ID (RAX)
 * @return System call return value (RAX)
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5, uintptr_t arg6,
uintptr_t call_id)
{
/* Register-pinned locals: each value is forced into the exact
 * register the kernel-side handler expects to find it in.
 */
register uintptr_t rax __asm__("%rax") = call_id;
register uintptr_t rdi __asm__("%rdi") = arg1;
register uintptr_t rsi __asm__("%rsi") = arg2;
register uintptr_t rdx __asm__("%rdx") = arg3;
register uintptr_t r10 __asm__("%r10") = arg4; /* RCX unavailable */
register uintptr_t r8 __asm__("%r8") = arg5;
register uintptr_t r9 __asm__("%r9") = arg6;
/* RCX and R11 are clobbered by the SYSCALL instruction itself
 * (return RIP and RFLAGS); "memory" prevents the compiler from
 * caching memory values across the kernel transition.
 */
__asm__ volatile("syscall\n\t"
: "=r" (rax)
: "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx),
"r" (r10), "r" (r8), "r" (r9)
: "memory", "rcx", "r11");
return rax;
}
/**
 * @brief Invoke a system call with 5 arguments
 *
 * Same register layout as the 6-argument form, minus R9: call_id in
 * RAX, arguments in RDI/RSI/RDX/R10/R8 (R10 replaces RCX, which
 * SYSCALL overwrites with the return RIP).
 *
 * @param arg1 First system call argument (RDI)
 * @param arg2 Second system call argument (RSI)
 * @param arg3 Third system call argument (RDX)
 * @param arg4 Fourth system call argument (R10, not RCX)
 * @param arg5 Fifth system call argument (R8)
 * @param call_id System call ID (RAX)
 * @return System call return value (RAX)
 */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5,
uintptr_t call_id)
{
register uintptr_t rax __asm__("%rax") = call_id;
register uintptr_t rdi __asm__("%rdi") = arg1;
register uintptr_t rsi __asm__("%rsi") = arg2;
register uintptr_t rdx __asm__("%rdx") = arg3;
register uintptr_t r10 __asm__("%r10") = arg4; /* RCX unavailable */
register uintptr_t r8 __asm__("%r8") = arg5;
/* RCX/R11 clobbered by SYSCALL; "memory" orders the kernel call */
__asm__ volatile("syscall\n\t"
: "=r" (rax)
: "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx),
"r" (r10), "r" (r8)
: "memory", "rcx", "r11");
return rax;
}
/**
 * @brief Invoke a system call with 4 arguments
 *
 * call_id in RAX, arguments in RDI/RSI/RDX/R10 (R10 replaces RCX,
 * which SYSCALL overwrites with the return RIP).
 *
 * @param arg1 First system call argument (RDI)
 * @param arg2 Second system call argument (RSI)
 * @param arg3 Third system call argument (RDX)
 * @param arg4 Fourth system call argument (R10, not RCX)
 * @param call_id System call ID (RAX)
 * @return System call return value (RAX)
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t call_id)
{
register uintptr_t rax __asm__("%rax") = call_id;
register uintptr_t rdi __asm__("%rdi") = arg1;
register uintptr_t rsi __asm__("%rsi") = arg2;
register uintptr_t rdx __asm__("%rdx") = arg3;
register uintptr_t r10 __asm__("%r10") = arg4; /* RCX unavailable */
/* RCX/R11 clobbered by SYSCALL; "memory" orders the kernel call */
__asm__ volatile("syscall\n\t"
: "=r" (rax)
: "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx),
"r" (r10)
: "memory", "rcx", "r11");
return rax;
}
/**
 * @brief Invoke a system call with 3 arguments
 *
 * call_id in RAX, arguments in RDI/RSI/RDX — identical to the
 * System V calling convention, so the values are typically already
 * in place at the call site.
 *
 * @param arg1 First system call argument (RDI)
 * @param arg2 Second system call argument (RSI)
 * @param arg3 Third system call argument (RDX)
 * @param call_id System call ID (RAX)
 * @return System call return value (RAX)
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3,
uintptr_t call_id)
{
register uintptr_t rax __asm__("%rax") = call_id;
register uintptr_t rdi __asm__("%rdi") = arg1;
register uintptr_t rsi __asm__("%rsi") = arg2;
register uintptr_t rdx __asm__("%rdx") = arg3;
/* RCX/R11 clobbered by SYSCALL; "memory" orders the kernel call */
__asm__ volatile("syscall\n\t"
: "=r" (rax)
: "r" (rax), "r" (rdi), "r" (rsi), "r" (rdx)
: "memory", "rcx", "r11");
return rax;
}
/**
 * @brief Invoke a system call with 2 arguments
 *
 * call_id in RAX, arguments in RDI/RSI per the System V convention.
 *
 * @param arg1 First system call argument (RDI)
 * @param arg2 Second system call argument (RSI)
 * @param call_id System call ID (RAX)
 * @return System call return value (RAX)
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
uintptr_t call_id)
{
register uintptr_t rax __asm__("%rax") = call_id;
register uintptr_t rdi __asm__("%rdi") = arg1;
register uintptr_t rsi __asm__("%rsi") = arg2;
/* RCX/R11 clobbered by SYSCALL; "memory" orders the kernel call */
__asm__ volatile("syscall\n\t"
: "=r" (rax)
: "r" (rax), "r" (rdi), "r" (rsi)
: "memory", "rcx", "r11");
return rax;
}
/**
 * @brief Invoke a system call with 1 argument
 *
 * call_id in RAX, argument in RDI per the System V convention.
 *
 * @param arg1 System call argument (RDI)
 * @param call_id System call ID (RAX)
 * @return System call return value (RAX)
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
uintptr_t call_id)
{
register uintptr_t rax __asm__("%rax") = call_id;
register uintptr_t rdi __asm__("%rdi") = arg1;
/* RCX/R11 clobbered by SYSCALL; "memory" orders the kernel call */
__asm__ volatile("syscall\n\t"
: "=r" (rax)
: "r" (rax), "r" (rdi)
: "memory", "rcx", "r11");
return rax;
}
/**
 * @brief Invoke a system call with no arguments
 *
 * Only call_id is passed, in RAX.
 *
 * @param call_id System call ID (RAX)
 * @return System call return value (RAX)
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
register uintptr_t rax __asm__("%rax") = call_id;
/* RCX/R11 clobbered by SYSCALL; "memory" orders the kernel call */
__asm__ volatile("syscall\n\t"
: "=r" (rax)
: "r" (rax)
: "memory", "rcx", "r11");
return rax;
}
/**
 * @brief Test whether the CPU is currently running in user mode
 *
 * Reads the CS segment selector and inspects its two low bits (the
 * requested privilege level). A nonzero RPL means we are not in
 * ring 0, i.e. we are executing user code.
 *
 * @return true if the current privilege level is not 0
 */
static inline bool arch_is_user_context(void)
{
	int selector;

	__asm__ volatile ("mov %%cs, %[out]" : [out] "=r" (selector));

	return (selector & 0x3) != 0;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL64_SYSCALL_H_ */