syscall: Move arch specific syscall code into its own header

Split out the arch-specific syscall code to reduce include pollution
from other arch-related headers.  For example, on ARM it's possible to
get errno.h included via SoC-specific headers, which created an
interesting compile issue because of the order in which the syscall and
errno headers get included.

Signed-off-by: Kumar Gala <kumar.gala@linaro.org>
Kumar Gala authored 2018-07-31 12:42:59 -05:00 (committed by Kumar Gala)
commit 4b22ba7e4b
11 changed files with 566 additions and 449 deletions


@@ -8,7 +8,7 @@
 #include <arch/x86/asm.h>
 #include <arch/cpu.h>
 #include <offsets_short.h>
-#include <syscall.h>
+#include <arch/x86/syscall.h>
 /* Exports */
 GTEXT(_x86_syscall_entry_stub)


@@ -224,163 +224,6 @@ extern "C" {
typedef u32_t k_mem_partition_attr_t;
#endif /* _ASMLANGUAGE */
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
/* Syscall invocation macros. arc-specific machine constraints used to ensure
* args land in the proper registers. Currently, they are all stub functions
* just for enabling CONFIG_USERSPACE on arc w/o errors.
*/
static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6,
u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r4 __asm__("r4") = arg5;
register u32_t r5 __asm__("r5") = arg6;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r4), "r" (r5), "r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r4 __asm__("r4") = arg5;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r4), "r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke0(u32_t call_id)
{
register u32_t ret __asm__("r0");
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r6));
return ret;
}
static inline int _arch_is_user_context(void)
{
u32_t status;
compiler_barrier();
__asm__ volatile("lr %0, [%[status32]]\n"
: "=r"(status)
: [status32] "i" (_ARC_V2_STATUS32));
return !(status & _ARC_V2_STATUS32_US);
}
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#ifdef __cplusplus
}
#endif

include/arch/arc/syscall.h (new file, 194 lines added)

@@ -0,0 +1,194 @@
/*
* Copyright (c) 2018 Linaro Limited.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARC specific syscall header
*
* This header contains the ARC specific syscall interface. It is
* included by the syscall interface architecture-abstraction header
* (include/arch/syscall.h)
*/
#ifndef _ARC_SYSCALL__H_
#define _ARC_SYSCALL__H_
#define _TRAP_S_SCALL_IRQ_OFFLOAD 1
#define _TRAP_S_CALL_RUNTIME_EXCEPT 2
#define _TRAP_S_CALL_SYSTEM_CALL 3
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef CONFIG_CPU_ARCV2
#include <arch/arc/v2/aux_regs.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Syscall invocation macros. arc-specific machine constraints used to ensure
* args land in the proper registers. Currently, they are all stub functions
* just for enabling CONFIG_USERSPACE on arc w/o errors.
*/
static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6,
u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r4 __asm__("r4") = arg5;
register u32_t r5 __asm__("r5") = arg6;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r4), "r" (r5), "r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r4 __asm__("r4") = arg5;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r4), "r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r6));
return ret;
}
static inline u32_t _arch_syscall_invoke0(u32_t call_id)
{
register u32_t ret __asm__("r0");
register u32_t r6 __asm__("r6") = call_id;
compiler_barrier();
__asm__ volatile(
"trap_s %[trap_s_id]\n"
: "=r"(ret)
: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
"r" (ret), "r" (r6));
return ret;
}
static inline int _arch_is_user_context(void)
{
u32_t status;
compiler_barrier();
__asm__ volatile("lr %0, [%[status32]]\n"
: "=r"(status)
: [status32] "i" (_ARC_V2_STATUS32));
return !(status & _ARC_V2_STATUS32_US);
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* _ARC_SYSCALL__H_ */
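
For context, a minimal usage sketch (not part of this patch; the wrapper
name and call ID below are hypothetical) of how a user-mode wrapper
would route two arguments through the ARC trap interface:

#include <arch/arc/syscall.h>

#define K_SYSCALL_SEM_TAKE 42 /* hypothetical call ID */

static inline u32_t z_user_sem_take(u32_t sem_addr, u32_t timeout)
{
	/* arg1/arg2 are pinned to r0/r1 and the call ID to r6 by the
	 * register constraints above; the result comes back in r0.
	 */
	return _arch_syscall_invoke2(sem_addr, timeout, K_SYSCALL_SEM_TAKE);
}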


@@ -14,6 +14,7 @@
 #ifndef _ARCH_ARC_V2_ERROR_H_
 #define _ARCH_ARC_V2_ERROR_H_
+#include <arch/arc/syscall.h>
 #include <arch/arc/v2/exc.h>
 #ifdef __cplusplus
@@ -33,10 +34,6 @@ extern void _SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf);
 #define _NANO_ERR_KERNEL_PANIC (5) /* Kernel panic (fatal to system) */
-#define _TRAP_S_SCALL_IRQ_OFFLOAD 1
-#define _TRAP_S_CALL_RUNTIME_EXCEPT 2
-#define _TRAP_S_CALL_SYSTEM_CALL 3
 /*
  * the exception caused by kernel will be handled in interrupt context
  * when the processor is already in interrupt context, no need to raise


@@ -327,150 +327,6 @@ extern "C" {
typedef u32_t k_mem_partition_attr_t;
#endif /* _ASMLANGUAGE */
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
/* Syscall invocation macros. arm-specific machine constraints used to ensure
* args land in the proper registers.
*/
static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6,
u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r4 __asm__("r4") = arg5;
register u32_t r5 __asm__("r5") = arg6;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r4), "r" (r5), "r" (r6)
: "r8", "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r4 __asm__("r4") = arg5;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r4), "r" (r6)
: "r8", "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r6)
: "r8", "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r6)
: "r8", "memory", "r3");
return ret;
}
static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r6)
: "r8", "memory", "r2", "r3");
return ret;
}
static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r6)
: "r8", "memory", "r1", "r2", "r3");
return ret;
}
static inline u32_t _arch_syscall_invoke0(u32_t call_id)
{
register u32_t ret __asm__("r0");
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r6)
: "r8", "memory", "r1", "r2", "r3");
return ret;
}
static inline int _arch_is_user_context(void)
{
u32_t value;
/* check for handler mode */
__asm__ volatile("mrs %0, IPSR\n\t" : "=r"(value));
if (value) {
return 0;
}
/* if not handler mode, return mode information */
__asm__ volatile("mrs %0, CONTROL\n\t" : "=r"(value));
return value & 0x1;
}
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#ifdef __cplusplus
}
#endif


@@ -14,6 +14,7 @@
 #ifndef _ARCH_ARM_CORTEXM_ERROR_H_
 #define _ARCH_ARM_CORTEXM_ERROR_H_
+#include <arch/arm/syscall.h>
 #include <arch/arm/cortex_m/exc.h>
 #ifdef __cplusplus
@@ -32,10 +33,6 @@ extern void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
 #define _NANO_ERR_KERNEL_PANIC (5) /* Kernel panic (fatal to system) */
 #define _NANO_ERR_RECOVERABLE (6) /* Recoverable error */
-#define _SVC_CALL_IRQ_OFFLOAD 1
-#define _SVC_CALL_RUNTIME_EXCEPT 2
-#define _SVC_CALL_SYSTEM_CALL 3
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 /* ARMv6 will hard-fault if SVC is called with interrupts locked. Just
  * force them unlocked, the thread is in an undefined state anyway

include/arch/arm/syscall.h (new file, 177 lines added)

@@ -0,0 +1,177 @@
/*
* Copyright (c) 2018 Linaro Limited.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM specific syscall header
*
* This header contains the ARM specific syscall interface. It is
* included by the syscall interface architecture-abstraction header
* (include/arch/syscall.h)
*/
#ifndef _ARM_SYSCALL__H_
#define _ARM_SYSCALL__H_
#define _SVC_CALL_IRQ_OFFLOAD 1
#define _SVC_CALL_RUNTIME_EXCEPT 2
#define _SVC_CALL_SYSTEM_CALL 3
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Syscall invocation macros. arm-specific machine constraints used to ensure
* args land in the proper registers.
*/
static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6,
u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r4 __asm__("r4") = arg5;
register u32_t r5 __asm__("r5") = arg6;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r4), "r" (r5), "r" (r6)
: "r8", "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r4 __asm__("r4") = arg5;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r4), "r" (r6)
: "r8", "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r6)
: "r8", "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r6)
: "r8", "memory", "r3");
return ret;
}
static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r6)
: "r8", "memory", "r2", "r3");
return ret;
}
static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r6)
: "r8", "memory", "r1", "r2", "r3");
return ret;
}
static inline u32_t _arch_syscall_invoke0(u32_t call_id)
{
register u32_t ret __asm__("r0");
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r6)
: "r8", "memory", "r1", "r2", "r3");
return ret;
}
static inline int _arch_is_user_context(void)
{
u32_t value;
/* check for handler mode */
__asm__ volatile("mrs %0, IPSR\n\t" : "=r"(value));
if (value) {
return 0;
}
/* if not handler mode, return mode information */
__asm__ volatile("mrs %0, CONTROL\n\t" : "=r"(value));
return value & 0x1;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* _ARM_SYSCALL__H_ */
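
A sketch of the usual caller-side pattern (the names below are
hypothetical, not from this patch): stubs check _arch_is_user_context()
and only pay for the SVC trap when actually running unprivileged:

#include <arch/arm/syscall.h>

extern u32_t _impl_my_read(u32_t buf_addr, u32_t len); /* hypothetical */
#define MY_READ_ID 9 /* hypothetical call ID */

static inline u32_t my_read(u32_t buf_addr, u32_t len)
{
	if (_arch_is_user_context()) {
		/* unprivileged thread mode: trap via SVC */
		return _arch_syscall_invoke2(buf_addr, len, MY_READ_ID);
	}
	/* already privileged: call the implementation directly */
	return _impl_my_read(buf_addr, len);
}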

include/arch/syscall.h (new file, 20 lines added)

@@ -0,0 +1,20 @@
/* syscall.h - automatically selects the correct syscall.h file to include */
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __ARCHSYSCALL_H__
#define __ARCHSYSCALL_H__
#if defined(CONFIG_X86)
#include <arch/x86/syscall.h>
#elif defined(CONFIG_ARM)
#include <arch/arm/syscall.h>
#elif defined(CONFIG_ARC)
#include <arch/arc/syscall.h>
#endif
#endif /* __ARCHSYSCALL_H__ */
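
With the selector in place, portable code only needs the abstraction
header; a small sketch (the caller below is hypothetical) of what that
buys:

#include <arch/syscall.h> /* resolves to the x86, ARM, or ARC header */

static int must_trap(void)
{
	/* the same source compiles on all three arches; each one
	 * supplies its own _arch_is_user_context() implementation
	 */
	return _arch_is_user_context();
}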


@@ -35,8 +35,6 @@ extern "C" {
 #define DATA_SEG 0x10
 #define MAIN_TSS 0x18
 #define DF_TSS 0x20
-#define USER_CODE_SEG 0x2b /* at dpl=3 */
-#define USER_DATA_SEG 0x33 /* at dpl=3 */
 /**
  * Macro used internally by NANO_CPU_INT_REGISTER and NANO_CPU_INT_REGISTER_ASM.
@@ -545,142 +543,6 @@ extern FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
extern struct task_state_segment _main_tss;
#endif
#ifdef CONFIG_USERSPACE
/* Syscall invocation macros. x86-specific machine constraints used to ensure
* args land in the proper registers, see implementation of
* _x86_syscall_entry_stub in userspace.S
*
* the entry stub clobbers EDX and ECX on IAMCU systems
*/
static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6,
u32_t call_id)
{
u32_t ret;
__asm__ volatile("push %%ebp\n\t"
"mov %[arg6], %%ebp\n\t"
"int $0x80\n\t"
"pop %%ebp\n\t"
: "=a" (ret)
#ifdef CONFIG_X86_IAMCU
, "=d" (arg2), "=c" (arg3)
#endif
: "S" (call_id), "a" (arg1), "d" (arg2),
"c" (arg3), "b" (arg4), "D" (arg5),
[arg6] "m" (arg6)
: "memory", "esp");
return ret;
}
static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
#ifdef CONFIG_X86_IAMCU
, "=d" (arg2), "=c" (arg3)
#endif
: "S" (call_id), "a" (arg1), "d" (arg2),
"c" (arg3), "b" (arg4), "D" (arg5)
: "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
#ifdef CONFIG_X86_IAMCU
, "=d" (arg2), "=c" (arg3)
#endif
: "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3),
"b" (arg4)
: "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
#ifdef CONFIG_X86_IAMCU
, "=d" (arg2), "=c" (arg3)
#endif
: "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3)
: "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
#ifdef CONFIG_X86_IAMCU
, "=d" (arg2)
#endif
: "S" (call_id), "a" (arg1), "d" (arg2)
: "memory"
#ifdef CONFIG_X86_IAMCU
, "ecx"
#endif
);
return ret;
}
static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
: "S" (call_id), "a" (arg1)
: "memory"
#ifdef CONFIG_X86_IAMCU
, "edx", "ecx"
#endif
);
return ret;
}
static inline u32_t _arch_syscall_invoke0(u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
: "S" (call_id)
: "memory"
#ifdef CONFIG_X86_IAMCU
, "edx", "ecx"
#endif
);
return ret;
}
static inline int _arch_is_user_context(void)
{
int cs;
/* On x86, read the CS register (which cannot be manually set) */
__asm__ volatile ("mov %%cs, %[cs_val]" : [cs_val] "=r" (cs));
return cs == USER_CODE_SEG;
}
#endif /* CONFIG_USERSPACE */
#if defined(CONFIG_HW_STACK_PROTECTION) && defined(CONFIG_USERSPACE)
/* With both hardware stack protection and userspace enabled, stacks are
* arranged as follows:

include/arch/x86/syscall.h (new file, 171 lines added)

@@ -0,0 +1,171 @@
/*
* Copyright (c) 2018 Linaro Limited.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief x86 specific syscall header
*
* This header contains the x86 specific syscall interface. It is
* included by the syscall interface architecture-abstraction header
* (include/arch/syscall.h)
*/
#ifndef _X86_SYSCALL__H_
#define _X86_SYSCALL__H_
#define USER_CODE_SEG 0x2b /* at dpl=3 */
#define USER_DATA_SEG 0x33 /* at dpl=3 */
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Syscall invocation macros. x86-specific machine constraints used to ensure
* args land in the proper registers, see implementation of
* _x86_syscall_entry_stub in userspace.S
*
* the entry stub clobbers EDX and ECX on IAMCU systems
*/
static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6,
u32_t call_id)
{
u32_t ret;
__asm__ volatile("push %%ebp\n\t"
"mov %[arg6], %%ebp\n\t"
"int $0x80\n\t"
"pop %%ebp\n\t"
: "=a" (ret)
#ifdef CONFIG_X86_IAMCU
, "=d" (arg2), "=c" (arg3)
#endif
: "S" (call_id), "a" (arg1), "d" (arg2),
"c" (arg3), "b" (arg4), "D" (arg5),
[arg6] "m" (arg6)
: "memory", "esp");
return ret;
}
static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
#ifdef CONFIG_X86_IAMCU
, "=d" (arg2), "=c" (arg3)
#endif
: "S" (call_id), "a" (arg1), "d" (arg2),
"c" (arg3), "b" (arg4), "D" (arg5)
: "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
#ifdef CONFIG_X86_IAMCU
, "=d" (arg2), "=c" (arg3)
#endif
: "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3),
"b" (arg4)
: "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
#ifdef CONFIG_X86_IAMCU
, "=d" (arg2), "=c" (arg3)
#endif
: "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3)
: "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
#ifdef CONFIG_X86_IAMCU
, "=d" (arg2)
#endif
: "S" (call_id), "a" (arg1), "d" (arg2)
: "memory"
#ifdef CONFIG_X86_IAMCU
, "ecx"
#endif
);
return ret;
}
static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
: "S" (call_id), "a" (arg1)
: "memory"
#ifdef CONFIG_X86_IAMCU
, "edx", "ecx"
#endif
);
return ret;
}
static inline u32_t _arch_syscall_invoke0(u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
: "S" (call_id)
: "memory"
#ifdef CONFIG_X86_IAMCU
, "edx", "ecx"
#endif
);
return ret;
}
static inline int _arch_is_user_context(void)
{
int cs;
/* On x86, read the CS register (which cannot be manually set) */
__asm__ volatile ("mov %%cs, %[cs_val]" : [cs_val] "=r" (cs));
return cs == USER_CODE_SEG;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* _X86_SYSCALL__H_ */
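
A usage sketch for the six-argument case (the wrapper and call ID are
hypothetical): the first five arguments travel in registers, while arg6
is passed through memory and staged into EBP by the inline assembly
above:

#include <arch/x86/syscall.h>

#define MY_CALL6_ID 13 /* hypothetical call ID */

static inline u32_t my_call6(u32_t a, u32_t b, u32_t c,
			     u32_t d, u32_t e, u32_t f)
{
	/* EAX=a, EDX=b, ECX=c, EBX=d, EDI=e, ESI=call ID; f goes via
	 * the "m" constraint and is moved into EBP; result in EAX
	 */
	return _arch_syscall_invoke6(a, b, c, d, e, f, MY_CALL6_ID);
}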


@@ -12,7 +12,7 @@
 #include <zephyr/types.h>
 #include <syscall_list.h>
 #include <syscall_macros.h>
-#include <arch/cpu.h>
+#include <arch/syscall.h>
 #ifdef __cplusplus
 extern "C" {