diff --git a/arch/x86/core/userspace.S b/arch/x86/core/userspace.S
index 7fb0291cbb3..666c6978f6f 100644
--- a/arch/x86/core/userspace.S
+++ b/arch/x86/core/userspace.S
@@ -8,7 +8,7 @@
 #include
 #include
 #include
-#include
+#include <arch/x86/syscall.h>
 
 /* Exports */
 GTEXT(_x86_syscall_entry_stub)
diff --git a/include/arch/arc/arch.h b/include/arch/arc/arch.h
index 7126adb09e6..19f898a2a41 100644
--- a/include/arch/arc/arch.h
+++ b/include/arch/arc/arch.h
@@ -224,163 +224,6 @@ extern "C" {
 typedef u32_t k_mem_partition_attr_t;
 #endif /* _ASMLANGUAGE */
 
-#ifdef CONFIG_USERSPACE
-#ifndef _ASMLANGUAGE
-/* Syscall invocation macros. arc-specific machine constraints used to ensure
- * args land in the proper registers. Currently, they are all stub functions
- * just for enabling CONFIG_USERSPACE on arc w/o errors.
- */
-
-static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
-					  u32_t arg4, u32_t arg5, u32_t arg6,
-					  u32_t call_id)
-{
-	register u32_t ret __asm__("r0") = arg1;
-	register u32_t r1 __asm__("r1") = arg2;
-	register u32_t r2 __asm__("r2") = arg3;
-	register u32_t r3 __asm__("r3") = arg4;
-	register u32_t r4 __asm__("r4") = arg5;
-	register u32_t r5 __asm__("r5") = arg6;
-	register u32_t r6 __asm__("r6") = call_id;
-
-	compiler_barrier();
-
-	__asm__ volatile(
-		"trap_s %[trap_s_id]\n"
-		: "=r"(ret)
-		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
-		  "r" (ret), "r" (r1), "r" (r2), "r" (r3),
-		  "r" (r4), "r" (r5), "r" (r6));
-
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
-					  u32_t arg4, u32_t arg5, u32_t call_id)
-{
-	register u32_t ret __asm__("r0") = arg1;
-	register u32_t r1 __asm__("r1") = arg2;
-	register u32_t r2 __asm__("r2") = arg3;
-	register u32_t r3 __asm__("r3") = arg4;
-	register u32_t r4 __asm__("r4") = arg5;
-	register u32_t r6 __asm__("r6") = call_id;
-
-	compiler_barrier();
-
-	__asm__ volatile(
-		"trap_s %[trap_s_id]\n"
-		: "=r"(ret)
-		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
-		  "r" (ret), "r" (r1), "r" (r2), "r" (r3),
-		  "r" (r4), "r" (r6));
-
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
-					  u32_t arg4, u32_t call_id)
-{
-	register u32_t ret __asm__("r0") = arg1;
-	register u32_t r1 __asm__("r1") = arg2;
-	register u32_t r2 __asm__("r2") = arg3;
-	register u32_t r3 __asm__("r3") = arg4;
-	register u32_t r6 __asm__("r6") = call_id;
-
-	compiler_barrier();
-
-	__asm__ volatile(
-		"trap_s %[trap_s_id]\n"
-		: "=r"(ret)
-		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
-		  "r" (ret), "r" (r1), "r" (r2), "r" (r3),
-		  "r" (r6));
-
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
-					  u32_t call_id)
-{
-	register u32_t ret __asm__("r0") = arg1;
-	register u32_t r1 __asm__("r1") = arg2;
-	register u32_t r2 __asm__("r2") = arg3;
-	register u32_t r6 __asm__("r6") = call_id;
-
-	compiler_barrier();
-
-	__asm__ volatile(
-		"trap_s %[trap_s_id]\n"
-		: "=r"(ret)
-		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
-		  "r" (ret), "r" (r1), "r" (r2), "r" (r6));
-
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
-{
-	register u32_t ret __asm__("r0") = arg1;
-	register u32_t r1 __asm__("r1") = arg2;
-	register u32_t r6 __asm__("r6") = call_id;
-
-	compiler_barrier();
-
-	__asm__ volatile(
-		"trap_s %[trap_s_id]\n"
-		: "=r"(ret)
-		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
-		  "r" (ret), "r" (r1), "r" (r6));
-
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
-{
-	register u32_t ret __asm__("r0") = arg1;
-	register u32_t r6 __asm__("r6") = call_id;
-
-	compiler_barrier();
-
-	__asm__ volatile(
-		"trap_s %[trap_s_id]\n"
-		: "=r"(ret)
-		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
-		  "r" (ret), "r" (r6));
-
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke0(u32_t call_id)
-{
-	register u32_t ret __asm__("r0");
-	register u32_t r6 __asm__("r6") = call_id;
-
-	compiler_barrier();
-
-	__asm__ volatile(
-		"trap_s %[trap_s_id]\n"
-		: "=r"(ret)
-		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
-		  "r" (ret), "r" (r6));
-
-	return ret;
-}
-
-static inline int _arch_is_user_context(void)
-{
-	u32_t status;
-
-	compiler_barrier();
-
-	__asm__ volatile("lr %0, [%[status32]]\n"
-			 : "=r"(status)
-			 : [status32] "i" (_ARC_V2_STATUS32));
-
-	return !(status & _ARC_V2_STATUS32_US);
-}
-
-#endif /* _ASMLANGUAGE */
-#endif /* CONFIG_USERSPACE */
 #ifdef __cplusplus
 }
 #endif
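The ARC stubs above bracket the trap with compiler_barrier() rather than putting a "memory" clobber on the asm statement itself (the ARM variants further down take the clobber approach). Either way the intent is the same: stop the compiler from caching or reordering memory accesses across the privilege transition. As a point of reference, a minimal sketch of the barrier idiom under the usual GCC definition (this macro lives in the toolchain headers, not in this patch):

/* Compiler-only barrier: the empty asm with a "memory" clobber makes the
 * compiler assume any memory may be read or written here, so loads and
 * stores are not cached or reordered across this point.  It emits no
 * instructions.
 */
#define compiler_barrier() do { \
		__asm__ __volatile__ ("" ::: "memory"); \
	} while (0)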
- register u32_t ret __asm__("r0") = arg1; - register u32_t r6 __asm__("r6") = call_id; - - compiler_barrier(); - - __asm__ volatile( - "trap_s %[trap_s_id]\n" - : "=r"(ret) - : [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL), - "r" (ret), "r" (r6)); - - return ret; -} - -static inline u32_t _arch_syscall_invoke0(u32_t call_id) -{ - register u32_t ret __asm__("r0"); - register u32_t r6 __asm__("r6") = call_id; - - compiler_barrier(); - - __asm__ volatile( - "trap_s %[trap_s_id]\n" - : "=r"(ret) - : [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL), - "r" (ret), "r" (r6)); - - return ret; -} - -static inline int _arch_is_user_context(void) -{ - u32_t status; - - compiler_barrier(); - - __asm__ volatile("lr %0, [%[status32]]\n" - : "=r"(status) - : [status32] "i" (_ARC_V2_STATUS32)); - - return !(status & _ARC_V2_STATUS32_US); -} - -#endif /* _ASMLANGUAGE */ -#endif /* CONFIG_USERSPACE */ #ifdef __cplusplus } #endif diff --git a/include/arch/arc/syscall.h b/include/arch/arc/syscall.h new file mode 100644 index 00000000000..72d2a51a0ab --- /dev/null +++ b/include/arch/arc/syscall.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2018 Linaro Limited. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief ARC specific sycall header + * + * This header contains the ARC specific sycall interface. It is + * included by the syscall interface architecture-abstraction header + * (include/arch/syscall.h) + */ + +#ifndef _ARC_SYSCALL__H_ +#define _ARC_SYSCALL__H_ + +#define _TRAP_S_SCALL_IRQ_OFFLOAD 1 +#define _TRAP_S_CALL_RUNTIME_EXCEPT 2 +#define _TRAP_S_CALL_SYSTEM_CALL 3 + +#ifdef CONFIG_USERSPACE +#ifndef _ASMLANGUAGE + +#include + +#ifdef CONFIG_CPU_ARCV2 +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif +/* Syscall invocation macros. arc-specific machine constraints used to ensure + * args land in the proper registers. Currently, they are all stub functions + * just for enabling CONFIG_USERSPACE on arc w/o errors. 
diff --git a/include/arch/arc/v2/error.h b/include/arch/arc/v2/error.h
index 6238c9a9dab..cfda73670f2 100644
--- a/include/arch/arc/v2/error.h
+++ b/include/arch/arc/v2/error.h
@@ -14,6 +14,7 @@
 #ifndef _ARCH_ARC_V2_ERROR_H_
 #define _ARCH_ARC_V2_ERROR_H_
 
+#include <arch/arc/syscall.h>
 #include
 
 #ifdef __cplusplus
@@ -33,10 +34,6 @@ extern void _SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf);
 #define _NANO_ERR_KERNEL_PANIC (5) /* Kernel panic (fatal to system) */
 
-#define _TRAP_S_SCALL_IRQ_OFFLOAD	1
-#define _TRAP_S_CALL_RUNTIME_EXCEPT	2
-#define _TRAP_S_CALL_SYSTEM_CALL	3
-
 /*
  * the exception caused by kernel will be handled in interrupt context
  * when the processor is already in interrupt context, no need to raise
!(status & _ARC_V2_STATUS32_US); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ +#endif /* CONFIG_USERSPACE */ +#endif /* _ARC_SYSCALL__H_ */ diff --git a/include/arch/arc/v2/error.h b/include/arch/arc/v2/error.h index 6238c9a9dab..cfda73670f2 100644 --- a/include/arch/arc/v2/error.h +++ b/include/arch/arc/v2/error.h @@ -14,6 +14,7 @@ #ifndef _ARCH_ARC_V2_ERROR_H_ #define _ARCH_ARC_V2_ERROR_H_ +#include #include #ifdef __cplusplus @@ -33,10 +34,6 @@ extern void _SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf); #define _NANO_ERR_KERNEL_PANIC (5) /* Kernel panic (fatal to system) */ -#define _TRAP_S_SCALL_IRQ_OFFLOAD 1 -#define _TRAP_S_CALL_RUNTIME_EXCEPT 2 -#define _TRAP_S_CALL_SYSTEM_CALL 3 - /* * the exception caused by kernel will be handled in interrupt context * when the processor is already in interrupt context, no need to raise diff --git a/include/arch/arm/arch.h b/include/arch/arm/arch.h index 3ef19754cff..2cc9fb41f20 100644 --- a/include/arch/arm/arch.h +++ b/include/arch/arm/arch.h @@ -327,150 +327,6 @@ extern "C" { typedef u32_t k_mem_partition_attr_t; #endif /* _ASMLANGUAGE */ -#ifdef CONFIG_USERSPACE -#ifndef _ASMLANGUAGE - -/* Syscall invocation macros. arm-specific machine constraints used to ensure - * args land in the proper registers. - */ -static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3, - u32_t arg4, u32_t arg5, u32_t arg6, - u32_t call_id) -{ - register u32_t ret __asm__("r0") = arg1; - register u32_t r1 __asm__("r1") = arg2; - register u32_t r2 __asm__("r2") = arg3; - register u32_t r3 __asm__("r3") = arg4; - register u32_t r4 __asm__("r4") = arg5; - register u32_t r5 __asm__("r5") = arg6; - register u32_t r6 __asm__("r6") = call_id; - - __asm__ volatile("svc %[svid]\n" - : "=r"(ret) - : [svid] "i" (_SVC_CALL_SYSTEM_CALL), - "r" (ret), "r" (r1), "r" (r2), "r" (r3), - "r" (r4), "r" (r5), "r" (r6) - : "r8", "memory"); - - return ret; -} - -static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3, - u32_t arg4, u32_t arg5, u32_t call_id) -{ - register u32_t ret __asm__("r0") = arg1; - register u32_t r1 __asm__("r1") = arg2; - register u32_t r2 __asm__("r2") = arg3; - register u32_t r3 __asm__("r3") = arg4; - register u32_t r4 __asm__("r4") = arg5; - register u32_t r6 __asm__("r6") = call_id; - - __asm__ volatile("svc %[svid]\n" - : "=r"(ret) - : [svid] "i" (_SVC_CALL_SYSTEM_CALL), - "r" (ret), "r" (r1), "r" (r2), "r" (r3), - "r" (r4), "r" (r6) - : "r8", "memory"); - - return ret; -} - -static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3, - u32_t arg4, u32_t call_id) -{ - register u32_t ret __asm__("r0") = arg1; - register u32_t r1 __asm__("r1") = arg2; - register u32_t r2 __asm__("r2") = arg3; - register u32_t r3 __asm__("r3") = arg4; - register u32_t r6 __asm__("r6") = call_id; - - __asm__ volatile("svc %[svid]\n" - : "=r"(ret) - : [svid] "i" (_SVC_CALL_SYSTEM_CALL), - "r" (ret), "r" (r1), "r" (r2), "r" (r3), - "r" (r6) - : "r8", "memory"); - - return ret; -} - -static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3, - u32_t call_id) -{ - register u32_t ret __asm__("r0") = arg1; - register u32_t r1 __asm__("r1") = arg2; - register u32_t r2 __asm__("r2") = arg3; - register u32_t r6 __asm__("r6") = call_id; - - __asm__ volatile("svc %[svid]\n" - : "=r"(ret) - : [svid] "i" (_SVC_CALL_SYSTEM_CALL), - "r" (ret), "r" (r1), "r" (r2), "r" (r6) - : "r8", "memory", "r3"); - - return ret; -} - -static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t 
diff --git a/include/arch/arm/cortex_m/error.h b/include/arch/arm/cortex_m/error.h
index db9c42d09b4..94d1ec628ef 100644
--- a/include/arch/arm/cortex_m/error.h
+++ b/include/arch/arm/cortex_m/error.h
@@ -14,6 +14,7 @@
 #ifndef _ARCH_ARM_CORTEXM_ERROR_H_
 #define _ARCH_ARM_CORTEXM_ERROR_H_
 
+#include <arch/arm/syscall.h>
 #include
 
 #ifdef __cplusplus
@@ -32,10 +33,6 @@ extern void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
 #define _NANO_ERR_KERNEL_PANIC (5) /* Kernel panic (fatal to system) */
 #define _NANO_ERR_RECOVERABLE (6) /* Recoverable error */
 
-#define _SVC_CALL_IRQ_OFFLOAD		1
-#define _SVC_CALL_RUNTIME_EXCEPT	2
-#define _SVC_CALL_SYSTEM_CALL		3
-
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 /* ARMv6 will hard-fault if SVC is called with interrupts locked. Just
  * force them unlocked, the thread is in an undefined state anyway
diff --git a/include/arch/arm/syscall.h b/include/arch/arm/syscall.h
new file mode 100644
index 00000000000..11ad287ba01
--- /dev/null
+++ b/include/arch/arm/syscall.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2018 Linaro Limited.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @file
+ * @brief ARM specific syscall header
+ *
+ * This header contains the ARM specific syscall interface. It is
+ * included by the syscall interface architecture-abstraction header
+ * (include/arch/syscall.h)
+ */
+
+#ifndef _ARM_SYSCALL__H_
+#define _ARM_SYSCALL__H_
+
+#define _SVC_CALL_IRQ_OFFLOAD		1
+#define _SVC_CALL_RUNTIME_EXCEPT	2
+#define _SVC_CALL_SYSTEM_CALL		3
+
+#ifdef CONFIG_USERSPACE
+#ifndef _ASMLANGUAGE
+
+#include <zephyr/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Syscall invocation macros. arm-specific machine constraints used to ensure
+ * args land in the proper registers.
+ */
+static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
+					  u32_t arg4, u32_t arg5, u32_t arg6,
+					  u32_t call_id)
+{
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r1 __asm__("r1") = arg2;
+	register u32_t r2 __asm__("r2") = arg3;
+	register u32_t r3 __asm__("r3") = arg4;
+	register u32_t r4 __asm__("r4") = arg5;
+	register u32_t r5 __asm__("r5") = arg6;
+	register u32_t r6 __asm__("r6") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
+			   "r" (r4), "r" (r5), "r" (r6)
+			 : "r8", "memory");
+
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
+					  u32_t arg4, u32_t arg5, u32_t call_id)
+{
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r1 __asm__("r1") = arg2;
+	register u32_t r2 __asm__("r2") = arg3;
+	register u32_t r3 __asm__("r3") = arg4;
+	register u32_t r4 __asm__("r4") = arg5;
+	register u32_t r6 __asm__("r6") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
+			   "r" (r4), "r" (r6)
+			 : "r8", "memory");
+
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
+					  u32_t arg4, u32_t call_id)
+{
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r1 __asm__("r1") = arg2;
+	register u32_t r2 __asm__("r2") = arg3;
+	register u32_t r3 __asm__("r3") = arg4;
+	register u32_t r6 __asm__("r6") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
+			   "r" (r6)
+			 : "r8", "memory");
+
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
+					  u32_t call_id)
+{
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r1 __asm__("r1") = arg2;
+	register u32_t r2 __asm__("r2") = arg3;
+	register u32_t r6 __asm__("r6") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r6)
+			 : "r8", "memory", "r3");
+
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
+{
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r1 __asm__("r1") = arg2;
+	register u32_t r6 __asm__("r6") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r6)
+			 : "r8", "memory", "r2", "r3");
+
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
+{
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r6 __asm__("r6") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r6)
+			 : "r8", "memory", "r1", "r2", "r3");
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke0(u32_t call_id)
+{
+	register u32_t ret __asm__("r0");
+	register u32_t r6 __asm__("r6") = call_id;
+
+	__asm__ volatile("svc %[svid]\n"
+			 : "=r"(ret)
+			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r6)
+			 : "r8", "memory", "r1", "r2", "r3");
+
+	return ret;
+}
+
+static inline int _arch_is_user_context(void)
+{
+	u32_t value;
+
+	/* check for handler mode */
+	__asm__ volatile("mrs %0, IPSR\n\t" : "=r"(value));
+	if (value) {
+		return 0;
+	}
+
+	/* if not handler mode, return mode information */
+	__asm__ volatile("mrs %0, CONTROL\n\t" : "=r"(value));
+	return value & 0x1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ASMLANGUAGE */
+#endif /* CONFIG_USERSPACE */
+#endif /* _ARM_SYSCALL__H_ */
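_arch_is_user_context() is what lets a single wrapper work from both privilege levels: a stub traps only when actually running unprivileged and otherwise calls the kernel implementation directly. A sketch of that pattern with hypothetical names (K_SYSCALL_MY_SEM_GIVE and _impl_my_sem_give are illustrations, not part of this patch):

extern int _impl_my_sem_give(u32_t sem_addr);

static inline int my_sem_give(u32_t sem_addr)
{
	if (_arch_is_user_context()) {
		/* unprivileged: enter the kernel through the SVC path */
		return (int)_arch_syscall_invoke1(sem_addr,
						  K_SYSCALL_MY_SEM_GIVE);
	}
	/* already privileged: plain function call, no trap needed */
	return _impl_my_sem_give(sem_addr);
}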
diff --git a/include/arch/syscall.h b/include/arch/syscall.h
new file mode 100644
index 00000000000..6872ce3ce32
--- /dev/null
+++ b/include/arch/syscall.h
@@ -0,0 +1,20 @@
+/* syscall.h - automatically selects the correct syscall.h file to include */
+
+/*
+ * Copyright (c) 1997-2014 Wind River Systems, Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef __ARCHSYSCALL_H__
+#define __ARCHSYSCALL_H__
+
+#if defined(CONFIG_X86)
+#include <arch/x86/syscall.h>
+#elif defined(CONFIG_ARM)
+#include <arch/arm/syscall.h>
+#elif defined(CONFIG_ARC)
+#include <arch/arc/syscall.h>
+#endif
+
+#endif /* __ARCHSYSCALL_H__ */
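Wiring a new architecture into this selector is a one-line change plus the corresponding per-arch header; hypothetically, a RISC-V port might extend the chain like this (the config symbol and header path below are assumptions, not part of this patch):

#if defined(CONFIG_X86)
#include <arch/x86/syscall.h>
#elif defined(CONFIG_ARM)
#include <arch/arm/syscall.h>
#elif defined(CONFIG_ARC)
#include <arch/arc/syscall.h>
#elif defined(CONFIG_RISCV32)		/* hypothetical future port */
#include <arch/riscv32/syscall.h>
#endif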
diff --git a/include/arch/x86/arch.h b/include/arch/x86/arch.h
index e88c2c360c9..01bb41b4e55 100644
--- a/include/arch/x86/arch.h
+++ b/include/arch/x86/arch.h
@@ -35,8 +35,6 @@ extern "C" {
 #define DATA_SEG 0x10
 #define MAIN_TSS 0x18
 #define DF_TSS 0x20
-#define USER_CODE_SEG 0x2b /* at dpl=3 */
-#define USER_DATA_SEG 0x33 /* at dpl=3 */
 
 /**
  * Macro used internally by NANO_CPU_INT_REGISTER and NANO_CPU_INT_REGISTER_ASM.
@@ -545,142 +543,6 @@ extern FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
 extern struct task_state_segment _main_tss;
 #endif
 
-#ifdef CONFIG_USERSPACE
-/* Syscall invocation macros. x86-specific machine constraints used to ensure
- * args land in the proper registers, see implementation of
- * _x86_syscall_entry_stub in userspace.S
- *
- * the entry stub clobbers EDX and ECX on IAMCU systems
- */
-
-static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
-					  u32_t arg4, u32_t arg5, u32_t arg6,
-					  u32_t call_id)
-{
-	u32_t ret;
-
-	__asm__ volatile("push %%ebp\n\t"
-			 "mov %[arg6], %%ebp\n\t"
-			 "int $0x80\n\t"
-			 "pop %%ebp\n\t"
-			 : "=a" (ret)
-#ifdef CONFIG_X86_IAMCU
-			 , "=d" (arg2), "=c" (arg3)
-#endif
-			 : "S" (call_id), "a" (arg1), "d" (arg2),
-			   "c" (arg3), "b" (arg4), "D" (arg5),
-			   [arg6] "m" (arg6)
-			 : "memory", "esp");
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
-					  u32_t arg4, u32_t arg5, u32_t call_id)
-{
-	u32_t ret;
-
-	__asm__ volatile("int $0x80"
-			 : "=a" (ret)
-#ifdef CONFIG_X86_IAMCU
-			 , "=d" (arg2), "=c" (arg3)
-#endif
-			 : "S" (call_id), "a" (arg1), "d" (arg2),
-			   "c" (arg3), "b" (arg4), "D" (arg5)
-			 : "memory");
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
-					  u32_t arg4, u32_t call_id)
-{
-	u32_t ret;
-
-	__asm__ volatile("int $0x80"
-			 : "=a" (ret)
-#ifdef CONFIG_X86_IAMCU
-			 , "=d" (arg2), "=c" (arg3)
-#endif
-			 : "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3),
-			   "b" (arg4)
-			 : "memory");
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
-					  u32_t call_id)
-{
-	u32_t ret;
-
-	__asm__ volatile("int $0x80"
-			 : "=a" (ret)
-#ifdef CONFIG_X86_IAMCU
-			 , "=d" (arg2), "=c" (arg3)
-#endif
-			 : "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3)
-			 : "memory");
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
-{
-	u32_t ret;
-
-	__asm__ volatile("int $0x80"
-			 : "=a" (ret)
-#ifdef CONFIG_X86_IAMCU
-			 , "=d" (arg2)
-#endif
-			 : "S" (call_id), "a" (arg1), "d" (arg2)
-			 : "memory"
-#ifdef CONFIG_X86_IAMCU
-			 , "ecx"
-#endif
-			 );
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
-{
-	u32_t ret;
-
-	__asm__ volatile("int $0x80"
-			 : "=a" (ret)
-			 : "S" (call_id), "a" (arg1)
-			 : "memory"
-#ifdef CONFIG_X86_IAMCU
-			 , "edx", "ecx"
-#endif
-			 );
-	return ret;
-}
-
-static inline u32_t _arch_syscall_invoke0(u32_t call_id)
-{
-	u32_t ret;
-
-	__asm__ volatile("int $0x80"
-			 : "=a" (ret)
-			 : "S" (call_id)
-			 : "memory"
-#ifdef CONFIG_X86_IAMCU
-			 , "edx", "ecx"
-#endif
-			 );
-	return ret;
-}
-
-static inline int _arch_is_user_context(void)
-{
-	int cs;
-
-	/* On x86, read the CS register (which cannot be manually set) */
-	__asm__ volatile ("mov %%cs, %[cs_val]" : [cs_val] "=r" (cs));
-
-	return cs == USER_CODE_SEG;
-}
-#endif /* CONFIG_USERSPACE */
-
-
 #if defined(CONFIG_HW_STACK_PROTECTION) && defined(CONFIG_USERSPACE)
 /* With both hardware stack protection and userspace enabled, stacks are
  * arranged as follows:
diff --git a/include/arch/x86/syscall.h b/include/arch/x86/syscall.h
new file mode 100644
index 00000000000..db99fd10efc
--- /dev/null
+++ b/include/arch/x86/syscall.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2018 Linaro Limited.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @file
+ * @brief x86 specific syscall header
+ *
+ * This header contains the x86 specific syscall interface. It is
+ * included by the syscall interface architecture-abstraction header
+ * (include/arch/syscall.h)
+ */
+
+#ifndef _X86_SYSCALL__H_
+#define _X86_SYSCALL__H_
+
+#define USER_CODE_SEG 0x2b /* at dpl=3 */
+#define USER_DATA_SEG 0x33 /* at dpl=3 */
+
+#ifdef CONFIG_USERSPACE
+#ifndef _ASMLANGUAGE
+
+#include <zephyr/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Syscall invocation macros. x86-specific machine constraints used to ensure
+ * args land in the proper registers, see implementation of
+ * _x86_syscall_entry_stub in userspace.S
+ *
+ * the entry stub clobbers EDX and ECX on IAMCU systems
+ */
+
+static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
+					  u32_t arg4, u32_t arg5, u32_t arg6,
+					  u32_t call_id)
+{
+	u32_t ret;
+
+	__asm__ volatile("push %%ebp\n\t"
+			 "mov %[arg6], %%ebp\n\t"
+			 "int $0x80\n\t"
+			 "pop %%ebp\n\t"
+			 : "=a" (ret)
+#ifdef CONFIG_X86_IAMCU
+			 , "=d" (arg2), "=c" (arg3)
+#endif
+			 : "S" (call_id), "a" (arg1), "d" (arg2),
+			   "c" (arg3), "b" (arg4), "D" (arg5),
+			   [arg6] "m" (arg6)
+			 : "memory", "esp");
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
+					  u32_t arg4, u32_t arg5, u32_t call_id)
+{
+	u32_t ret;
+
+	__asm__ volatile("int $0x80"
+			 : "=a" (ret)
+#ifdef CONFIG_X86_IAMCU
+			 , "=d" (arg2), "=c" (arg3)
+#endif
+			 : "S" (call_id), "a" (arg1), "d" (arg2),
+			   "c" (arg3), "b" (arg4), "D" (arg5)
+			 : "memory");
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
+					  u32_t arg4, u32_t call_id)
+{
+	u32_t ret;
+
+	__asm__ volatile("int $0x80"
+			 : "=a" (ret)
+#ifdef CONFIG_X86_IAMCU
+			 , "=d" (arg2), "=c" (arg3)
+#endif
+			 : "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3),
+			   "b" (arg4)
+			 : "memory");
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
+					  u32_t call_id)
+{
+	u32_t ret;
+
+	__asm__ volatile("int $0x80"
+			 : "=a" (ret)
+#ifdef CONFIG_X86_IAMCU
+			 , "=d" (arg2), "=c" (arg3)
+#endif
+			 : "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3)
+			 : "memory");
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
+{
+	u32_t ret;
+
+	__asm__ volatile("int $0x80"
+			 : "=a" (ret)
+#ifdef CONFIG_X86_IAMCU
+			 , "=d" (arg2)
+#endif
+			 : "S" (call_id), "a" (arg1), "d" (arg2)
+			 : "memory"
+#ifdef CONFIG_X86_IAMCU
+			 , "ecx"
+#endif
+			 );
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
+{
+	u32_t ret;
+
+	__asm__ volatile("int $0x80"
+			 : "=a" (ret)
+			 : "S" (call_id), "a" (arg1)
+			 : "memory"
+#ifdef CONFIG_X86_IAMCU
+			 , "edx", "ecx"
+#endif
+			 );
+	return ret;
+}
+
+static inline u32_t _arch_syscall_invoke0(u32_t call_id)
+{
+	u32_t ret;
+
+	__asm__ volatile("int $0x80"
+			 : "=a" (ret)
+			 : "S" (call_id)
+			 : "memory"
+#ifdef CONFIG_X86_IAMCU
+			 , "edx", "ecx"
+#endif
+			 );
+	return ret;
+}
+
+static inline int _arch_is_user_context(void)
+{
+	int cs;
+
+	/* On x86, read the CS register (which cannot be manually set) */
+	__asm__ volatile ("mov %%cs, %[cs_val]" : [cs_val] "=r" (cs));
+
+	return cs == USER_CODE_SEG;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ASMLANGUAGE */
+#endif /* CONFIG_USERSPACE */
+#endif /* _X86_SYSCALL__H_ */
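Note the asymmetry in the x86 invoke6 above: EAX, EDX, ECX, EBX and EDI carry the first five arguments and ESI the call ID, so the sixth argument has to travel in EBP, which doubles as the frame pointer and is therefore saved and restored by hand around the INT 0x80. A hypothetical caller exercising that path (the call ID macro is an assumed name, not part of this patch):

static inline u32_t my_copy(u32_t dst, u32_t src, u32_t len,
			    u32_t flags, u32_t key, u32_t timeout)
{
	/* arg6 (timeout) is marshalled through EBP by invoke6 */
	return _arch_syscall_invoke6(dst, src, len, flags, key, timeout,
				     K_SYSCALL_MY_COPY);
}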
diff --git a/include/syscall.h b/include/syscall.h
index 281efa630da..5cd2a604cab 100644
--- a/include/syscall.h
+++ b/include/syscall.h
@@ -12,7 +12,7 @@
 #include
 #include
 #include
-#include
+#include <arch/syscall.h>
 
 #ifdef __cplusplus
 extern "C" {
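On the kernel side of the trap, the per-arch entry stubs (such as _x86_syscall_entry_stub above) recover the arguments from the same registers and use call_id to index a table of handlers. Roughly, in C, that dispatch amounts to the sketch below; the table, limit and bad-ID symbols are assumed names for illustration:

typedef u32_t (*syscall_handler_t)(u32_t arg1, u32_t arg2, u32_t arg3,
				   u32_t arg4, u32_t arg5, u32_t arg6);

extern const syscall_handler_t _k_syscall_table[K_SYSCALL_LIMIT];

static u32_t syscall_dispatch(u32_t call_id, u32_t a1, u32_t a2, u32_t a3,
			      u32_t a4, u32_t a5, u32_t a6)
{
	if (call_id >= K_SYSCALL_LIMIT) {
		call_id = K_SYSCALL_BAD;	/* route bad IDs to an error handler */
	}
	return _k_syscall_table[call_id](a1, a2, a3, a4, a5, a6);
}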