xtensa: Enable userspace
Userspace support for the Xtensa architecture using the Xtensa MMU.

Some considerations:

- Syscalls are not inline functions as on other architectures because of
  compiler issues when using multiple registers to pass parameters to the
  syscall. Using a real function call lets us assign registers as needed.
- TLS is not supported by xcc on Xtensa, and reading the PS register is a
  privileged instruction, so we use THREADPTR to tell whether a thread is
  running in user mode.

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com> Signed-off-by: Daniel Leung <daniel.leung@intel.com>
parent fff91cb542
commit a651862b30
19 changed files with 1646 additions and 68 deletions
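As a quick illustration of the THREADPTR convention mentioned in the commit message: PS cannot be read from user code, so user-mode detection relies on THREADPTR being zero in kernel/exception context and non-zero only while a user thread executes. A minimal sketch, assuming this convention (illustrative only; the function name is made up and the real check lives in the arch syscall header):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: THREADPTR is cleared on syscall/exception entry (see
 * z_xtensa_do_syscall below) and set to the thread pointer before
 * dropping to user mode, so a non-zero value implies user context.
 */
static inline bool is_user_context_sketch(void)
{
	uint32_t thread_ptr;

	__asm__ volatile("rur.THREADPTR %0" : "=a"(thread_ptr));

	return thread_ptr != 0;
}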
|
@@ -125,6 +125,7 @@ config XTENSA
|
|||
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
|
||||
select ARCH_HAS_CODE_DATA_RELOCATION
|
||||
select ARCH_HAS_TIMING_FUNCTIONS
|
||||
select ARCH_MEM_DOMAIN_DATA if USERSPACE
|
||||
imply ATOMIC_OPERATIONS_ARCH
|
||||
help
|
||||
Xtensa architecture
|
||||
|
|
|
@@ -113,6 +113,7 @@ config XTENSA_MMU
|
|||
bool "Xtensa MMU Support"
|
||||
default n
|
||||
select MMU
|
||||
select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE
|
||||
select XTENSA_SMALL_VECTOR_TABLE_ENTRY
|
||||
select KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK if XTENSA_RPO_CACHE
|
||||
help
|
||||
|
@@ -144,8 +145,18 @@ if XTENSA_MMU
|
|||
The bit shift number for the virtual address for Xtensa
|
||||
page table (PTEVADDR).
|
||||
|
||||
config XTENSA_MMU_NUM_L1_TABLES
|
||||
int "Number of L1 page tables"
|
||||
default 1 if !USERSPACE
|
||||
default 4
|
||||
help
|
||||
This option specifies the maximum number of translation tables.
|
||||
Translation tables are directly related to the number of
|
||||
memory domains in the target, considering the kernel itself requires one.
|
||||
|
||||
config XTENSA_MMU_NUM_L2_TABLES
|
||||
int "Number of L2 page tables"
|
||||
default 20 if USERSPACE
|
||||
default 10
|
||||
help
|
||||
Each L2 table maps up to 4MB of memory (1024 entries of 4KB pages).
|
||||
|
@@ -159,6 +170,15 @@ if XTENSA_MMU
|
|||
|
||||
endif # XTENSA_MMU
|
||||
|
||||
config XTENSA_SYSCALL_USE_HELPER
|
||||
bool "Use userspace syscall helper"
|
||||
default y if "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "xcc-clang"
|
||||
depends on USERSPACE
|
||||
help
|
||||
Use syscall helpers for passing more than 3 arguments.
|
||||
This is a workaround for toolchains that have
|
||||
issues modeling register usage.
|
||||
|
||||
endif # CPU_HAS_MMU
|
||||
|
||||
endmenu
|
||||
|
|
|
@@ -22,6 +22,8 @@ zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP coredump.c)
|
|||
zephyr_library_sources_ifdef(CONFIG_TIMING_FUNCTIONS timing.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_GDBSTUB gdbstub.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_XTENSA_MMU xtensa_mmu.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace.S)
|
||||
zephyr_library_sources_ifdef(CONFIG_XTENSA_SYSCALL_USE_HELPER syscall_helper.c)
|
||||
|
||||
zephyr_library_sources_ifdef(
|
||||
CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK
|
||||
|
|
|
@@ -15,6 +15,7 @@
|
|||
#endif
|
||||
#endif
|
||||
#include <zephyr/debug/coredump.h>
|
||||
#include <zephyr/arch/common/exc_handle.h>
|
||||
#include <zephyr/logging/log.h>
|
||||
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
|
||||
|
||||
|
@@ -120,6 +121,14 @@ void z_xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
|
|||
z_fatal_error(reason, esf);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
Z_EXC_DECLARE(z_xtensa_user_string_nlen);
|
||||
|
||||
static const struct z_exc_handle exceptions[] = {
|
||||
Z_EXC_HANDLE(z_xtensa_user_string_nlen)
|
||||
};
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
#ifdef XT_SIMULATOR
|
||||
void exit(int return_code)
|
||||
{
|
||||
|
|
|
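The Z_EXC_DECLARE/Z_EXC_HANDLE table registered in the fatal.c hunk above is what lets the page-fault path recover when z_xtensa_user_string_nlen touches an unmapped user string. A minimal sketch of how a fault handler consults such a table, assuming the common struct z_exc_handle layout with start/end/fixup pointers (the helper name is hypothetical, not part of this diff):

/* Minimal sketch, assuming struct z_exc_handle provides start, end and
 * fixup pointers: if the faulting PC falls inside the guarded range,
 * redirect execution to the fixup label instead of treating the fault
 * as fatal.
 */
static bool try_pc_fixup(uint32_t *pc)
{
	for (size_t i = 0; i < ARRAY_SIZE(exceptions); i++) {
		uint32_t start = (uint32_t)(uintptr_t)exceptions[i].start;
		uint32_t end = (uint32_t)(uintptr_t)exceptions[i].end;

		if ((*pc >= start) && (*pc < end)) {
			*pc = (uint32_t)(uintptr_t)exceptions[i].fixup;
			return true;
		}
	}

	return false;
}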
@@ -18,18 +18,37 @@
|
|||
#define Z_XTENSA_PTE_VPN_MASK 0xFFFFF000U
|
||||
#define Z_XTENSA_PTE_PPN_MASK 0xFFFFF000U
|
||||
#define Z_XTENSA_PTE_ATTR_MASK 0x0000000FU
|
||||
#define Z_XTENSA_PTE_ATTR_CACHED_MASK 0x0000000CU
|
||||
#define Z_XTENSA_L1_MASK 0x3FF00000U
|
||||
#define Z_XTENSA_L2_MASK 0x3FFFFFU
|
||||
|
||||
#define Z_XTENSA_PPN_SHIFT 12U
|
||||
|
||||
#define Z_XTENSA_PTE_RING_MASK 0x00000030U
|
||||
#define Z_XTENSA_PTE_RING_SHIFT 4U
|
||||
|
||||
#define Z_XTENSA_PTE(paddr, ring, attr) \
|
||||
(((paddr) & Z_XTENSA_PTE_PPN_MASK) | \
|
||||
(((ring) << 4) & Z_XTENSA_PTE_RING_MASK) | \
|
||||
(((ring) << Z_XTENSA_PTE_RING_SHIFT) & Z_XTENSA_PTE_RING_MASK) | \
|
||||
((attr) & Z_XTENSA_PTE_ATTR_MASK))
|
||||
|
||||
#define Z_XTENSA_PTE_ATTR_GET(pte) \
|
||||
(pte) & Z_XTENSA_PTE_ATTR_MASK
|
||||
|
||||
#define Z_XTENSA_PTE_ATTR_SET(pte, attr) \
|
||||
(((pte) & ~Z_XTENSA_PTE_ATTR_MASK) | (attr))
|
||||
|
||||
#define Z_XTENSA_PTE_RING_SET(pte, ring) \
|
||||
(((pte) & ~Z_XTENSA_PTE_RING_MASK) | \
|
||||
((ring) << Z_XTENSA_PTE_RING_SHIFT))
|
||||
|
||||
#define Z_XTENSA_PTE_RING_GET(pte) \
|
||||
(((pte) & ~Z_XTENSA_PTE_RING_MASK) >> Z_XTENSA_PTE_RING_SHIFT)
|
||||
|
||||
#define Z_XTENSA_PTE_ASID_GET(pte, rasid) \
|
||||
(((rasid) >> ((((pte) & Z_XTENSA_PTE_RING_MASK) \
|
||||
>> Z_XTENSA_PTE_RING_SHIFT) * 8)) & 0xFF)
|
||||
|
||||
#define Z_XTENSA_TLB_ENTRY(vaddr, way) \
|
||||
(((vaddr) & Z_XTENSA_PTE_PPN_MASK) | (way))
|
||||
|
||||
|
@@ -38,11 +57,38 @@
|
|||
(((vaddr) >> Z_XTENSA_PPN_SHIFT) & 0x03U))
|
||||
|
||||
#define Z_XTENSA_L2_POS(vaddr) \
|
||||
(((vaddr) & Z_XTENSA_L2_MASK) >> Z_XTENSA_PPN_SHIFT)
|
||||
(((vaddr) & Z_XTENSA_L2_MASK) >> 12U)
|
||||
|
||||
#define Z_XTENSA_L1_POS(vaddr) \
|
||||
((vaddr) >> 22U)
|
||||
|
||||
/* PTE attributes for entries in the L1 page table. Should never be
|
||||
* writable, may be cached in non-SMP contexts only
|
||||
*/
|
||||
#if CONFIG_MP_MAX_NUM_CPUS == 1
|
||||
#define Z_XTENSA_PAGE_TABLE_ATTR Z_XTENSA_MMU_CACHED_WB
|
||||
#else
|
||||
#define Z_XTENSA_PAGE_TABLE_ATTR 0
|
||||
#endif
|
||||
|
||||
/* This ASID is shared between all domains and kernel. */
|
||||
#define Z_XTENSA_MMU_SHARED_ASID 255
|
||||
|
||||
/* Fixed data TLB way to map the page table */
|
||||
#define Z_XTENSA_MMU_PTE_WAY 7
|
||||
|
||||
/* Fixed data TLB way to map the vecbase */
|
||||
#define Z_XTENSA_MMU_VECBASE_WAY 8
|
||||
|
||||
/* Kernel specific ASID. Ring field in the PTE */
|
||||
#define Z_XTENSA_KERNEL_RING 0
|
||||
|
||||
/* User specific ASID. Ring field in the PTE */
|
||||
#define Z_XTENSA_USER_RING 2
|
||||
|
||||
/* Ring value for MMU_SHARED_ASID */
|
||||
#define Z_XTENSA_SHARED_RING 3
|
||||
|
||||
/* Number of data TLB ways [0-9] */
|
||||
#define Z_XTENSA_DTLB_WAYS 10
|
||||
|
||||
|
@@ -96,6 +142,14 @@
|
|||
#define Z_XTENSA_PAGE_TABLE_VADDR \
|
||||
Z_XTENSA_PTE_ENTRY_VADDR(Z_XTENSA_PTEVADDR)
|
||||
|
||||
/*
|
||||
* Get the ASID for a given ring from the RASID register.
|
||||
* RASID contains four ASIDs, one per ring.
|
||||
*/
|
||||
|
||||
#define Z_XTENSA_RASID_ASID_GET(rasid, ring) \
|
||||
(((rasid) >> ((ring) * 8)) & 0xff)
|
||||
|
||||
static ALWAYS_INLINE void xtensa_rasid_set(uint32_t rasid)
|
||||
{
|
||||
__asm__ volatile("wsr %0, rasid\n\t"
|
||||
|
@@ -110,6 +164,16 @@ static ALWAYS_INLINE uint32_t xtensa_rasid_get(void)
|
|||
return rasid;
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void xtensa_rasid_asid_set(uint8_t asid, uint8_t pos)
|
||||
{
|
||||
uint32_t rasid = xtensa_rasid_get();
|
||||
|
||||
rasid = (rasid & ~(0xff << (pos * 8))) | ((uint32_t)asid << (pos * 8));
|
||||
|
||||
xtensa_rasid_set(rasid);
|
||||
}
|
||||
|
||||
|
||||
static ALWAYS_INLINE void xtensa_itlb_entry_invalidate(uint32_t entry)
|
||||
{
|
||||
__asm__ volatile("iitlb %0\n\t"
|
||||
|
@@ -201,6 +265,21 @@ static ALWAYS_INLINE void xtensa_ptevaddr_set(void *ptables)
|
|||
__asm__ volatile("wsr.ptevaddr %0" : : "a"((uint32_t)ptables));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get the current page tables.
|
||||
*
|
||||
* The page table address is obtained by reading the PTEVADDR register.
|
||||
*
|
||||
* @return Page table address (virtual address).
|
||||
*/
|
||||
static ALWAYS_INLINE void *xtensa_ptevaddr_get(void)
|
||||
{
|
||||
uint32_t ptables;
|
||||
|
||||
__asm__ volatile("rsr.ptevaddr %0" : "=a" (ptables));
|
||||
|
||||
return (void *)ptables;
|
||||
}
|
||||
/*
|
||||
* The following functions are helpful when debugging.
|
||||
*/
|
||||
|
|
|
@@ -5,6 +5,7 @@
|
|||
|
||||
#include <gen_offset.h>
|
||||
#include <kernel_offsets.h>
|
||||
#include <zephyr/arch/xtensa/thread.h>
|
||||
|
||||
#include <xtensa-asm2-context.h>
|
||||
|
||||
|
@@ -60,4 +61,10 @@ GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu14);
|
|||
GEN_OFFSET_SYM(_xtensa_irq_bsa_t, fpu15);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
GEN_OFFSET_SYM(_thread_arch_t, psp);
|
||||
GEN_OFFSET_SYM(_thread_arch_t, ptables);
|
||||
#endif
|
||||
|
||||
|
||||
GEN_ABS_SYM_END
|
||||
|
|
arch/xtensa/core/syscall_helper.c (new file, 124 lines)
|
@@ -0,0 +1,124 @@
|
|||
/*
|
||||
* Copyright (c) 2022 Intel Corporation.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#include <zephyr/arch/xtensa/syscall.h>
|
||||
|
||||
uintptr_t arch_syscall_invoke6_helper(uintptr_t arg1, uintptr_t arg2,
|
||||
uintptr_t arg3, uintptr_t arg4,
|
||||
uintptr_t arg5, uintptr_t arg6,
|
||||
uintptr_t call_id)
|
||||
{
|
||||
register uintptr_t a2 __asm__("%a2") = call_id;
|
||||
register uintptr_t a6 __asm__("%a6") = arg1;
|
||||
register uintptr_t a3 __asm__("%a3") = arg2;
|
||||
register uintptr_t a4 __asm__("%a4") = arg3;
|
||||
register uintptr_t a5 __asm__("%a5") = arg4;
|
||||
register uintptr_t a8 __asm__("%a8") = arg5;
|
||||
register uintptr_t a9 __asm__("%a9") = arg6;
|
||||
|
||||
__asm__ volatile("syscall\n\t"
|
||||
: "=r" (a2)
|
||||
: "r" (a2), "r" (a6), "r" (a3), "r" (a4),
|
||||
"r" (a5), "r" (a8), "r" (a9)
|
||||
: "memory");
|
||||
|
||||
return a2;
|
||||
}
|
||||
|
||||
uintptr_t arch_syscall_invoke5_helper(uintptr_t arg1, uintptr_t arg2,
|
||||
uintptr_t arg3, uintptr_t arg4,
|
||||
uintptr_t arg5, uintptr_t call_id)
|
||||
{
|
||||
register uintptr_t a2 __asm__("%a2") = call_id;
|
||||
register uintptr_t a6 __asm__("%a6") = arg1;
|
||||
register uintptr_t a3 __asm__("%a3") = arg2;
|
||||
register uintptr_t a4 __asm__("%a4") = arg3;
|
||||
register uintptr_t a5 __asm__("%a5") = arg4;
|
||||
register uintptr_t a8 __asm__("%a8") = arg5;
|
||||
|
||||
__asm__ volatile("syscall\n\t"
|
||||
: "=r" (a2)
|
||||
: "r" (a2), "r" (a6), "r" (a3), "r" (a4),
|
||||
"r" (a5), "r" (a8)
|
||||
: "memory");
|
||||
|
||||
return a2;
|
||||
}
|
||||
|
||||
uintptr_t arch_syscall_invoke4_helper(uintptr_t arg1, uintptr_t arg2,
|
||||
uintptr_t arg3, uintptr_t arg4,
|
||||
uintptr_t call_id)
|
||||
{
|
||||
register uintptr_t a2 __asm__("%a2") = call_id;
|
||||
register uintptr_t a6 __asm__("%a6") = arg1;
|
||||
register uintptr_t a3 __asm__("%a3") = arg2;
|
||||
register uintptr_t a4 __asm__("%a4") = arg3;
|
||||
register uintptr_t a5 __asm__("%a5") = arg4;
|
||||
|
||||
__asm__ volatile("syscall\n\t"
|
||||
: "=r" (a2)
|
||||
: "r" (a2), "r" (a6), "r" (a3), "r" (a4),
|
||||
"r" (a5)
|
||||
: "memory");
|
||||
|
||||
return a2;
|
||||
}
|
||||
|
||||
uintptr_t arch_syscall_invoke3_helper(uintptr_t arg1, uintptr_t arg2,
|
||||
uintptr_t arg3, uintptr_t call_id)
|
||||
{
|
||||
register uintptr_t a2 __asm__("%a2") = call_id;
|
||||
register uintptr_t a6 __asm__("%a6") = arg1;
|
||||
register uintptr_t a3 __asm__("%a3") = arg2;
|
||||
register uintptr_t a4 __asm__("%a4") = arg3;
|
||||
|
||||
__asm__ volatile("syscall\n\t"
|
||||
: "=r" (a2)
|
||||
: "r" (a2), "r" (a6), "r" (a3), "r" (a4)
|
||||
: "memory");
|
||||
|
||||
return a2;
|
||||
}
|
||||
|
||||
uintptr_t arch_syscall_invoke2_helper(uintptr_t arg1, uintptr_t arg2,
|
||||
uintptr_t call_id)
|
||||
{
|
||||
register uintptr_t a2 __asm__("%a2") = call_id;
|
||||
register uintptr_t a6 __asm__("%a6") = arg1;
|
||||
register uintptr_t a3 __asm__("%a3") = arg2;
|
||||
|
||||
__asm__ volatile("syscall\n\t"
|
||||
: "=r" (a2)
|
||||
: "r" (a2), "r" (a6), "r" (a3)
|
||||
: "memory");
|
||||
|
||||
return a2;
|
||||
}
|
||||
|
||||
uintptr_t arch_syscall_invoke1_helper(uintptr_t arg1, uintptr_t call_id)
|
||||
{
|
||||
register uintptr_t a2 __asm__("%a2") = call_id;
|
||||
register uintptr_t a6 __asm__("%a6") = arg1;
|
||||
|
||||
__asm__ volatile("syscall\n\t"
|
||||
: "=r" (a2)
|
||||
: "r" (a2), "r" (a6)
|
||||
: "memory");
|
||||
|
||||
return a2;
|
||||
}
|
||||
|
||||
uintptr_t arch_syscall_invoke0_helper(uintptr_t call_id)
|
||||
{
|
||||
register uintptr_t a2 __asm__("%a2") = call_id;
|
||||
|
||||
__asm__ volatile("syscall\n\t"
|
||||
: "=r" (a2)
|
||||
: "r" (a2)
|
||||
: "memory");
|
||||
|
||||
return a2;
|
||||
}
|
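For context on how these helpers are meant to be consumed: with CONFIG_XTENSA_SYSCALL_USE_HELPER enabled, the header-level arch_syscall_invoke*() functions are expected to simply forward to the out-of-line helpers above, so the compiler never has to model the syscall register assignments at every call site. A hedged sketch of one such wrapper (the wrapper shape is assumed; only the helper itself is part of this diff):

/* Sketch only: assumes a header-side wrapper that forwards to the
 * out-of-line helper defined in syscall_helper.c above.
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t call_id)
{
	return arch_syscall_invoke3_helper(arg1, arg2, arg3, call_id);
}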
arch/xtensa/core/userspace.S (new file, 302 lines)
|
@@ -0,0 +1,302 @@
|
|||
/*
|
||||
* Copyright (c) 2022, Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#include <xtensa-asm2-s.h>
|
||||
#include <offsets.h>
|
||||
#include <offsets_short.h>
|
||||
#include <zephyr/syscall.h>
|
||||
#include <zsr.h>
|
||||
|
||||
/**
|
||||
* syscall number arg1, arg2, arg3, arg4, arg5, arg6
|
||||
* -------------- ----------------------------------
|
||||
* a2 a6, a3, a4, a5, a8, a9
|
||||
*
|
||||
**/
|
||||
.pushsection .text.z_xtensa_do_syscall, "ax"
|
||||
.global z_xtensa_do_syscall
|
||||
.align 4
|
||||
z_xtensa_do_syscall:
|
||||
rsr a0, ZSR_CPU
|
||||
l32i a0, a0, ___cpu_t_current_OFFSET
|
||||
l32i a0, a0, _thread_offset_to_psp
|
||||
|
||||
addi a0, a0, -___xtensa_irq_bsa_t_SIZEOF
|
||||
|
||||
s32i a1, a0, ___xtensa_irq_bsa_t_scratch_OFFSET
|
||||
s32i a2, a0, ___xtensa_irq_bsa_t_a2_OFFSET
|
||||
s32i a3, a0, ___xtensa_irq_bsa_t_a3_OFFSET
|
||||
rsr a2, ZSR_A0SAVE
|
||||
s32i a2, a0, ___xtensa_irq_bsa_t_a0_OFFSET
|
||||
rsr.ps a2
|
||||
movi a3, ~PS_OWB_MASK
|
||||
and a2, a2, a3
|
||||
s32i a2, a0, ___xtensa_irq_bsa_t_ps_OFFSET
|
||||
rsr.epc1 a2
|
||||
s32i a2, a0, ___xtensa_irq_bsa_t_pc_OFFSET
|
||||
|
||||
movi a2, PS_WOE|PS_INTLEVEL(XCHAL_NMILEVEL)
|
||||
rsr.ps a3
|
||||
or a3, a3, a2
|
||||
movi a2, ~(PS_EXCM | PS_RING_MASK)
|
||||
and a3, a3, a2
|
||||
wsr.ps a3
|
||||
rsync
|
||||
l32i a2, a0, ___xtensa_irq_bsa_t_a2_OFFSET
|
||||
l32i a3, a0, ___xtensa_irq_bsa_t_a3_OFFSET
|
||||
SPILL_ALL_WINDOWS
|
||||
|
||||
rsr a0, ZSR_CPU
|
||||
l32i a0, a0, ___cpu_t_current_OFFSET
|
||||
l32i a0, a0, _thread_offset_to_psp
|
||||
addi a0, a0, -___xtensa_irq_bsa_t_SIZEOF
|
||||
|
||||
mov a1, a0
|
||||
|
||||
l32i a3, a1, ___xtensa_irq_bsa_t_pc_OFFSET
|
||||
#if XCHAL_HAVE_LOOPS
|
||||
/* If the syscall instruction was the last instruction in the body of
|
||||
* a zero-overhead loop, and the loop will execute again, decrement
|
||||
* the loop count and resume execution at the head of the loop.
|
||||
*/
|
||||
rsr.lend a2
|
||||
addi a3, a3, 3
|
||||
bne a2, a3, end_loop
|
||||
rsr.lcount a2
|
||||
beqz a2, end_loop
|
||||
addi a2, a2, -1
|
||||
wsr.lcount a2
|
||||
rsr.lbeg a3
|
||||
end_loop:
|
||||
#else
|
||||
/* EPC1 (and now a3) contains the address that invoked the syscall.
|
||||
* We need to increment it to execute the next instruction when
|
||||
* we return. The instruction size is 3 bytes, so let's just add 3.
|
||||
*/
|
||||
addi a3, a3, 3
|
||||
#endif
|
||||
s32i a3, a1, ___xtensa_irq_bsa_t_pc_OFFSET
|
||||
ODD_REG_SAVE
|
||||
|
||||
call0 xtensa_save_high_regs
|
||||
|
||||
l32i a2, a1, 0
|
||||
l32i a2, a2, ___xtensa_irq_bsa_t_a2_OFFSET
|
||||
movi a0, K_SYSCALL_LIMIT
|
||||
bgeu a2, a0, _bad_syscall
|
||||
|
||||
_id_ok:
|
||||
/* Find the function handler for the given syscall id. */
|
||||
movi a3, _k_syscall_table
|
||||
slli a2, a2, 2
|
||||
add a2, a2, a3
|
||||
l32i a2, a2, 0
|
||||
|
||||
/* Clear the threadptr because it is used
|
||||
* to check whether a thread is running in user mode. Since
|
||||
* we are handling an exception, we don't want the system
|
||||
* to think it might be running in user mode.
|
||||
*/
|
||||
movi a0, 0
|
||||
wur.THREADPTR a0
|
||||
|
||||
/* Set syscall parameters. We have an initial call4 to set up
|
||||
* the stack and then a new call4 for the syscall function itself.
|
||||
* So parameters should be placed as if this were a call8.
|
||||
*/
|
||||
mov a10, a8
|
||||
mov a11, a9
|
||||
mov a8, a4
|
||||
mov a9, a5
|
||||
l32i a3, a1, 0
|
||||
l32i a7, a3, ___xtensa_irq_bsa_t_a3_OFFSET
|
||||
|
||||
|
||||
/* Since we are unmasking EXCM, we need to set RING bits to kernel
|
||||
* mode, otherwise we won't be able to run the exception handler in C.
|
||||
*/
|
||||
movi a0, PS_WOE|PS_CALLINC(0)|PS_UM|PS_INTLEVEL(0)
|
||||
wsr.ps a0
|
||||
rsync
|
||||
|
||||
call4 _syscall_call0
|
||||
|
||||
/* Copy the return value. Let's put it at the top of the stack
|
||||
* because registers will be clobbered in
|
||||
* xtensa_restore_high_regs
|
||||
*/
|
||||
l32i a3, a1, 0
|
||||
s32i a6, a3, ___xtensa_irq_bsa_t_a2_OFFSET
|
||||
|
||||
j _syscall_returned
|
||||
|
||||
.align 4
|
||||
_syscall_call0:
|
||||
/* We want an ENTRY to set a bit in windowstart */
|
||||
jx a2
|
||||
|
||||
|
||||
_syscall_returned:
|
||||
call0 xtensa_restore_high_regs
|
||||
|
||||
l32i a3, a1, ___xtensa_irq_bsa_t_sar_OFFSET
|
||||
wsr a3, SAR
|
||||
#if XCHAL_HAVE_LOOPS
|
||||
l32i a3, a1, ___xtensa_irq_bsa_t_lbeg_OFFSET
|
||||
wsr a3, LBEG
|
||||
l32i a3, a1, ___xtensa_irq_bsa_t_lend_OFFSET
|
||||
wsr a3, LEND
|
||||
l32i a3, a1, ___xtensa_irq_bsa_t_lcount_OFFSET
|
||||
wsr a3, LCOUNT
|
||||
#endif
|
||||
#if XCHAL_HAVE_S32C1I
|
||||
l32i a3, a1, ___xtensa_irq_bsa_t_scompare1_OFFSET
|
||||
wsr a3, SCOMPARE1
|
||||
#endif
|
||||
|
||||
rsr a3, ZSR_CPU
|
||||
l32i a3, a3, ___cpu_t_current_OFFSET
|
||||
wur.THREADPTR a3
|
||||
|
||||
l32i a3, a1, ___xtensa_irq_bsa_t_ps_OFFSET
|
||||
wsr.ps a3
|
||||
|
||||
l32i a3, a1, ___xtensa_irq_bsa_t_pc_OFFSET
|
||||
wsr.epc1 a3
|
||||
|
||||
l32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET
|
||||
l32i a2, a1, ___xtensa_irq_bsa_t_a2_OFFSET
|
||||
l32i a3, a1, ___xtensa_irq_bsa_t_a3_OFFSET
|
||||
|
||||
l32i a1, a1, ___xtensa_irq_bsa_t_scratch_OFFSET
|
||||
rsync
|
||||
|
||||
rfe
|
||||
|
||||
_bad_syscall:
|
||||
movi a2, K_SYSCALL_BAD
|
||||
j _id_ok
|
||||
|
||||
.popsection
|
||||
|
||||
/* FUNC_NORETURN void z_xtensa_userspace_enter(k_thread_entry_t user_entry,
|
||||
* void *p1, void *p2, void *p3,
|
||||
* uint32_t stack_end,
|
||||
* uint32_t stack_start)
|
||||
*
|
||||
* A one-way trip to userspace.
|
||||
*/
|
||||
.global z_xtensa_userspace_enter
|
||||
.type z_xtensa_userspace_enter, @function
|
||||
.align 4
|
||||
z_xtensa_userspace_enter:
|
||||
/* Call entry to set a bit in the windowstart and
|
||||
* do the rotation, but we are going to set our own
|
||||
* stack.
|
||||
*/
|
||||
entry a1, 16
|
||||
|
||||
/* We have to switch to the kernel stack before spilling kernel data and
|
||||
* erasing the user stack, to avoid leaking data from the previous context.
|
||||
*/
|
||||
mov a1, a7 /* stack start (low address) */
|
||||
addi a1, a1, -16
|
||||
|
||||
SPILL_ALL_WINDOWS
|
||||
|
||||
rsr a0, ZSR_CPU
|
||||
l32i a0, a0, ___cpu_t_current_OFFSET
|
||||
|
||||
addi a1, a1, -28
|
||||
s32i a0, a1, 24
|
||||
s32i a2, a1, 20
|
||||
s32i a3, a1, 16
|
||||
s32i a4, a1, 12
|
||||
s32i a5, a1, 8
|
||||
s32i a6, a1, 4
|
||||
s32i a7, a1, 0
|
||||
|
||||
l32i a6, a1, 24
|
||||
call4 xtensa_user_stack_perms
|
||||
|
||||
l32i a6, a1, 24
|
||||
call4 z_xtensa_swap_update_page_tables
|
||||
|
||||
/* Set threadptr to the thread address; we are going to user mode. */
|
||||
l32i a0, a1, 24
|
||||
wur.THREADPTR a0
|
||||
|
||||
/* Now set the z_thread_entry parameters; we are simulating a call4
|
||||
* call, so parameters start at a6, a7, ...
|
||||
*/
|
||||
l32i a6, a1, 20
|
||||
l32i a7, a1, 16
|
||||
l32i a8, a1, 12
|
||||
l32i a9, a1, 8
|
||||
|
||||
/* stash user stack */
|
||||
l32i a0, a1, 4
|
||||
|
||||
addi a1, a1, 28
|
||||
|
||||
/* Go back to user stack */
|
||||
mov a1, a0
|
||||
|
||||
movi a0, z_thread_entry
|
||||
wsr.epc2 a0
|
||||
|
||||
/* Configure the PS register.
|
||||
* We have to set callinc as well, since the called
|
||||
* function will do "entry"
|
||||
*/
|
||||
movi a0, PS_WOE|PS_CALLINC(1)|PS_UM|PS_RING(2)
|
||||
wsr a0, EPS2
|
||||
|
||||
movi a0, 0
|
||||
|
||||
rfi 2
|
||||
|
||||
/*
|
||||
* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
|
||||
*/
|
||||
.global arch_user_string_nlen
|
||||
.type arch_user_string_nlen, @function
|
||||
.align 4
|
||||
arch_user_string_nlen:
|
||||
entry a1, 32
|
||||
|
||||
/* error value, set to -1. */
|
||||
movi a5, -1
|
||||
s32i a5, a4, 0
|
||||
|
||||
/* length count */
|
||||
xor a5, a5, a5
|
||||
|
||||
/* This code might page fault */
|
||||
strlen_loop:
|
||||
.global z_xtensa_user_string_nlen_fault_start
|
||||
z_xtensa_user_string_nlen_fault_start:
|
||||
l8ui a6, a2, 0 /* Current char */
|
||||
|
||||
.global z_xtensa_user_string_nlen_fault_end
|
||||
z_xtensa_user_string_nlen_fault_end:
|
||||
beqz a6, strlen_done
|
||||
addi a5, a5, 1
|
||||
addi a2, a2, 1
|
||||
beq a5, a3, strlen_done
|
||||
j strlen_loop
|
||||
|
||||
strlen_done:
|
||||
/* Set return value */
|
||||
mov a2, a5
|
||||
|
||||
/* Set error value to 0 since we succeeded */
|
||||
movi a5, 0x0
|
||||
s32i a5, a4, 0
|
||||
|
||||
.global z_xtensa_user_string_nlen_fixup
|
||||
z_xtensa_user_string_nlen_fixup:
|
||||
retw
|
|
@@ -174,7 +174,8 @@ _restore_context:
|
|||
l32i a0, a1, ___xtensa_irq_bsa_t_scompare1_OFFSET
|
||||
wsr a0, SCOMPARE1
|
||||
#endif
|
||||
#if XCHAL_HAVE_THREADPTR && defined(CONFIG_THREAD_LOCAL_STORAGE)
|
||||
#if XCHAL_HAVE_THREADPTR && \
|
||||
(defined(CONFIG_USERSPACE) || defined(CONFIG_THREAD_LOCAL_STORAGE))
|
||||
l32i a0, a1, ___xtensa_irq_bsa_t_threadptr_OFFSET
|
||||
wur a0, THREADPTR
|
||||
#endif
|
||||
|
@@ -258,6 +259,16 @@ noflush:
|
|||
l32i a3, a2, ___xtensa_irq_bsa_t_a3_OFFSET
|
||||
s32i a1, a3, 0
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* Switch page tables */
|
||||
rsr a6, ZSR_CPU
|
||||
l32i a6, a6, ___cpu_t_current_OFFSET
|
||||
call4 z_xtensa_swap_update_page_tables
|
||||
|
||||
l32i a2, a3, 0
|
||||
l32i a2, a2, 0
|
||||
#endif
|
||||
|
||||
/* Switch stack pointer and restore. The jump to
|
||||
* _restore_context does not return as such, but we arrange
|
||||
* for the restored "next" address to be immediately after for
|
||||
|
@@ -347,6 +358,9 @@ _Level1RealVector:
|
|||
rsr.exccause a0
|
||||
#ifdef CONFIG_XTENSA_MMU
|
||||
beqi a0, EXCCAUSE_ITLB_MISS, _handle_tlb_miss_user
|
||||
#ifdef CONFIG_USERSPACE
|
||||
beqi a0, EXCCAUSE_SYSCALL, _syscall
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
addi a0, a0, -EXCCAUSE_DTLB_MISS
|
||||
beqz a0, _handle_tlb_miss_user
|
||||
rsr.exccause a0
|
||||
|
@@ -376,6 +390,11 @@ _handle_tlb_miss_user:
|
|||
l32i a0, a0, 0
|
||||
rsr a0, ZSR_A0SAVE
|
||||
rfe
|
||||
#ifdef CONFIG_USERSPACE
|
||||
_syscall:
|
||||
rsr a0, ZSR_A0SAVE
|
||||
j z_xtensa_do_syscall
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
#endif /* CONFIG_XTENSA_MMU */
|
||||
.popsection
|
||||
|
||||
|
|
|
@@ -26,6 +26,13 @@ void *xtensa_init_stack(struct k_thread *thread, int *stack_top,
|
|||
{
|
||||
void *ret;
|
||||
_xtensa_irq_stack_frame_a11_t *frame;
|
||||
#ifdef CONFIG_USERSPACE
|
||||
struct z_xtensa_thread_stack_header *header =
|
||||
(struct z_xtensa_thread_stack_header *)thread->stack_obj;
|
||||
|
||||
thread->arch.psp = header->privilege_stack +
|
||||
sizeof(header->privilege_stack);
|
||||
#endif
|
||||
|
||||
/* Not-a-cpu ID Ensures that the first time this is run, the
|
||||
* stack will be invalidated. That covers the edge case of
|
||||
|
@@ -48,11 +55,23 @@ void *xtensa_init_stack(struct k_thread *thread, int *stack_top,
|
|||
|
||||
(void)memset(frame, 0, bsasz);
|
||||
|
||||
frame->bsa.pc = (uintptr_t)z_thread_entry;
|
||||
frame->bsa.ps = PS_WOE | PS_UM | PS_CALLINC(1);
|
||||
#ifdef CONFIG_USERSPACE
|
||||
if ((thread->base.user_options & K_USER) == K_USER) {
|
||||
frame->bsa.pc = (uintptr_t)arch_user_mode_enter;
|
||||
} else {
|
||||
frame->bsa.pc = (uintptr_t)z_thread_entry;
|
||||
}
|
||||
#else
|
||||
frame->bsa.pc = (uintptr_t)z_thread_entry;
|
||||
#endif
|
||||
|
||||
#if XCHAL_HAVE_THREADPTR && defined(CONFIG_THREAD_LOCAL_STORAGE)
|
||||
#if XCHAL_HAVE_THREADPTR
|
||||
#ifdef CONFIG_THREAD_LOCAL_STORAGE
|
||||
frame->bsa.threadptr = thread->tls;
|
||||
#elif CONFIG_USERSPACE
|
||||
frame->bsa.threadptr = (uintptr_t)((thread->base.user_options & K_USER) ? thread : NULL);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* Arguments to z_thread_entry(). Remember these start at A6,
|
||||
|
@@ -471,3 +490,24 @@ void arch_spin_relax(void)
|
|||
#undef NOP1
|
||||
}
|
||||
#endif /* CONFIG_XTENSA_MORE_SPIN_RELAX_NOPS */
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
|
||||
void *p1, void *p2, void *p3)
|
||||
{
|
||||
struct k_thread *current = _current;
|
||||
size_t stack_end;
|
||||
|
||||
/* Transition will reset stack pointer to initial, discarding
|
||||
* any old context since this is a one-way operation
|
||||
*/
|
||||
stack_end = Z_STACK_PTR_ALIGN(current->stack_info.start +
|
||||
current->stack_info.size -
|
||||
current->stack_info.delta);
|
||||
|
||||
z_xtensa_userspace_enter(user_entry, p1, p2, p3,
|
||||
stack_end, current->stack_info.start);
|
||||
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
|
|
@@ -4,6 +4,7 @@
|
|||
*/
|
||||
#include <zephyr/kernel.h>
|
||||
#include <zephyr/cache.h>
|
||||
#include <zephyr/arch/xtensa/arch.h>
|
||||
#include <zephyr/arch/xtensa/xtensa_mmu.h>
|
||||
#include <zephyr/linker/linker-defs.h>
|
||||
#include <zephyr/logging/log.h>
|
||||
|
@@ -15,22 +16,24 @@
|
|||
#include <kernel_arch_func.h>
|
||||
#include <mmu.h>
|
||||
|
||||
/* Fixed data TLB way to map the page table */
|
||||
#define MMU_PTE_WAY 7
|
||||
|
||||
/* Fixed data TLB way to map VECBASE */
|
||||
#define MMU_VECBASE_WAY 8
|
||||
|
||||
/* Level 1 contains page table entries
|
||||
* necessary to map the page table itself.
|
||||
*/
|
||||
#define XTENSA_L1_PAGE_TABLE_ENTRIES 1024U
|
||||
|
||||
/* Size of level 1 page table.
|
||||
*/
|
||||
#define XTENSA_L1_PAGE_TABLE_SIZE (XTENSA_L1_PAGE_TABLE_ENTRIES * sizeof(uint32_t))
|
||||
|
||||
/* Level 2 contains page table entries
|
||||
* necessary to map the page table itself.
|
||||
*/
|
||||
#define XTENSA_L2_PAGE_TABLE_ENTRIES 1024U
|
||||
|
||||
/* Size of level 2 page table.
|
||||
*/
|
||||
#define XTENSA_L2_PAGE_TABLE_SIZE (XTENSA_L2_PAGE_TABLE_ENTRIES * sizeof(uint32_t))
|
||||
|
||||
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
|
||||
|
||||
BUILD_ASSERT(CONFIG_MMU_PAGE_SIZE == 0x1000,
|
||||
|
@@ -40,8 +43,18 @@ BUILD_ASSERT(CONFIG_MMU_PAGE_SIZE == 0x1000,
|
|||
* Level 1 page table has to be 4Kb to fit into one of the wired entries.
|
||||
* All entries are initialized as INVALID, so an attempt to read an unmapped
|
||||
* area will cause a double exception.
|
||||
*
|
||||
* Each memory domain contains its own l1 page table. The kernel l1 page table is
|
||||
* located at the index 0.
|
||||
*/
|
||||
uint32_t l1_page_table[XTENSA_L1_PAGE_TABLE_ENTRIES] __aligned(KB(4));
|
||||
static uint32_t l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES][XTENSA_L1_PAGE_TABLE_ENTRIES]
|
||||
__aligned(KB(4));
|
||||
|
||||
|
||||
/*
|
||||
* This is an alias for the page table set used by the kernel.
|
||||
*/
|
||||
uint32_t *z_xtensa_kernel_ptables = (uint32_t *)l1_page_table[0];
|
||||
|
||||
/*
|
||||
* Each level 2 table maps a 4MB memory range. It consists of 1024 entries, each one
|
||||
|
@@ -50,12 +63,41 @@ uint32_t l1_page_table[XTENSA_L1_PAGE_TABLE_ENTRIES] __aligned(KB(4));
|
|||
static uint32_t l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES][XTENSA_L2_PAGE_TABLE_ENTRIES]
|
||||
__aligned(KB(4));
|
||||
|
||||
/*
|
||||
* This additional variable tracks which l1 tables are in use. This is kept separate from
|
||||
* the tables to keep alignment easier.
|
||||
*
|
||||
* @note: The first bit is set because it is used for the kernel page tables.
|
||||
*/
|
||||
static ATOMIC_DEFINE(l1_page_table_track, CONFIG_XTENSA_MMU_NUM_L1_TABLES);
|
||||
|
||||
/*
|
||||
* This additional variable tracks which l2 tables are in use. This is kept separate from
|
||||
* the tables to keep alignment easier.
|
||||
*/
|
||||
static ATOMIC_DEFINE(l2_page_tables_track, CONFIG_XTENSA_MMU_NUM_L2_TABLES);
|
||||
|
||||
/*
|
||||
* Protects xtensa_domain_list and serializes access to page tables.
|
||||
*/
|
||||
static struct k_spinlock xtensa_mmu_lock;
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
|
||||
/*
|
||||
* Each domain has its own ASID. ASIDs range from 1 (kernel) to 255.
|
||||
* When a TLB entry matches, the hardware checks the ASID in the entry and finds
|
||||
* the corresponding position in the RASID register. This position is then
|
||||
* compared with the current ring (CRING) to check permissions.
|
||||
*/
|
||||
static uint8_t asid_count = 3;
|
||||
|
||||
/*
|
||||
* List with all active and initialized memory domains.
|
||||
*/
|
||||
static sys_slist_t xtensa_domain_list;
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
extern char _heap_end[];
|
||||
extern char _heap_start[];
|
||||
extern char __data_start[];
|
||||
|
@@ -100,18 +142,61 @@ static const struct xtensa_mmu_range mmu_zephyr_ranges[] = {
|
|||
{
|
||||
.start = (uint32_t)__text_region_start,
|
||||
.end = (uint32_t)__text_region_end,
|
||||
.attrs = Z_XTENSA_MMU_X | Z_XTENSA_MMU_CACHED_WB,
|
||||
.attrs = Z_XTENSA_MMU_X | Z_XTENSA_MMU_CACHED_WB | Z_XTENSA_MMU_MAP_SHARED,
|
||||
.name = "text",
|
||||
},
|
||||
/* Mark rodata segment cacheable, read only and non-executable */
|
||||
{
|
||||
.start = (uint32_t)__rodata_region_start,
|
||||
.end = (uint32_t)__rodata_region_end,
|
||||
.attrs = Z_XTENSA_MMU_CACHED_WB,
|
||||
.attrs = Z_XTENSA_MMU_CACHED_WB | Z_XTENSA_MMU_MAP_SHARED,
|
||||
.name = "rodata",
|
||||
},
|
||||
};
|
||||
|
||||
static inline uint32_t *thread_page_tables_get(const struct k_thread *thread)
|
||||
{
|
||||
#ifdef CONFIG_USERSPACE
|
||||
if ((thread->base.user_options & K_USER) != 0U) {
|
||||
return thread->arch.ptables;
|
||||
}
|
||||
#endif
|
||||
|
||||
return z_xtensa_kernel_ptables;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Check if the page table entry is illegal.
|
||||
*
|
||||
* @param[in] pte Page table entry.
|
||||
*/
|
||||
static inline bool is_pte_illegal(uint32_t pte)
|
||||
{
|
||||
uint32_t attr = pte & Z_XTENSA_PTE_ATTR_MASK;
|
||||
|
||||
/*
|
||||
* The ISA manual states only 12 and 14 are illegal values.
|
||||
* 13 and 15 are not. So we need to be more specific than simply
|
||||
* testing if bits 2 and 3 are set.
|
||||
*/
|
||||
return (attr == 12) || (attr == 14);
|
||||
}
|
||||
|
||||
/*
|
||||
* @brief Initialize all page table entries to be illegal.
|
||||
*
|
||||
* @param[in] ptable Pointer to the page table.
|
||||
* @param[in] num_entries Number of page table entries in the page table.
|
||||
*/
|
||||
static void init_page_table(uint32_t *ptable, size_t num_entries)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_entries; i++) {
|
||||
ptable[i] = Z_XTENSA_MMU_ILLEGAL;
|
||||
}
|
||||
}
|
||||
|
||||
static inline uint32_t *alloc_l2_table(void)
|
||||
{
|
||||
uint16_t idx;
|
||||
|
@@ -125,45 +210,86 @@ static inline uint32_t *alloc_l2_table(void)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Switch page tables
|
||||
*
|
||||
* This switches the page tables to the incoming ones (@a ptables).
|
||||
* Since data TLBs to L2 page tables are auto-filled, @a dtlb_inv
|
||||
* can be used to invalidate these data TLBs. @a cache_inv can be
|
||||
* set to true to invalidate the cache lines covering the page tables.
|
||||
*
|
||||
* @param[in] ptables Page tables to be switched to.
|
||||
* @param[in] dtlb_inv True to invalidate auto-filled data TLBs.
|
||||
* @param[in] cache_inv True to invalidate the cache lines covering the page tables.
|
||||
*/
|
||||
static ALWAYS_INLINE void switch_page_tables(uint32_t *ptables, bool dtlb_inv, bool cache_inv)
|
||||
{
|
||||
if (cache_inv) {
|
||||
sys_cache_data_invd_range((void *)ptables, XTENSA_L1_PAGE_TABLE_SIZE);
|
||||
sys_cache_data_invd_range((void *)l2_page_tables, sizeof(l2_page_tables));
|
||||
}
|
||||
|
||||
/* Invalidate data TLB to L1 page table */
|
||||
xtensa_dtlb_vaddr_invalidate((void *)Z_XTENSA_PAGE_TABLE_VADDR);
|
||||
|
||||
/* Now map the page table itself with the kernel ASID to prevent user threads
|
||||
* from tampering with it.
|
||||
*/
|
||||
xtensa_dtlb_entry_write_sync(
|
||||
Z_XTENSA_PTE((uint32_t)ptables, Z_XTENSA_KERNEL_RING, Z_XTENSA_PAGE_TABLE_ATTR),
|
||||
Z_XTENSA_TLB_ENTRY(Z_XTENSA_PAGE_TABLE_VADDR, Z_XTENSA_MMU_PTE_WAY));
|
||||
|
||||
if (dtlb_inv) {
|
||||
/* Since L2 page tables are auto-refilled,
|
||||
* invalidate all of them to flush the old entries out.
|
||||
*/
|
||||
xtensa_tlb_autorefill_invalidate();
|
||||
}
|
||||
}
|
||||
|
||||
static void map_memory_range(const uint32_t start, const uint32_t end,
|
||||
const uint32_t attrs)
|
||||
const uint32_t attrs, bool shared)
|
||||
{
|
||||
uint32_t page, *table;
|
||||
|
||||
for (page = start; page < end; page += CONFIG_MMU_PAGE_SIZE) {
|
||||
uint32_t pte = Z_XTENSA_PTE(page, Z_XTENSA_KERNEL_RING, attrs);
|
||||
uint32_t pte = Z_XTENSA_PTE(page,
|
||||
shared ? Z_XTENSA_SHARED_RING : Z_XTENSA_KERNEL_RING,
|
||||
attrs);
|
||||
uint32_t l2_pos = Z_XTENSA_L2_POS(page);
|
||||
uint32_t l1_pos = page >> 22;
|
||||
uint32_t l1_pos = Z_XTENSA_L1_POS(page);
|
||||
|
||||
if (l1_page_table[l1_pos] == Z_XTENSA_MMU_ILLEGAL) {
|
||||
if (is_pte_illegal(z_xtensa_kernel_ptables[l1_pos])) {
|
||||
table = alloc_l2_table();
|
||||
|
||||
__ASSERT(table != NULL, "There is no l2 page table available to "
|
||||
"map 0x%08x\n", page);
|
||||
|
||||
l1_page_table[l1_pos] =
|
||||
init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES);
|
||||
|
||||
z_xtensa_kernel_ptables[l1_pos] =
|
||||
Z_XTENSA_PTE((uint32_t)table, Z_XTENSA_KERNEL_RING,
|
||||
Z_XTENSA_MMU_CACHED_WT);
|
||||
Z_XTENSA_PAGE_TABLE_ATTR);
|
||||
}
|
||||
|
||||
table = (uint32_t *)(l1_page_table[l1_pos] & Z_XTENSA_PTE_PPN_MASK);
|
||||
table = (uint32_t *)(z_xtensa_kernel_ptables[l1_pos] & Z_XTENSA_PTE_PPN_MASK);
|
||||
table[l2_pos] = pte;
|
||||
}
|
||||
}
|
||||
|
||||
static void map_memory(const uint32_t start, const uint32_t end,
|
||||
const uint32_t attrs)
|
||||
const uint32_t attrs, bool shared)
|
||||
{
|
||||
map_memory_range(start, end, attrs);
|
||||
map_memory_range(start, end, attrs, shared);
|
||||
|
||||
#ifdef CONFIG_XTENSA_MMU_DOUBLE_MAP
|
||||
if (arch_xtensa_is_ptr_uncached((void *)start)) {
|
||||
map_memory_range(POINTER_TO_UINT(z_soc_cached_ptr((void *)start)),
|
||||
POINTER_TO_UINT(z_soc_cached_ptr((void *)end)),
|
||||
attrs | Z_XTENSA_MMU_CACHED_WB);
|
||||
attrs | Z_XTENSA_MMU_CACHED_WB, shared);
|
||||
} else if (arch_xtensa_is_ptr_cached((void *)start)) {
|
||||
map_memory_range(POINTER_TO_UINT(z_soc_uncached_ptr((void *)start)),
|
||||
POINTER_TO_UINT(z_soc_uncached_ptr((void *)end)), attrs);
|
||||
POINTER_TO_UINT(z_soc_uncached_ptr((void *)end)), attrs, shared);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
@@ -171,16 +297,19 @@ static void map_memory(const uint32_t start, const uint32_t end,
|
|||
static void xtensa_init_page_tables(void)
|
||||
{
|
||||
volatile uint8_t entry;
|
||||
uint32_t page;
|
||||
|
||||
for (page = 0; page < XTENSA_L1_PAGE_TABLE_ENTRIES; page++) {
|
||||
l1_page_table[page] = Z_XTENSA_MMU_ILLEGAL;
|
||||
}
|
||||
init_page_table(z_xtensa_kernel_ptables, XTENSA_L1_PAGE_TABLE_ENTRIES);
|
||||
atomic_set_bit(l1_page_table_track, 0);
|
||||
|
||||
for (entry = 0; entry < ARRAY_SIZE(mmu_zephyr_ranges); entry++) {
|
||||
const struct xtensa_mmu_range *range = &mmu_zephyr_ranges[entry];
|
||||
bool shared;
|
||||
uint32_t attrs;
|
||||
|
||||
map_memory(range->start, range->end, range->attrs);
|
||||
shared = !!(range->attrs & Z_XTENSA_MMU_MAP_SHARED);
|
||||
attrs = range->attrs & ~Z_XTENSA_MMU_MAP_SHARED;
|
||||
|
||||
map_memory(range->start, range->end, attrs, shared);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -198,8 +327,13 @@ static void xtensa_init_page_tables(void)
|
|||
#endif
|
||||
for (entry = 0; entry < xtensa_soc_mmu_ranges_num; entry++) {
|
||||
const struct xtensa_mmu_range *range = &xtensa_soc_mmu_ranges[entry];
|
||||
bool shared;
|
||||
uint32_t attrs;
|
||||
|
||||
map_memory(range->start, range->end, range->attrs);
|
||||
shared = !!(range->attrs & Z_XTENSA_MMU_MAP_SHARED);
|
||||
attrs = range->attrs & ~Z_XTENSA_MMU_MAP_SHARED;
|
||||
|
||||
map_memory(range->start, range->end, attrs, shared);
|
||||
}
|
||||
#if defined(__GNUC__)
|
||||
#pragma GCC diagnostic pop
|
||||
|
@@ -231,6 +365,9 @@ void z_xtensa_mmu_init(void)
|
|||
/* Set the page table location in the virtual address */
|
||||
xtensa_ptevaddr_set((void *)Z_XTENSA_PTEVADDR);
|
||||
|
||||
/* Set rasid */
|
||||
xtensa_rasid_asid_set(Z_XTENSA_MMU_SHARED_ASID, Z_XTENSA_SHARED_RING);
|
||||
|
||||
/* Next step is to invalidate the tlb entry that contains the top level
|
||||
* page table. This way we don't cause a multi-hit exception.
|
||||
*/
|
||||
|
@@ -243,9 +380,9 @@ void z_xtensa_mmu_init(void)
|
|||
* Let's use one of the wired entries, so we never have a TLB miss for
|
||||
* the top level table.
|
||||
*/
|
||||
xtensa_dtlb_entry_write(Z_XTENSA_PTE((uint32_t)l1_page_table, Z_XTENSA_KERNEL_RING,
|
||||
Z_XTENSA_MMU_CACHED_WT),
|
||||
Z_XTENSA_TLB_ENTRY(Z_XTENSA_PAGE_TABLE_VADDR, MMU_PTE_WAY));
|
||||
xtensa_dtlb_entry_write(Z_XTENSA_PTE((uint32_t)z_xtensa_kernel_ptables,
|
||||
Z_XTENSA_KERNEL_RING, Z_XTENSA_PAGE_TABLE_ATTR),
|
||||
Z_XTENSA_TLB_ENTRY(Z_XTENSA_PAGE_TABLE_VADDR, Z_XTENSA_MMU_PTE_WAY));
|
||||
|
||||
/* Before invalidating the text region in TLB entry 6, we need to
|
||||
* map the exception vector into one of the wired entries to avoid
|
||||
|
@@ -297,7 +434,7 @@ void z_xtensa_mmu_init(void)
|
|||
xtensa_dtlb_entry_write(
|
||||
Z_XTENSA_PTE((uint32_t)vecbase,
|
||||
Z_XTENSA_KERNEL_RING, Z_XTENSA_MMU_CACHED_WB),
|
||||
Z_XTENSA_TLB_ENTRY((uint32_t)vecbase, MMU_VECBASE_WAY));
|
||||
Z_XTENSA_TLB_ENTRY((uint32_t)vecbase, Z_XTENSA_MMU_VECBASE_WAY));
|
||||
|
||||
/*
|
||||
* Pre-load TLB for vecbase so exception handling won't result
|
||||
|
@@ -325,6 +462,12 @@ void z_xtensa_mmu_init(void)
|
|||
xtensa_dtlb_entry_invalidate_sync(Z_XTENSA_TLB_ENTRY(Z_XTENSA_PTEVADDR + MB(4), 3));
|
||||
xtensa_itlb_entry_invalidate_sync(Z_XTENSA_TLB_ENTRY(Z_XTENSA_PTEVADDR + MB(4), 3));
|
||||
|
||||
/*
|
||||
* Clear out THREADPTR as we use it to indicate
|
||||
* whether we are in user mode or not.
|
||||
*/
|
||||
XTENSA_WUR("THREADPTR", 0);
|
||||
|
||||
arch_xtensa_mmu_post_init(_current_cpu->id == 0);
|
||||
}
|
||||
|
||||
|
@@ -351,32 +494,121 @@ __weak void arch_reserved_pages_update(void)
|
|||
}
|
||||
#endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */
|
||||
|
||||
static bool l2_page_table_map(void *vaddr, uintptr_t phys, uint32_t flags)
|
||||
static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys,
|
||||
uint32_t flags, bool is_user)
|
||||
{
|
||||
uint32_t l1_pos = (uint32_t)vaddr >> 22;
|
||||
uint32_t pte = Z_XTENSA_PTE(phys, Z_XTENSA_KERNEL_RING, flags);
|
||||
uint32_t l2_pos = Z_XTENSA_L2_POS((uint32_t)vaddr);
|
||||
uint32_t *table;
|
||||
|
||||
if (l1_page_table[l1_pos] == Z_XTENSA_MMU_ILLEGAL) {
|
||||
sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
|
||||
|
||||
if (is_pte_illegal(l1_table[l1_pos])) {
|
||||
table = alloc_l2_table();
|
||||
|
||||
if (table == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
l1_page_table[l1_pos] = Z_XTENSA_PTE((uint32_t)table, Z_XTENSA_KERNEL_RING,
|
||||
Z_XTENSA_MMU_CACHED_WT);
|
||||
init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES);
|
||||
|
||||
l1_table[l1_pos] = Z_XTENSA_PTE((uint32_t)table, Z_XTENSA_KERNEL_RING,
|
||||
Z_XTENSA_PAGE_TABLE_ATTR);
|
||||
|
||||
sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
|
||||
}
|
||||
|
||||
table = (uint32_t *)(l1_page_table[l1_pos] & Z_XTENSA_PTE_PPN_MASK);
|
||||
table[l2_pos] = pte;
|
||||
table = (uint32_t *)(l1_table[l1_pos] & Z_XTENSA_PTE_PPN_MASK);
|
||||
table[l2_pos] = Z_XTENSA_PTE(phys, is_user ? Z_XTENSA_USER_RING : Z_XTENSA_KERNEL_RING,
|
||||
flags);
|
||||
|
||||
if ((flags & Z_XTENSA_MMU_X) == Z_XTENSA_MMU_X) {
|
||||
sys_cache_data_flush_range((void *)&table[l2_pos], sizeof(table[0]));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline void __arch_mem_map(void *va, uintptr_t pa, uint32_t xtensa_flags, bool is_user)
|
||||
{
|
||||
bool ret;
|
||||
void *vaddr, *vaddr_uc;
|
||||
uintptr_t paddr, paddr_uc;
|
||||
uint32_t flags, flags_uc;
|
||||
|
||||
if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
|
||||
if (arch_xtensa_is_ptr_cached(va)) {
|
||||
vaddr = va;
|
||||
vaddr_uc = arch_xtensa_uncached_ptr(va);
|
||||
} else {
|
||||
vaddr = arch_xtensa_cached_ptr(va);
|
||||
vaddr_uc = va;
|
||||
}
|
||||
|
||||
if (arch_xtensa_is_ptr_cached((void *)pa)) {
|
||||
paddr = pa;
|
||||
paddr_uc = (uintptr_t)arch_xtensa_uncached_ptr((void *)pa);
|
||||
} else {
|
||||
paddr = (uintptr_t)arch_xtensa_cached_ptr((void *)pa);
|
||||
paddr_uc = pa;
|
||||
}
|
||||
|
||||
flags_uc = (xtensa_flags & ~Z_XTENSA_PTE_ATTR_CACHED_MASK);
|
||||
flags = flags_uc | Z_XTENSA_MMU_CACHED_WB;
|
||||
} else {
|
||||
vaddr = va;
|
||||
paddr = pa;
|
||||
flags = xtensa_flags;
|
||||
}
|
||||
|
||||
ret = l2_page_table_map(z_xtensa_kernel_ptables, (void *)vaddr, paddr,
|
||||
flags, is_user);
|
||||
__ASSERT(ret, "Virtual address (%p) already mapped", va);
|
||||
|
||||
if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP) && ret) {
|
||||
ret = l2_page_table_map(z_xtensa_kernel_ptables, (void *)vaddr_uc, paddr_uc,
|
||||
flags_uc, is_user);
|
||||
__ASSERT(ret, "Virtual address (%p) already mapped", vaddr_uc);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USERSPACE
|
||||
ARG_UNUSED(ret);
|
||||
#else
|
||||
if (ret) {
|
||||
sys_snode_t *node;
|
||||
struct arch_mem_domain *domain;
|
||||
k_spinlock_key_t key;
|
||||
|
||||
key = k_spin_lock(&z_mem_domain_lock);
|
||||
SYS_SLIST_FOR_EACH_NODE(&xtensa_domain_list, node) {
|
||||
domain = CONTAINER_OF(node, struct arch_mem_domain, node);
|
||||
|
||||
ret = l2_page_table_map(domain->ptables, (void *)vaddr, paddr,
|
||||
flags, is_user);
|
||||
__ASSERT(ret, "Virtual address (%p) already mapped for domain %p",
|
||||
vaddr, domain);
|
||||
|
||||
if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP) && ret) {
|
||||
ret = l2_page_table_map(domain->ptables,
|
||||
(void *)vaddr_uc, paddr_uc,
|
||||
flags_uc, is_user);
|
||||
__ASSERT(ret, "Virtual address (%p) already mapped for domain %p",
|
||||
vaddr_uc, domain);
|
||||
}
|
||||
}
|
||||
k_spin_unlock(&z_mem_domain_lock, key);
|
||||
}
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
if ((xtensa_flags & Z_XTENSA_MMU_X) == Z_XTENSA_MMU_X) {
|
||||
xtensa_itlb_vaddr_invalidate(vaddr);
|
||||
}
|
||||
xtensa_dtlb_vaddr_invalidate(vaddr);
|
||||
return true;
|
||||
|
||||
if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
|
||||
if (xtensa_flags & Z_XTENSA_MMU_X) {
|
||||
xtensa_itlb_vaddr_invalidate(vaddr_uc);
|
||||
}
|
||||
xtensa_dtlb_vaddr_invalidate(vaddr_uc);
|
||||
}
|
||||
}
|
||||
|
||||
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
|
||||
|
@@ -385,7 +617,8 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
|
|||
uint32_t pa = (uint32_t)phys;
|
||||
uint32_t rem_size = (uint32_t)size;
|
||||
uint32_t xtensa_flags = 0;
|
||||
int key;
|
||||
k_spinlock_key_t key;
|
||||
bool is_user;
|
||||
|
||||
if (size == 0) {
|
||||
LOG_ERR("Cannot map physical memory at 0x%08X: invalid "
|
||||
|
@@ -414,63 +647,130 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
|
|||
xtensa_flags |= Z_XTENSA_MMU_X;
|
||||
}
|
||||
|
||||
key = arch_irq_lock();
|
||||
is_user = (flags & K_MEM_PERM_USER) == K_MEM_PERM_USER;
|
||||
|
||||
key = k_spin_lock(&xtensa_mmu_lock);
|
||||
|
||||
while (rem_size > 0) {
|
||||
bool ret = l2_page_table_map((void *)va, pa, xtensa_flags);
|
||||
__arch_mem_map((void *)va, pa, xtensa_flags, is_user);
|
||||
|
||||
ARG_UNUSED(ret);
|
||||
__ASSERT(ret, "Virtual address (%u) already mapped", (uint32_t)virt);
|
||||
rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
|
||||
va += KB(4);
|
||||
pa += KB(4);
|
||||
}
|
||||
|
||||
arch_irq_unlock(key);
|
||||
k_spin_unlock(&xtensa_mmu_lock, key);
|
||||
}
|
||||
|
||||
static void l2_page_table_unmap(void *vaddr)
|
||||
/**
|
||||
* @return True if the page is executable (and thus the ITLB needs invalidating),
|
||||
* false if not.
|
||||
*/
|
||||
static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr)
|
||||
{
|
||||
uint32_t l1_pos = (uint32_t)vaddr >> 22;
|
||||
uint32_t l2_pos = Z_XTENSA_L2_POS((uint32_t)vaddr);
|
||||
uint32_t *table;
|
||||
uint32_t *l2_table;
|
||||
uint32_t table_pos;
|
||||
bool exec;
|
||||
|
||||
if (l1_page_table[l1_pos] == Z_XTENSA_MMU_ILLEGAL) {
|
||||
return;
|
||||
sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
|
||||
|
||||
if (is_pte_illegal(l1_table[l1_pos])) {
|
||||
/* We shouldn't be unmapping an illegal entry.
|
||||
* Return true so that we can invalidate ITLB too.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
||||
exec = l1_page_table[l1_pos] & Z_XTENSA_MMU_X;
|
||||
exec = l1_table[l1_pos] & Z_XTENSA_MMU_X;
|
||||
|
||||
table = (uint32_t *)(l1_page_table[l1_pos] & Z_XTENSA_PTE_PPN_MASK);
|
||||
table[l2_pos] = Z_XTENSA_MMU_ILLEGAL;
|
||||
l2_table = (uint32_t *)(l1_table[l1_pos] & Z_XTENSA_PTE_PPN_MASK);
|
||||
|
||||
sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
|
||||
|
||||
l2_table[l2_pos] = Z_XTENSA_MMU_ILLEGAL;
|
||||
|
||||
sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
|
||||
|
||||
for (l2_pos = 0; l2_pos < XTENSA_L2_PAGE_TABLE_ENTRIES; l2_pos++) {
|
||||
if (table[l2_pos] != Z_XTENSA_MMU_ILLEGAL) {
|
||||
if (!is_pte_illegal(l2_table[l2_pos])) {
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
l1_page_table[l1_pos] = Z_XTENSA_MMU_ILLEGAL;
|
||||
table_pos = (table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES);
|
||||
l1_table[l1_pos] = Z_XTENSA_MMU_ILLEGAL;
|
||||
sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
|
||||
|
||||
table_pos = (l2_table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES);
|
||||
atomic_clear_bit(l2_page_tables_track, table_pos);
|
||||
|
||||
/* Need to invalidate L2 page table as it is no longer valid. */
|
||||
xtensa_dtlb_vaddr_invalidate((void *)table);
|
||||
xtensa_dtlb_vaddr_invalidate((void *)l2_table);
|
||||
|
||||
end:
|
||||
if (exec) {
|
||||
return exec;
|
||||
}
|
||||
|
||||
static inline void __arch_mem_unmap(void *va)
|
||||
{
|
||||
bool is_exec;
|
||||
void *vaddr, *vaddr_uc;
|
||||
|
||||
if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
|
||||
if (arch_xtensa_is_ptr_cached(va)) {
|
||||
vaddr = va;
|
||||
vaddr_uc = arch_xtensa_uncached_ptr(va);
|
||||
} else {
|
||||
vaddr = arch_xtensa_cached_ptr(va);
|
||||
vaddr_uc = va;
|
||||
}
|
||||
} else {
|
||||
vaddr = va;
|
||||
}
|
||||
|
||||
is_exec = l2_page_table_unmap(z_xtensa_kernel_ptables, (void *)vaddr);
|
||||
|
||||
if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
|
||||
(void)l2_page_table_unmap(z_xtensa_kernel_ptables, (void *)vaddr_uc);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
sys_snode_t *node;
|
||||
struct arch_mem_domain *domain;
|
||||
k_spinlock_key_t key;
|
||||
|
||||
key = k_spin_lock(&z_mem_domain_lock);
|
||||
SYS_SLIST_FOR_EACH_NODE(&xtensa_domain_list, node) {
|
||||
domain = CONTAINER_OF(node, struct arch_mem_domain, node);
|
||||
|
||||
(void)l2_page_table_unmap(domain->ptables, (void *)vaddr);
|
||||
|
||||
if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
|
||||
(void)l2_page_table_unmap(domain->ptables, (void *)vaddr_uc);
|
||||
}
|
||||
}
|
||||
k_spin_unlock(&z_mem_domain_lock, key);
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
if (is_exec) {
|
||||
xtensa_itlb_vaddr_invalidate(vaddr);
|
||||
}
|
||||
xtensa_dtlb_vaddr_invalidate(vaddr);
|
||||
|
||||
if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
|
||||
if (is_exec) {
|
||||
xtensa_itlb_vaddr_invalidate(vaddr_uc);
|
||||
}
|
||||
xtensa_dtlb_vaddr_invalidate(vaddr_uc);
|
||||
}
|
||||
}
|
||||
|
||||
void arch_mem_unmap(void *addr, size_t size)
|
||||
{
|
||||
uint32_t va = (uint32_t)addr;
|
||||
uint32_t rem_size = (uint32_t)size;
|
||||
int key;
|
||||
k_spinlock_key_t key;
|
||||
|
||||
if (addr == NULL) {
|
||||
LOG_ERR("Cannot unmap NULL pointer");
|
||||
|
@@ -482,13 +782,363 @@ void arch_mem_unmap(void *addr, size_t size)
|
|||
return;
|
||||
}
|
||||
|
||||
key = arch_irq_lock();
|
||||
key = k_spin_lock(&xtensa_mmu_lock);
|
||||
|
||||
while (rem_size > 0) {
|
||||
l2_page_table_unmap((void *)va);
|
||||
__arch_mem_unmap((void *)va);
|
||||
|
||||
rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
|
||||
va += KB(4);
|
||||
}
|
||||
|
||||
arch_irq_unlock(key);
|
||||
k_spin_unlock(&xtensa_mmu_lock, key);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
|
||||
static inline uint32_t *alloc_l1_table(void)
|
||||
{
|
||||
uint16_t idx;
|
||||
|
||||
for (idx = 0; idx < CONFIG_XTENSA_MMU_NUM_L1_TABLES; idx++) {
|
||||
if (!atomic_test_and_set_bit(l1_page_table_track, idx)) {
|
||||
return (uint32_t *)&l1_page_table[idx];
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static uint32_t *dup_table(uint32_t *source_table)
|
||||
{
|
||||
uint16_t i, j;
|
||||
uint32_t *dst_table = alloc_l1_table();
|
||||
|
||||
if (!dst_table) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < XTENSA_L1_PAGE_TABLE_ENTRIES; i++) {
|
||||
uint32_t *l2_table, *src_l2_table;
|
||||
|
||||
if (is_pte_illegal(source_table[i])) {
|
||||
dst_table[i] = Z_XTENSA_MMU_ILLEGAL;
|
||||
continue;
|
||||
}
|
||||
|
||||
src_l2_table = (uint32_t *)(source_table[i] & Z_XTENSA_PTE_PPN_MASK);
|
||||
l2_table = alloc_l2_table();
|
||||
if (l2_table == NULL) {
|
||||
goto err;
|
||||
}
|
||||
|
||||
for (j = 0; j < XTENSA_L2_PAGE_TABLE_ENTRIES; j++) {
|
||||
l2_table[j] = src_l2_table[j];
|
||||
}
|
||||
|
||||
/* The page table uses the kernel ASID because we don't
|
||||
* want user threads to manipulate it.
|
||||
*/
|
||||
dst_table[i] = Z_XTENSA_PTE((uint32_t)l2_table, Z_XTENSA_KERNEL_RING,
|
||||
Z_XTENSA_PAGE_TABLE_ATTR);
|
||||
|
||||
sys_cache_data_flush_range((void *)l2_table, XTENSA_L2_PAGE_TABLE_SIZE);
|
||||
}
|
||||
|
||||
sys_cache_data_flush_range((void *)dst_table, XTENSA_L1_PAGE_TABLE_SIZE);
|
||||
|
||||
return dst_table;
|
||||
|
||||
err:
|
||||
/* TODO: Clean up failed allocation */
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int arch_mem_domain_init(struct k_mem_domain *domain)
|
||||
{
|
||||
uint32_t *ptables;
|
||||
k_spinlock_key_t key;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* For now, let's just assert if we have reached the maximum number
|
||||
* of available ASIDs.
|
||||
*/
|
||||
__ASSERT(asid_count < (Z_XTENSA_MMU_SHARED_ASID), "Reached maximum of ASID available");
|
||||
|
||||
key = k_spin_lock(&xtensa_mmu_lock);
|
||||
ptables = dup_table(z_xtensa_kernel_ptables);
|
||||
|
||||
if (ptables == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
domain->arch.ptables = ptables;
|
||||
domain->arch.asid = ++asid_count;
|
||||
|
||||
sys_slist_append(&xtensa_domain_list, &domain->arch.node);
|
||||
|
||||
ret = 0;
|
||||
|
||||
err:
|
||||
k_spin_unlock(&xtensa_mmu_lock, key);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int region_map_update(uint32_t *ptables, uintptr_t start,
|
||||
size_t size, uint32_t ring, uint32_t flags)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
|
||||
uint32_t *l2_table, pte;
|
||||
uint32_t page = start + offset;
|
||||
uint32_t l1_pos = page >> 22;
|
||||
uint32_t l2_pos = Z_XTENSA_L2_POS(page);
|
||||
|
||||
/* Make sure we grab a fresh copy of L1 page table */
|
||||
sys_cache_data_invd_range((void *)&ptables[l1_pos], sizeof(ptables[0]));
|
||||
|
||||
l2_table = (uint32_t *)(ptables[l1_pos] & Z_XTENSA_PTE_PPN_MASK);
|
||||
|
||||
sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
|
||||
|
||||
pte = Z_XTENSA_PTE_RING_SET(l2_table[l2_pos], ring);
|
||||
pte = Z_XTENSA_PTE_ATTR_SET(pte, flags);
|
||||
|
||||
l2_table[l2_pos] = pte;
|
||||
|
||||
sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
|
||||
|
||||
xtensa_dtlb_vaddr_invalidate(
|
||||
(void *)(pte & Z_XTENSA_PTE_PPN_MASK));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int update_region(uint32_t *ptables, uintptr_t start,
|
||||
size_t size, uint32_t ring, uint32_t flags)
|
||||
{
|
||||
int ret;
|
||||
k_spinlock_key_t key;
|
||||
|
||||
key = k_spin_lock(&xtensa_mmu_lock);
|
||||
|
||||
#ifdef CONFIG_XTENSA_MMU_DOUBLE_MAP
|
||||
uintptr_t va, va_uc;
|
||||
uint32_t new_flags, new_flags_uc;
|
||||
|
||||
if (arch_xtensa_is_ptr_cached((void *)start)) {
|
||||
va = start;
|
||||
va_uc = (uintptr_t)arch_xtensa_uncached_ptr((void *)start);
|
||||
} else {
|
||||
va = (uintptr_t)arch_xtensa_cached_ptr((void *)start);
|
||||
va_uc = start;
|
||||
}
|
||||
|
||||
	new_flags_uc = (flags & ~Z_XTENSA_PTE_ATTR_CACHED_MASK);
	new_flags = new_flags_uc | Z_XTENSA_MMU_CACHED_WB;

	ret = region_map_update(ptables, va, size, ring, new_flags);

	if (ret == 0) {
		ret = region_map_update(ptables, va_uc, size, ring, new_flags_uc);
	}
#else
	ret = region_map_update(ptables, start, size, ring, flags);
#endif /* CONFIG_XTENSA_MMU_DOUBLE_MAP */

	k_spin_unlock(&xtensa_mmu_lock, key);

	return ret;
}

static inline int reset_region(uint32_t *ptables, uintptr_t start, size_t size)
{
	return update_region(ptables, start, size, Z_XTENSA_KERNEL_RING, Z_XTENSA_MMU_W);
}

void xtensa_set_stack_perms(struct k_thread *thread)
{
	if ((thread->base.user_options & K_USER) == 0) {
		return;
	}

	update_region(thread_page_tables_get(thread),
		      thread->stack_info.start, thread->stack_info.size,
		      Z_XTENSA_USER_RING, Z_XTENSA_MMU_W | Z_XTENSA_MMU_CACHED_WB);
}

void xtensa_user_stack_perms(struct k_thread *thread)
{
	(void)memset((void *)thread->stack_info.start, 0xAA,
		     thread->stack_info.size - thread->stack_info.delta);

	update_region(thread_page_tables_get(thread),
		      thread->stack_info.start, thread->stack_info.size,
		      Z_XTENSA_USER_RING, Z_XTENSA_MMU_W | Z_XTENSA_MMU_CACHED_WB);
}

int arch_mem_domain_max_partitions_get(void)
{
	return CONFIG_MAX_DOMAIN_PARTITIONS;
}

int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				     uint32_t partition_id)
{
	struct k_mem_partition *partition = &domain->partitions[partition_id];

	/* Reset the partition's region back to defaults */
	return reset_region(domain->arch.ptables, partition->start,
			    partition->size);
}

int arch_mem_domain_partition_add(struct k_mem_domain *domain,
				  uint32_t partition_id)
{
	uint32_t ring = domain->arch.asid == 0 ? Z_XTENSA_KERNEL_RING : Z_XTENSA_USER_RING;
	struct k_mem_partition *partition = &domain->partitions[partition_id];

	return update_region(domain->arch.ptables, partition->start,
			     partition->size, ring, partition->attr);
}

int arch_mem_domain_thread_add(struct k_thread *thread)
{
	int ret = 0;
	bool is_user, is_migration;
	uint32_t *old_ptables;
	struct k_mem_domain *domain;

	old_ptables = thread->arch.ptables;
	domain = thread->mem_domain_info.mem_domain;
	thread->arch.ptables = domain->arch.ptables;

	is_user = (thread->base.user_options & K_USER) != 0;
	is_migration = (old_ptables != NULL) && is_user;

	/* Give access to the thread's stack in its new
	 * memory domain if it is migrating.
	 */
	if (is_migration) {
		xtensa_set_stack_perms(thread);
	}

	if (is_migration) {
		ret = reset_region(old_ptables,
				   thread->stack_info.start,
				   thread->stack_info.size);
	}

	return ret;
}

int arch_mem_domain_thread_remove(struct k_thread *thread)
{
	struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;

	if ((thread->base.user_options & K_USER) == 0) {
		return 0;
	}

	if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
		/* Thread is migrating to another memory domain and not
		 * exiting for good; we weren't called from
		 * z_thread_abort(). Resetting the stack region will
		 * take place in the forthcoming thread_add() call.
		 */
		return 0;
	}

	/* Restore permissions on the thread's stack area since it is no
	 * longer a member of the domain.
	 */
	return reset_region(domain->arch.ptables,
			    thread->stack_info.start,
			    thread->stack_info.size);
}

static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool write)
{
	uint8_t asid_ring;
	uint32_t rasid, pte, *l2_table;
	uint32_t l1_pos = page >> 22;
	uint32_t l2_pos = Z_XTENSA_L2_POS(page);

	if (is_pte_illegal(ptables[l1_pos])) {
		return false;
	}

	l2_table = (uint32_t *)(ptables[l1_pos] & Z_XTENSA_PTE_PPN_MASK);
	pte = l2_table[l2_pos];

	if (is_pte_illegal(pte)) {
		return false;
	}

	asid_ring = 0;
	rasid = xtensa_rasid_get();
	for (uint32_t i = 0; i < 4; i++) {
		if (Z_XTENSA_PTE_ASID_GET(pte, rasid) ==
		    Z_XTENSA_RASID_ASID_GET(rasid, i)) {
			asid_ring = i;
			break;
		}
	}

	if (ring > asid_ring) {
		return false;
	}

	if (write) {
		return (Z_XTENSA_PTE_ATTR_GET((pte)) & Z_XTENSA_MMU_W) != 0;
	}

	return true;
}

int arch_buffer_validate(void *addr, size_t size, int write)
{
	int ret = 0;
	uint8_t *virt;
	size_t aligned_size;
	const struct k_thread *thread = _current;
	uint32_t *ptables = thread_page_tables_get(thread);
	uint8_t ring = ((thread->base.user_options & K_USER) != 0) ?
		Z_XTENSA_USER_RING : Z_XTENSA_KERNEL_RING;

	/* addr/size arbitrary, fix this up into an aligned region */
	k_mem_region_align((uintptr_t *)&virt, &aligned_size,
			   (uintptr_t)addr, size, CONFIG_MMU_PAGE_SIZE);

	for (size_t offset = 0; offset < aligned_size;
	     offset += CONFIG_MMU_PAGE_SIZE) {
		if (!page_validate(ptables, (uint32_t)(virt + offset), ring, write)) {
			ret = -1;
			break;
		}
	}

	return ret;
}

void z_xtensa_swap_update_page_tables(struct k_thread *incoming)
{
	uint32_t *ptables = incoming->arch.ptables;
	struct arch_mem_domain *domain =
		&(incoming->mem_domain_info.mem_domain->arch);

	/* Let's set the ASID for the incoming thread */
	if ((incoming->base.user_options & K_USER) != 0) {
		xtensa_rasid_asid_set(domain->asid, Z_XTENSA_USER_RING);
	}

	switch_page_tables(ptables, true, false);
}

#endif /* CONFIG_USERSPACE */
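
For context, here is a minimal, hedged sketch (not part of this patch) of how application code typically exercises these hooks from the kernel API side: defining a partition, initializing a memory domain, and attaching a user thread, which in turn calls arch_mem_domain_partition_add() and arch_mem_domain_thread_add() above. The partition and variable names are illustrative only.

#include <zephyr/kernel.h>
#include <zephyr/app_memory/app_memdomain.h>

/* Illustrative partition holding data shared with user threads. */
K_APPMEM_PARTITION_DEFINE(app_part);
K_APP_DMEM(app_part) volatile int shared_counter;

static struct k_mem_domain app_domain;

static void setup_user_domain(k_tid_t tid)
{
	struct k_mem_partition *parts[] = { &app_part };

	/* Ends up calling arch_mem_domain_partition_add() per partition. */
	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);

	/* Ends up calling arch_mem_domain_thread_add() for the thread. */
	k_mem_domain_add_thread(&app_domain, tid);
}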
@ -174,6 +174,13 @@ static inline bool arch_is_in_isr(void)
	return arch_curr_cpu()->nested != 0U;
}

#ifdef CONFIG_USERSPACE
extern void z_xtensa_userspace_enter(k_thread_entry_t user_entry,
				     void *p1, void *p2, void *p3,
				     uintptr_t stack_end,
				     uintptr_t stack_start);
#endif /* CONFIG_USERSPACE */

#ifdef __cplusplus
}
#endif

@ -2,4 +2,18 @@
 * Copyright (c) 2021 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
/* Empty File */
#ifndef ZEPHYR_ARCH_XTENSA_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_XTENSA_INCLUDE_OFFSETS_SHORT_ARCH_H_

#define _thread_offset_to_flags \
	(___thread_t_arch_OFFSET + ___thread_arch_t_flags_OFFSET)

#ifdef CONFIG_USERSPACE
#define _thread_offset_to_psp \
	(___thread_t_arch_OFFSET + ___thread_arch_t_psp_OFFSET)

#define _thread_offset_to_ptables \
	(___thread_t_arch_OFFSET + ___thread_arch_t_ptables_OFFSET)
#endif /* CONFIG_USERSPACE */

#endif /* ZEPHYR_ARCH_XTENSA_INCLUDE_OFFSETS_SHORT_ARCH_H_ */

@ -176,7 +176,8 @@
	rsr.SCOMPARE1 a0
	s32i a0, a1, ___xtensa_irq_bsa_t_scompare1_OFFSET
#endif
#if XCHAL_HAVE_THREADPTR && defined(CONFIG_THREAD_LOCAL_STORAGE)
#if XCHAL_HAVE_THREADPTR && \
	(defined(CONFIG_USERSPACE) || defined(CONFIG_THREAD_LOCAL_STORAGE))
	rur.THREADPTR a0
	s32i a0, a1, ___xtensa_irq_bsa_t_threadptr_OFFSET
#endif

@ -409,6 +410,16 @@ _xstack_returned_\@:
	l32i a2, a1, 0
	l32i a2, a2, ___xtensa_irq_bsa_t_scratch_OFFSET

#if XCHAL_HAVE_THREADPTR && defined(CONFIG_USERSPACE)
	/* Clear the threadptr because it is used to check whether a
	 * thread is running in user mode. Since we are handling an
	 * interrupt, we don't want the system to think it might be
	 * running in user mode.
	 */
	movi.n a0, 0
	wur.THREADPTR a0
#endif /* XCHAL_HAVE_THREADPTR && CONFIG_USERSPACE */

	/* There's a gotcha with level 1 handlers: the INTLEVEL field
	 * gets left at zero and not set like high priority interrupts
	 * do. That works fine for exceptions, but for L1 interrupts,

@ -23,6 +23,8 @@
#include <zephyr/arch/arc/syscall.h>
#elif defined(CONFIG_RISCV)
#include <zephyr/arch/riscv/syscall.h>
#elif defined(CONFIG_XTENSA)
#include <zephyr/arch/xtensa/syscall.h>
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_SYSCALL_H_ */

@ -29,6 +29,7 @@
#include <zephyr/arch/common/addr_types.h>
#include <zephyr/arch/xtensa/gdbstub.h>
#include <zephyr/debug/sparse.h>
#include <zephyr/sys/slist.h>

#include <zephyr/arch/xtensa/xtensa_mmu.h>

@ -47,6 +48,15 @@
extern "C" {
#endif

struct arch_mem_domain {
#ifdef CONFIG_XTENSA_MMU
	uint32_t *ptables __aligned(CONFIG_MMU_PAGE_SIZE);
	uint8_t asid;
	bool dirty;
#endif
	sys_snode_t node;
};

extern void xtensa_arch_except(int reason_p);

#define ARCH_EXCEPT(reason_p) do { \

238	include/zephyr/arch/xtensa/syscall.h	Normal file

@ -0,0 +1,238 @@
/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Xtensa specific syscall header
 *
 * This header contains the Xtensa specific syscall interface. It is
 * included by the syscall interface architecture-abstraction header
 * (include/zephyr/arch/syscall.h).
 */

#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_

#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE

#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/linker/sections.h>
#include <zephyr/sys/util_macro.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
uintptr_t arch_syscall_invoke6_helper(uintptr_t arg1, uintptr_t arg2,
				      uintptr_t arg3, uintptr_t arg4,
				      uintptr_t arg5, uintptr_t arg6,
				      uintptr_t call_id);

uintptr_t arch_syscall_invoke5_helper(uintptr_t arg1, uintptr_t arg2,
				      uintptr_t arg3, uintptr_t arg4,
				      uintptr_t arg5,
				      uintptr_t call_id);

uintptr_t arch_syscall_invoke4_helper(uintptr_t arg1, uintptr_t arg2,
				      uintptr_t arg3, uintptr_t arg4,
				      uintptr_t call_id);

uintptr_t arch_syscall_invoke3_helper(uintptr_t arg1, uintptr_t arg2,
				      uintptr_t arg3, uintptr_t call_id);

uintptr_t arch_syscall_invoke2_helper(uintptr_t arg1, uintptr_t arg2,
				      uintptr_t call_id);

uintptr_t arch_syscall_invoke1_helper(uintptr_t arg1, uintptr_t call_id);

uintptr_t arch_syscall_invoke0_helper(uintptr_t call_id);
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */

/**
 * We follow the Linux Xtensa syscall ABI:
 *
 * syscall number	arg1, arg2, arg3, arg4, arg5, arg6
 * --------------	----------------------------------
 * a2			a6,   a3,   a4,   a5,   a8,   a9
 *
 */

static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return arch_syscall_invoke6_helper(arg1, arg2, arg3,
					   arg4, arg5, arg6,
					   call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	register uintptr_t a8 __asm__("%a8") = arg5;
	register uintptr_t a9 __asm__("%a9") = arg6;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5), "r" (a8), "r" (a9)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}

static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return arch_syscall_invoke5_helper(arg1, arg2, arg3,
					   arg4, arg5, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	register uintptr_t a8 __asm__("%a8") = arg5;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5), "r" (a8)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}

static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return arch_syscall_invoke4_helper(arg1, arg2, arg3, arg4, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}

static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return arch_syscall_invoke3_helper(arg1, arg2, arg3, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}

static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return arch_syscall_invoke2_helper(arg1, arg2, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3)
			 : "memory");

	return a2;
#endif
}

static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return arch_syscall_invoke1_helper(arg1, call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6)
			 : "memory");

	return a2;
#endif
}

static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return arch_syscall_invoke0_helper(call_id);
#else
	register uintptr_t a2 __asm__("%a2") = call_id;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2)
			 : "memory");

	return a2;
#endif
}

/*
 * There is no easy (or generic) way to figure out if a thread is running
 * in unprivileged mode. Reading the current ring (PS.CRING) is a privileged
 * instruction, and thread-local storage is not available with xcc.
 */
static inline bool arch_is_user_context(void)
{
	uint32_t thread;

	__asm__ volatile(
		"rur.THREADPTR %0\n\t"
		: "=a" (thread)
	);

	return !!thread;
}

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_ */
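
As a usage illustration (not part of this patch), a hand-written wrapper might dispatch on arch_is_user_context() the same way Zephyr's generated syscall stubs do; the z_impl function and call ID below are hypothetical names for this sketch.

#include <zephyr/kernel.h>
#include <zephyr/arch/syscall.h>

/* Hypothetical kernel-side implementation and call ID, for illustration. */
extern int z_impl_my_dev_write(const void *buf, size_t len);
#define MY_SYSCALL_DEV_WRITE 42

static inline int my_dev_write(const void *buf, size_t len)
{
	if (arch_is_user_context()) {
		/* User thread: trap into the kernel using the register
		 * ABI described above (call ID in a2, arguments in a6/a3).
		 */
		return (int)arch_syscall_invoke2((uintptr_t)buf, (uintptr_t)len,
						 MY_SYSCALL_DEV_WRITE);
	}

	/* Supervisor thread: call the implementation directly. */
	return z_impl_my_dev_write(buf, len);
}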
@ -7,6 +7,7 @@
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_THREAD_H_

#include <stdint.h>
#ifndef _ASMLANGUAGE

/* Xtensa doesn't use these structs, but Zephyr core requires they be

@ -22,6 +23,14 @@ typedef struct _callee_saved _callee_saved_t;

struct _thread_arch {
	uint32_t last_cpu;
#ifdef CONFIG_USERSPACE
	uint32_t *ptables;

	/* Initial privilege mode stack pointer when doing a system call.
	 * Unset for supervisor threads.
	 */
	uint8_t *psp;
#endif
};

typedef struct _thread_arch _thread_arch_t;

@ -9,8 +9,40 @@

#define Z_XTENSA_MMU_X BIT(0)
#define Z_XTENSA_MMU_W BIT(1)
#define Z_XTENSA_MMU_XW (BIT(1) | BIT(0))

#define Z_XTENSA_MMU_CACHED_WB BIT(2)
#define Z_XTENSA_MMU_CACHED_WT BIT(3)

#define K_MEM_PARTITION_IS_EXECUTABLE(attr) (((attr) & Z_XTENSA_MMU_X) != 0)
#define K_MEM_PARTITION_IS_WRITABLE(attr) (((attr) & Z_XTENSA_MMU_W) != 0)

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
	{Z_XTENSA_MMU_W})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
	{0})
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
	{0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
	{Z_XTENSA_MMU_X})

/*
 * This bit tells the mapping code whether the uncached pointer should
 * be shared between all threads. It is not used by the hardware; it
 * exists only for the implementation.
 *
 * The PTE mapping this memory will use the ASID that is set in the
 * ring 4 slot of RASID.
 */
#define Z_XTENSA_MMU_MAP_SHARED BIT(30)

#define Z_XTENSA_MMU_ILLEGAL (BIT(3) | BIT(2))

/* Struct used to map a memory region */

@ -21,6 +53,8 @@ struct xtensa_mmu_range {
	const uint32_t attrs;
};

typedef uint32_t k_mem_partition_attr_t;

extern const struct xtensa_mmu_range xtensa_soc_mmu_ranges[];
extern int xtensa_soc_mmu_ranges_num;