arch: arm64: Support all the ELn

While QEMU's Cortex-A53 emulation by default only emulates a CPU in EL1,
other QEMU forks (for example the QEMU released by Xilinx) and real
hardware start in EL3.

To support all the ELn, we introduce a macro to identify the Exception
Level at run time and take the correct actions for it.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
Carlo Caione authored on 2019-11-14 17:02:34 +00:00; committed by Anas Nashif
commit 528319bff7
3 changed files with 108 additions and 7 deletions
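
The run-time check is based on the architectural CurrentEL register, which reports the Exception Level in bits [3:2]: the register reads back as 0x4 in EL1, 0x8 in EL2 and 0xc in EL3. As a rough sketch of the same probe done from C (the current_el() helper below is purely illustrative and not part of this commit):

#include <stdint.h>

/* Illustrative only, not part of this patch: return the Exception
 * Level the CPU is currently executing in (1, 2 or 3). CurrentEL
 * keeps the EL in bits [3:2]; the other bits are RES0.
 */
static inline uint8_t current_el(void)
{
	uint64_t reg;

	__asm__ volatile ("mrs %0, CurrentEL" : "=r" (reg));

	return (uint8_t)((reg >> 2) & 0x3);
}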


@@ -0,0 +1,24 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _MACRO_H_
#define _MACRO_H_
#ifdef _ASMLANGUAGE
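/*
 * Branch to the label matching the Exception Level the CPU is
 * currently running in. CurrentEL holds the EL in bits [3:2], so it
 * reads back as 0xc in EL3, 0x8 in EL2 and 0x4 in EL1; if nothing
 * matches (i.e. EL0) execution falls through the macro.
 */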
.macro switch_el, xreg, el3_label, el2_label, el1_label
mrs \xreg, CurrentEL
cmp \xreg, 0xc
beq \el3_label
cmp \xreg, 0x8
beq \el2_label
cmp \xreg, 0x4
beq \el1_label
.endm
#endif /* _ASMLANGUAGE */
#endif /* _MACRO_H_ */


@@ -15,6 +15,7 @@
#include <linker/sections.h>
#include <arch/cpu.h>
#include "vector_table.h"
#include "macro.h"
/**
*
@@ -43,13 +44,57 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
/* Setup vector table */
adr x0, _vector_table
switch_el x1, 3f, 2f, 1f
3:
/* Initialize VBAR */
msr vbar_el3, x0
/* SError, IRQ and FIQ routing enablement in EL3 */
mrs x0, scr_el3
orr x0, x0, #(SCR_EL3_IRQ | SCR_EL3_FIQ | SCR_EL3_EA)
msr scr_el3, x0
/* Disable access trapping in EL3 for NEON/FP */
msr cptr_el3, xzr
/*
* Enable the instruction cache, stack pointer and data access
* alignment checks and disable speculative loads.
*/
mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
mrs x0, sctlr_el3
orr x0, x0, x1
msr sctlr_el3, x0
b 0f
2:
/* Initialize VBAR */
msr vbar_el2, x0
/* SError, IRQ and FIQ routing enablement in EL2 */
mrs x0, hcr_el2
orr x0, x0, #(HCR_EL2_FMO | HCR_EL2_IMO | HCR_EL2_AMO)
msr hcr_el2, x0
/* Disable access trapping in EL2 for NEON/FP */
msr cptr_el2, xzr
/*
* Enable the instruction cache, stack pointer and data access
* alignment checks and disable speculative loads.
*/
mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
mrs x0, sctlr_el2
orr x0, x0, x1
msr sctlr_el2, x0
b 0f
1:
/* Initialize VBAR */
msr vbar_el1, x0
isb
/* Disable access trapping in EL1 for NEON/FP */
mov x1, #(CPACR_EL1_FPEN_NOTRAP)
msr cpacr_el1, x1
isb
/*
* Enable the instruction cache, stack pointer and data access
@@ -59,6 +104,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
mrs x0, sctlr_el1
orr x0, x0, x1
msr sctlr_el1, x0
0:
isb
/* Enable the SError interrupt */


@@ -17,6 +17,7 @@
#include <offsets_short.h>
#include <arch/cpu.h>
#include <syscall.h>
#include "macro.h"
GDATA(_kernel)
GDATA(_k_neg_eagain)
@@ -46,7 +47,7 @@ SECTION_FUNC(TEXT, z_arm64_pendsv)
ldr x0, =_thread_offset_to_callee_saved
add x0, x0, x2
-/* Store rest of process context including x30, SPSR_EL1 and ELR_EL1 */
+/* Store rest of process context including x30, SPSR_ELn and ELR_ELn */
stp x19, x20, [x0], #16
stp x21, x22, [x0], #16
stp x23, x24, [x0], #16
@@ -54,8 +55,19 @@ SECTION_FUNC(TEXT, z_arm64_pendsv)
stp x27, x28, [x0], #16
stp x29, x30, [x0], #16
switch_el x3, 3f, 2f, 1f
3:
mrs x4, spsr_el3
mrs x5, elr_el3
b 0f
2:
mrs x4, spsr_el2
mrs x5, elr_el2
b 0f
1:
mrs x4, spsr_el1
mrs x5, elr_el1
0:
stp x4, x5, [x0], #16
/* Save the current SP */
@@ -84,9 +96,19 @@ SECTION_FUNC(TEXT, z_arm64_pendsv)
ldp x4, x5, [x0], #16
switch_el x3, 3f, 2f, 1f
3:
msr spsr_el3, x4
msr elr_el3, x5
b 0f
2:
msr spsr_el2, x4
msr elr_el2, x5
b 0f
1:
msr spsr_el1, x4
msr elr_el1, x5
0:
ldr x6, [x0]
mov sp, x6
@@ -125,7 +147,7 @@ SECTION_FUNC(TEXT, z_thread_entry_wrapper)
ldp x0, x1, [sp], #16
ldp x2, x3, [sp], #16
-/* ELR_EL1 was set in thread.c to z_thread_entry() */
+/* ELR_ELn was set in thread.c to z_thread_entry() */
eret
/**
@@ -155,7 +177,16 @@ SECTION_FUNC(TEXT, z_arm64_svc)
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
switch_el x3, 3f, 2f, 1f
3:
mrs x0, esr_el3
b 0f
2:
mrs x0, esr_el2
b 0f
1:
mrs x0, esr_el1
0:
lsr x1, x0, #26
cmp x1, #0x15 /* 0x15 = SVC */
@@ -205,7 +236,7 @@ inv:
*
* This is the common exit point for z_arm64_pendsv() and _isr_wrapper(). We
* restore the registers saved on the process stack including X30. The return
-* address used by eret (in ELR_EL1) is either restored by z_arm64_pendsv() if
+* address used by eret (in ELR_ELn) is either restored by z_arm64_pendsv() if
* a context-switch happened or not touched at all by the ISR if there was no
* context-switch.
*
@@ -235,7 +266,7 @@ SECTION_FUNC(TEXT, z_arm64_exit_exc)
ldp x0, x1, [sp], #16
/*
-* In general in the ELR_EL1 register we can find:
+* In general in the ELR_ELn register we can find:
*
* - The address of ret in z_arm64_call_svc() in case of arch_swap()
* (see swap.c)