aarch64: Use helpers instead of inline assembly

No need to rely on inline assembly when helpers are available.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
Carlo Caione authored on 2021-02-12 13:58:06 +01:00; committed by Anas Nashif
commit bba7abe975
5 changed files with 29 additions and 91 deletions
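
For context, the read_<reg>()/write_<reg>() calls introduced below are thin static-inline accessors around the same mrs/msr instructions, provided by <arch/arm/aarch64/lib_helpers.h>. A minimal sketch of how such per-register accessors can be generated from the generic sysreg macros follows; the MAKE_REG_HELPER name and exact layout are illustrative assumptions, not necessarily the in-tree definition:

    /* Illustrative sketch: generate read_xxx()/write_xxx() accessors for a
     * system register, mirroring the read_sysreg()/write_sysreg() macros
     * shown further down in this diff.
     */
    #define MAKE_REG_HELPER(reg)                                           \
        static ALWAYS_INLINE uint64_t read_##reg(void)                     \
        {                                                                  \
            uint64_t val;                                                  \
            __asm__ volatile("mrs %0, " STRINGIFY(reg) : "=r" (val));      \
            return val;                                                    \
        }                                                                  \
        static ALWAYS_INLINE void write_##reg(uint64_t val)                \
        {                                                                  \
            __asm__ volatile("msr " STRINGIFY(reg) ", %0" : : "r" (val));  \
        }

    MAKE_REG_HELPER(sctlr_el1);   /* read_sctlr_el1() / write_sctlr_el1() */
    MAKE_REG_HELPER(ttbr0_el1);   /* read_ttbr0_el1() / write_ttbr0_el1() */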

View file

@@ -181,18 +181,18 @@ void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf)
 	uint64_t el;
 
 	if (reason != K_ERR_SPURIOUS_IRQ) {
-		__asm__ volatile("mrs %0, CurrentEL" : "=r" (el));
+		el = read_currentel();
 
 		switch (GET_EL(el)) {
 		case MODE_EL1:
-			__asm__ volatile("mrs %0, esr_el1" : "=r" (esr));
-			__asm__ volatile("mrs %0, far_el1" : "=r" (far));
-			__asm__ volatile("mrs %0, elr_el1" : "=r" (elr));
+			esr = read_esr_el1();
+			far = read_far_el1();
+			elr = read_elr_el1();
 			break;
 		case MODE_EL3:
-			__asm__ volatile("mrs %0, esr_el3" : "=r" (esr));
-			__asm__ volatile("mrs %0, far_el3" : "=r" (far));
-			__asm__ volatile("mrs %0, elr_el3" : "=r" (elr));
+			esr = read_esr_el3();
+			far = read_far_el3();
+			elr = read_elr_el3();
 			break;
 		}
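
As a side note, the dispatch above works because CurrentEL encodes the exception level in bits [3:2]; a GET_EL() along these lines is all that is needed (illustrative sketch, the in-tree definition may differ):

    /* CurrentEL, bits [3:2]: current exception level (0..3) */
    #define GET_EL(currentel)  (((currentel) >> 2) & 0x3)

    /* e.g. GET_EL(read_currentel()) == MODE_EL1 when running at EL1 */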

View file

@@ -11,6 +11,7 @@
 #include <kernel_arch_interface.h>
 #include <logging/log.h>
 #include <arch/arm/aarch64/cpu.h>
+#include <arch/arm/aarch64/lib_helpers.h>
 #include <arch/arm/aarch64/arm_mmu.h>
 #include <linker/linker-defs.h>
 #include <sys/util.h>
@@ -491,31 +492,19 @@ static void enable_mmu_el1(struct arm_mmu_ptables *ptables, unsigned int flags)
 	uint64_t val;
 
 	/* Set MAIR, TCR and TBBR registers */
-	__asm__ volatile("msr mair_el1, %0"
-			 :
-			 : "r" (MEMORY_ATTRIBUTES)
-			 : "memory", "cc");
-	__asm__ volatile("msr tcr_el1, %0"
-			 :
-			 : "r" (get_tcr(1))
-			 : "memory", "cc");
-	__asm__ volatile("msr ttbr0_el1, %0"
-			 :
-			 : "r" ((uint64_t)ptables->base_xlat_table)
-			 : "memory", "cc");
+	write_mair_el1(MEMORY_ATTRIBUTES);
+	write_tcr_el1(get_tcr(1));
+	write_ttbr0_el1((uint64_t)ptables->base_xlat_table);
 
 	/* Ensure these changes are seen before MMU is enabled */
-	__ISB();
+	isb();
 
 	/* Enable the MMU and data cache */
-	__asm__ volatile("mrs %0, sctlr_el1" : "=r" (val));
-	__asm__ volatile("msr sctlr_el1, %0"
-			 :
-			 : "r" (val | SCTLR_M_BIT | SCTLR_C_BIT)
-			 : "memory", "cc");
+	val = read_sctlr_el1();
+	write_sctlr_el1(val | SCTLR_M_BIT | SCTLR_C_BIT);
 
 	/* Ensure the MMU enable takes effect immediately */
-	__ISB();
+	isb();
 
 	MMU_DEBUG("MMU enabled with dcache\n");
 }
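
With read_sctlr_el1()/write_sctlr_el1() available, read-modify-write sequences on SCTLR_EL1 collapse to one line each. Purely as an illustration of the helper API (not part of this commit), the inverse operation could be written as:

    /* Illustrative only: turn the MMU and data cache back off.
     * Real code would also need cache clean/invalidate maintenance,
     * omitted here for brevity.
     */
    static void disable_mmu_el1(void)
    {
        uint64_t val = read_sctlr_el1();

        write_sctlr_el1(val & ~(SCTLR_M_BIT | SCTLR_C_BIT));

        /* Ensure the MMU disable takes effect immediately */
        isb();
    }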
@@ -532,21 +521,16 @@ static struct arm_mmu_ptables kernel_ptables;
  */
 void z_arm64_mmu_init(void)
 {
-	uint64_t val;
 	unsigned int flags = 0;
 
 	/* Current MMU code supports only EL1 */
-	__asm__ volatile("mrs %0, CurrentEL" : "=r" (val));
 	__ASSERT(CONFIG_MMU_PAGE_SIZE == KB(4),
 		 "Only 4K page size is supported\n");
-	__ASSERT(GET_EL(val) == MODE_EL1,
+	__ASSERT(GET_EL(read_currentel()) == MODE_EL1,
 		 "Exception level not EL1, MMU not enabled!\n");
 
 	/* Ensure that MMU is already not enabled */
-	__asm__ volatile("mrs %0, sctlr_el1" : "=r" (val));
-	__ASSERT((val & SCTLR_M_BIT) == 0, "MMU is already enabled\n");
+	__ASSERT((read_sctlr_el1() & SCTLR_M_BIT) == 0, "MMU is already enabled\n");
 
 	kernel_ptables.base_xlat_table = new_table();
 	setup_page_tables(&kernel_ptables);

View file

@@ -16,28 +16,13 @@
 #ifndef _ASMLANGUAGE
 
 #include <arch/arm/aarch64/cpu.h>
+#include <arch/arm/aarch64/lib_helpers.h>
 #include <zephyr/types.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-static ALWAYS_INLINE void __DSB(void)
-{
-	__asm__ volatile ("dsb sy" : : : "memory");
-}
-
-static ALWAYS_INLINE void __DMB(void)
-{
-	__asm__ volatile ("dmb sy" : : : "memory");
-}
-
-static ALWAYS_INLINE void __ISB(void)
-{
-	__asm__ volatile ("isb" : : : "memory");
-}
-
 static ALWAYS_INLINE unsigned int arch_irq_lock(void)
 {
 	unsigned int key;
@@ -46,21 +31,15 @@ static ALWAYS_INLINE unsigned int arch_irq_lock(void)
 	 * Return the whole DAIF register as key but use DAIFSET to disable
 	 * IRQs.
 	 */
-	__asm__ volatile("mrs %0, daif;"
-			 "msr daifset, %1;"
-			 : "=r" (key)
-			 : "i" (DAIFSET_IRQ_BIT)
-			 : "memory");
+	key = read_daif();
+	disable_irq();
 
 	return key;
 }
 
 static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
 {
-	__asm__ volatile("msr daif, %0;"
-			 :
-			 : "r" (key)
-			 : "memory");
+	write_daif(key);
 }
 
 static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
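
The lock/unlock pair keeps the usual contract: the caller receives the previous DAIF state as the key and restores it later. Typical usage, unchanged by this commit:

    unsigned int key;

    key = arch_irq_lock();   /* key = read_daif(); IRQs masked via disable_irq() */
    /* ... short critical section ... */
    arch_irq_unlock(key);    /* write_daif(key) restores the previous mask state */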

View file

@@ -201,19 +201,4 @@
 #endif /* CONFIG_CPU_CORTEX_A72 */
 
-#ifndef _ASMLANGUAGE
-
-/* Core sysreg macros */
-#define read_sysreg(reg) ({					\
-	uint64_t val;						\
-	__asm__ volatile("mrs %0, " STRINGIFY(reg) : "=r" (val));\
-	val;							\
-})
-
-#define write_sysreg(val, reg) ({				\
-	__asm__ volatile("msr " STRINGIFY(reg) ", %0" : : "r" (val));\
-})
-
-#endif /* !_ASMLANGUAGE */
-
 #endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_CPU_H_ */
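
The generic macros removed here are presumably relocated alongside the per-register wrappers in lib_helpers.h (added as an include earlier in this commit) rather than dropped. For registers without a dedicated wrapper, the generic form can still be used, e.g. (illustrative):

    /* Illustrative: generic accessors for an arbitrary system register */
    uint64_t mpidr = read_sysreg(mpidr_el1);   /* expands to: mrs %0, mpidr_el1 */

    write_sysreg(0, cntv_ctl_el0);             /* expands to: msr cntv_ctl_el0, %0 */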

View file

@@ -26,16 +26,14 @@ static ALWAYS_INLINE void arm_arch_timer_init(void)
 static ALWAYS_INLINE void arm_arch_timer_set_compare(uint64_t val)
 {
-	__asm__ volatile("msr cntv_cval_el0, %0\n\t"
-			 : : "r" (val) : "memory");
+	write_cntv_cval_el0(val);
 }
 
 static ALWAYS_INLINE void arm_arch_timer_enable(unsigned char enable)
 {
-	uint32_t cntv_ctl;
+	uint64_t cntv_ctl;
 
-	__asm__ volatile("mrs %0, cntv_ctl_el0\n\t"
-			 : "=r" (cntv_ctl) : : "memory");
+	cntv_ctl = read_cntv_ctl_el0();
 
 	if (enable) {
 		cntv_ctl |= CNTV_CTL_ENABLE_BIT;
@@ -43,16 +41,14 @@ static ALWAYS_INLINE void arm_arch_timer_enable(unsigned char enable)
 		cntv_ctl &= ~CNTV_CTL_ENABLE_BIT;
 	}
 
-	__asm__ volatile("msr cntv_ctl_el0, %0\n\t"
-			 : : "r" (cntv_ctl) : "memory");
+	write_cntv_ctl_el0(cntv_ctl);
 }
 
 static ALWAYS_INLINE void arm_arch_timer_set_irq_mask(bool mask)
 {
-	uint32_t cntv_ctl;
+	uint64_t cntv_ctl;
 
-	__asm__ volatile("mrs %0, cntv_ctl_el0\n\t"
-			 : "=r" (cntv_ctl) : : "memory");
+	cntv_ctl = read_cntv_ctl_el0();
 
 	if (mask) {
 		cntv_ctl |= CNTV_CTL_IMASK_BIT;
@@ -60,18 +56,12 @@ static ALWAYS_INLINE void arm_arch_timer_set_irq_mask(bool mask)
 		cntv_ctl &= ~CNTV_CTL_IMASK_BIT;
 	}
 
-	__asm__ volatile("msr cntv_ctl_el0, %0\n\t"
-			 : : "r" (cntv_ctl) : "memory");
+	write_cntv_ctl_el0(cntv_ctl);
 }
 
 static ALWAYS_INLINE uint64_t arm_arch_timer_count(void)
 {
-	uint64_t cntvct_el0;
-
-	__asm__ volatile("mrs %0, cntvct_el0\n\t"
-			 : "=r" (cntvct_el0) : : "memory");
-
-	return cntvct_el0;
+	return read_cntvct_el0();
 }
 
 #ifdef __cplusplus
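
Taken together, the timer accessors support the usual "program the next event" pattern. An illustrative sequence (not part of this diff):

    /* Illustrative: arm a one-shot virtual-timer event 'ticks' counts ahead */
    static void timer_arm(uint64_t ticks)
    {
        arm_arch_timer_set_compare(arm_arch_timer_count() + ticks);
        arm_arch_timer_set_irq_mask(false);   /* unmask the timer interrupt */
        arm_arch_timer_enable(1);             /* set CNTV_CTL_EL0.ENABLE */
    }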