arch: arm64: Introduce ARM64 (AArch64) architecture

Introduce the basic ARM64 architecture support.

A new CONFIG_ARM64 symbol is introduced for the new architecture, and new
CMake / Kconfig files are added to switch between ARM and ARM64.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
Carlo Caione 2019-11-10 16:17:19 +00:00 committed by Anas Nashif
commit 1be0c05311
43 changed files with 2579 additions and 170 deletions


@ -17,7 +17,9 @@
/arch/arc/ @vonhust @ruuddw
/arch/arm/ @MaureenHelm @galak @ioannisg
/arch/arm/core/aarch32/cortex_m/cmse/ @ioannisg
/arch/arm/core/aarch64/ @carlocaione
/arch/arm/include/aarch32/cortex_m/cmse.h @ioannisg
/arch/arm/include/aarch64/ @carlocaione
/arch/arm/core/aarch32/cortex_r/ @MaureenHelm @galak @ioannisg @bbolen @stephanosio
/arch/common/ @andrewboie @ioannisg @andyross
/soc/arc/snps_*/ @vonhust @ruuddw
@ -265,6 +267,7 @@
/include/arch/arc/arch.h @andrewboie
/include/arch/arc/v2/irq.h @andrewboie
/include/arch/arm/aarch32/ @MaureenHelm @galak @ioannisg
/include/arch/arm/aarch64/ @carlocaione
/include/arch/arm/aarch32/irq.h @andrewboie
/include/arch/nios2/ @andrewboie
/include/arch/nios2/arch.h @andrewboie


@ -1,3 +1,7 @@
# SPDX-License-Identifier: Apache-2.0
include(aarch32.cmake)
if(CONFIG_ARM64)
include(aarch64.cmake)
else()
include(aarch32.cmake)
endif()


@ -6,9 +6,14 @@
menu "ARM Options"
depends on ARM
source "arch/arm/core/aarch32/Kconfig"
rsource "core/aarch32/Kconfig"
rsource "core/aarch64/Kconfig"
config ARCH
default "arm"
config ARM64
bool
select 64BIT
endmenu

arch/arm/aarch64.cmake (new file)

@ -0,0 +1,3 @@
# SPDX-License-Identifier: Apache-2.0
add_subdirectory(core/aarch64)


@ -3,6 +3,8 @@
# Copyright (c) 2015 Wind River Systems, Inc.
# SPDX-License-Identifier: Apache-2.0
if !ARM64
config CPU_CORTEX
bool
help
@ -267,9 +269,11 @@ config FP_SOFTABI
endchoice
source "arch/arm/core/aarch32/cortex_m/Kconfig"
source "arch/arm/core/aarch32/cortex_r/Kconfig"
rsource "cortex_m/Kconfig"
rsource "cortex_r/Kconfig"
source "arch/arm/core/aarch32/cortex_m/mpu/Kconfig"
rsource "cortex_m/mpu/Kconfig"
source "arch/arm/core/aarch32/cortex_m/tz/Kconfig"
rsource "cortex_m/tz/Kconfig"
endif # !ARM64


@ -0,0 +1,22 @@
# SPDX-License-Identifier: Apache-2.0
zephyr_library()
if (CONFIG_COVERAGE)
toolchain_cc_coverage()
endif ()
zephyr_library_sources(
cpu_idle.S
fatal.c
irq_manage.c
prep_c.c
reset.S
swap.c
swap_helper.S
thread.c
vector_table.S
)
zephyr_library_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.S)
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)


@ -0,0 +1,76 @@
# ARM64 core configuration options
# Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
# SPDX-License-Identifier: Apache-2.0
if ARM64
config CPU_CORTEX
bool
help
This option signifies the use of a CPU of the Cortex family.
config CPU_CORTEX_A
bool
select CPU_CORTEX
select HAS_FLASH_LOAD_OFFSET
help
This option signifies the use of a CPU of the Cortex-A family.
config CPU_CORTEX_A53
bool
select CPU_CORTEX_A
select ARMV8_A
help
This option signifies the use of a Cortex-A53 CPU
config NUM_IRQS
int
config MAIN_STACK_SIZE
default 4096
config IDLE_STACK_SIZE
default 4096
config ISR_STACK_SIZE
default 4096
config TEST_EXTRA_STACKSIZE
default 2048
config SYSTEM_WORKQUEUE_STACK_SIZE
default 4096
config OFFLOAD_WORKQUEUE_STACK_SIZE
default 4096
if CPU_CORTEX_A
config ARMV8_A
bool
select ATOMIC_OPERATIONS_BUILTIN
help
This option signifies the use of an ARMv8-A processor
implementation.
From https://developer.arm.com/products/architecture/cpu-architecture/a-profile:
The Armv8-A architecture introduces the ability to use 64-bit and
32-bit Execution states, known as AArch64 and AArch32 respectively.
The AArch64 Execution state supports the A64 instruction set, holds
addresses in 64-bit registers and allows instructions in the base
instruction set to use 64-bit registers for their processing. The AArch32
Execution state is a 32-bit Execution state that preserves backwards
compatibility with the Armv7-A architecture and enhances that profile
so that it can support some features included in the AArch64 state.
It supports the T32 and A32 instruction sets.
config GEN_ISR_TABLES
default y
config GEN_IRQ_VECTOR_TABLE
default n
endif # CPU_CORTEX_A
endif # ARM64


@ -0,0 +1,32 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM64 Cortex-A power management
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <arch/cpu.h>
GTEXT(arch_cpu_idle)
SECTION_FUNC(TEXT, arch_cpu_idle)
dsb sy
wfi
msr daifclr, #(DAIFSET_IRQ)
ret
GTEXT(arch_cpu_atomic_idle)
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
msr daifset, #(DAIFSET_IRQ)
isb
wfe
tst x0, #(DAIF_IRQ)
beq _irq_disabled
msr daifclr, #(DAIFSET_IRQ)
_irq_disabled:
ret
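A minimal usage sketch (not part of this commit) of how an idle loop is expected to drive this hook through the public k_cpu_idle() wrapper, simplified from the usual Zephyr idle-thread shape:

#include <kernel.h>

/* Simplified idle loop: IRQs are masked before idling; arch_cpu_idle()
 * (reached via k_cpu_idle()) executes dsb/wfi and unmasks IRQs with
 * daifclr before returning.
 */
void idle_loop_sketch(void)
{
	for (;;) {
		(void)irq_lock();
		k_cpu_idle();
	}
}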


@ -0,0 +1,176 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Kernel fatal error handler for ARM64 Cortex-A
*
* This module provides the z_arm64_fatal_error() routine for ARM64 Cortex-A
* CPUs
*/
#include <kernel.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os);
static void print_EC_cause(u64_t esr)
{
u32_t EC = (u32_t)esr >> 26;
switch (EC) {
case 0b000000:
LOG_ERR("Unknown reason");
break;
case 0b000001:
LOG_ERR("Trapped WFI or WFE instruction execution");
break;
case 0b000011:
LOG_ERR("Trapped MCR or MRC access with (coproc==0b1111) that "
"is not reported using EC 0b000000");
break;
case 0b000100:
LOG_ERR("Trapped MCRR or MRRC access with (coproc==0b1111) "
"that is not reported using EC 0b000000");
break;
case 0b000101:
LOG_ERR("Trapped MCR or MRC access with (coproc==0b1110)");
break;
case 0b000110:
LOG_ERR("Trapped LDC or STC access");
break;
case 0b000111:
LOG_ERR("Trapped access to SVE, Advanced SIMD, or "
"floating-point functionality");
break;
case 0b001100:
LOG_ERR("Trapped MRRC access with (coproc==0b1110)");
break;
case 0b001101:
LOG_ERR("Branch Target Exception");
break;
case 0b001110:
LOG_ERR("Illegal Execution state");
break;
case 0b010001:
LOG_ERR("SVC instruction execution in AArch32 state");
break;
case 0b011000:
LOG_ERR("Trapped MSR, MRS or System instruction execution in "
"AArch64 state, that is not reported using EC "
"0b000000, 0b000001 or 0b000111");
break;
case 0b011001:
LOG_ERR("Trapped access to SVE functionality");
break;
case 0b100000:
LOG_ERR("Instruction Abort from a lower Exception level, that "
"might be using AArch32 or AArch64");
break;
case 0b100001:
LOG_ERR("Instruction Abort taken without a change in Exception "
"level.");
break;
case 0b100010:
LOG_ERR("PC alignment fault exception.");
break;
case 0b100100:
LOG_ERR("Data Abort from a lower Exception level, that might "
"be using AArch32 or AArch64");
break;
case 0b100101:
LOG_ERR("Data Abort taken without a change in Exception level");
break;
case 0b100110:
LOG_ERR("SP alignment fault exception");
break;
case 0b101000:
LOG_ERR("Trapped floating-point exception taken from AArch32 "
"state");
break;
case 0b101100:
LOG_ERR("Trapped floating-point exception taken from AArch64 "
"state.");
break;
case 0b101111:
LOG_ERR("SError interrupt");
break;
case 0b110000:
LOG_ERR("Breakpoint exception from a lower Exception level, "
"that might be using AArch32 or AArch64");
break;
case 0b110001:
LOG_ERR("Breakpoint exception taken without a change in "
"Exception level");
break;
case 0b110010:
LOG_ERR("Software Step exception from a lower Exception level, "
"that might be using AArch32 or AArch64");
break;
case 0b110011:
LOG_ERR("Software Step exception taken without a change in "
"Exception level");
break;
case 0b110100:
LOG_ERR("Watchpoint exception from a lower Exception level, "
"that might be using AArch32 or AArch64");
break;
case 0b110101:
LOG_ERR("Watchpoint exception taken without a change in "
"Exception level.");
break;
case 0b111000:
LOG_ERR("BKPT instruction execution in AArch32 state");
break;
case 0b111100:
LOG_ERR("BRK instruction execution in AArch64 state.");
break;
}
}
void z_arm64_fatal_error(unsigned int reason)
{
u64_t el, esr, elr, far;
if (reason != K_ERR_SPURIOUS_IRQ) {
__asm__ volatile("mrs %0, CurrentEL" : "=r" (el));
switch (GET_EL(el)) {
case MODE_EL1:
__asm__ volatile("mrs %0, esr_el1" : "=r" (esr));
__asm__ volatile("mrs %0, far_el1" : "=r" (far));
__asm__ volatile("mrs %0, elr_el1" : "=r" (elr));
break;
case MODE_EL2:
__asm__ volatile("mrs %0, esr_el2" : "=r" (esr));
__asm__ volatile("mrs %0, far_el2" : "=r" (far));
__asm__ volatile("mrs %0, elr_el2" : "=r" (elr));
break;
case MODE_EL3:
__asm__ volatile("mrs %0, esr_el3" : "=r" (esr));
__asm__ volatile("mrs %0, far_el3" : "=r" (far));
__asm__ volatile("mrs %0, elr_el3" : "=r" (elr));
break;
default:
/* Just to keep the compiler happy */
esr = elr = far = 0;
break;
}
if (GET_EL(el) != MODE_EL0) {
LOG_ERR("ESR_ELn: %llx", esr);
LOG_ERR("FAR_ELn: %llx", far);
LOG_ERR("ELR_ELn: %llx", elr);
print_EC_cause(esr);
}
}
z_fatal_error(reason, NULL);
CODE_UNREACHABLE;
}
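As a worked example (not from this commit), the EC field decoded by print_EC_cause() occupies bits [31:26] of ESR_ELn; the standalone snippet below applies the same shift to a made-up ESR value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t esr = 0x96000045;		/* hypothetical ESR value */
	uint32_t ec = (uint32_t)esr >> 26;	/* same shift as print_EC_cause() */

	/* Prints "EC = 0x25", i.e. 0b100101: "Data Abort taken without a
	 * change in Exception level" in the switch above.
	 */
	printf("EC = %#x\n", (unsigned int)ec);
	return 0;
}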


@ -0,0 +1,60 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM64 Cortex-A interrupt management
*/
#include <kernel.h>
#include <arch/cpu.h>
#include <device.h>
#include <debug/tracing.h>
#include <irq.h>
#include <irq_nextlevel.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <sw_isr_table.h>
extern void z_arm64_fatal_error(unsigned int reason);
void arch_irq_enable(unsigned int irq)
{
struct device *dev = _sw_isr_table[0].arg;
irq_enable_next_level(dev, (irq >> 8) - 1);
}
void arch_irq_disable(unsigned int irq)
{
struct device *dev = _sw_isr_table[0].arg;
irq_disable_next_level(dev, (irq >> 8) - 1);
}
int arch_irq_is_enabled(unsigned int irq)
{
struct device *dev = _sw_isr_table[0].arg;
return irq_is_enabled_next_level(dev);
}
void z_arm64_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
{
struct device *dev = _sw_isr_table[0].arg;
if (irq == 0)
return;
irq_set_priority_next_level(dev, (irq >> 8) - 1, prio, flags);
}
void z_irq_spurious(void *unused)
{
ARG_UNUSED(unused);
z_arm64_fatal_error(K_ERR_SPURIOUS_IRQ);
}
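These helpers assume Zephyr's multi-level interrupt numbering, with the GIC acting as the single 2nd-level aggregator: the 2nd-level line is stored, offset by one, in bits [15:8] of the IRQ number. The illustrative helpers below (not part of the commit) show that encoding and the decoding used above:

/* Hypothetical helpers mirroring the assumed IRQ number layout */
static inline unsigned int to_level2_irq(unsigned int parent, unsigned int line)
{
	return ((line + 1) << 8) | parent;	/* inverse of the decode below */
}

static inline unsigned int from_level2_irq(unsigned int irq)
{
	return (irq >> 8) - 1;			/* what arch_irq_enable() passes on */
}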


@ -0,0 +1,34 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Software interrupts utility code - ARM64 implementation
*/
#include <kernel.h>
#include <irq_offload.h>
#include <aarch64/exc.h>
volatile irq_offload_routine_t offload_routine;
static void *offload_param;
void z_irq_do_offload(void)
{
offload_routine(offload_param);
}
void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
{
k_sched_lock();
offload_routine = routine;
offload_param = parameter;
z_arm64_offload();
offload_routine = NULL;
k_sched_unlock();
}
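A minimal usage sketch (not part of this commit): irq_offload() runs the supplied routine synchronously in interrupt context, through the _SVC_CALL_IRQ_OFFLOAD service call added elsewhere in this commit.

#include <kernel.h>
#include <irq_offload.h>
#include <sys/printk.h>

/* Hypothetical routine to be executed in interrupt context */
static void offloaded_routine(void *arg)
{
	printk("in ISR context, arg=%p\n", arg);
}

void offload_example(void)
{
	int data = 42;

	irq_offload(offloaded_routine, &data);	/* returns after the routine ran */
}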


@ -0,0 +1,93 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM64 Cortex-A ISRs wrapper
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <sw_isr_table.h>
GDATA(_sw_isr_table)
/**
* @brief Wrapper around ISRs when inserted in software ISR table
*
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table
* using the running interrupt number as the index, and invokes the registered
* ISR with its corresponding argument. When returning from the ISR, it
* determines if a context switch needs to happen.
*
* @return N/A
*/
GTEXT(_isr_wrapper)
SECTION_FUNC(TEXT, _isr_wrapper)
/*
* Save x0-x18 (and x30) on the process stack because they can be
* clobbered by the ISR and/or context switch.
*
* Two things can happen:
*
* - No context-switch: in this case x19-x28 are callee-saved register
* so we can be sure they are not going to be clobbered by ISR.
* - Context-switch: the callee-saved registers are saved by
* z_arm64_pendsv() in the kernel structure.
*/
stp x0, x1, [sp, #-16]!
stp x2, x3, [sp, #-16]!
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
/* ++(_kernel->nested) to be checked by arch_is_in_isr() */
ldr x1, =_kernel
ldr x2, [x1, #_kernel_offset_to_nested]
add x2, x2, #1
str x2, [x1, #_kernel_offset_to_nested]
/* Cortex-A has one IRQ line so the main handler will be at offset 0 */
ldr x1, =_sw_isr_table
ldp x0, x3, [x1] /* arg in x0, ISR in x3 */
blr x3
/* --(_kernel->nested) */
ldr x1, =_kernel
ldr x2, [x1, #_kernel_offset_to_nested]
sub x2, x2, #1
str x2, [x1, #_kernel_offset_to_nested]
/* Check if we need to context switch */
ldr x2, [x1, #_kernel_offset_to_current]
ldr x3, [x1, #_kernel_offset_to_ready_q_cache]
cmp x2, x3
beq exit
/* Switch thread */
bl z_arm64_pendsv
/* We return here in two cases:
*
* - The ISR was taken and no context switch was performed.
* - A context-switch was performed during the ISR in the past and now
* the thread has been switched in again and we return here from the
* ret in z_arm64_pendsv() because x30 was saved and restored.
*/
exit:
#ifdef CONFIG_STACK_SENTINEL
bl z_check_stack_sentinel
#endif
b z_arm64_exit_exc
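Stripped of the register save/restore and the nesting bookkeeping above, the dispatch step is roughly equivalent to the following C sketch (not part of the commit), using slot 0 of _sw_isr_table since Cortex-A exposes a single IRQ line:

#include <sw_isr_table.h>

static void isr_dispatch_sketch(void)
{
	const struct _isr_table_entry *ite = &_sw_isr_table[0];

	ite->isr(ite->arg);	/* registered ISR with its argument */
}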


@ -0,0 +1,34 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Full C support initialization
*
* Initialization of full C support: zero the .bss and call z_cstart().
*
* Stack is available in this module, but not the global data/bss until their
* initialization is performed.
*/
#include <kernel_internal.h>
extern FUNC_NORETURN void z_cstart(void);
/**
*
* @brief Prepare to and run C code
*
* This routine prepares for the execution of and runs C code.
*
* @return N/A
*/
void z_arm64_prep_c(void)
{
z_bss_zero();
z_cstart();
CODE_UNREACHABLE;
}


@ -0,0 +1,73 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Reset handler
*
* Reset handler that prepares the system for running C code.
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <arch/cpu.h>
#include "vector_table.h"
/**
*
* @brief Reset vector
*
* Runs when the system comes out of reset. The processor is in thread mode with
* privileged level. At this point, neither SP_EL0 nor SP_ELx point to a valid
* area in SRAM.
*
* When these steps are completed, jump to z_arm64_prep_c(), which will finish
* setting up the system for running C code.
*
* @return N/A
*/
GTEXT(__reset)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset)
/*
* The entry point is located at the __reset symbol, which is fetched by a XIP
* image playing the role of a bootloader, which jumps to it, not through the
* reset vector mechanism. Such bootloaders might want to search for a __start
* symbol instead, so create that alias here.
*/
GTEXT(__start)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
/* Setup vector table */
adr x0, _vector_table
msr vbar_el1, x0
isb
/* Disable access trapping in EL1 for NEON/FP */
mov x1, #(CPACR_EL1_FPEN_NOTRAP)
msr cpacr_el1, x1
isb
/*
* Enable the instruction cache, stack pointer and data access
* alignment checks and disable speculative loads.
*/
mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
mrs x0, sctlr_el1
orr x0, x0, x1
msr sctlr_el1, x0
isb
/* Enable the SError interrupt */
msr daifclr, #(DAIFSET_ABT)
/* Switch to SP_ELn and setup the stack */
msr spsel, #1
ldr x0, =(_interrupt_stack)
add x0, x0, #(CONFIG_ISR_STACK_SIZE)
mov sp, x0
bl z_arm64_prep_c


@ -0,0 +1,23 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <kernel_internal.h>
extern const int _k_neg_eagain;
int arch_swap(unsigned int key)
{
_current->arch.swap_return_value = _k_neg_eagain;
z_arm64_call_svc();
irq_unlock(key);
/* Context switch is performed here. Returning implies the
* thread has been context-switched-in again.
*/
return _current->arch.swap_return_value;
}
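A hedged sketch (not part of this commit, kernel-internal style) of the convention arch_swap() relies on: the caller holds the IRQ lock key, and the return value is whatever another context later stored with arch_thread_return_value_set(), defaulting to -EAGAIN.

#include <kernel.h>

extern int arch_swap(unsigned int key);	/* declared in kernel internals */

int wait_for_something_sketch(void)
{
	unsigned int key = irq_lock();

	/* ... queue the current thread on some wait object ... */

	return arch_swap(key);	/* -EAGAIN unless a waker overrode it */
}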


@ -0,0 +1,259 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Thread context switching for ARM64 Cortex-A
*
* This module implements the routines necessary for thread context switching
* on ARM64 Cortex-A.
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <syscall.h>
GDATA(_kernel)
GDATA(_k_neg_eagain)
/**
* @brief PendSV exception handler, handling context switches
*
* The PendSV exception is the only execution context in the system that can
* perform context switching. When an execution context finds out it has to
* switch contexts, it pends the PendSV exception.
*
* When PendSV is pended, the decision that a context switch must happen has
* already been taken. In other words, when z_arm64_pendsv() runs, we *know* we
* have to swap *something*.
*
* On Cortex-A the PendSV exception is not supported by the architecture, so this
* function is called directly, either by _isr_wrapper() in case of preemption
* or by z_arm64_svc() in case of cooperative switching.
*/
GTEXT(z_arm64_pendsv)
SECTION_FUNC(TEXT, z_arm64_pendsv)
/* load _kernel into x1 and current k_thread into x2 */
ldr x1, =_kernel
ldr x2, [x1, #_kernel_offset_to_current]
/* addr of callee-saved regs in thread in x0 */
ldr x0, =_thread_offset_to_callee_saved
add x0, x0, x2
/* Store rest of process context including x30, SPSR_EL1 and ELR_EL1 */
stp x19, x20, [x0], #16
stp x21, x22, [x0], #16
stp x23, x24, [x0], #16
stp x25, x26, [x0], #16
stp x27, x28, [x0], #16
stp x29, x30, [x0], #16
mrs x4, spsr_el1
mrs x5, elr_el1
stp x4, x5, [x0], #16
/* Save the current SP */
mov x6, sp
str x6, [x0]
/* fetch the thread to run from the ready queue cache */
ldr x2, [x1, #_kernel_offset_to_ready_q_cache]
str x2, [x1, #_kernel_offset_to_current]
/* load _kernel into x1 and current k_thread into x2 */
ldr x1, =_kernel
ldr x2, [x1, #_kernel_offset_to_current]
/* addr of callee-saved regs in thread in x0 */
ldr x0, =_thread_offset_to_callee_saved
add x0, x0, x2
/* Restore x19-x29 plus x30, SPSR_ELn and ELR_ELn */
ldp x19, x20, [x0], #16
ldp x21, x22, [x0], #16
ldp x23, x24, [x0], #16
ldp x25, x26, [x0], #16
ldp x27, x28, [x0], #16
ldp x29, x30, [x0], #16
ldp x4, x5, [x0], #16
msr spsr_el1, x4
msr elr_el1, x5
ldr x6, [x0]
mov sp, x6
/* We restored x30 from the process stack. There are three possible
* cases:
*
* - We return to z_arm64_svc() when swapping in a thread that was
* swapped out by z_arm64_svc() before jumping into
* z_arm64_exit_exc()
* - We return to _isr_wrapper() when swapping in a thread that was
* swapped out by _isr_wrapper() before jumping into
* z_arm64_exit_exc()
* - We return (jump) into z_thread_entry_wrapper() for new threads
* (see thread.c)
*/
ret
/**
*
* @brief Entry wrapper for new threads
*
* @return N/A
*/
GTEXT(z_thread_entry_wrapper)
SECTION_FUNC(TEXT, z_thread_entry_wrapper)
/*
* z_thread_entry_wrapper is called for every new thread upon the return
* of arch_swap() or ISR. Its address, as well as its input function
* arguments thread_entry_t, void *, void *, void * are restored from
* the thread stack (see thread.c).
* In this case, thread_entry_t, void *, void * and void * are stored
* in registers x0, x1, x2 and x3. These registers are used as arguments
* to function z_thread_entry.
*/
ldp x0, x1, [sp], #16
ldp x2, x3, [sp], #16
/* ELR_EL1 was set in thread.c to z_thread_entry() */
eret
/**
*
* @brief Service call handler
*
* The service call (SVC) is used in the following occasions:
* - Cooperative context switching
* - IRQ offloading
*
* @return N/A
*/
GTEXT(z_arm64_svc)
SECTION_FUNC(TEXT, z_arm64_svc)
/*
* Save the volatile registers and x30 on the process stack. This is
* needed if the thread is switched out.
*/
stp x0, x1, [sp, #-16]!
stp x2, x3, [sp, #-16]!
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
mrs x0, esr_el1
lsr x1, x0, #26
cmp x1, #0x15 /* 0x15 = SVC */
bne inv
/* Demux the SVC call */
and x2, x0, #0xff
cmp x2, #_SVC_CALL_CONTEXT_SWITCH
beq context_switch
#ifdef CONFIG_IRQ_OFFLOAD
cmp x2, #_SVC_CALL_IRQ_OFFLOAD
beq offload
b inv
offload:
/* ++(_kernel->nested) to be checked by arch_is_in_isr() */
ldr x1, =_kernel
ldr x2, [x1, #_kernel_offset_to_nested]
add x2, x2, #1
str x2, [x1, #_kernel_offset_to_nested]
bl z_irq_do_offload
/* --(_kernel->nested) */
ldr x1, =_kernel
ldr x2, [x1, #_kernel_offset_to_nested]
sub x2, x2, #1
str x2, [x1, #_kernel_offset_to_nested]
b exit
#endif
b inv
context_switch:
bl z_arm64_pendsv
exit:
b z_arm64_exit_exc
inv:
/* K_ERR_CPU_EXCEPTION */
mov x0, #0
b z_arm64_fatal_error
/**
* @brief Restore volatile registers and x30
*
* This is the common exit point for z_arm64_pendsv() and _isr_wrapper(). We
* restore the registers saved on the process stack including X30. The return
* address used by eret (in ELR_EL1) is either restored by z_arm64_pendsv() if
* a context-switch happened or not touched at all by the ISR if there was no
* context-switch.
*
* @return N/A
*/
GTEXT(z_arm64_exit_exc)
SECTION_FUNC(TEXT, z_arm64_exit_exc)
/*
* In x30 we can have:
*
* - The address of irq_unlock() in swap.c when swapping in a thread
* that was cooperatively swapped out (used by ret in
* z_arm64_call_svc())
* - A previous generic value if the thread that we are swapping in was
* swapped out preemptively by the ISR.
*/
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
ldp x2, x3, [sp], #16
ldp x0, x1, [sp], #16
/*
* In general in the ELR_EL1 register we can find:
*
* - The address of ret in z_arm64_call_svc() in case of arch_swap()
* (see swap.c)
* - The address of the next instruction at the time of the IRQ when the
* thread was switched out.
* - The address of z_thread_entry() for new threads (see thread.c).
*/
eret
GTEXT(z_arm64_call_svc)
SECTION_FUNC(TEXT, z_arm64_call_svc)
svc #_SVC_CALL_CONTEXT_SWITCH
ret
#ifdef CONFIG_IRQ_OFFLOAD
GTEXT(z_arm64_offload)
SECTION_FUNC(TEXT, z_arm64_offload)
svc #_SVC_CALL_IRQ_OFFLOAD
ret
#endif


@ -0,0 +1,82 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief New thread creation for ARM64 Cortex-A
*
* Core thread related primitives for the ARM64 Cortex-A
*/
#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <arch/cpu.h>
/**
*
* @brief Initialize a new thread from its stack space
*
* The control structure (thread) is put at the lower address of the stack. An
* initial context, to be "restored" by z_arm64_pendsv(), is put at the other
* end of the stack, and thus reusable by the stack when not needed anymore.
*
* <options> is currently unused.
*
* @param stack pointer to the aligned stack memory
* @param stackSize size of the available stack memory in bytes
* @param pEntry the entry point
* @param parameter1 first parameter passed to the entry point
* @param parameter2 second parameter passed to the entry point
* @param parameter3 third parameter passed to the entry point
* @param priority thread priority
* @param options thread options: K_ESSENTIAL, K_FP_REGS
*
* @return N/A
*/
void z_thread_entry_wrapper(k_thread_entry_t k, void *p1, void *p2, void *p3);
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stackSize, k_thread_entry_t pEntry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned int options)
{
char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
char *stackEnd;
struct __esf *pInitCtx;
stackEnd = pStackMem + stackSize;
z_new_thread_init(thread, pStackMem, stackSize, priority, options);
pInitCtx = (struct __esf *)(STACK_ROUND_DOWN(stackEnd -
sizeof(struct __basic_sf)));
pInitCtx->basic.x0 = (u64_t)pEntry;
pInitCtx->basic.x1 = (u64_t)parameter1;
pInitCtx->basic.x2 = (u64_t)parameter2;
pInitCtx->basic.x3 = (u64_t)parameter3;
/*
* We are saving:
*
* - SP: to pop out pEntry and parameters when going through
* z_thread_entry_wrapper().
* - x30: to be used by ret in z_arm64_pendsv() when the new task is
* first scheduled.
* - ELR_EL1: to be used by eret in z_thread_entry_wrapper() to return
* to z_thread_entry() with pEntry in x0 and the parameters already
* in place in x1, x2, x3.
* - SPSR_EL1: to enable IRQs (we are masking debug exceptions, SError
* interrupts and FIQs).
*/
thread->callee_saved.sp = (u64_t)pInitCtx;
thread->callee_saved.x30 = (u64_t)z_thread_entry_wrapper;
thread->callee_saved.elr = (u64_t)z_thread_entry;
thread->callee_saved.spsr = SPSR_MODE_EL1H | DAIF_FIQ;
}


@ -0,0 +1,91 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Populated vector table
*/
#include <toolchain.h>
#include <linker/sections.h>
#include "vector_table.h"
/*
* Four types of exceptions:
* - synchronous: aborts from MMU, SP/CP alignment checking, unallocated
* instructions, SVCs/SMCs/HVCs, ...)
* - IRQ: group 1 (normal) interrupts
* - FIQ: group 0 or secure interrupts
* - SError: fatal system errors
*
* Four different contexts:
* - from same exception level, when using the SP_EL0 stack pointer
* - from same exception level, when using the SP_ELx stack pointer
* - from lower exception level, when this is AArch64
* - from lower exception level, when this is AArch32
*
* +------------------+------------------+-------------------------+
* |     Address      |  Exception type  |       Description       |
* +------------------+------------------+-------------------------+
* | VBAR_ELn + 0x000 | Synchronous      | Current EL with SP0     |
* |          + 0x080 | IRQ / vIRQ       |                         |
* |          + 0x100 | FIQ / vFIQ       |                         |
* |          + 0x180 | SError / vSError |                         |
* +------------------+------------------+-------------------------+
* |          + 0x200 | Synchronous      | Current EL with SPx     |
* |          + 0x280 | IRQ / vIRQ       |                         |
* |          + 0x300 | FIQ / vFIQ       |                         |
* |          + 0x380 | SError / vSError |                         |
* +------------------+------------------+-------------------------+
* |          + 0x400 | Synchronous      | Lower EL using AArch64  |
* |          + 0x480 | IRQ / vIRQ       |                         |
* |          + 0x500 | FIQ / vFIQ       |                         |
* |          + 0x580 | SError / vSError |                         |
* +------------------+------------------+-------------------------+
* |          + 0x600 | Synchronous      | Lower EL using AArch32  |
* |          + 0x680 | IRQ / vIRQ       |                         |
* |          + 0x700 | FIQ / vFIQ       |                         |
* |          + 0x780 | SError / vSError |                         |
* +------------------+------------------+-------------------------+
*/
/* The whole table must be 2K aligned */
.align 11
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
/* Current EL with SP0 / Synchronous */
.align 7
b .
/* Current EL with SP0 / IRQ */
.align 7
b .
/* Current EL with SP0 / FIQ */
.align 7
b .
/* Current EL with SP0 / SError */
.align 7
b .
/* Current EL with SPx / Synchronous */
.align 7
b z_arm64_svc
/* Current EL with SPx / IRQ */
.align 7
b _isr_wrapper
/* Current EL with SPx / FIQ */
.align 7
b .
/* Current EL with SPx / SError */
.align 7
mov x0, #0 /* K_ERR_CPU_EXCEPTION */
b z_arm64_fatal_error


@ -0,0 +1,47 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Definitions for the boot vector table
*
*
* Definitions for the boot vector table.
*
* System exception handler names all have the same format:
*
* __<exception name with underscores>
*
* No other symbol has the same format, so they are easy to spot.
*/
#ifndef _VECTOR_TABLE_H_
#define _VECTOR_TABLE_H_
#ifdef _ASMLANGUAGE
#include <toolchain.h>
#include <linker/sections.h>
GTEXT(__start)
GTEXT(_vector_table)
GTEXT(_isr_wrapper)
#else /* _ASMLANGUAGE */
#ifdef __cplusplus
extern "C" {
#endif
extern void *_vector_table[];
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* _VECTOR_TABLE_H_ */


@ -1,90 +1,15 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM kernel structure member offset definition file
*
* This module is responsible for the generation of the absolute symbols whose
* value represents the member offsets for various ARM kernel structures.
*
* All of the absolute symbols defined by this module will be present in the
* final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms
* symbol).
*
* INTERNAL
* It is NOT necessary to define the offset for every member of a structure.
* Typically, only those members that are accessed by assembly language routines
* are defined; however, it doesn't hurt to define all fields for the sake of
* completeness.
*/
#include <kernel.h>
#include <kernel_arch_data.h>
#include <gen_offset.h>
#include <kernel_offsets.h>
GEN_OFFSET_SYM(_thread_arch_t, basepri);
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
#if defined(CONFIG_USERSPACE) || defined(CONFIG_FP_SHARING)
GEN_OFFSET_SYM(_thread_arch_t, mode);
#if defined(CONFIG_USERSPACE)
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
#endif
#endif
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
GEN_OFFSET_SYM(_thread_arch_t, preempt_float);
#endif
GEN_OFFSET_SYM(_basic_sf_t, a1);
GEN_OFFSET_SYM(_basic_sf_t, a2);
GEN_OFFSET_SYM(_basic_sf_t, a3);
GEN_OFFSET_SYM(_basic_sf_t, a4);
GEN_OFFSET_SYM(_basic_sf_t, ip);
GEN_OFFSET_SYM(_basic_sf_t, lr);
GEN_OFFSET_SYM(_basic_sf_t, pc);
GEN_OFFSET_SYM(_basic_sf_t, xpsr);
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
GEN_OFFSET_SYM(_esf_t, s);
GEN_OFFSET_SYM(_esf_t, fpscr);
#endif
GEN_ABSOLUTE_SYM(___esf_t_SIZEOF, sizeof(_esf_t));
GEN_OFFSET_SYM(_callee_saved_t, v1);
GEN_OFFSET_SYM(_callee_saved_t, v2);
GEN_OFFSET_SYM(_callee_saved_t, v3);
GEN_OFFSET_SYM(_callee_saved_t, v4);
GEN_OFFSET_SYM(_callee_saved_t, v5);
GEN_OFFSET_SYM(_callee_saved_t, v6);
GEN_OFFSET_SYM(_callee_saved_t, v7);
GEN_OFFSET_SYM(_callee_saved_t, v8);
GEN_OFFSET_SYM(_callee_saved_t, psp);
#if defined(CONFIG_CPU_CORTEX_R)
GEN_OFFSET_SYM(_callee_saved_t, spsr);
GEN_OFFSET_SYM(_callee_saved_t, lr);
#endif
/* size of the entire preempt registers structure */
GEN_ABSOLUTE_SYM(___callee_saved_t_SIZEOF, sizeof(struct _callee_saved));
/*
* size of the struct k_thread structure sans save area for floating
* point registers.
*/
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread) -
sizeof(struct _preempt_float));
#if defined(CONFIG_ARM64)
#include "offsets_aarch64.c"
#else
GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread));
#include "offsets_aarch32.c"
#endif
GEN_ABS_SYM_END


@ -0,0 +1,87 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM kernel structure member offset definition file
*
* This module is responsible for the generation of the absolute symbols whose
* value represents the member offsets for various ARM kernel structures.
*
* All of the absolute symbols defined by this module will be present in the
* final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms
* symbol).
*
* INTERNAL
* It is NOT necessary to define the offset for every member of a structure.
* Typically, only those members that are accessed by assembly language routines
* are defined; however, it doesn't hurt to define all fields for the sake of
* completeness.
*/
#include <kernel.h>
#include <kernel_arch_data.h>
#include <kernel_offsets.h>
GEN_OFFSET_SYM(_thread_arch_t, basepri);
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
#if defined(CONFIG_USERSPACE) || defined(CONFIG_FP_SHARING)
GEN_OFFSET_SYM(_thread_arch_t, mode);
#if defined(CONFIG_USERSPACE)
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
#endif
#endif
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
GEN_OFFSET_SYM(_thread_arch_t, preempt_float);
#endif
GEN_OFFSET_SYM(_basic_sf_t, a1);
GEN_OFFSET_SYM(_basic_sf_t, a2);
GEN_OFFSET_SYM(_basic_sf_t, a3);
GEN_OFFSET_SYM(_basic_sf_t, a4);
GEN_OFFSET_SYM(_basic_sf_t, ip);
GEN_OFFSET_SYM(_basic_sf_t, lr);
GEN_OFFSET_SYM(_basic_sf_t, pc);
GEN_OFFSET_SYM(_basic_sf_t, xpsr);
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
GEN_OFFSET_SYM(_esf_t, s);
GEN_OFFSET_SYM(_esf_t, fpscr);
#endif
GEN_ABSOLUTE_SYM(___esf_t_SIZEOF, sizeof(_esf_t));
GEN_OFFSET_SYM(_callee_saved_t, v1);
GEN_OFFSET_SYM(_callee_saved_t, v2);
GEN_OFFSET_SYM(_callee_saved_t, v3);
GEN_OFFSET_SYM(_callee_saved_t, v4);
GEN_OFFSET_SYM(_callee_saved_t, v5);
GEN_OFFSET_SYM(_callee_saved_t, v6);
GEN_OFFSET_SYM(_callee_saved_t, v7);
GEN_OFFSET_SYM(_callee_saved_t, v8);
GEN_OFFSET_SYM(_callee_saved_t, psp);
#if defined(CONFIG_CPU_CORTEX_R)
GEN_OFFSET_SYM(_callee_saved_t, spsr);
GEN_OFFSET_SYM(_callee_saved_t, lr);
#endif
/* size of the entire preempt registers structure */
GEN_ABSOLUTE_SYM(___callee_saved_t_SIZEOF, sizeof(struct _callee_saved));
/*
* size of the struct k_thread structure sans save area for floating
* point registers.
*/
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread) -
sizeof(struct _preempt_float));
#else
GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread));
#endif


@ -0,0 +1,29 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM64 kernel structure member offset definition file
*
* This module is responsible for the generation of the absolute symbols whose
* value represents the member offsets for various ARM kernel structures.
*
* All of the absolute symbols defined by this module will be present in the
* final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms
* symbol).
*
* INTERNAL
* It is NOT necessary to define the offset for every member of a structure.
* Typically, only those members that are accessed by assembly language routines
* are defined; however, it doesn't hurt to define all fields for the sake of
* completeness.
*/
#include <kernel.h>
#include <kernel_arch_data.h>
#include <kernel_offsets.h>
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);


@ -0,0 +1,65 @@
/*
* Copyright (c) 2013-2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Private kernel definitions (ARM)
*
* This file contains private kernel function definitions and various
* other definitions for the ARM Cortex-M processor architecture family.
*
* This file is also included by assembly language files which must #define
* _ASMLANGUAGE before including this header file. Note that kernel
* assembly source files obtain structure offset values via "absolute symbols"
* in the offsets.o module.
*/
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_KERNEL_ARCH_FUNC_H_
#include <kernel_arch_data.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
extern void z_arm_fault_init(void);
extern void z_arm_cpu_idle_init(void);
#ifdef CONFIG_ARM_MPU
extern void z_arm_configure_static_mpu_regions(void);
extern void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread);
#endif /* CONFIG_ARM_MPU */
static ALWAYS_INLINE void arch_kernel_init(void)
{
z_arm_interrupt_stack_setup();
z_arm_exc_setup();
z_arm_fault_init();
z_arm_cpu_idle_init();
z_arm_clear_faults();
}
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
thread->arch.swap_return_value = value;
}
extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3,
u32_t stack_end,
u32_t stack_start);
extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_KERNEL_ARCH_FUNC_H_ */


@ -0,0 +1,41 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_OFFSETS_SHORT_ARCH_H_
#include <offsets.h>
/* kernel */
/* nothing for now */
/* end - kernel */
/* threads */
#define _thread_offset_to_basepri \
(___thread_t_arch_OFFSET + ___thread_arch_t_basepri_OFFSET)
#define _thread_offset_to_swap_return_value \
(___thread_t_arch_OFFSET + ___thread_arch_t_swap_return_value_OFFSET)
#define _thread_offset_to_preempt_float \
(___thread_t_arch_OFFSET + ___thread_arch_t_preempt_float_OFFSET)
#if defined(CONFIG_USERSPACE) || defined(CONFIG_FP_SHARING)
#define _thread_offset_to_mode \
(___thread_t_arch_OFFSET + ___thread_arch_t_mode_OFFSET)
#ifdef CONFIG_USERSPACE
#define _thread_offset_to_priv_stack_start \
(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
#endif
#endif
/* end - threads */
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_OFFSETS_SHORT_ARCH_H_ */


@ -0,0 +1,49 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Exception/interrupt context helpers for Cortex-A CPUs
*
* Exception/interrupt context helpers.
*/
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_EXC_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_EXC_H_
#include <arch/cpu.h>
#ifdef _ASMLANGUAGE
/* nothing */
#else
#include <irq_offload.h>
#ifdef __cplusplus
extern "C" {
#endif
#if defined(CONFIG_IRQ_OFFLOAD)
extern void z_arm64_offload(void);
#endif
static ALWAYS_INLINE bool arch_is_in_isr(void)
{
return _kernel.nested != 0U;
}
extern void z_arm64_call_svc(void);
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_EXC_H_ */
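As a usage note (not part of this commit), the public k_is_in_isr() ends up in this arch_is_in_isr() check, driven by the _kernel.nested counter that _isr_wrapper() and the SVC offload path maintain:

#include <kernel.h>
#include <sys/printk.h>

/* Hypothetical helper that picks a code path based on execution context */
void report_context(void)
{
	if (k_is_in_isr()) {
		printk("running in interrupt context\n");
	} else {
		printk("running in thread context\n");
	}
}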


@ -0,0 +1,49 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Private kernel definitions (ARM64)
*
* This file contains private kernel function definitions and various
* other definitions for the ARM Cortex-A processor architecture family.
*
* This file is also included by assembly language files which must #define
* _ASMLANGUAGE before including this header file. Note that kernel
* assembly source files obtain structure offset values via "absolute symbols"
* in the offsets.o module.
*/
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_KERNEL_ARCH_FUNC_H_
#include <kernel_arch_data.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
static ALWAYS_INLINE void arch_kernel_init(void)
{
}
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
thread->arch.swap_return_value = value;
}
extern void z_arm64_fatal_error(unsigned int reason);
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_KERNEL_ARCH_FUNC_H_ */


@ -0,0 +1,15 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_OFFSETS_SHORT_ARCH_H_
#include <offsets.h>
#define _thread_offset_to_swap_return_value \
(___thread_t_arch_OFFSET + ___thread_arch_t_swap_return_value_OFFSET)
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_OFFSETS_SHORT_ARCH_H_ */


@ -29,12 +29,14 @@
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
#ifdef CONFIG_CPU_CORTEX_M
#if defined(CONFIG_CPU_CORTEX_M)
#include <aarch32/cortex_m/stack.h>
#include <aarch32/cortex_m/exc.h>
#elif defined(CONFIG_CPU_CORTEX_R)
#include <aarch32/cortex_r/stack.h>
#include <aarch32/cortex_r/exc.h>
#elif defined(CONFIG_CPU_CORTEX_A)
#include <aarch64/exc.h>
#endif
#ifndef _ASMLANGUAGE


@ -1,65 +1,16 @@
/*
* Copyright (c) 2013-2016 Wind River Systems, Inc.
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Private kernel definitions (ARM)
*
* This file contains private kernel function definitions and various
* other definitions for the ARM Cortex-M processor architecture family.
*
* This file is also included by assembly language files which must #define
* _ASMLANGUAGE before including this header file. Note that kernel
* assembly source files obtain structure offset values via "absolute symbols"
* in the offsets.o module.
*/
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_KERNEL_ARCH_FUNC_H_
#include <kernel_arch_data.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
extern void z_arm_fault_init(void);
extern void z_arm_cpu_idle_init(void);
#ifdef CONFIG_ARM_MPU
extern void z_arm_configure_static_mpu_regions(void);
extern void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread);
#endif /* CONFIG_ARM_MPU */
static ALWAYS_INLINE void arch_kernel_init(void)
{
z_arm_interrupt_stack_setup();
z_arm_exc_setup();
z_arm_fault_init();
z_arm_cpu_idle_init();
z_arm_clear_faults();
}
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
thread->arch.swap_return_value = value;
}
extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3,
u32_t stack_end,
u32_t stack_start);
extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#if defined(CONFIG_ARM64)
#include <aarch64/kernel_arch_func.h>
#else
#include <aarch32/kernel_arch_func.h>
#endif
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_KERNEL_ARCH_FUNC_H_ */


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -7,35 +7,10 @@
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_OFFSETS_SHORT_ARCH_H_
#include <offsets.h>
/* kernel */
/* nothing for now */
/* end - kernel */
/* threads */
#define _thread_offset_to_basepri \
(___thread_t_arch_OFFSET + ___thread_arch_t_basepri_OFFSET)
#define _thread_offset_to_swap_return_value \
(___thread_t_arch_OFFSET + ___thread_arch_t_swap_return_value_OFFSET)
#define _thread_offset_to_preempt_float \
(___thread_t_arch_OFFSET + ___thread_arch_t_preempt_float_OFFSET)
#if defined(CONFIG_USERSPACE) || defined(CONFIG_FP_SHARING)
#define _thread_offset_to_mode \
(___thread_t_arch_OFFSET + ___thread_arch_t_mode_OFFSET)
#ifdef CONFIG_USERSPACE
#define _thread_offset_to_priv_stack_start \
(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
#if defined(CONFIG_ARM64)
#include <aarch64/offsets_short_arch.h>
#else
#include <aarch32/offsets_short_arch.h>
#endif
#endif
/* end - threads */
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_OFFSETS_SHORT_ARCH_H_ */


@ -0,0 +1,51 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM64 specific kernel interface header
*
* This header contains the ARM64 specific kernel interface. It is
* included by the kernel interface architecture-abstraction header
* (include/arm/aarch64/cpu.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_ARCH_H_
/* Add include for DTS generated information */
#include <devicetree.h>
#include <arch/arm/aarch64/thread.h>
#include <arch/arm/aarch64/exc.h>
#include <arch/arm/aarch64/irq.h>
#include <arch/arm/aarch64/misc.h>
#include <arch/arm/aarch64/asm_inline.h>
#include <arch/arm/aarch64/cpu.h>
#include <arch/arm/aarch64/sys_io.h>
#include <arch/arm/aarch64/timer.h>
#include <arch/common/addr_types.h>
#include <arch/common/ffs.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Declare the STACK_ALIGN_SIZE
*
* Denotes the required alignment of the stack pointer on public API
* boundaries
*
*/
#define STACK_ALIGN 16
#define STACK_ALIGN_SIZE STACK_ALIGN
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_ARCH_H_ */


@ -0,0 +1,21 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_ASM_INLINE_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_ASM_INLINE_H_
/*
* The file must not be included directly
* Include kernel.h instead
*/
#if defined(__GNUC__)
#include <arch/arm/aarch64/asm_inline_gcc.h>
#else
#include <arch/arm/asm_inline_other.h>
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_ASM_INLINE_H_ */


@ -0,0 +1,65 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/* Either public functions or macros or invoked by public functions */
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_ASM_INLINE_GCC_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_ASM_INLINE_GCC_H_
/*
* The file must not be included directly
* Include arch/cpu.h instead
*/
#ifndef _ASMLANGUAGE
#include <arch/arm/aarch64/cpu.h>
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
unsigned int key;
/*
* Return the whole DAIF register as key but use DAIFSET to disable
* IRQs.
*/
__asm__ volatile("mrs %0, daif;"
"msr daifset, %1;"
"isb"
: "=r" (key)
: "i" (DAIFSET_IRQ)
: "memory", "cc");
return key;
}
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
__asm__ volatile("msr daif, %0;"
"isb"
:
: "r" (key)
: "memory", "cc");
}
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
/* We only check the (I)RQ bit on the DAIF register */
return (key & DAIF_IRQ) == 0;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_ASM_INLINE_GCC_H_ */
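A minimal usage sketch (not part of this commit) of the primitives above, reached through the public irq_lock()/irq_unlock() wrappers:

#include <kernel.h>

void critical_section_example(void)
{
	unsigned int key = irq_lock();	/* DAIF saved, IRQs masked via daifset */

	/* ... touch data shared with ISRs ... */

	irq_unlock(key);		/* full DAIF state restored */
}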


@ -0,0 +1,59 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_CPU_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_CPU_H_
#include <sys/util.h>
#define DAIFSET_FIQ BIT(0)
#define DAIFSET_IRQ BIT(1)
#define DAIFSET_ABT BIT(2)
#define DAIFSET_DBG BIT(3)
#define DAIF_FIQ BIT(6)
#define DAIF_IRQ BIT(7)
#define DAIF_ABT BIT(8)
#define DAIF_DBG BIT(9)
#define DAIF_MASK (0xf << 6)
#define SPSR_MODE_EL1H (0x5)
#define SCTLR_M_BIT BIT(0)
#define SCTLR_A_BIT BIT(1)
#define SCTLR_C_BIT BIT(2)
#define SCTLR_SA_BIT BIT(3)
#define SCTLR_I_BIT BIT(12)
#define CPACR_EL1_FPEN_NOTRAP (0x3 << 20)
#define SCR_EL3_NS BIT(0)
#define SCR_EL3_IRQ BIT(1)
#define SCR_EL3_FIQ BIT(2)
#define SCR_EL3_EA BIT(3)
#define SCR_EL3_RW BIT(10)
#define HCR_EL2_FMO BIT(3)
#define HCR_EL2_IMO BIT(4)
#define HCR_EL2_AMO BIT(5)
#define SPSR_EL3_h BIT(0)
#define SPSR_EL3_TO_EL1 (0x2 << 1)
#define __ISB() __asm__ volatile ("isb sy" : : : "memory")
#define __DMB() __asm__ volatile ("dmb sy" : : : "memory")
#define MODE_EL_SHIFT (0x2)
#define MODE_EL_MASK (0x3)
#define MODE_EL3 (0x3)
#define MODE_EL2 (0x2)
#define MODE_EL1 (0x1)
#define MODE_EL0 (0x0)
#define GET_EL(_mode) (((_mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_CPU_H_ */
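A short sketch (not part of this commit) of how the MODE_ELx and GET_EL() helpers are meant to be used, mirroring the pattern in z_arm64_fatal_error(); it assumes the aarch64 definitions above are reachable through <arch/cpu.h>:

#include <zephyr/types.h>
#include <arch/cpu.h>

static inline unsigned int current_el_sketch(void)
{
	u64_t el;

	__asm__ volatile("mrs %0, CurrentEL" : "=r" (el));
	return GET_EL(el);	/* MODE_EL0 .. MODE_EL3 */
}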


@ -0,0 +1,44 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Cortex-A public exception handling
*
* ARM-specific kernel exception handling interface. Included by arm64/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_EXC_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_EXC_H_
/* for assembler, only works with constants */
#ifdef _ASMLANGUAGE
#else
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct __esf {
struct __basic_sf {
u64_t x0;
u64_t x1;
u64_t x2;
u64_t x3;
} basic;
};
typedef struct __esf z_arch_esf_t;
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_EXC_H_ */


@ -0,0 +1,80 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Cortex-A public interrupt handling
*
* ARM64-specific kernel interrupt handling interface.
* Included by arm/aarch64/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_IRQ_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_IRQ_H_
#include <irq.h>
#include <sw_isr_table.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _ASMLANGUAGE
GTEXT(arch_irq_enable)
GTEXT(arch_irq_disable)
GTEXT(arch_irq_is_enabled)
#else
extern void arch_irq_enable(unsigned int irq);
extern void arch_irq_disable(unsigned int irq);
extern int arch_irq_is_enabled(unsigned int irq);
/* internal routine documented in C file, needed by IRQ_CONNECT() macro */
extern void z_arm64_irq_priority_set(unsigned int irq, unsigned int prio,
u32_t flags);
/* All arguments must be computable by the compiler at build time.
*
* Z_ISR_DECLARE will populate the .intList section with the interrupt's
* parameters, which will then be used by gen_irq_tables.py to create
* the vector table and the software ISR table. This is all done at
* build-time.
*
* We additionally set the priority in the interrupt controller at
* runtime.
*/
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
z_arm64_irq_priority_set(irq_p, priority_p, flags_p); \
irq_p; \
})
#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
({ \
Z_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \
z_arm64_irq_priority_set(irq_p, priority_p, flags_p); \
irq_p; \
})
/* Spurious interrupt handler. Throws an error if called */
extern void z_irq_spurious(void *unused);
#ifdef CONFIG_GEN_SW_ISR_TABLE
/* Architecture-specific common entry point for interrupts from the vector
* table. Most likely implemented in assembly. Looks up the correct handler
* and parameter from the _sw_isr_table and executes it.
*/
extern void _isr_wrapper(void);
#endif
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_IRQ_H_ */
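A hedged usage sketch (not part of this commit): a hypothetical device interrupt wired up at build time with the public IRQ_CONNECT(), which expands to ARCH_IRQ_CONNECT() above, then enabled at runtime.

#include <kernel.h>
#include <irq.h>
#include <sys/printk.h>

#define MY_DEV_IRQ	25	/* hypothetical interrupt line */
#define MY_DEV_PRIO	2	/* hypothetical priority       */

static void my_dev_isr(void *arg)
{
	ARG_UNUSED(arg);
	printk("my_dev interrupt\n");
}

void my_dev_irq_setup(void)
{
	IRQ_CONNECT(MY_DEV_IRQ, MY_DEV_PRIO, my_dev_isr, NULL, 0);
	irq_enable(MY_DEV_IRQ);
}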


@ -0,0 +1,41 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Cortex-A public kernel miscellaneous
*
* ARM64-specific kernel miscellaneous interface. Included by
* arm/aarch64/arch.h.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_MISC_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_MISC_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
extern u32_t z_timer_cycle_get_32(void);
static inline u32_t arch_k_cycle_get_32(void)
{
return z_timer_cycle_get_32();
}
static ALWAYS_INLINE void arch_nop(void)
{
__asm__ volatile("nop");
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_MISC_H_ */


@ -0,0 +1,439 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Linker command/script file
*
* Linker script for the Cortex-A platforms.
*/
#include <autoconf.h>
#include <linker/sections.h>
#include <devicetree.h>
#include <linker/linker-defs.h>
#include <linker/linker-tool.h>
/* physical address of RAM */
#ifdef CONFIG_XIP
#define ROMABLE_REGION FLASH
#define RAMABLE_REGION SRAM
#else
#define ROMABLE_REGION SRAM
#define RAMABLE_REGION SRAM
#endif
#if defined(CONFIG_XIP)
#define _DATA_IN_ROM __data_rom_start
#else
#define _DATA_IN_ROM
#endif
#if !defined(CONFIG_XIP) && (CONFIG_FLASH_SIZE == 0)
#define ROM_ADDR RAM_ADDR
#else
#define ROM_ADDR (CONFIG_FLASH_BASE_ADDRESS + CONFIG_FLASH_LOAD_OFFSET)
#endif
#if CONFIG_FLASH_LOAD_SIZE > 0
#define ROM_SIZE CONFIG_FLASH_LOAD_SIZE
#else
#define ROM_SIZE (CONFIG_FLASH_SIZE*1K - CONFIG_FLASH_LOAD_OFFSET)
#endif
#if defined(CONFIG_XIP)
#if defined(CONFIG_IS_BOOTLOADER)
#define RAM_SIZE (CONFIG_BOOTLOADER_SRAM_SIZE * 1K)
#define RAM_ADDR (CONFIG_SRAM_BASE_ADDRESS + \
(CONFIG_SRAM_SIZE * 1K - RAM_SIZE))
#else
#define RAM_SIZE (CONFIG_SRAM_SIZE * 1K)
#define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS
#endif
#else
#define RAM_SIZE (CONFIG_SRAM_SIZE * 1K - CONFIG_BOOTLOADER_SRAM_SIZE * 1K)
#define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS
#endif
/* Set alignment to CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
* to make linker section alignment comply with MPU granularity.
*/
#if defined(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
_region_min_align = CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE;
#else
/* If building without MPU support, use default 4-byte alignment. */
_region_min_align = 4;
#endif
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define MPU_ALIGN(region_size) \
. = ALIGN(_region_min_align); \
. = ALIGN( 1 << LOG2CEIL(region_size))
#else
#define MPU_ALIGN(region_size) \
. = ALIGN(_region_min_align)
#endif
MEMORY
{
FLASH (rx) : ORIGIN = ROM_ADDR, LENGTH = ROM_SIZE
#ifdef DT_CCM_BASE_ADDRESS
CCM (rw) : ORIGIN = DT_CCM_BASE_ADDRESS, LENGTH = DT_CCM_SIZE * 1K
#endif
SRAM (wx) : ORIGIN = RAM_ADDR, LENGTH = RAM_SIZE
/* Used by and documented in include/linker/intlist.ld */
IDT_LIST (wx) : ORIGIN = (RAM_ADDR + RAM_SIZE), LENGTH = 2K
}
ENTRY(CONFIG_KERNEL_ENTRY)
SECTIONS
{
#include <linker/rel-sections.ld>
/*
* .plt and .iplt are here according to 'arm-zephyr-elf-ld --verbose',
* before text section.
*/
SECTION_PROLOGUE(.plt,,)
{
*(.plt)
}
SECTION_PROLOGUE(.iplt,,)
{
*(.iplt)
}
GROUP_START(ROMABLE_REGION)
_image_rom_start = ROM_ADDR;
SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
{
. = CONFIG_TEXT_SECTION_OFFSET;
#if defined(CONFIG_SW_VECTOR_RELAY)
KEEP(*(.vector_relay_table))
KEEP(*(".vector_relay_table.*"))
KEEP(*(.vector_relay_handler))
KEEP(*(".vector_relay_handler.*"))
#endif
_vector_start = .;
KEEP(*(.exc_vector_table))
KEEP(*(".exc_vector_table.*"))
KEEP(*(IRQ_VECTOR_TABLE))
KEEP(*(.vectors))
KEEP(*(.openocd_dbg))
KEEP(*(".openocd_dbg.*"))
_vector_end = .;
} GROUP_LINK_IN(ROMABLE_REGION)
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_relocate.ld>
#endif /* CONFIG_CODE_DATA_RELOCATION */
SECTION_PROLOGUE(_TEXT_SECTION_NAME_2,,)
{
_image_text_start = .;
*(.text)
*(".text.*")
*(.gnu.linkonce.t.*)
/*
* These are here according to 'arm-zephyr-elf-ld --verbose',
* after .gnu.linkonce.t.*
*/
*(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
#include <linker/priv_stacks-text.ld>
#include <linker/kobject-text.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
_image_text_end = .;
#if defined (CONFIG_CPLUSPLUS)
SECTION_PROLOGUE(.ARM.extab,,)
{
/*
* .ARM.extab section containing exception unwinding information.
*/
*(.ARM.extab* .gnu.linkonce.armextab.*)
} GROUP_LINK_IN(ROMABLE_REGION)
#endif
SECTION_PROLOGUE(.ARM.exidx,,)
{
/*
* This section, related to stack and exception unwinding, is placed
* explicitly to prevent it from being shared between multiple regions.
* It must be defined for gcc to support 64-bit math and avoid
* section overlap.
*/
__exidx_start = .;
#if defined (__GCC_LINKER_CMD__)
*(.ARM.exidx* gnu.linkonce.armexidx.*)
#endif
__exidx_end = .;
} GROUP_LINK_IN(ROMABLE_REGION)
_image_rodata_start = .;
#include <linker/common-rom.ld>
SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
{
*(.rodata)
*(".rodata.*")
*(.gnu.linkonce.r.*)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rodata.ld>
#ifdef CONFIG_SOC_RODATA_LD
#include <soc-rodata.ld>
#endif
#ifdef CONFIG_CUSTOM_RODATA_LD
/* Located in project source directory */
#include <custom-rodata.ld>
#endif
#include <linker/priv_stacks-rom.ld>
#include <linker/kobject-rom.ld>
/*
* For XIP images, in order to avoid the situation when __data_rom_start
* is 32-bit aligned, but the actual data is placed right after rodata
* section, which may not end exactly at 32-bit border, pad rodata
* section, so __data_rom_start points at data and it is 32-bit aligned.
*
* On non-XIP images this may enlarge image size up to 3 bytes. This
* generally is not an issue, since modern ROM and FLASH memory is
* usually 4k aligned.
*/
. = ALIGN(4);
} GROUP_LINK_IN(ROMABLE_REGION)
#include <linker/cplusplus-rom.ld>
_image_rodata_end = .;
MPU_ALIGN(_image_rodata_end -_image_rom_start);
_image_rom_end = .;
GROUP_END(ROMABLE_REGION)
/*
* These are here according to 'arm-zephyr-elf-ld --verbose',
* before data section.
*/
SECTION_PROLOGUE(.got,,)
{
*(.got.plt)
*(.igot.plt)
*(.got)
*(.igot)
}
GROUP_START(RAMABLE_REGION)
. = RAM_ADDR;
/* Align the start of image SRAM with the
* minimum granularity required by MPU.
*/
. = ALIGN(_region_min_align);
_image_ram_start = .;
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-ram-sections.ld>
#if defined(CONFIG_USERSPACE)
#define APP_SHARED_ALIGN . = ALIGN(_region_min_align);
#define SMEM_PARTITION_ALIGN MPU_ALIGN
#include <app_smem.ld>
_app_smem_size = _app_smem_end - _app_smem_start;
_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
#endif /* CONFIG_USERSPACE */
SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),)
{
/*
 * For performance, the BSS section is assumed to be 4-byte aligned and
 * a multiple of 4 bytes in size.
*/
. = ALIGN(4);
__bss_start = .;
__kernel_ram_start = .;
*(.bss)
*(".bss.*")
*(COMMON)
*(".kernel_bss.*")
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_bss_relocate.ld>
#endif
/*
* As memory is cleared in words only, it is simpler to ensure the BSS
 * section ends on a 4-byte boundary. This wastes a maximum of 3 bytes.
*/
__bss_end = ALIGN(4);
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
SECTION_PROLOGUE(_NOINIT_SECTION_NAME,(NOLOAD),)
{
/*
* This section is used for non-initialized objects that
* will not be cleared during the boot process.
*/
*(.noinit)
*(".noinit.*")
*(".kernel_noinit.*")
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() CMake function.
*/
#include <snippets-noinit.ld>
#ifdef CONFIG_SOC_NOINIT_LD
#include <soc-noinit.ld>
#endif
} GROUP_LINK_IN(RAMABLE_REGION)
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
{
__data_ram_start = .;
*(.data)
*(".data.*")
*(".kernel.*")
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() CMake function.
*/
#include <snippets-rwdata.ld>
#ifdef CONFIG_SOC_RWDATA_LD
#include <soc-rwdata.ld>
#endif
#ifdef CONFIG_CUSTOM_RWDATA_LD
/* Located in project source directory */
#include <custom-rwdata.ld>
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_data_relocate.ld>
#endif
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__data_rom_start = LOADADDR(_DATA_SECTION_NAME);
#include <linker/common-ram.ld>
#include <linker/priv_stacks.ld>
#include <linker/kobject.ld>
#include <linker/priv_stacks-noinit.ld>
#include <linker/cplusplus-ram.ld>
__data_ram_end = .;
/* Define linker symbols */
_image_ram_end = .;
_end = .; /* end of image */
__kernel_ram_end = RAM_ADDR + RAM_SIZE;
__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;
GROUP_END(RAMABLE_REGION)
#ifdef CONFIG_CUSTOM_SECTIONS_LD
/* Located in project source directory */
#include <custom-sections.ld>
#endif
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() CMake function.
*/
#include <snippets-sections.ld>
#include <linker/debug-sections.ld>
SECTION_PROLOGUE(.ARM.attributes, 0,)
{
KEEP(*(.ARM.attributes))
KEEP(*(.gnu.attributes))
}
/DISCARD/ : { *(.note.GNU-stack) }
#if defined(CONFIG_ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS)
#if CONFIG_ARM_NSC_REGION_BASE_ADDRESS != 0
#define NSC_ALIGN . = ABSOLUTE(CONFIG_ARM_NSC_REGION_BASE_ADDRESS)
#elif defined(CONFIG_CPU_HAS_NRF_IDAU)
/* The nRF9160 needs the NSC region to be at the end of a 32 kB region. */
#define NSC_ALIGN . = ALIGN(0x8000) - (1 << LOG2CEIL(__sg_size))
#else
#define NSC_ALIGN . = ALIGN(4)
#endif
#ifdef CONFIG_CPU_HAS_NRF_IDAU
#define NSC_ALIGN_END . = ALIGN(0x8000)
#else
#define NSC_ALIGN_END . = ALIGN(4)
#endif
SECTION_PROLOGUE(.gnu.sgstubs,,)
{
NSC_ALIGN;
__sg_start = .;
/* No input section necessary, since the Secure Entry Veneers are
automatically placed after the .gnu.sgstubs output section. */
} GROUP_LINK_IN(ROMABLE_REGION)
__sg_end = .;
__sg_size = __sg_end - __sg_start;
NSC_ALIGN_END;
__nsc_size = . - __sg_start;
#ifdef CONFIG_CPU_HAS_NRF_IDAU
ASSERT(1 << LOG2CEIL(0x8000 - (__sg_start % 0x8000))
== (0x8000 - (__sg_start % 0x8000))
&& (0x8000 - (__sg_start % 0x8000)) >= 32
&& (0x8000 - (__sg_start % 0x8000)) <= 4096,
"The Non-Secure Callable region size must be a power of 2 \
between 32 and 4096 bytes.")
#endif
#endif /* CONFIG_ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS */
/* Must be last in romable region */
SECTION_PROLOGUE(.last_section,(NOLOAD),)
{
} GROUP_LINK_IN(ROMABLE_REGION)
/* To provide the image size as a const expression,
* calculate this value here. */
_flash_used = LOADADDR(.last_section) - _image_rom_start;
}
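The symbols defined by this script (for example _image_rom_start, _image_rom_end, __bss_start and __bss_end) are consumed from C as address-only objects. A minimal, illustrative sketch of that pattern (not part of this commit) might look like:

	#include <stddef.h>

	/* Linker-defined symbols have no storage of their own; only their
	 * addresses are meaningful, so declare them as incomplete arrays.
	 */
	extern char _image_rom_start[];
	extern char _image_rom_end[];
	extern char __bss_start[];
	extern char __bss_end[];

	static size_t rom_image_size(void)
	{
		return (size_t)(_image_rom_end - _image_rom_start);
	}

	static size_t bss_size(void)
	{
		return (size_t)(__bss_end - __bss_start);
	}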

View file

@ -0,0 +1,162 @@
/*
* Copyright (c) 2015, Wind River Systems, Inc.
* Copyright (c) 2017, Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
/* "Arch" bit manipulation functions in non-arch-specific C code (uses some
* gcc builtins)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_SYS_IO_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_SYS_IO_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <sys/sys_io.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Memory mapped registers I/O functions */
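/*
 * Each read below is followed by a data memory barrier (__DMB()) and each
 * write is preceded by one, ordering device register accesses relative to
 * surrounding memory accesses.
 */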
static ALWAYS_INLINE u8_t sys_read8(mem_addr_t addr)
{
u8_t val = *(volatile u8_t *)addr;
__DMB();
return val;
}
static ALWAYS_INLINE void sys_write8(u8_t data, mem_addr_t addr)
{
__DMB();
*(volatile u8_t *)addr = data;
}
static ALWAYS_INLINE u16_t sys_read16(mem_addr_t addr)
{
u16_t val = *(volatile u16_t *)addr;
__DMB();
return val;
}
static ALWAYS_INLINE void sys_write16(u16_t data, mem_addr_t addr)
{
__DMB();
*(volatile u16_t *)addr = data;
}
static ALWAYS_INLINE u32_t sys_read32(mem_addr_t addr)
{
u32_t val = *(volatile u32_t *)addr;
__DMB();
return val;
}
static ALWAYS_INLINE void sys_write32(u32_t data, mem_addr_t addr)
{
__DMB();
*(volatile u32_t *)addr = data;
}
/* Memory bit manipulation functions */
static ALWAYS_INLINE void sys_set_bit(mem_addr_t addr, unsigned int bit)
{
u32_t temp = *(volatile u32_t *)addr;
*(volatile u32_t *)addr = temp | (1U << bit);
}
static ALWAYS_INLINE void sys_clear_bit(mem_addr_t addr, unsigned int bit)
{
u32_t temp = *(volatile u32_t *)addr;
*(volatile u32_t *)addr = temp & ~(1U << bit);
}
static ALWAYS_INLINE int sys_test_bit(mem_addr_t addr, unsigned int bit)
{
u32_t temp = *(volatile u32_t *)addr;
return temp & (1U << bit);
}
static ALWAYS_INLINE
void sys_bitfield_set_bit(mem_addr_t addr, unsigned int bit)
{
/* Doing memory offsets in terms of 32-bit values to prevent
* alignment issues
*/
sys_set_bit(addr + ((bit >> 5) << 2), bit & 0x1F);
}
static ALWAYS_INLINE
void sys_bitfield_clear_bit(mem_addr_t addr, unsigned int bit)
{
sys_clear_bit(addr + ((bit >> 5) << 2), bit & 0x1F);
}
static ALWAYS_INLINE
int sys_bitfield_test_bit(mem_addr_t addr, unsigned int bit)
{
return sys_test_bit(addr + ((bit >> 5) << 2), bit & 0x1F);
}
static ALWAYS_INLINE
int sys_test_and_set_bit(mem_addr_t addr, unsigned int bit)
{
int ret;
ret = sys_test_bit(addr, bit);
sys_set_bit(addr, bit);
return ret;
}
static ALWAYS_INLINE
int sys_test_and_clear_bit(mem_addr_t addr, unsigned int bit)
{
int ret;
ret = sys_test_bit(addr, bit);
sys_clear_bit(addr, bit);
return ret;
}
static ALWAYS_INLINE
int sys_bitfield_test_and_set_bit(mem_addr_t addr, unsigned int bit)
{
int ret;
ret = sys_bitfield_test_bit(addr, bit);
sys_bitfield_set_bit(addr, bit);
return ret;
}
static ALWAYS_INLINE
int sys_bitfield_test_and_clear_bit(mem_addr_t addr, unsigned int bit)
{
int ret;
ret = sys_bitfield_test_bit(addr, bit);
sys_bitfield_clear_bit(addr, bit);
return ret;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_SYS_IO_H_ */
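As a rough illustration of how these accessors compose, the sketch below polls a made-up device; DEV_BASE, the register offsets and the bit positions are assumptions for the example only, not defined by this commit:

	#define DEV_BASE   0x40001000UL          /* hypothetical MMIO base address */
	#define DEV_CTRL   (DEV_BASE + 0x00)     /* hypothetical control register */
	#define DEV_STATUS (DEV_BASE + 0x04)     /* hypothetical status register */

	static void dev_start(void)
	{
		/* Set the (hypothetical) enable bit without touching other bits */
		sys_set_bit(DEV_CTRL, 0);

		/* Spin until the (hypothetical) ready bit reads back as set */
		while (sys_test_bit(DEV_STATUS, 1) == 0) {
		}

		/* Overwrite the whole control register; the barrier in
		 * sys_write32() orders the store after preceding accesses.
		 */
		sys_write32(0x1, DEV_CTRL);
	}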

View file

@ -0,0 +1,22 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
 * @brief ARM64 specific syscall header
 *
 * This header contains the ARM64 specific syscall interface. It is
 * included by the syscall interface architecture-abstraction header
 * (include/arch/syscall.h).
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_SYSCALL_H_
#define _SVC_CALL_CONTEXT_SWITCH 0
#define _SVC_CALL_IRQ_OFFLOAD 1
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_SYSCALL_H_ */
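These service numbers end up in the immediate field of an SVC instruction. A minimal sketch of the calling side (the function name is made up; the actual trap and handler code live in the arch core sources added by this commit):

	static inline void svc_request_irq_offload(void)
	{
		/* Trap into EL1; the immediate selects the requested service */
		__asm__ volatile("svc %[id]"
				 :
				 : [id] "i" (_SVC_CALL_IRQ_OFFLOAD)
				 : "memory");
	}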

View file

@ -0,0 +1,53 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Per-arch thread definition
*
* This file contains definitions for
*
* struct _thread_arch
* struct _callee_saved
*
* necessary to instantiate instances of struct k_thread.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_THREAD_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
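/*
 * x19-x28 are the callee-saved registers defined by the AArch64 procedure
 * call standard (AAPCS64); x29 and x30 are the frame pointer and link
 * register. The saved SPSR, ELR and SP capture the exception-return state
 * needed to resume the thread.
 */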
struct _callee_saved {
u64_t x19;
u64_t x20;
u64_t x21;
u64_t x22;
u64_t x23;
u64_t x24;
u64_t x25;
u64_t x26;
u64_t x27;
u64_t x28;
u64_t x29; /* FP */
u64_t x30; /* LR */
u64_t spsr;
u64_t elr;
u64_t sp;
};
typedef struct _callee_saved _callee_saved_t;
struct _thread_arch {
u32_t swap_return_value;
};
typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_THREAD_H_ */

View file

@ -0,0 +1,61 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_TIMER_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_TIMER_H_
#ifndef _ASMLANGUAGE
#include <drivers/timer/arm_arch_timer.h>
#include <zephyr/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#define ARM_ARCH_TIMER_IRQ ((ARM_TIMER_VIRTUAL_IRQ + 1) << 8)
#define CNTV_CTL_ENABLE ((1) << 0)
static ALWAYS_INLINE void arm_arch_timer_set_compare(u64_t val)
{
__asm__ volatile("msr cntv_cval_el0, %0\n\t"
: : "r" (val) : "memory");
}
static ALWAYS_INLINE void arm_arch_timer_enable(unsigned char enable)
{
u32_t cntv_ctl;
__asm__ volatile("mrs %0, cntv_ctl_el0\n\t"
: "=r" (cntv_ctl) : : "memory");
if (enable) {
	cntv_ctl |= CNTV_CTL_ENABLE;
} else {
	cntv_ctl &= ~CNTV_CTL_ENABLE;
}
__asm__ volatile("msr cntv_ctl_el0, %0\n\t"
: : "r" (cntv_ctl) : "memory");
}
static ALWAYS_INLINE u64_t arm_arch_timer_count(void)
{
u64_t cntvct_el0;
__asm__ volatile("mrs %0, cntvct_el0\n\t"
: "=r" (cntvct_el0) : : "memory");
return cntvct_el0;
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_TIMER_H_ */
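Taken together, these helpers are enough to program the next timer event. The sketch below is illustrative only; CYC_PER_TICK and the function name are assumptions, and the real driver wiring (IRQ connection, tick announcement) is not shown:

	#define CYC_PER_TICK 10000U /* assumed timer cycles per kernel tick */

	static void timer_program_next_tick(void)
	{
		/* Read the current virtual counter value... */
		u64_t now = arm_arch_timer_count();

		/* ...program the compare register one tick into the future... */
		arm_arch_timer_set_compare(now + CYC_PER_TICK);

		/* ...and make sure the timer is enabled so it will fire. */
		arm_arch_timer_enable(1);
	}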

View file

@ -13,6 +13,8 @@
#if defined(CONFIG_X86)
#include <arch/x86/arch.h>
#elif defined(CONFIG_ARM64)
#include <arch/arm/aarch64/arch.h>
#elif defined(CONFIG_ARM)
#include <arch/arm/aarch32/arch.h>
#elif defined(CONFIG_ARC)