arch: arm64: Reword comments

Fix, reword and rework comments.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
commit 78b5e5563d
Carlo Caione <ccaione@baylibre.com>, 2020-09-18 09:04:43 +02:00, committed by Ioannis Glaropoulos
6 changed files with 50 additions and 95 deletions


@@ -4,9 +4,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM64 Cortex-A power management
/*
* ARM64 Cortex-A power management
*/
#include <toolchain.h>


@@ -4,9 +4,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM64 Cortex-A ISRs wrapper
/*
* ARM64 Cortex-A ISRs wrapper
*/
#include <toolchain.h>
@@ -20,15 +19,13 @@ _ASM_FILE_PROLOGUE
GDATA(_sw_isr_table)
/**
* @brief Wrapper around ISRs when inserted in software ISR table
/*
* Wrapper around ISRs when inserted in software ISR table
*
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table
* using the running interrupt number as the index, and invokes the registered
* ISR with its corresponding argument. When returning from the ISR, it
* determines if a context switch needs to happen.
*
* @return N/A
*/
GTEXT(_isr_wrapper)
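For reference, a minimal sketch of the demux step described above, assuming
the interrupt number is already in x0 and that each _sw_isr_table entry is a
16-byte {argument, ISR} pair (the entry layout is an assumption, not something
this diff shows):

ldr x1, =_sw_isr_table		/* base of the software ISR table */
add x1, x1, x0, lsl #4		/* index by IRQ number, 16 bytes per entry */
ldp x0, x2, [x1]		/* x0 = argument, x2 = ISR entry point */
blr x2				/* invoke the registered ISR */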


@@ -9,24 +9,22 @@
#ifdef _ASMLANGUAGE
/**
* @brief Save volatile registers
/*
* Save volatile registers, x30, SPSR_EL1 and ELR_EL1
*
* Save the volatile registers and x30 on the process stack. This is
* needed if the thread is switched out because they can be clobbered by the
* ISR and/or context switch.
*
* @return N/A
*/
.macro z_arm64_enter_exc xreg0, xreg1
/*
* Two things can happen:
* Two things can happen to the remaining registers:
*
* - No context-switch: in this case x19-x28 are callee-saved registers
* so we can be sure they are not going to be clobbered by the ISR.
* - Context-switch: the callee-saved registers are saved by
* z_arm64_pendsv() in the kernel structure.
* z_arm64_context_switch() in the kernel structure.
*/
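/* Push x0-x18 and x30 in pairs; each stp pre-decrements sp by 16 bytes */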
stp x0, x1, [sp, #-16]!
stp x2, x3, [sp, #-16]!
@@ -39,45 +37,22 @@
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
/*
* Store SPSR_ELn and ELR_ELn. This is needed to support nested
* exception handlers
*/
mrs \xreg0, spsr_el1
mrs \xreg1, elr_el1
stp \xreg0, \xreg1, [sp, #-16]!
.endm
/**
* @brief Restore volatile registers and x30
/*
* Restore volatile registers, x30, SPSR_EL1 and ELR_EL1
*
* This is the common exit point for z_arm64_pendsv() and _isr_wrapper(). We
* restore the registers saved on the process stack including X30. The return
* address used by eret (in ELR_ELn) is either restored by z_arm64_pendsv() if
* a context-switch happened or not touched at all by the ISR if there was no
* context-switch.
*
* @return N/A
* This is the common exit point for z_arm64_svc() and _isr_wrapper().
*/
.macro z_arm64_exit_exc xreg0, xreg1
/*
* Restore SPSR_ELn and ELR_ELn. This is needed to support nested
* exception handlers
*/
ldp \xreg0, \xreg1, [sp], #16
msr spsr_el1, \xreg0
msr elr_el1, \xreg1
/*
* In x30 we can have:
*
* - The address of irq_unlock() in swap.c when swapping in a thread
* that was cooperatively swapped out (used by ret in
* z_arm64_call_svc())
* - A previous generic value if the thread that we are swapping in was
* swapped out preemptively by the ISR.
*/
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
@@ -90,10 +65,9 @@
ldp x0, x1, [sp], #16
/*
* In general in the ELR_ELn register we can find:
* In general in the ELR_EL1 register we can find:
*
* - The address of ret in z_arm64_call_svc() in case of arch_swap()
* (see swap.c)
* - The address of ret in z_arm64_call_svc()
* - The address of the next instruction at the time of the IRQ when the
* thread was switched out.
* - The address of z_thread_entry() for new threads (see thread.c).
@@ -101,10 +75,8 @@
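/* eret reloads PSTATE from SPSR_EL1 and resumes at the address in ELR_EL1 */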
eret
.endm
/**
* @brief Increment nested counter
*
* @return N/A
/*
* Increment nested counter
*/
.macro inc_nest_counter xreg0, xreg1
@@ -114,10 +86,8 @@
str \xreg1, [\xreg0, #_kernel_offset_to_nested]
.endm
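The hunk elides most of the macro body; a plausible expansion, assuming only
the _kernel symbol and the _kernel_offset_to_nested offset already visible
above (dec_nest_counter presumably mirrors it with a sub):

ldr \xreg0, =_kernel				/* &_kernel */
ldr \xreg1, [\xreg0, #_kernel_offset_to_nested]	/* current nesting level */
add \xreg1, \xreg1, #1				/* one more nested exception */
str \xreg1, [\xreg0, #_kernel_offset_to_nested]	/* write it back */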
/**
* @brief Decrement nested counter
*
* @return N/A
/*
* Decrement nested counter
*/
.macro dec_nest_counter xreg0, xreg1


@@ -4,9 +4,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Reset handler
/*
* Reset handler
*
* Reset handler that prepares the system for running C code.
*/
@@ -22,15 +21,15 @@ _ASM_FILE_PROLOGUE
/*
* Platform may do platform specific init at EL3.
* The function implementation must preserve callee-saved registers as per
* Aarch64 ABI PCS.
* AArch64 ABI PCS.
*/
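/*
 * Per the AArch64 PCS the callee-saved set is x19-x28 and SP, plus
 * x29/x30 when they are used.
 */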
WTEXT(z_arch_el3_plat_init)
SECTION_FUNC(TEXT,z_arch_el3_plat_init)
ret
/**
*
* @brief Reset vector
/*
* Reset vector
*
* Runs when the system comes out of reset. The processor is at a privileged
* exception level. At this point, neither SP_EL0 nor SP_ELx point to a valid
@@ -38,18 +37,11 @@ ret
*
* When these steps are completed, jump to z_arm64_prep_c(), which will finish
* setting up the system for running C code.
*
* @return N/A
*/
GTEXT(__reset)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset)
/*
* The entry point is located at the __reset symbol, which is fetched by an XIP
* image playing the role of a bootloader, which jumps to it, not through the
* reset vector mechanism. Such bootloaders might want to search for a __start
* symbol instead, so create that alias here.
*/
GTEXT(__start)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
@@ -58,6 +50,11 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
switch_el x1, 3f, 2f, 1f
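/*
 * switch_el is not part of this diff; presumably it reads CurrentEL, whose
 * bits [3:2] encode the exception level (0xc = EL3, 0x8 = EL2, 0x4 = EL1),
 * and branches to the matching local label below.
 */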
3:
/*
* Zephyr entry happened in EL3. Do EL3 specific init before
* dropping to lower EL.
*/
/* Initialize VBAR */
msr vbar_el3, x19
isb
@@ -67,7 +64,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
add x0, x0, #(CONFIG_ISR_STACK_SIZE)
mov sp, x0
/* Initialize sctlr_el3 to reset value */
/* Initialize SCTLR_EL3 to reset value */
mov_imm x1, SCTLR_EL3_RES1
mrs x0, sctlr_el3
orr x0, x0, x1
@@ -75,9 +72,9 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
isb
/*
* Disable access traps to EL3 for CPACR, Trace, FP, ASIMD,
* SVE from lower EL.
*/
mov_imm x0, CPTR_EL3_RES_VAL
mov_imm x1, (CPTR_EL3_TTA | CPTR_EL3_TFP | CPTR_EL3_TCPAC)
bic x0, x0, x1
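/* TTA/TFP/TCPAC cleared: lower ELs keep access to trace, FP/ASIMD and CPACR */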
@@ -88,10 +85,6 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
/* Platform specific configurations needed in EL3 */
bl z_arch_el3_plat_init
/*
* Zephyr entry happened in EL3. Do EL3 specific init before
* dropping to lower EL.
*/
/* Enable access control configuration from lower EL */
mrs x0, actlr_el3
orr x0, x0, #(ACTLR_EL3_L2ACTLR | ACTLR_EL3_L2ECTLR \
@@ -99,7 +92,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
orr x0, x0, #(ACTLR_EL3_CPUACTLR | ACTLR_EL3_CPUECTLR)
msr actlr_el3, x0
/* Initialize sctlr_el1 to reset value */
/* Initialize SCTLR_EL1 to reset value */
mov_imm x0, SCTLR_EL1_RES1
msr sctlr_el1, x0
@@ -137,9 +130,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
mov x0, #(CPACR_EL1_FPEN_NOTRAP)
msr cpacr_el1, x0
/*
* Enable the instruction cache and el1 stack alignment check.
*/
/* Enable the instruction cache and EL1 stack alignment check. */
mov x1, #(SCTLR_I_BIT | SCTLR_SA_BIT)
mrs x0, sctlr_el1
orr x0, x0, x1


@@ -4,12 +4,11 @@
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Thread context switching for ARM64 Cortex-A
/*
* Thread context switching for ARM64 Cortex-A (AArch64)
*
* This module implements the routines necessary for thread context switching
* on ARM64 Cortex-A.
* on ARM64 Cortex-A (AArch64).
*/
#include <toolchain.h>
@@ -23,8 +22,8 @@ _ASM_FILE_PROLOGUE
GDATA(_kernel)
/**
* @brief Routine to handle context switches
/*
* Routine to handle context switches
*
* This function is directly called either by _isr_wrapper() in case of
* preemption, or z_arm64_svc() in case of cooperative switching.
@@ -36,7 +35,7 @@ SECTION_FUNC(TEXT, z_arm64_context_switch)
ldr x2, =_thread_offset_to_callee_saved
add x2, x2, x1
/* Store rest of process context including x30 */
/* Store callee-saved registers */
stp x19, x20, [x2], #16
stp x21, x22, [x2], #16
stp x23, x24, [x2], #16
@@ -85,14 +84,12 @@ SECTION_FUNC(TEXT, z_arm64_context_switch)
/* Return to z_arm64_svc() or _isr_wrapper() */
ret
/**
* @brief Service call handler
/*
* Synchronous exceptions handler
*
* The service call (SVC) is used on the following occasions:
* - Cooperative context switching
* - IRQ offloading
*
* @return N/A
*/
GTEXT(z_arm64_svc)
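The dispatch itself is outside this hunk; a minimal sketch of how such a
handler typically classifies an SVC, where the bad_syndrome label is
hypothetical (EC value 0x15 is "SVC from AArch64" in ARMv8-A):

mrs x0, esr_el1			/* read the exception syndrome register */
lsr x1, x0, #26			/* extract EC, bits [31:26] */
cmp x1, #0x15			/* SVC instruction executed in AArch64? */
bne bad_syndrome		/* hypothetical: not an SVC, handle elsewhere */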
@@ -129,8 +126,9 @@ offload:
context_switch:
/*
* Retrieve x0 and x1 from the stack:
*
* - x0 = new_thread->switch_handle = switch_to thread
* - x1 = x1 = &old_thread->switch_handle = current thread
* - x1 = &old_thread->switch_handle = current thread
*/
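/*
 * The offset below skips the SPSR/ELR pair and the nine x2-x18/x30 pairs
 * pushed by z_arm64_enter_exc, landing on the saved x0/x1 pair (assuming
 * the save order shown earlier in this commit).
 */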
ldp x0, x1, [sp, #(16 * 10)]


@@ -4,9 +4,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Populated vector table
/*
* Populated vector table
*/
#include <toolchain.h>
@@ -58,6 +57,7 @@ _ASM_FILE_PROLOGUE
/* The whole table must be 2K aligned */
.align 11
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
/* Current EL with SP0 / Synchronous */
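Each of the 16 slots that follow is 128 bytes wide (16 x 128 = 2K, matching
the alignment above); a minimal sketch of one slot, with a placeholder spin
in place of the real handler branch:

.align 7	/* every vector slot is 2^7 = 128 bytes */
b .		/* placeholder; the real entry branches to its handler */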