Xtensa port: Added Xtensa specific code (C + S) files.
Change-Id: I0dff0c33d8577cc70d4d5ee8f298db38c508ee73
Signed-off-by: Mazen NEIFER <mazen@nestwave.com>
parent b0669a04b3
commit e4e3cf604e
19 changed files with 5332 additions and 0 deletions
405	arch/xtensa/core/atomic.S	Normal file
@@ -0,0 +1,405 @@
/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#include <xtensa_context.h>

/**
 *
 * @brief Atomically clear a memory location
 *
 * This routine atomically clears the contents of <target> and returns the old
 * value that was in <target>.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_clear
 * (
 *     atomic_t *target /@ memory location to clear @/
 * )
 */
	.global atomic_clear
	.type atomic_clear,@function
	.align 4
atomic_clear:
	ENTRY(48)
.L_LoopClear:
	movi	a4, 0			/* reloaded each pass: s32c1i clobbers a4 */
	l32ai	a3, a2, 0
	wsr	a3, scompare1
	s32c1i	a4, a2, 0
	bne	a3, a4, .L_LoopClear
	mov	a2, a3
	RET(48)

/**
 *
 * @brief Atomically set a memory location
 *
 * This routine atomically sets the contents of <target> to <value> and returns
 * the old value that was in <target>.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_set
 * (
 *     atomic_t *target,  /@ memory location to set @/
 *     atomic_val_t value /@ set with this value @/
 * )
 *
 */
	.global atomic_set
	.type atomic_set,@function
	.align 4
atomic_set:
	ENTRY(48)
.L_LoopSet:
	l32ai	a4, a2, 0
	wsr	a4, scompare1
	mov	a5, a3			/* copy <value>: s32c1i clobbers its source */
	s32c1i	a5, a2, 0
	bne	a5, a4, .L_LoopSet
	mov	a2, a4
	RET(48)

/**
 *
 * @brief Atomically get the value of a shared memory location
 *
 * This routine atomically retrieves the value in *target.
 *
 * atomic_val_t atomic_get
 * (
 *     atomic_t *target /@ address of atom to be retrieved @/
 * )
 *
 * @return value read from address target.
 *
 */
	.global atomic_get
	.type atomic_get,@function
	.align 4
atomic_get:
	ENTRY(48)
	l32ai	a2, a2, 0
	RET(48)

/**
 *
 * @brief Atomically increment a memory location
 *
 * This routine atomically increments the value in <target>. The operation is
 * done using unsigned integer arithmetic. Various CPU architectures may impose
 * restrictions with regards to the alignment and cache attributes of the
 * atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_inc
 * (
 *     atomic_t *target /@ memory location to increment @/
 * )
 *
 */

	.global atomic_inc
	.type atomic_inc,@function
	.align 4
atomic_inc:
	ENTRY(48)
.L_LoopInc:
	l32ai	a3, a2, 0
	wsr	a3, scompare1
	addi	a4, a3, 1
	s32c1i	a4, a2, 0
	bne	a3, a4, .L_LoopInc
	mov	a2, a3
	RET(48)

/**
 *
 * @brief Atomically add a value to a memory location
 *
 * This routine atomically adds the contents of <target> and <value>, placing
 * the result in <target>. The operation is done using signed integer
 * arithmetic. Various CPU architectures may impose restrictions with regards
 * to the alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_add
 * (
 *     atomic_t *target,  /@ memory location to add to @/
 *     atomic_val_t value /@ value to add @/
 * )
 */
	.global atomic_add
	.type atomic_add,@function
	.align 4
atomic_add:
	ENTRY(48)
.L_LoopAdd:
	l32ai	a4, a2, 0
	wsr	a4, scompare1
	add	a5, a3, a4
	s32c1i	a5, a2, 0
	bne	a5, a4, .L_LoopAdd
	mov	a2, a5
	RET(48)

/**
 *
 * @brief Atomically decrement a memory location
 *
 * This routine atomically decrements the value in <target>. The operation is
 * done using unsigned integer arithmetic. Various CPU architectures may impose
 * restrictions with regards to the alignment and cache attributes of the
 * atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_dec
 * (
 *     atomic_t *target /@ memory location to decrement @/
 * )
 *
 */

	.global atomic_dec
	.type atomic_dec,@function
	.align 4
atomic_dec:
	ENTRY(48)
.L_LoopDec:
	l32ai	a3, a2, 0
	wsr	a3, scompare1
	addi	a4, a3, -1
	s32c1i	a4, a2, 0
	bne	a3, a4, .L_LoopDec
	mov	a2, a3
	RET(48)

/**
 *
 * @brief Atomically subtract a value from a memory location
 *
 * This routine atomically subtracts <value> from the contents of <target>,
 * placing the result in <target>. The operation is done using signed integer
 * arithmetic. Various CPU architectures may impose restrictions with regards
 * to the alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_sub
 * (
 *     atomic_t *target,  /@ memory location to subtract from @/
 *     atomic_val_t value /@ value to subtract @/
 * )
 *
 */

	.global atomic_sub
	.type atomic_sub,@function
	.align 4
atomic_sub:
	ENTRY(48)
.L_LoopSub:
	l32ai	a4, a2, 0
	wsr	a4, scompare1
	sub	a5, a4, a3
	s32c1i	a5, a2, 0
	bne	a5, a4, .L_LoopSub
	mov	a2, a5
	RET(48)

/**
 *
 * @brief Atomically perform a bitwise NAND on a memory location
 *
 * This routine atomically performs a bitwise NAND operation of the contents of
 * <target> and <value>, placing the result in <target>.
 * Various CPU architectures may impose restrictions with regards to the
 * alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_nand
 * (
 *     atomic_t *target,  /@ memory location to NAND @/
 *     atomic_val_t value /@ NAND with this value @/
 * )
 *
 */

	.global atomic_nand
	.type atomic_nand,@function
	.align 4
atomic_nand:
	ENTRY(48)
.L_LoopNand:
	l32ai	a4, a2, 0
	wsr	a4, scompare1
	and	a5, a3, a4
	neg	a5, a5
	addi	a5, a5, -1		/* ~x == -x - 1 */
	s32c1i	a5, a2, 0
	bne	a5, a4, .L_LoopNand
	mov	a2, a4
	RET(48)

/**
 *
 * @brief Atomically perform a bitwise AND on a memory location
 *
 * This routine atomically performs a bitwise AND operation of the contents of
 * <target> and <value>, placing the result in <target>.
 * Various CPU architectures may impose restrictions with regards to the
 * alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_and
 * (
 *     atomic_t *target,  /@ memory location to AND @/
 *     atomic_val_t value /@ AND with this value @/
 * )
 *
 */

	.global atomic_and
	.type atomic_and,@function
	.align 4
atomic_and:
	ENTRY(48)
.L_LoopAnd:
	l32ai	a4, a2, 0
	wsr	a4, scompare1
	and	a5, a3, a4
	s32c1i	a5, a2, 0
	bne	a5, a4, .L_LoopAnd
	mov	a2, a4
	RET(48)

/**
 *
 * @brief Atomically perform a bitwise OR on a memory location
 *
 * This routine atomically performs a bitwise OR operation of the contents of
 * <target> and <value>, placing the result in <target>.
 * Various CPU architectures may impose restrictions with regards to the
 * alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_or
 * (
 *     atomic_t *target,  /@ memory location to OR @/
 *     atomic_val_t value /@ OR with this value @/
 * )
 *
 */

	.global atomic_or
	.type atomic_or,@function
	.align 4
atomic_or:
	ENTRY(48)
.L_LoopOr:
	l32ai	a4, a2, 0
	wsr	a4, scompare1
	or	a5, a3, a4
	s32c1i	a5, a2, 0
	bne	a5, a4, .L_LoopOr
	mov	a2, a4
	RET(48)

/**
 *
 * @brief Atomically perform a bitwise XOR on a memory location
 *
 * This routine atomically performs a bitwise XOR operation of the contents of
 * <target> and <value>, placing the result in <target>.
 * Various CPU architectures may impose restrictions with regards to the
 * alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return Contents of <target> before the atomic operation
 *
 * atomic_val_t atomic_xor
 * (
 *     atomic_t *target,  /@ memory location to XOR @/
 *     atomic_val_t value /@ XOR with this value @/
 * )
 *
 */

	.global atomic_xor
	.type atomic_xor,@function
	.align 4
atomic_xor:
	ENTRY(48)
.L_LoopXor:
	l32ai	a4, a2, 0
	wsr	a4, scompare1
	xor	a5, a3, a4
	s32c1i	a5, a2, 0
	bne	a5, a4, .L_LoopXor
	mov	a2, a4
	RET(48)

/**
 *
 * @brief Atomically compare-and-swap the contents of a memory location
 *
 * This routine performs an atomic compare-and-swap, testing that <target>
 * contains <oldValue>, and if it does, setting the value of <target> to
 * <newValue>. Various CPU architectures may impose restrictions with regards
 * to the alignment and cache attributes of the atomic_t type.
 *
 * This routine can be used from both task and interrupt level.
 *
 * @return 1 if the swap is actually executed, 0 otherwise.
 *
 * int atomic_cas
 * (
 *     atomic_t *target,      /@ memory location to compare-and-swap @/
 *     atomic_val_t oldValue, /@ compare to this value @/
 *     atomic_val_t newValue  /@ swap with this value @/
 * )
 *
 */
	.global atomic_cas
	.type atomic_cas,@function
	.align 4
atomic_cas:
	ENTRY(48)
.L_LoopCas:
	l32ai	a5, a2, 0
	beq	a5, a3, 1f
	movi	a2, 0			/* current value != <oldValue>: no swap */
	j	2f
1:
	wsr	a5, scompare1
	mov	a6, a4			/* copy <newValue>: s32c1i clobbers its source */
	s32c1i	a6, a2, 0
	bne	a6, a5, .L_LoopCas	/* memory changed under us: re-examine */
	movi	a2, 1
2:
	RET(48)
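Every routine in atomic.S above is built on the same S32C1I retry pattern: load the word with L32AI, place the loaded value in SCOMPARE1, attempt the conditional store, and retry if another agent modified the word in between. A minimal C sketch of that pattern, using a hypothetical __s32c1i() helper standing in for the wsr/s32c1i pair (it is not an actual intrinsic of this port):

	typedef int atomic_val_t;
	typedef volatile atomic_val_t atomic_t;

	/* Hypothetical helper: store 'newval' at 'p' iff *p == 'expected';
	 * return the value actually found in memory (the s32c1i result).
	 */
	extern atomic_val_t __s32c1i(atomic_t *p, atomic_val_t expected,
				     atomic_val_t newval);

	static atomic_val_t atomic_add_sketch(atomic_t *target, atomic_val_t value)
	{
		atomic_val_t old, seen;

		do {
			old = *target;			/* l32ai  a4, a2, 0  */
			seen = __s32c1i(target, old,	/* wsr a4, scompare1 */
					old + value);	/* s32c1i a5, a2, 0  */
		} while (seen != old);			/* bne a5, a4, retry */
		return old;	/* contents of <target> before the operation */
	}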
39	arch/xtensa/core/cpu_idle.c	Normal file
@@ -0,0 +1,39 @@
/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#include <xtensa/tie/xt_core.h>
#include <xtensa/tie/xt_interrupt.h>
#include <logging/kernel_event_logger.h>

/*
 * @brief Put the CPU in low-power mode
 *
 * This function always exits with interrupts unlocked.
 *
 * void k_cpu_idle(void)
 */
void k_cpu_idle(void)
{
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
	_sys_k_event_logger_enter_sleep();
#endif
	XT_WAITI(0);
}

/*
 * @brief Put the CPU in low-power mode, entered with IRQs locked
 *
 * This function exits with interrupts restored to <key>.
 *
 * void k_cpu_atomic_idle(unsigned int key)
 */
void k_cpu_atomic_idle(unsigned int key)
{
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
	_sys_k_event_logger_enter_sleep();
#endif
	XT_WAITI(0);
	XT_WSR_PS(key);
	XT_RSYNC();
}
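k_cpu_atomic_idle() exists to close the race between checking a wake-up condition and entering WAITI. A usage sketch, assuming the kernel's usual irq_lock()/irq_unlock() API; the 'ready' flag is illustrative:

	extern volatile int ready;	/* illustrative wake-up condition */

	void wait_for_event(void)
	{
		unsigned int key = irq_lock();

		while (!ready) {
			/* Enters WAITI with interrupts still masked, so a
			 * wake-up arriving between the check and the wait
			 * cannot be lost; returns with PS restored from
			 * 'key' (interrupts unlocked again).
			 */
			k_cpu_atomic_idle(key);
			key = irq_lock();
		}
		irq_unlock(key);
	}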
264	arch/xtensa/core/crt1-boards.S	Normal file
@@ -0,0 +1,264 @@
/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * For most hardware / boards, this code sets up the C calling context
 * (setting up stack, PS, and clearing BSS) and jumps to __clibrary_start
 * which sets up the C library, calls constructors and registers destructors,
 * and calls main().
 *
 * Control arrives here at _start from the reset vector or from crt0-app.S.
 */

#include <xtensa/coreasm.h>
#include "xtos-internal.h"


/* Exports */
	.global _start

/*
 * Imports
 *   __clibrary_init		from C library (eg. newlib or uclibc)
 *   exit			from C library
 *   main			from user application
 *   board_init			board-specific (uart/mingloss/tinygloss.c)
 *   xthal_dcache_all_writeback	from HAL library
 *   __stack			from linker script (see LSP Ref Manual)
 *   _bss_table_start		from linker script (see LSP Ref Manual)
 *   _bss_table_end		from linker script (see LSP Ref Manual)
 */

	.type	main, @function

/* Macros to abstract away ABI differences */

#if __XTENSA_CALL0_ABI__
# define CALL	call0
# define CALLX	callx0
# define ARG1	a2	/* 1st outgoing call argument */
# define ARG2	a3	/* 2nd outgoing call argument */
# define ARG3	a4	/* 3rd outgoing call argument */
# define ARG4	a5	/* 4th outgoing call argument */
# define ARG5	a6	/* 5th outgoing call argument */
#else
# define CALL	call4
# define CALLX	callx4
# define ARG1	a6	/* 1st outgoing call argument */
# define ARG2	a7	/* 2nd outgoing call argument */
# define ARG3	a8	/* 3rd outgoing call argument */
# define ARG4	a9	/* 4th outgoing call argument */
# define ARG5	a10	/* 5th outgoing call argument */
#endif


/**************************************************************************/

	.text
	.align 4
_start:
	/*
	 * _start is typically NOT at the beginning of the text segment --
	 * it is always called from either the reset vector or other code
	 * that does equivalent initialization (such as crt0-app.S).
	 *
	 * Assumptions on entry to _start:
	 * - low (level-one) and medium priority interrupts are disabled
	 *   via PS.INTLEVEL and/or INTENABLE (PS.INTLEVEL is expected to
	 *   be zeroed, to potentially enable them, before calling main)
	 * - C calling context not initialized:
	 *   - PS not initialized
	 *   - SP not initialized
	 * - the following are initialized:
	 *   - LITBASE, cache attributes, WindowBase, WindowStart,
	 *     CPENABLE, FP's FCR and FSR, EXCSAVE[n]
	 *
	 * Keep a0 zero. It is used to initialize a few things.
	 * It is also the return address, where zero indicates
	 * that the frame used by _start is the bottommost frame.
	 */
#if !XCHAL_HAVE_HALT || !XCHAL_HAVE_BOOTLOADER	/* not needed for Xtensa TX */
	movi	a0, 0		/* keep this register zero. */
#endif

#if XTOS_RESET_UNNEEDED && !XCHAL_HAVE_HALT
#include "reset-unneeded.S"
#endif

	/*
	 * Initialize the stack pointer.
	 * See the "ABI and Software Conventions" chapter in the
	 * Xtensa ISA Reference manual for details.
	 *
	 * NOTE: Because the _start routine does not use any memory in its
	 * stack frame, and because all of its CALL instructions use a
	 * window size of 4 (or zero), the stack frame for _start can be empty.
	 */
	movi	sp, __stack

	/*
	 * Now that sp (a1) is set, we can set PS as per the application
	 * (user vector mode, enable interrupts, enable window exceptions if applicable).
	 */
#if XCHAL_HAVE_EXCEPTIONS
# ifdef __XTENSA_CALL0_ABI__
	movi	a3, PS_UM		/* PS.WOE = 0, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
# else
	movi	a3, PS_UM|PS_WOE	/* PS.WOE = 1, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
# endif
	wsr	a3, PS
	rsync
#endif


	/*
	 * Do any initialization that affects the memory map, such as
	 * setting up TLB entries, that needs to be done before we can
	 * successfully clear BSS (e.g. if some BSS segments are in
	 * remapped areas).
	 *
	 * NOTE: This hook works where the reset vector does not unpack
	 * segments (see "ROM packing" in the LSP manual), or where
	 * unpacking of segments is not affected by memory remapping.
	 * If ROM unpacking is affected, TLB setup must be done in
	 * assembler from the reset vector.
	 *
	 * The __memmap_init() routine can be a C function, however it
	 * does not have BSS initialized! In particular, __memmap_init()
	 * cannot set BSS variables, i.e. uninitialized global variables
	 * (they'll be wiped out by the following BSS clear), nor can it
	 * assume they are yet initialized to zero.
	 *
	 * The __memmap_init() function is optional. It is marked as a
	 * weak symbol, so that it gets valued zero if not defined.
	 */
	.weak	__memmap_init
	movi	a4, __memmap_init
	beqz	a4, 1f
	CALLX	a4
1:


#if !XCHAL_HAVE_BOOTLOADER	/* boot loader takes care of zeroing BSS */

# ifdef __XTENSA_CALL0_ABI__
	/* Clear a0 again as possible CALLX to __memmap_init changed it. */
	movi	a0, 0
# endif

	/*
	 * Clear the BSS (uninitialized data) segments.
	 * This code supports multiple zeroed sections (*.bss).
	 *
	 * Register allocation:
	 *	a0 = 0
	 *	a6 = pointer to start of table, and through table
	 *	a7 = pointer to end of table
	 *	a8 = start address of bytes to be zeroed
	 *	a9 = end address of bytes to be zeroed
	 *	a10 = length of bytes to be zeroed
	 */
	movi	a6, _bss_table_start
	movi	a7, _bss_table_end
	bgeu	a6, a7, .L3zte

.L0zte:	l32i	a8, a6, 0	/* get start address, assumed multiple of 4 */
	l32i	a9, a6, 4	/* get end address, assumed multiple of 4 */
	addi	a6, a6, 8	/* next entry */
	sub	a10, a9, a8	/* a10 = length, assumed a multiple of 4 */
	bbci.l	a10, 2, .L1zte
	s32i	a0, a8, 0	/* clear 4 bytes to make length multiple of 8 */
	addi	a8, a8, 4
.L1zte:	bbci.l	a10, 3, .L2zte
	s32i	a0, a8, 0	/* clear 8 bytes to make length multiple of 16 */
	s32i	a0, a8, 4
	addi	a8, a8, 8
.L2zte:	srli	a10, a10, 4	/* length is now multiple of 16, divide by 16 */
	floopnez	a10, clearzte
	s32i	a0, a8, 0	/* clear 16 bytes at a time... */
	s32i	a0, a8, 4
	s32i	a0, a8, 8
	s32i	a0, a8, 12
	addi	a8, a8, 16
	floopend	a10, clearzte

	bltu	a6, a7, .L0zte	/* loop until end of table of *.bss sections */
.L3zte:
#endif


	/*
	 * We can now call C code, the C calling environment has been initialized.
	 *
	 * From this point on, we use ABI-specific macros to refer to registers a0 .. a15
	 * (ARG#).
	 */

#if XCHAL_HAVE_HALT

	/*
	 * Assume minimalist environment for memory-constrained TX cores.
	 * No C library or board initialization, no parameters passed to main
	 * (assume declared as "void main(void)") and no call to exit().
	 */
	CALL	main
	halt

#else /* !HALT */

	.type	board_init, @function
	.type	__clibrary_init, @function
	.type	exit, @function


	/* Initialize the board (eg. UART, etc). */
	CALL	board_init

	/*
	 * Call __clibrary_init to initialize the C library:
	 *
	 * void __clibrary_init(int argc, char ** argv, char ** environ,
	 *		void(*init_func)(void), void(*fini_func)(void));
	 */

	/* Pass an empty argv array, with an empty string as the program name. */

	movi	ARG1, _start_argc	/* argc address */
	movi	ARG2, _start_argv	/* argv = ["", 0] */
	movi	ARG3, _start_envp	/* envp = [0] */
	movi	ARG4, _init		/* function that calls constructors */
	movi	ARG5, _fini		/* function that calls destructors */
	l32i	ARG1, ARG1, 0		/* argc = 1 */
	CALL	__clibrary_init

	/* Call: int main(int argc, char ** argv, char ** environ); */
	movi	ARG1, _start_argc	/* argc address */
	movi	ARG2, _start_argv	/* argv = ["", 0] */
	movi	ARG3, _start_envp	/* envp = [0] */
	l32i	ARG1, ARG1, 0		/* argc = 1 */
	CALL	main
	/* The return value is the same register as the first outgoing argument. */
	CALL	exit			/* exit with main's return value */
	/* Does not return here. */

	.data
	/*
	 * Mark argc/argv/envp parameters as weak so that an external
	 * object file can override them.
	 */
	.weak	_start_argc, _start_argv, _start_envp
	.align	4
_start_argv:
	.word	_start_null	/* empty program name */
_start_null:
_start_envp:
	.word	0		/* end of argv array, empty string, empty environ */
_start_argc:
	.word	1		/* one argument (program name) */
	.text

#endif /* !HALT */

	.size	_start, . - _start
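The .L0zte/.L3zte loop above walks a linker-generated table of {start, end} address pairs. The same walk in C, under the assembly's own assumptions (entries and lengths are multiples of 4); the struct name is illustrative:

	struct bss_entry {		/* illustrative name for one table entry */
		unsigned int *start;	/* 4-byte aligned */
		unsigned int *end;	/* 4-byte aligned */
	};

	extern struct bss_entry _bss_table_start[], _bss_table_end[];

	static void clear_bss(void)
	{
		for (struct bss_entry *e = _bss_table_start;
		     e != _bss_table_end; e++) {
			for (unsigned int *p = e->start; p != e->end; p++) {
				/* the asm peels 4- and 8-byte prefixes, then
				 * clears 16 bytes per loop iteration
				 */
				*p = 0;
			}
		}
	}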
239	arch/xtensa/core/crt1-sim.S	Normal file
@@ -0,0 +1,239 @@
/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * For the Xtensa simulator target, this code sets up the C calling context
 * and calls main() (via __clibrary_start).
 * Control arrives here at _start from the reset vector or from crt0-app.S.
 */

#include <xtensa/simboard.h>
#include <xtensa/simcall.h>
#include <xtensa/coreasm.h>


/* Exports */
	.global _start
	.global __start

/*
 * Imports
 *   __clibrary_init	from C library (eg. newlib or uclibc)
 *   exit		from C library
 *   main		from user application
 *   __stack		from linker script (see LSP Ref Manual)
 */

	.type	__clibrary_init, @function
	.type	_Cstart, @function
	.type	exit, @function


/* Macros to abstract away ABI differences */

#if __XTENSA_CALL0_ABI__
# define CALL	call0
# define CALLX	callx0
# define ARG1	a2	/* 1st outgoing call argument */
# define ARG2	a3	/* 2nd outgoing call argument */
# define ARG3	a4	/* 3rd outgoing call argument */
# define ARG4	a5	/* 4th outgoing call argument */
# define ARG5	a6	/* 5th outgoing call argument */
#else
# define CALL	call4
# define CALLX	callx4
# define ARG1	a6	/* 1st outgoing call argument */
# define ARG2	a7	/* 2nd outgoing call argument */
# define ARG3	a8	/* 3rd outgoing call argument */
# define ARG4	a9	/* 4th outgoing call argument */
# define ARG5	a10	/* 5th outgoing call argument */
#endif

	.data
	.weak	_start_envp	/* allow overriding */
	.align	4
_start_envp: .word 0		/* empty environ */



	.text
	.align 4

_start:
__start:
	/*
	 * _start is typically NOT at the beginning of the text segment --
	 * it is always called from either the reset vector or other code
	 * that does equivalent initialization (such as crt0-app.S).
	 *
	 * Assumptions on entry to _start:
	 * - low (level-one) and medium priority interrupts are disabled
	 *   via PS.INTLEVEL and/or INTENABLE (PS.INTLEVEL is expected to
	 *   be zeroed, to potentially enable them, before calling main)
	 * - C calling context not initialized:
	 *   - PS not initialized
	 *   - SP not initialized
	 * - the following are initialized:
	 *   - LITBASE, cache attributes, WindowBase, WindowStart,
	 *     CPENABLE, FP's FCR and FSR, EXCSAVE[n]
	 *
	 * Keep a0 zero. It is used to initialize a few things.
	 * It is also the return address, where zero indicates
	 * that the frame used by _start is the bottommost frame.
	 *
	 */
	movi	a0, 0		/* keep this register zero. */

#if XTOS_RESET_UNNEEDED
#include "reset-unneeded.S"
#endif


	/*
	 * Initialize the stack pointer.
	 * See the "ABI and Software Conventions" chapter in the
	 * Xtensa ISA Reference manual for details.
	 *
	 * NOTE: Because the _start routine does not use any memory in its
	 * stack frame, and because all of its CALL instructions use a
	 * window size of 4, the stack frame for _start can be empty.
	 */
	movi	sp, __stack

	/*
	 * reserve stack space for
	 * - argv array
	 * - argument strings
	 */
	movi	a2, SYS_iss_argv_size
	simcall			/* returns size of argv[] + its strings in a2 */
#if XCHAL_HAVE_PIF
	/*
	 * The stack only needs 16-byte alignment.
	 * However, here we round up the argv size further to 128 byte multiples
	 * so that in most cases, variations in argv[0]'s path do not result in
	 * different stack allocation. Otherwise, such variations can impact
	 * execution timing (eg. due to cache effects etc) for the same code and data.
	 * If we have a PIF, it's more likely the extra required space is okay.
	 */
	addi	a2, a2, 127
	srli	a2, a2, 7
	slli	a2, a2, 7
#else
	/* Keep stack 16-byte aligned. */
	addi	a2, a2, 15
	srli	a2, a2, 4
	slli	a2, a2, 4
#endif
	/*
	 * No need to use MOVSP because we have no caller (we're the
	 * base caller); in fact it's better not to use MOVSP in this
	 * context, to avoid unnecessary ALLOCA exceptions and copying
	 * from undefined memory:
	 *   sub	a3, sp, a2
	 *   movsp	sp, a3
	 */
	sub	sp, sp, a2


	/*
	 * Now that sp (a1) is set, we can set PS as per the application
	 * (user vector mode, enable interrupts, enable window exceptions if applicable).
	 */
#if XCHAL_HAVE_EXCEPTIONS
# ifdef __XTENSA_CALL0_ABI__
	movi	a3, PS_UM		/* PS.WOE = 0, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
# else
	movi	a3, PS_UM|PS_WOE	/* PS.WOE = 1, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
# endif
	wsr	a3, PS
	rsync
#endif


	/*
	 * Do any initialization that affects the memory map, such as
	 * setting up TLB entries, that needs to be done before we can
	 * successfully clear BSS (e.g. if some BSS segments are in
	 * remapped areas).
	 *
	 * NOTE: This hook works where the reset vector does not unpack
	 * segments (see "ROM packing" in the LSP manual), or where
	 * unpacking of segments is not affected by memory remapping.
	 * If ROM unpacking is affected, TLB setup must be done in
	 * assembler from the reset vector.
	 *
	 * The __memmap_init() routine can be a C function, however it
	 * does not have BSS initialized! In particular, __memmap_init()
	 * cannot set BSS variables, i.e. uninitialized global variables
	 * (they'll be wiped out by the following BSS clear), nor can it
	 * assume they are yet initialized to zero.
	 *
	 * The __memmap_init() function is optional. It is marked as a
	 * weak symbol, so that it gets valued zero if not defined.
	 */
	.weak	__memmap_init
	movi	a4, __memmap_init
	beqz	a4, 1f
	CALLX	a4
1:


	/* The new ISS simcall only appeared after RB-2007.2: */
#if !XCHAL_HAVE_BOOTLOADER && (XCHAL_HW_MAX_VERSION > XTENSA_HWVERSION_RB_2007_2)
	/*
	 * Clear the BSS (uninitialized data) segments.
	 * This code supports multiple zeroed sections (*.bss).
	 * For speed, we clear memory using an ISS simcall
	 * (see crt1-boards.S for more generic BSS clearing code).
	 */
	movi	a6, _bss_table_start
	movi	a7, _bss_table_end
	bgeu	a6, a7, .Lnobss
.Lbssloop:
	movi	a2, SYS_memset
	l32i	a3, a6, 0	/* arg1 = fill start address */
	movi	a4, 0		/* arg2 = fill pattern */
	l32i	a5, a6, 4	/* get end address */
	addi	a6, a6, 8	/* next bss table entry */
	sub	a5, a5, a3	/* arg3 = fill size in bytes */
	simcall			/* memset(a3,a4,a5) */
	bltu	a6, a7, .Lbssloop	/* loop until end of bss table */
.Lnobss:
#endif


	/*
	 * Call __clibrary_init to initialize the C library:
	 *
	 * void __clibrary_init(int argc, char ** argv, char ** environ,
	 *		void(*init_func)(void), void(*fini_func)(void));
	 */

	/* Get argv with the arguments from the ISS */
	mov	a3, sp			/* tell simcall where to write argv[] */
	movi	a2, SYS_iss_set_argv
	simcall				/* write argv[] array at a3 */

	movi	a2, SYS_iss_argc
	simcall				/* put argc in a2 */


	/* Call: int _Cstart(); */
	CALL	_Cstart
	/* The return value is the same register as the first outgoing argument. */
	CALL	exit
	/* Does not return here. */

	.size	_start, . - _start


/*
 * Local Variables:
 * mode:fundamental
 * comment-start: "/* "
 * comment-start-skip: "/* *"
 * End:
 */
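The addi/srli/slli triplets above are plain round-up-to-a-power-of-two arithmetic. In C terms, a small sketch:

	/* Round 'size' up to a multiple of 'align' (a power of two), as done
	 * above with align = 128 (PIF configs) or align = 16 (otherwise),
	 * before growing the stack: sp -= round_up(argv_size, align);
	 */
	static inline unsigned int round_up(unsigned int size, unsigned int align)
	{
		return (size + align - 1) & ~(align - 1);
	}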
121	arch/xtensa/core/fatal.c	Normal file
@@ -0,0 +1,121 @@
/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel_arch_data.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PR_EXC(...) printk(__VA_ARGS__)
#else
#define PR_EXC(...)
#endif /* CONFIG_PRINTK */

const NANO_ESF _default_esf = {
	{0xdeaddead}, /* sp */
	0xdeaddead, /* pc */
};

extern void exit(int exit_code);

/**
 *
 * @brief Nanokernel fatal error handler
 *
 * This routine is called when fatal error conditions are detected by software
 * and is responsible only for reporting the error. Once reported, it then
 * invokes the user provided routine _SysFatalErrorHandler() which is
 * responsible for implementing the error handling policy.
 *
 * The caller is expected to always provide a usable ESF. In the event that the
 * fatal error does not have a hardware generated ESF, the caller should either
 * create its own or use a pointer to the global default ESF <_default_esf>.
 *
 * @param reason the reason that the handler was called
 * @param pEsf pointer to the exception stack frame
 *
 * @return This function does not return.
 */
void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
{
	switch (reason) {
	case _NANO_ERR_INVALID_TASK_EXIT:
		PR_EXC("***** Invalid Exit Software Error! *****\n");
		break;
#if defined(CONFIG_STACK_CANARIES)
	case _NANO_ERR_STACK_CHK_FAIL:
		PR_EXC("***** Stack Check Fail! *****\n");
		break;
#endif /* CONFIG_STACK_CANARIES */
	case _NANO_ERR_ALLOCATION_FAIL:
		PR_EXC("**** Kernel Allocation Failure! ****\n");
		break;
	default:
		PR_EXC("**** Unknown Fatal Error %d! ****\n", reason);
		break;
	}
	PR_EXC("Current thread ID = 0x%x\n"
	       "Faulting instruction address = 0x%x\n",
	       sys_thread_self_get(),
	       pEsf->pc);
	/*
	 * Now that the error has been reported, call the user implemented
	 * policy to respond to the error. The decisions as to what responses
	 * are appropriate to the various errors are something the customer
	 * must decide.
	 */
	/* TODO: call _SysFatalErrorHandler(reason, pEsf); */
	exit(253);
}

void FatalErrorHandler(void)
{
	unsigned int tmpReg = 0;
	unsigned int Esf[5];

	__asm__ volatile("rsr %0, 177\n\t" : "=r"(tmpReg)); /* EPC1 */
	Esf[0] = tmpReg;
	__asm__ volatile("rsr %0, 232\n\t" : "=r"(tmpReg)); /* EXCCAUSE */
	Esf[1] = tmpReg;
	__asm__ volatile("rsr %0, 209\n\t" : "=r"(tmpReg)); /* EXCSAVE1 */
	Esf[2] = tmpReg;
	__asm__ volatile("rsr %0, 230\n\t" : "=r"(tmpReg)); /* PS */
	Esf[3] = tmpReg;
	__asm__ volatile("rsr %0, 238\n\t" : "=r"(tmpReg)); /* EXCVADDR */
	Esf[4] = tmpReg;
	PR_EXC("Error\nEPC = 0x%x\n"
	       "EXCCAUSE = 0x%x\n"
	       "EXCSAVE = 0x%x\n"
	       "PS = 0x%x\n"
	       "EXCVADDR = 0x%x\n",
	       Esf[0], Esf[1], Esf[2], Esf[3], Esf[4]);
	exit(255);
}

void ReservedInterruptHandler(unsigned int intNo)
{
	unsigned int tmpReg = 0;
	unsigned int Esf[5];

	__asm__ volatile("rsr %0, 177\n\t" : "=r"(tmpReg)); /* EPC1 */
	Esf[0] = tmpReg;
	__asm__ volatile("rsr %0, 232\n\t" : "=r"(tmpReg)); /* EXCCAUSE */
	Esf[1] = tmpReg;
	__asm__ volatile("rsr %0, 209\n\t" : "=r"(tmpReg)); /* EXCSAVE1 */
	Esf[2] = tmpReg;
	__asm__ volatile("rsr %0, 230\n\t" : "=r"(tmpReg)); /* PS */
	Esf[3] = tmpReg;
	__asm__ volatile("rsr %0, 228\n\t" : "=r"(tmpReg)); /* INTENABLE */
	Esf[4] = tmpReg;
	PR_EXC("Error, unhandled interrupt\n"
	       "EPC = 0x%x\n"
	       "EXCCAUSE = 0x%x\n"
	       "EXCSAVE = 0x%x\n"
	       "PS = 0x%x\n"
	       "INTENABLE = 0x%x\n"
	       "INTERRUPT = 0x%x\n",
	       Esf[0], Esf[1], Esf[2], Esf[3], Esf[4], (1 << intNo));
	exit(254);
}
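The TODO in _NanoFatalErrorHandler() leaves the hand-off to _SysFatalErrorHandler() unwired. A minimal sketch of such a policy routine, as if added to this file (the report-and-spin policy shown is an assumption for illustration, not part of this commit):

	void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
	{
		ARG_UNUSED(pEsf);

		/* One possible policy: report and halt so a debugger can attach. */
		PR_EXC("Fatal error %u, halting\n", reason);
		for (;;) {
		}
	}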
36	arch/xtensa/core/irq_manage.c	Normal file
@@ -0,0 +1,36 @@
/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdio.h>
#include <xtensa_api.h>
#include <kernel_arch_data.h>
#include <misc/__assert.h>

/*
 * @internal
 *
 * @brief Set an interrupt's priority
 *
 * The priority is verified if ASSERT_ON is enabled. The maximum number
 * of priority levels is a little complex, as there are some hardware
 * priority levels which are reserved: three for various types of exceptions,
 * and possibly one additional to support zero latency interrupts.
 *
 * Valid values are from 1 to 6. Interrupts of priority 1 are not masked when
 * interrupts are locked system-wide, so care must be taken when using them.
 * ISRs installed with priority 0 cannot make kernel calls.
 *
 * @return N/A
 */

void _irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{
	__ASSERT(prio < XCHAL_EXCM_LEVEL + 1,
		 "invalid priority %d! values must be less than %d\n",
		 prio, XCHAL_EXCM_LEVEL + 1);
	/* TODO: Write code to set priority if this is ever possible on Xtensa */
}
42	arch/xtensa/core/irq_offload.c	Normal file
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#include <nanokernel.h>
#include <irq_offload.h>
#include <arch/xtensa/arch.h>
#include <xtensa_api.h>

/*
 * The Xtensa core must support software interrupts in order to use the
 * irq_offload feature.
 */
#ifndef CONFIG_IRQ_OFFLOAD_INTNUM
#error "Please add an entry for the IRQ_OFFLOAD_INTNUM option to your arch/xtensa/soc/${XTENSA_CORE}/Kconfig file in order to use IRQ offload on this core."
#endif

static irq_offload_routine_t offload_routine;
static void *offload_param;

/* Called by ISR dispatcher */
void _irq_do_offload(void *unused)
{
	ARG_UNUSED(unused);
	offload_routine(offload_param);
}

void irq_offload(irq_offload_routine_t routine, void *parameter)
{
	IRQ_CONNECT(CONFIG_IRQ_OFFLOAD_INTNUM, XCHAL_EXCM_LEVEL,
		    _irq_do_offload, NULL, 0);
	_arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM);
	offload_routine = routine;
	offload_param = parameter;
	_xt_set_intset(1 << CONFIG_IRQ_OFFLOAD_INTNUM);
	/*
	 * Enable the software interrupt, in case it is disabled, so that IRQ
	 * offload is serviced.
	 */
	_arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM);
}
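A usage sketch for irq_offload(): it runs a routine on the software interrupt selected by CONFIG_IRQ_OFFLOAD_INTNUM. The handler name and argument below are illustrative, not part of the port:

	#include <misc/printk.h>

	static void my_handler(void *arg)
	{
		/* Runs in interrupt context on the offload software interrupt. */
		printk("in ISR, arg = %d\n", (int)arg);
	}

	void run_in_irq_context(void)
	{
		/* Pends and enables the software interrupt; the handler
		 * normally executes before irq_offload() returns.
		 */
		irq_offload(my_handler, (void *)42);
	}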
598	arch/xtensa/core/startup/reset-vector.S	Normal file
@@ -0,0 +1,598 @@
/*
|
||||
* Copyright (c) 2016 Cadence Design Systems, Inc.
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#include <xtensa/coreasm.h>
|
||||
#include <xtensa/corebits.h>
|
||||
#include <xtensa/cacheasm.h>
|
||||
#include <xtensa/cacheattrasm.h>
|
||||
#include <xtensa/xtensa-xer.h>
|
||||
#include <xtensa/xdm-regs.h>
|
||||
#include <xtensa/config/specreg.h>
|
||||
#include <xtensa/config/system.h> /* for XSHAL_USE_ABSOLUTE_LITERALS only */
|
||||
#include <xtensa/xtruntime-core-state.h>
|
||||
|
||||
/*
|
||||
* The following reset vector avoids initializing certain registers already
|
||||
* initialized by processor reset. But it does initialize some of them
|
||||
* anyway, for minimal support of warm restart (restarting in software by
|
||||
* jumping to the reset vector rather than asserting hardware reset).
|
||||
*/
|
||||
|
||||
.begin literal_prefix .ResetVector
|
||||
.section .ResetVector.text, "ax"
|
||||
|
||||
.align 4
|
||||
.global _ResetVector
|
||||
_ResetVector:
|
||||
|
||||
#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE
|
||||
/*
|
||||
* NOTE:
|
||||
*
|
||||
* IMPORTANT: If you move the _ResetHandler portion to a section
|
||||
* other than .ResetVector.text that is outside the range of
|
||||
* the reset vector's 'j' instruction, the _ResetHandler symbol
|
||||
* and a more elaborate j/movi/jx sequence are needed in
|
||||
* .ResetVector.text to dispatch to the new location.
|
||||
*/
|
||||
j _ResetHandler
|
||||
|
||||
.size _ResetVector, . - _ResetVector
|
||||
|
||||
# if XCHAL_HAVE_HALT
|
||||
/*
|
||||
* Xtensa TX: reset vector segment is only 4 bytes, so must place the
|
||||
* unpacker code elsewhere in the memory that contains the reset vector.
|
||||
*/
|
||||
# if XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTRAM0_VADDR
|
||||
.section .iram0.text, "ax"
|
||||
# elif XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTROM0_VADDR
|
||||
.section .irom0.text, "ax"
|
||||
# elif XCHAL_RESET_VECTOR_VADDR == XCHAL_URAM0_VADDR
|
||||
.section .uram0.text, "ax"
|
||||
# else
|
||||
# warning "Xtensa TX reset vector not at start of iram0, irom0, or uram0 -- ROMing LSPs may not work"
|
||||
.text
|
||||
# endif
|
||||
# endif
|
||||
|
||||
.extern __memctl_default
|
||||
|
||||
.align 4
|
||||
.literal_position /* tells the assembler/linker to place literals here */
|
||||
|
||||
.align 4
|
||||
.global _ResetHandler
|
||||
_ResetHandler:
|
||||
#endif
|
||||
|
||||
#if !XCHAL_HAVE_HALT
|
||||
|
||||
/*
|
||||
* Even if the processor supports the non-PC-relative L32R option,
|
||||
* it will always start up in PC-relative mode. We take advantage of
|
||||
* this, and use PC-relative mode at least until we're sure the .lit4
|
||||
* section is in place (which is sometimes only after unpacking).
|
||||
*/
|
||||
.begin no-absolute-literals
|
||||
|
||||
/*
|
||||
* If we have dynamic cache way support, init the caches as soon
|
||||
* as we can, which is now. Except, if we are waking up from a
|
||||
* PSO event, then we need to do this slightly later.
|
||||
*/
|
||||
#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
|
||||
# if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
|
||||
/* Do this later on in the code -- see below */
|
||||
# else
|
||||
movi a0, __memctl_default
|
||||
wsr a0, MEMCTL
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* If we have PSO support, then we must check for a warm start with
|
||||
* caches left powered on. If the caches had been left powered on,
|
||||
* we must restore the state of MEMCTL to the saved state if any.
|
||||
* Note that MEMCTL may not be present depending on config.
|
||||
*/
|
||||
#if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
|
||||
movi a2, XDM_MISC_PWRSTAT /* Read PWRSTAT */
|
||||
movi a3, _xtos_pso_savearea /* Save area address - retained for later */
|
||||
movi a5, CORE_STATE_SIGNATURE /* Signature for compare - retained for later */
|
||||
rer a7, a2 /* PWRSTAT value - retained for later */
|
||||
extui a4, a7, 1, 2 /* Now bottom 2 bits are core wakeup and cache power lost */
|
||||
bnei a4, 1, .Lcold_start /* a4==1 means PSO wakeup, caches did not lose power */
|
||||
l32i a4, a3, CS_SA_signature /* Load save area signature field */
|
||||
sub a4, a4, a5
|
||||
bnez a4, .Lcold_start /* If signature mismatch then do cold start */
|
||||
#if XCHAL_USE_MEMCTL
|
||||
l32i a4, a3, CS_SA_memctl /* Load saved MEMCTL value */
|
||||
movi a0, ~MEMCTL_INV_EN
|
||||
and a0, a4, a0 /* Clear invalidate bit */
|
||||
wsr a0, MEMCTL
|
||||
#endif
|
||||
j .Lwarm_start
|
||||
|
||||
.Lcold_start:
|
||||
|
||||
#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
|
||||
/*
|
||||
* Enable and invalidate all ways of both caches. If there is no
|
||||
* dynamic way support then this write will have no effect.
|
||||
*/
|
||||
movi a0, __memctl_default
|
||||
wsr a0, MEMCTL
|
||||
#endif
|
||||
|
||||
.Lwarm_start:
|
||||
|
||||
#endif
|
||||
|
||||
movi a0, 0 /* a0 is always 0 in this code, used to initialize lots of things */
|
||||
|
||||
#if XCHAL_HAVE_INTERRUPTS /* technically this should be under !FULL_RESET, assuming hard reset */
|
||||
wsr a0, INTENABLE /* make sure that interrupts are shut off (*before* we lower PS.INTLEVEL and PS.EXCM!) */
|
||||
#endif
|
||||
|
||||
#if !XCHAL_HAVE_FULL_RESET
|
||||
|
||||
#if XCHAL_HAVE_CCOUNT && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0) /* pre-LX2 cores only */
|
||||
wsr a0, CCOUNT /* not really necessary, but nice; best done very early */
|
||||
#endif
|
||||
|
||||
/*
|
||||
* For full MMU configs, put page table at an unmapped virtual address.
|
||||
* This ensures that accesses outside the static maps result
|
||||
* in miss exceptions rather than random behaviour.
|
||||
* Assumes XCHAL_SEG_MAPPABLE_VADDR == 0 (true in released MMU).
|
||||
*/
|
||||
#if XCHAL_ITLB_ARF_WAYS > 0 || XCHAL_DTLB_ARF_WAYS > 0
|
||||
wsr a0, PTEVADDR
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Debug initialization
|
||||
*
|
||||
* NOTE: DBREAKCn must be initialized before the combination of these two things:
|
||||
* any load/store, and a lowering of PS.INTLEVEL below DEBUG_LEVEL.
|
||||
* The processor already resets IBREAKENABLE appropriately.
|
||||
*/
|
||||
#if XCHAL_HAVE_DEBUG
|
||||
# if XCHAL_NUM_DBREAK
|
||||
# if XCHAL_NUM_DBREAK >= 2
|
||||
wsr a0, DBREAKC1
|
||||
# endif
|
||||
wsr a0, DBREAKC0
|
||||
dsync /* wait for WSRs to DBREAKCn to complete */
|
||||
# endif
|
||||
|
||||
# if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RA_2004_1 /* pre-LX cores only */
|
||||
/*
|
||||
* Starting in Xtensa LX, ICOUNTLEVEL resets to zero (not 15), so no need to initialize it.
|
||||
* Prior to that we do, otherwise we get an ICOUNT exception, 2^32 instructions after reset.
|
||||
*/
|
||||
rsr a2, ICOUNTLEVEL /* are we being debugged? (detected by ICOUNTLEVEL not 15, or dropped below 12) */
|
||||
bltui a2, 12, 1f /* if so, avoid initializing ICOUNTLEVEL which drops single-steps through here */
|
||||
wsr a0, ICOUNTLEVEL /* avoid ICOUNT exceptions */
|
||||
isync /* wait for WSR to ICOUNTLEVEL to complete */
|
||||
1:
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif /* !XCHAL_HAVE_FULL_RESET */
|
||||
|
||||
#if XCHAL_HAVE_ABSOLUTE_LITERALS
|
||||
/* Technically, this only needs to be done under !FULL_RESET, assuming hard reset: */
|
||||
wsr a0, LITBASE
|
||||
rsync
|
||||
#endif
|
||||
|
||||
#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
|
||||
/*
|
||||
* If we're powering up from a temporary power shut-off (PSO),
|
||||
* restore state saved just prior to shut-off. Note that the
|
||||
* MEMCTL register was already restored earlier, and as a side
|
||||
* effect, registers a3, a5, a7 are now preloaded with values
|
||||
* that we will use here.
|
||||
* a3 - pointer to save area base address (_xtos_pso_savearea)
|
||||
* a5 - saved state signature (CORE_STATE_SIGNATURE)
|
||||
* a7 - contents of PWRSTAT register
|
||||
*/
|
||||
l32i a4, a3, CS_SA_signature /* load save area signature */
|
||||
sub a4, a4, a5 /* compare signature with expected one */
|
||||
# if XTOS_PSO_TEST
|
||||
movi a7, PWRSTAT_WAKEUP_RESET /* pretend PSO warm start with warm caches */
|
||||
# endif
|
||||
bbci.l a7, PWRSTAT_WAKEUP_RESET_SHIFT, 1f /* wakeup from PSO? (branch if not) */
|
||||
/* Yes, wakeup from PSO. Check whether state was properly saved. */
|
||||
addi a5, a7, - PWRSTAT_WAKEUP_RESET /* speculatively clear PSO-wakeup bit */
|
||||
movnez a7, a5, a4 /* if state not saved (corrupted?), mark as cold start */
|
||||
bnez a4, 1f /* if state not saved, just continue with reset */
|
||||
/* Wakeup from PSO with good signature. Now check cache status: */
|
||||
bbci.l a7, PWRSTAT_CACHES_LOST_POWER_SHIFT, .Lpso_restore /* if caches warm, restore now */
|
||||
/* Caches got shutoff. Continue reset, we'll end up initializing caches, and check again later for PSO. */
|
||||
# if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
|
||||
j .Ldonesync /* skip reset sync, only done for cold start */
|
||||
# endif
|
||||
1: /* Cold start. (Not PSO wakeup.) Proceed with normal full reset. */
|
||||
#endif
|
||||
|
||||
#if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
|
||||
/* Core 0 initializes the XMP synchronization variable, if present. This operation needs to
|
||||
happen as early as possible in the startup sequence so that the other cores can be released
|
||||
from reset. */
|
||||
.weak _ResetSync
|
||||
movi a2, _ResetSync /* address of sync variable */
|
||||
rsr.prid a3 /* core and multiprocessor ID */
|
||||
extui a3, a3, 0, 8 /* extract core ID (FIXME: need proper constants for PRID bits to extract) */
|
||||
beqz a2, .Ldonesync /* skip if no sync variable */
|
||||
bnez a3, .Ldonesync /* only do this on core 0 */
|
||||
s32i a0, a2, 0 /* clear sync variable */
|
||||
.Ldonesync:
|
||||
#endif
|
||||
#if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MP_RUNSTALL
|
||||
/* On core 0, this releases other cores. On other cores this has no effect, because
|
||||
runstall control is unconnected. */
|
||||
movi a2, XER_MPSCORE
|
||||
wer a0, a2
|
||||
#endif
|
||||
|
||||
/*
|
||||
* For processors with relocatable vectors, apply any alternate
|
||||
* vector base given to xt-genldscripts, which sets the
|
||||
* _memmap_vecbase_reset symbol accordingly.
|
||||
*/
|
||||
#if XCHAL_HAVE_VECBASE
|
||||
movi a2, _memmap_vecbase_reset /* note: absolute symbol, not a ptr */
|
||||
wsr a2, vecbase
|
||||
#endif
|
||||
|
||||
#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) /* have ATOMCTL ? */
|
||||
# if XCHAL_DCACHE_IS_COHERENT
|
||||
movi a3, 0x25 /* MX -- internal for writeback, RCW otherwise */
|
||||
# else
|
||||
movi a3, 0x15 /* non-MX -- always RCW */
|
||||
# endif
|
||||
wsr a3, ATOMCTL
|
||||
#endif
|
||||
|
||||
#if XCHAL_HAVE_INTERRUPTS && XCHAL_HAVE_DEBUG
|
||||
rsil a2, 1 /* lower PS.INTLEVEL here to make reset vector easier to debug */
|
||||
#endif
|
||||
|
||||
/* If either of the caches does not have dynamic way support, then
|
||||
* use the old (slow) method to init them. If the cache is absent
|
||||
* the macros will expand to empty.
|
||||
*/
|
||||
#if ! XCHAL_HAVE_ICACHE_DYN_WAYS
|
||||
icache_reset a2, a3
|
||||
#endif
|
||||
#if ! XCHAL_HAVE_DCACHE_DYN_WAYS
|
||||
dcache_reset a2, a3
|
||||
#endif
|
||||
|
||||
#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
|
||||
/*
|
||||
* Here, a7 still contains status from the power status register,
|
||||
* or zero if signature check failed.
|
||||
*/
|
||||
bbci.l a7, PWRSTAT_WAKEUP_RESET_SHIFT, .Lcoldstart /* wakeup from PSO with good signature? */
|
||||
* Yes, wakeup from PSO. Caches had been powered down, now are initialized.
|
||||
.Lpso_restore:
|
||||
/*
|
||||
* Assume memory still initialized, so all code still unpacked etc.
|
||||
* So we can just jump/call to relevant state restore code (wherever located).
|
||||
*/
|
||||
movi a2, 0 /* make shutoff routine return zero */
|
||||
movi a3, _xtos_pso_savearea
|
||||
/* Here, as below for _start, call0 is used as an unlimited-range jump. */
|
||||
call0 _xtos_core_restore_nw
|
||||
/* (does not return) */
|
||||
.Lcoldstart:
|
||||
#endif
|
||||
|
||||
#if XCHAL_HAVE_PREFETCH
|
||||
/* Enable cache prefetch if present. */
|
||||
movi a2, XCHAL_CACHE_PREFCTL_DEFAULT
|
||||
wsr a2, PREFCTL
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Now setup the memory attributes. On some cores this "enables" caches.
|
||||
* We do this ahead of unpacking, so it can proceed more efficiently.
|
||||
*
|
||||
* The _memmap_cacheattr_reset symbol's value (address) is defined
|
||||
* by the LSP's linker script, as generated by xt-genldscripts.
|
||||
* If defines 4-bit attributes for eight 512MB regions.
|
||||
*
|
||||
* (NOTE: for cores with the older MMU v1 or v2, or without any memory
|
||||
* protection mechanism, the following code has no effect.)
|
||||
*/
|
||||
#if XCHAL_HAVE_MPU
|
||||
/* If there's an empty background map, setup foreground maps to mimic region protection: */
|
||||
# if XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2
|
||||
.pushsection .rodata, "a"
|
||||
.global _xtos_mpu_attribs
|
||||
.align 4
|
||||
_xtos_mpu_attribs:
|
||||
.word 0x00006000+XCHAL_MPU_ENTRIES-8 * Illegal (---)
|
||||
.word 0x000F7700+XCHAL_MPU_ENTRIES-8 * Writeback (rwx Cacheable Non-shareable wb rd-alloc wr-alloc)
|
||||
.word 0x000D5700+XCHAL_MPU_ENTRIES-8 * WBNA (rwx Cacheable Non-shareable wb rd-alloc)
|
||||
.word 0x000C4700+XCHAL_MPU_ENTRIES-8 * Writethru (rwx Cacheable Non-shareable wt rd-alloc)
|
||||
.word 0x00006700+XCHAL_MPU_ENTRIES-8 * Bypass (rwx Device non-interruptible system-shareable)
|
||||
.popsection
|
||||
|
||||
/*
|
||||
* We assume reset state: all MPU entries zeroed and disabled.
|
||||
* Otherwise we'd need a loop to zero everything.
|
||||
*/
|
||||
movi a2, _memmap_cacheattr_reset /* note: absolute symbol, not a ptr */
|
||||
movi a3, _xtos_mpu_attribs
|
||||
movi a4, 0x20000000 /* 512 MB delta */
|
||||
movi a6, 8
|
||||
movi a7, 1 /* MPU entry vaddr 0, with valid bit set */
|
||||
movi a9, 0 /* cacheadrdis value */
|
||||
wsr.cacheadrdis a9 /* enable everything temporarily while MPU updates */
|
||||
|
||||
/* Write eight MPU entries, from the last one going backwards (entries n-1 thru n-8) */
|
||||
2: extui a8, a2, 28, 4 /* get next attribute nibble (msb first) */
|
||||
extui a5, a8, 0, 2 /* lower two bit indicate whether cached */
|
||||
slli a9, a9, 1 /* add a bit to cacheadrdis... */
|
||||
addi a10, a9, 1 /* set that new bit if... */
|
||||
moveqz a9, a10, a5 /* ... that region is non-cacheable */
|
||||
addx4 a5, a8, a3 /* index into _xtos_mpu_attribs table */
|
||||
addi a8, a8, -5 /* make valid attrib indices negative */
|
||||
movgez a5, a3, a8 /* if not valid attrib, use Illegal */
|
||||
l32i a5, a5, 0 /* load access rights, memtype from table entry */
|
||||
slli a2, a2, 4
|
||||
sub a7, a7, a4 /* next 512MB region (last to first) */
|
||||
addi a6, a6, -1
|
||||
add a5, a5, a6 /* add the index */
|
||||
wptlb a5, a7 /* write the MPU entry */
|
||||
bnez a6, 2b /* loop until done */
|
||||
# else
|
||||
movi a9, XCHAL_MPU_BG_CACHEADRDIS /* default value of CACHEADRDIS for bgnd map */
|
||||
# endif
|
||||
wsr.cacheadrdis a9 /* update cacheadrdis */
|
||||
#elif XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR \
|
||||
|| (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
|
||||
movi a2, _memmap_cacheattr_reset /* note: absolute symbol, not a ptr */
|
||||
cacheattr_set /* set CACHEATTR from a2 (clobbers a3-a8) */
|
||||
#endif
|
||||
|
||||
/* Now that caches are initialized, cache coherency can be enabled. */
|
||||
#if XCHAL_DCACHE_IS_COHERENT
|
||||
# if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0)
|
||||
/* Opt into coherence for MX (for backward compatibility / testing). */
|
||||
movi a3, 1
|
||||
movi a2, XER_CCON
|
||||
wer a3, a2
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* Enable zero-overhead loop instr buffer, and snoop responses, if configured. */
|
||||
/* If HW erratum 453 fix is to be applied, then don't enable loop instr buffer. */
|
||||
#if XCHAL_USE_MEMCTL && XCHAL_SNOOP_LB_MEMCTL_DEFAULT
|
||||
movi a3, XCHAL_SNOOP_LB_MEMCTL_DEFAULT
|
||||
rsr a2, MEMCTL
|
||||
or a2, a2, a3
|
||||
wsr a2, MEMCTL
|
||||
#endif
|
||||
|
||||
/* Caches are all up and running, clear PWRCTL.ShutProcOffOnPWait. */
|
||||
#if XCHAL_HAVE_PSO_CDM
|
||||
movi a2, XDM_MISC_PWRCTL
|
||||
movi a4, ~PWRCTL_CORE_SHUTOFF
|
||||
rer a3, a2
|
||||
and a3, a3, a4
|
||||
wer a3, a2
|
||||
#endif
|
||||
|
||||
#endif /* !XCHAL_HAVE_HALT */
|
||||
|
||||
/*
|
||||
* Unpack code and data (eg. copy ROMed segments to RAM, vectors into
|
||||
* their proper location, etc).
|
||||
*/
|
||||
|
||||
#if defined(XTOS_UNPACK)
|
||||
movi a2, _rom_store_table
|
||||
beqz a2, unpackdone
|
||||
unpack: l32i a3, a2, 0 /* start vaddr */
|
||||
l32i a4, a2, 4 /* end vaddr */
|
||||
l32i a5, a2, 8 /* store vaddr */
|
||||
addi a2, a2, 12
|
||||
bgeu a3, a4, upnext /* skip unless start < end */
|
||||
uploop: l32i a6, a5, 0
|
||||
addi a5, a5, 4
|
||||
s32i a6, a3, 0
|
||||
addi a3, a3, 4
|
||||
bltu a3, a4, uploop
|
||||
j unpack
|
||||
upnext: bnez a3, unpack
|
||||
bnez a5, unpack
|
||||
#endif /* XTOS_UNPACK */

unpackdone:

#if defined(XTOS_UNPACK) || defined(XTOS_MP)
/*
* If writeback caches are configured and enabled, unpacked data must be
* written out to memory before trying to execute it:
*/
dcache_writeback_all a2, a3, a4, 0
icache_sync a2 /* ensure data written back is visible to i-fetch */
/*
* Note: no need to invalidate the i-cache after the above, because we
* already invalidated it further above and did not execute anything within
* unpacked regions afterwards. [Strictly speaking, if an unpacked region
* follows this code very closely, it's possible for cache-ahead to have
* cached a bit of that unpacked region, so in the future we may need to
* invalidate the entire i-cache here again anyway.]
*/
#endif


#if !XCHAL_HAVE_HALT /* skip for TX */

/*
* Now that we know the .lit4 section is present (if it was unpacked)
* (and if absolute literals are used), initialize LITBASE to use it.
*/
#if XCHAL_HAVE_ABSOLUTE_LITERALS && XSHAL_USE_ABSOLUTE_LITERALS
/*
* Switch from PC-relative to absolute (litbase-relative) L32R mode.
* Set LITBASE to 256 kB beyond the start of the literals in .lit4
* (aligns to the nearest 4 kB boundary, LITBASE does not have bits 1..11)
* and set the enable bit (_lit4_start is assumed 4-byte aligned).
*/
movi a2, _lit4_start + 0x40001
wsr a2, LITBASE
rsync
#endif /* have and use absolute literals */
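/*
* Worked example of the value written above (illustrative address only):
* if _lit4_start were 0x60004000, LITBASE gets 0x60004000 + 0x40000 + 1 =
* 0x60044001, i.e. a literal base 256 kB past _lit4_start (L32R offsets in
* absolute mode are negative, so literals from _lit4_start upward are
* reachable), with bit 0 set to enable litbase-relative L32R.
*/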
.end no-absolute-literals /* we can now start using absolute literals */


/* Technically, this only needs to be done pre-LX2, assuming hard reset: */
# if XCHAL_HAVE_WINDOWED && defined(__XTENSA_WINDOWED_ABI__)
/* Windowed register init, so we can call windowed code (eg. C code). */
movi a1, 1
wsr a1, WINDOWSTART
/*
* The processor always clears WINDOWBASE at reset, so no need to clear it here.
* It resets WINDOWSTART to 1 starting with LX2.0/X7.0 (RB-2006.0).
* However, assuming a hard reset is not yet always practical, so do this anyway:
*/
wsr a0, WINDOWBASE
rsync
movi a0, 0 /* possibly a different a0, clear it */
# endif

#if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0 /* only pre-LX2 needs this */
/* Coprocessor option initialization */
# if XCHAL_HAVE_CP
/*
*movi a2, XCHAL_CP_MASK // enable existing CPs
* To allow creating new coprocessors using TC that are not known
* at GUI build time without having to explicitly enable them,
* all CPENABLE bits must be set, even though they may not always
* correspond to a coprocessor.
*/
movi a2, 0xFF /* enable *all* bits, to allow dynamic TIE */
wsr a2, CPENABLE
# endif

/*
* Floating point coprocessor option initialization (at least
* rounding mode, so that floating point ops give predictable results)
*/
# if XCHAL_HAVE_FP && !XCHAL_HAVE_VECTORFPU2005
# define FCR 232 /* floating-point control register (user register number) */
# define FSR 233 /* floating-point status register (user register number) */
rsync /* wait for WSR to CPENABLE to complete before accessing FP coproc state */
wur a0, FCR /* clear FCR (default rounding mode, round-nearest) */
wur a0, FSR /* clear FSR */
# endif
#endif /* pre-LX2 */


/*
* Initialize memory error handler address.
* Putting this address in a register allows multiple instances of
* the same configured core (with separate program images but shared
* code memory, thus forcing the memory error vector to be shared, given
* it is not VECBASE-relative) to have the same memory error vector,
* yet each have their own handler and associated data save area.
*/
#if XCHAL_HAVE_MEM_ECC_PARITY
movi a4, _MemErrorHandler
wsr a4, MESAVE
#endif


/*
* Initialize medium and high priority interrupt dispatchers:
*/
#if HAVE_XSR

/* For asm macros; works for positive a,b smaller than 1000: */
# define GREATERTHAN(a,b) (((b)-(a)) & ~0xFFF)
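/*
* Example: GREATERTHAN(3,2) = ((2-3) & ~0xFFF) = 0xFFFFF000 (nonzero, true),
* while GREATERTHAN(2,3) = ((3-2) & ~0xFFF) = 0 (false). For positive a,b
* below 1000, (b)-(a) only has bits above 0xFFF when it wraps negative,
* i.e. exactly when a > b.
*/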

# ifndef XCHAL_DEBUGLEVEL /* debug option not selected? */
# define XCHAL_DEBUGLEVEL 99 /* bogus value outside 2..6 */
# endif

.macro init_vector level
.if GREATERTHAN(XCHAL_NUM_INTLEVELS+1,\level)
.if XCHAL_DEBUGLEVEL-\level
.weak _Level&level&FromVector
movi a4, _Level&level&FromVector
wsr a4, EXCSAVE+\level
.if GREATERTHAN(\level,XCHAL_EXCM_LEVEL)
movi a5, _Pri_&level&_HandlerAddress
s32i a4, a5, 0
/* If the user provides their own handler, that handler might
* not provide its own _Pri_<n>_HandlerAddress variable for
* linking handlers. In that case, the reference below
* would pull in the XTOS handler anyway, causing a conflict.
* To avoid that, provide a weak version of it here:
*/
.pushsection .data, "aw"
.global _Pri_&level&_HandlerAddress
.weak _Pri_&level&_HandlerAddress
.align 4
_Pri_&level&_HandlerAddress: .space 4
.popsection
.endif
.endif
.endif
.endm
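/*
* For instance, assuming XCHAL_EXCM_LEVEL == 1 and a debug level other
* than 2, "init_vector 2" expands roughly to (sketch of the expansion):
*
*     .weak _Level2FromVector
*     movi a4, _Level2FromVector
*     wsr a4, EXCSAVE+2
*     movi a5, _Pri_2_HandlerAddress
*     s32i a4, a5, 0
*     ... weak _Pri_2_HandlerAddress definition in .data ...
*/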

init_vector 2
init_vector 3
init_vector 4
init_vector 5
init_vector 6

#endif /* HAVE_XSR */


/*
* Complete reset initialization outside the vector,
* to avoid requiring a vector that is larger than necessary.
* This 2nd-stage startup code sets up the C Run-Time (CRT) and calls main().
*
* Here we use call0 not because we expect any return, but
* because the assembler/linker dynamically sizes call0 as
* needed (with -mlongcalls) which it doesn't with j or jx.
* Note: This needs to be call0 regardless of the selected ABI.
*/
call0 _start /* jump to _start (in crt1-*.S) */
/* does not return */

#else /* XCHAL_HAVE_HALT */

j _start /* jump to _start (in crt1-*.S) */
/* (TX has max 64kB IRAM, so J always in range) */

/* Paranoia -- double-check requirements / assumptions of this Xtensa TX code: */
# if !defined(__XTENSA_CALL0_ABI__) || !XCHAL_HAVE_FULL_RESET || XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_CCOUNT || XCHAL_DTLB_ARF_WAYS || XCHAL_HAVE_DEBUG || XCHAL_HAVE_S32C1I || XCHAL_HAVE_ABSOLUTE_LITERALS || XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE || XCHAL_HAVE_PIF || XCHAL_HAVE_WINDOWED
# error "Halt architecture (Xtensa TX) requires: call0 ABI, all flops reset, no exceptions or interrupts, no TLBs, no debug, no S32C1I, no LITBASE, no cache, no PIF, no windowed regs"
# endif

#endif /* XCHAL_HAVE_HALT */


#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE
.size _ResetHandler, . - _ResetHandler
#else
.size _ResetVector, . - _ResetVector
#endif

.text
.global xthals_hw_configid0, xthals_hw_configid1
.global xthals_release_major, xthals_release_minor
.end literal_prefix
59
arch/xtensa/core/sw_isr_table.S
Normal file
@@ -0,0 +1,59 @@
/*
* Copyright (c) 2016 Cadence Design Systems, Inc.
* SPDX-License-Identifier: Apache-2.0
*/

/**
* @file
* @brief ISR table for static ISR declarations for XTENSA
*
* Software ISR table for XTENSA
*/

#include <toolchain.h>
#include <sections.h>
#include <arch/cpu.h>
#include <xtensa/config/core.h>

/*
* Xtensa assembly code uses xt_unhandled_interrupt as the default IRQ handler.
*/
#define _irq_spurious xt_unhandled_interrupt

/*
* Enable preprocessor features, such
* as %expr - evaluate the expression and use it as a string.
*/
.altmacro

/*
* Define an ISR table entry.
* The symbol is declared weak and placed in a section with the
* .gnu.linkonce.d prefix. This allows the linker to override the symbol,
* and the whole section, with one defined by a device driver.
*/
.macro _isr_table_entry_declare index
WDATA(_isr_irq\index)
.section .gnu.linkonce.d.isr_irq\index
_isr_irq\index: .word 0xABAD1DEA, _irq_spurious
.endm
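/*
* For example, "_isr_table_entry_declare 5" expands (sketch) to:
*
*     WDATA(_isr_irq5)
*     .section .gnu.linkonce.d.isr_irq5
*     _isr_irq5: .word 0xABAD1DEA, _irq_spurious
*
* so a driver that defines its own _isr_irq5 in the same linkonce section
* replaces this default entry at link time.
*/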

/*
* Declare the ISR table
*/
.macro _isr_table_declare from, to
counter = \from
.rept (\to - \from)
_isr_table_entry_declare %counter
counter = counter + 1
.endr
.endm

GTEXT(_irq_spurious)
GDATA(_sw_isr_table)

.section .isr_irq0
.align
_sw_isr_table:

_isr_table_declare 0 XCHAL_NUM_INTERRUPTS
89
arch/xtensa/core/swap.S
Normal file
@@ -0,0 +1,89 @@
/*
* Copyright (c) 2016 Cadence Design Systems, Inc.
* SPDX-License-Identifier: Apache-2.0
*/

/**
* @file
* @brief kernel swapper code for Xtensa
*
* This module implements the _Swap() routine for the Xtensa architecture.
*/

#include <xtensa_context.h>
#include <kernel_arch_data.h>
#include <offsets_short.h>

.extern _kernel
/**
unsigned int _Swap (unsigned int basepri);
*/
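/*
* Rough C equivalent of the routine below (a sketch only; the real work
* is stack frame layout and is necessarily assembly):
*
*     unsigned int _Swap(unsigned int basepri)
*     {
*         build a solicited (XT_SOL_*) frame: PC, PS, callee-saved regs;
*         spill register windows (windowed ABI) or save a12-a15 (call0);
*         save coprocessor callee-saved state if XCHAL_CP_NUM > 0;
*         frame->exit = 0;                 // mark frame as solicited
*         _kernel.current->sp = frame;     // field names per the offsets
*         _zxt_dispatch(_kernel.ready_q_cache);   // never returns here
*     }
*/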
.globl _Swap
.type _Swap,@function
.align 4
_Swap:
#ifdef __XTENSA_CALL0_ABI__
addi sp, sp, -XT_SOL_FRMSZ
#else
entry sp, XT_SOL_FRMSZ
#endif
s32i a0, sp, XT_SOL_pc
s32i a2, sp, XT_SOL_ps
#ifdef __XTENSA_CALL0_ABI__
s32i a12, sp, XT_SOL_a12 /* save callee-saved registers */
s32i a13, sp, XT_SOL_a13
s32i a14, sp, XT_SOL_a14
s32i a15, sp, XT_SOL_a15
#else
/* Spill register windows. Calling xthal_window_spill() causes extra */
/* spills and reloads, so we will set things up to call the _nw version */
/* instead to save cycles. */
movi a6, ~(PS_WOE_MASK|PS_INTLEVEL_MASK) /* spills a4-a7 if needed */
and a2, a2, a6 /* clear WOE, INTLEVEL */
addi a2, a2, XCHAL_EXCM_LEVEL /* set INTLEVEL */
wsr a2, PS
rsync
call0 xthal_window_spill_nw
l32i a2, sp, XT_SOL_ps /* restore PS */
addi a2, a2, XCHAL_EXCM_LEVEL
wsr a2, PS
#endif
#if XCHAL_CP_NUM > 0
/* Save coprocessor callee-saved state (if any). At this point CPENABLE */
/* should still reflect which CPs were in use (enabled). */
call0 _xt_coproc_savecs
#endif
movi a2, _kernel
movi a3, 0
l32i a4, a2, KERNEL_OFFSET(current) /* a4 := _kernel->current */

s32i a3, sp, XT_SOL_exit /* 0 to flag as solicited frame */
s32i sp, a4, THREAD_OFFSET(sp) /* current->arch.topOfStack := sp */

#if XCHAL_CP_NUM > 0
/* Clear CPENABLE, also in task's co-processor state save area. */
movi a3, 0
l32i a4, a4, THREAD_OFFSET(cpStack) /* a4 := current->arch.preempCoprocReg.cpStack */
wsr a3, CPENABLE
beqz a4, 1f
s16i a3, a4, XT_CPENABLE /* clear saved cpenable */
1:
#endif

#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
/* Register the context switch */
#ifdef __XTENSA_CALL0_ABI__
call0 _sys_k_event_logger_context_switch
#else
call4 _sys_k_event_logger_context_switch
#endif
#endif

l32i a2, a2, KERNEL_OFFSET(ready_q_cache)
/*
* At this point, the a2 register contains the 'k_thread *' of the
* thread to be swapped in.
*/
call0 _zxt_dispatch
/* Never reaches here. */
144
arch/xtensa/core/thread.c
Normal file
@@ -0,0 +1,144 @@
/*
* Copyright (c) 2016 Cadence Design Systems, Inc.
* SPDX-License-Identifier: Apache-2.0
*/

#ifdef CONFIG_INIT_STACKS
#include <string.h>
#endif /* CONFIG_INIT_STACKS */
#ifdef CONFIG_DEBUG
#include <misc/printk.h>
#endif
#include <kernel_structs.h>
#include <wait_q.h>
#include <xtensa_config.h>

extern void _xt_user_exit(void);
#if CONFIG_MICROKERNEL
extern FUNC_NORETURN void _TaskAbort(void);
#endif
extern void fiber_abort(void);

#if defined(CONFIG_THREAD_MONITOR)
#define THREAD_MONITOR_INIT(tcs) _thread_monitor_init(tcs)
#else
#define THREAD_MONITOR_INIT(tcs) \
	do {/* do nothing */ \
	} while ((0))
#endif

#if defined(CONFIG_THREAD_MONITOR)
/**
*
* @brief Initialize thread monitoring support
*
* Currently only inserts the new thread in the list of active threads.
*
* @return N/A
*/

static inline void _thread_monitor_init(struct tcs *tcs)
{
	unsigned int key;

	/*
	 * Add the newly initialized thread to the head of the list of
	 * threads. This singly linked list maintains ALL the threads in
	 * the system: both tasks and fibers, regardless of whether they
	 * are runnable.
	 */
	key = irq_lock();
	tcs->next_thread = _nanokernel.threads;
	_nanokernel.threads = tcs;
	irq_unlock(key);
}
#endif /* CONFIG_THREAD_MONITOR */

/*
* @brief Initialize a new thread from its stack space
*
* The control structure (TCS) is put at the low end of the stack. An
* initial context, to be "restored" by __return_from_coop(), is put at
* the high end of the stack; that memory can be reused by the stack
* once the context is no longer needed.
*
* The initial context is a basic stack frame that contains the arguments
* for _thread_entry(), a return address that points at _thread_entry(),
* and the status register.
*
* <options> is currently unused.
*
* @param pStack the pointer to aligned stack memory
* @param stackSize the stack size in bytes
* @param pEntry thread entry point routine
* @param p1 first param to entry point
* @param p2 second param to entry point
* @param p3 third param to entry point
* @param prio thread priority, -1 for a task
* @param options is unused (saved for future expansion)
*
* @return N/A
*/
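/*
* Sketch of the stack area laid out below (assumes XT_XTRA_SIZE covers
* both the initial XtExcFrame and the coprocessor save area; addresses
* grow to the right):
*
*   pStack                                              stackEnd (aligned)
*   | struct tcs | ... thread stack ... | XtExcFrame | CP save area |
*                                       ^ stackEnd - XT_XTRA_SIZE
*                                                    ^ stackEnd - XT_CP_SIZE
*/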

void _new_thread(char *pStack, size_t stackSize,
		 void (*pEntry)(void *, void *, void *),
		 void *p1, void *p2, void *p3,
		 int prio, unsigned int options)
{
	/* Align stack end to maximum alignment requirement. */
	char *stackEnd = (char *)ROUND_DOWN(pStack + stackSize,
		(XCHAL_TOTAL_SA_ALIGN < 16 ? 16 : XCHAL_TOTAL_SA_ALIGN));
	/* TCS is located at the low end of the stack area; the initial
	 * frames at the high end.
	 */
	struct tcs *tcs = (struct tcs *)(pStack);
#ifdef CONFIG_DEBUG
	printk("\nstackEnd = %p\n", stackEnd);
#endif
#ifdef CONFIG_INIT_STACKS
	memset(pStack, 0xaa, stackSize);
#endif
#if XCHAL_CP_NUM > 0
	/* Coprocessor's stack alignment is granted as both operands are aligned */
	tcs->arch.preempCoprocReg.cpStack = stackEnd - XT_CP_SIZE;
	/* Coprocessor's save area alignment is granted as both operands are aligned */
	*(uint32_t *)(tcs->arch.preempCoprocReg.cpStack + XT_CP_ASA) =
		(uint32_t)stackEnd - XT_CP_SA_SIZE;
#ifdef CONFIG_DEBUG
	printk("cpStack = %p\n", tcs->arch.preempCoprocReg.cpStack);
	printk("cpAsa = %p\n",
	       (void *)*(uint32_t *)(tcs->arch.preempCoprocReg.cpStack + XT_CP_ASA));
#endif
#endif
	/* Thread's first frame alignment is granted as both operands are aligned */
	XtExcFrame *pInitCtx = (XtExcFrame *)(stackEnd - XT_XTRA_SIZE);
#ifdef CONFIG_DEBUG
	printk("pInitCtx = %p\n", pInitCtx);
#endif
	/* Explicitly initialize certain saved registers */
	pInitCtx->pc = (uint32_t)_thread_entry; /* task entrypoint */
	pInitCtx->a1 = (uint32_t)pInitCtx + XT_STK_FRMSZ; /* physical top of stack frame */
	pInitCtx->exit = (uint32_t)_xt_user_exit; /* user exception exit dispatcher */
	/* Set initial PS to int level 0, EXCM disabled, user mode. */
	/* Also set the entry point arguments. */
#ifdef __XTENSA_CALL0_ABI__
	pInitCtx->a2 = (uint32_t)pEntry;
	pInitCtx->a3 = (uint32_t)p1;
	pInitCtx->a4 = (uint32_t)p2;
	pInitCtx->a5 = (uint32_t)p3;
	pInitCtx->ps = PS_UM | PS_EXCM;
#else
	/* For windowed ABI set also WOE and CALLINC (pretend task is 'call8'). */
	pInitCtx->a6 = (uint32_t)pEntry;
	pInitCtx->a7 = (uint32_t)p1;
	pInitCtx->a8 = (uint32_t)p2;
	pInitCtx->a9 = (uint32_t)p3;
	pInitCtx->ps = PS_UM | PS_EXCM | PS_WOE | PS_CALLINC(2);
#endif
	tcs->callee_saved.topOfStack = pInitCtx;
	tcs->arch.flags = 0;
	tcs->arch.prio = prio;

	/* initial values in all other registers/TCS entries are irrelevant */

	THREAD_MONITOR_INIT(tcs);
}
387
arch/xtensa/core/xt_zephyr.S
Normal file
@@ -0,0 +1,387 @@
/*
* Copyright (c) 2016 Cadence Design Systems, Inc.
* SPDX-License-Identifier: Apache-2.0
*/

#include <xtensa_context.h>
#include <xtensa_timer.h>
#include <offsets_short.h>
#include <kernel_structs.h>

.extern _interrupt_stack
.extern _kernel
#ifdef CONFIG_SYS_CLOCK_EXISTS
.extern _timer_int_handler
#endif
.set _interrupt_stack_top, _interrupt_stack + CONFIG_ISR_STACK_SIZE

/*
* _zxt_dispatch(k_thread_t *_thread)
*/
.text
.globl _zxt_dispatch
.type _zxt_dispatch,@function
.align 4
_zxt_dispatch:
mov a3, a2 /* a3 := _thread = _kernel.current */
l32i sp, a3, THREAD_OFFSET(sp) /* SP := current->topOfStack; */

/* Determine the type of stack frame. */
l32i a2, sp, XT_STK_exit /* exit dispatcher or solicited flag */
bnez a2, .L_frxt_dispatch_stk

.L_frxt_dispatch_sol:
/* Solicited stack frame. Restore retval from _Swap */
l32i a2, a3, THREAD_OFFSET(retval)
l32i a3, sp, XT_SOL_ps

#ifdef __XTENSA_CALL0_ABI__
l32i a12, sp, XT_SOL_a12
l32i a13, sp, XT_SOL_a13
l32i a14, sp, XT_SOL_a14
l32i a15, sp, XT_SOL_a15
#endif
l32i a0, sp, XT_SOL_pc
#if XCHAL_CP_NUM > 0
/* Ensure wsr.CPENABLE is complete (should be, it was cleared on entry). */
rsync
#endif
/* As soon as PS is restored, interrupts can happen. No need to sync PS. */
wsr a3, PS
#ifdef __XTENSA_CALL0_ABI__
addi sp, sp, XT_SOL_FRMSZ
ret
#else
retw
#endif

.L_frxt_dispatch_stk:

#if XCHAL_CP_NUM > 0
/* Restore CPENABLE from task's co-processor save area. */
l32i a2, a3, THREAD_OFFSET(cpStack)
l16ui a3, a2, XT_CPENABLE
wsr a3, CPENABLE
#endif
/*
* Interrupt stack frame.
* Restore full context and return to exit dispatcher.
*/
call0 _xt_context_restore

/* In Call0 ABI, restore callee-saved regs (A12, A13 already restored). */
#ifdef __XTENSA_CALL0_ABI__
l32i a14, sp, XT_STK_a14
l32i a15, sp, XT_STK_a15
#endif

#if XCHAL_CP_NUM > 0
/* Ensure wsr.CPENABLE has completed. */
rsync
#endif

/*
* Must return via the exit dispatcher corresponding to the entrypoint from
* which this was called. Interruptee's A0, A1, PS, PC are restored and
* the interrupt stack frame is deallocated in the exit dispatcher.
*/
l32i a0, sp, XT_STK_exit
ret
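/*
* Frame-type selection above, as a C sketch (field names illustrative):
*
*     sp = thread->sp;
*     if (frame->exit == 0)       // solicited: thread blocked in _Swap()
*         restore XT_SOL_* regs and PS, return _Swap's retval (ret/retw);
*     else                        // preempted: full interrupt frame
*         restore CPENABLE and full context, jump to saved exit dispatcher;
*/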

/*
*******************************************************************************
* _zxt_int_enter
* void _zxt_int_enter(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_ENTER function for
* Zephyr. Saves the rest of the interrupt context (not already saved).
* May only be called from assembly code by the 'call0' instruction, with
* interrupts disabled.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
*******************************************************************************
*/
.globl _zxt_int_enter
.type _zxt_int_enter,@function
.align 4
_zxt_int_enter:

/* Save a12-13 in the stack frame as required by _xt_context_save. */
s32i a12, a1, XT_STK_a12
s32i a13, a1, XT_STK_a13

/* Save return address in a safe place (free a0). */
mov a12, a0

/* Save the rest of the interrupted context (preserves A12-13). */
call0 _xt_context_save

/*
* Save interrupted task's SP in TCB only if not nesting.
* Manage nesting directly rather than call the generic IntEnter() (in
* windowed ABI we can't call a C function here anyway because PS.EXCM is
* still set).
*/
movi a2, _kernel /* a2 := _kernel */
l32i a3, a2, KERNEL_OFFSET(nested) /* a3 := _kernel->nested */
addi a3, a3, 1 /* increment nesting count */
s32i a3, a2, KERNEL_OFFSET(nested) /* save nesting count */
bnei a3, 1, .Lnested /* !=0 before incr, so nested */

l32i a3, a2, KERNEL_OFFSET(current) /* a3 := _kernel->current */
s32i a1, a3, THREAD_OFFSET(sp) /* save SP to current's top of stack */
movi a1, _interrupt_stack_top /* a1 = top of intr stack */
.Lnested:
1:
mov a0, a12 /* restore return addr and return */
ret
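/*
* Equivalent C sketch of the nesting bookkeeping above (illustration;
* field names follow the offsets used in this file):
*
*     if (++_kernel.nested == 1) {          // outermost interrupt only
*         _kernel.current->sp = sp;         // park the task's SP
*         sp = _interrupt_stack_top;        // switch to the ISR stack
*     }                                     // else: already on ISR stack
*/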

/*
* _zxt_int_exit
* void _zxt_int_exit(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_EXIT function for
* Zephyr. If required, performs a context switch: restores the (possibly)
* new task's context and returns to the exit dispatcher saved in the
* task's stack frame at XT_STK_EXIT.
* May only be called from assembly code by the 'call0' instruction. Does not
* return to caller.
* See the description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
*/
.globl _zxt_int_exit
.type _zxt_int_exit,@function
.align 4
_zxt_int_exit:

rsil a0, XCHAL_EXCM_LEVEL /* lock out interrupts */
movi a2, _kernel
l32i a3, a2, KERNEL_OFFSET(nested) /* _kernel->nested */
addi a3, a3, -1 /* decrement nesting count */
s32i a3, a2, KERNEL_OFFSET(nested) /* save nesting count */
bnez a3, .Lnesting /* !=0 after decr so still nested */

l32i a3, a2, KERNEL_OFFSET(current) /* a3 := _kernel->current */
beqz a3, .Lnoswitch
l32i a1, a3, THREAD_OFFSET(sp) /* SP - stack, a3 = current TCB */

l32i a4, a3, ___thread_arch_t_flags_OFFSET
movi a5, 0
and a4, a4, a5
beqz a4, .Lnoswitch
movi a4, 0
s32i a4, a3, 0 /* zero out the flag for next time */

1:
/*
* When using call0 ABI, callee-saved registers a12-15 need to be saved
* before enabling preemption. They were already saved by _zxt_int_enter().
*/
#ifdef __XTENSA_CALL0_ABI__
s32i a14, a1, XT_STK_a14
s32i a15, a1, XT_STK_a15
#endif

#if XCHAL_CP_NUM > 0
l32i a4, a3, THREAD_OFFSET(cpStack)
rsr a5, CPENABLE
s16i a5, a4, XT_CPENABLE /* cp_state->cpenable = CPENABLE; */
movi a3, 0
wsr a3, CPENABLE /* disable all co-processors */
#endif

l32i a2, a2, KERNEL_OFFSET(ready_q_cache)
/*
* At this point, the a2 register contains the 'k_thread *' of the
* thread to be swapped in.
*/
call0 _zxt_dispatch /* tail-call dispatcher */
/* Never returns here. */

.Lnoswitch:
/*
* If we come here, we are about to resume the interrupted task.
*/

.Lnesting:
/*
* We come here only if there was no context switch, that is, if this
* is a nested interrupt or the interrupted task was not preempted.
* In either case there's no need to load the SP.
*/

/* Restore full context from interrupt stack frame */
call0 _xt_context_restore

/*
* Must return via the exit dispatcher corresponding to the entrypoint
* from which this was called. Interruptee's A0, A1, PS, PC are restored
* and the interrupt stack frame is deallocated in the exit dispatcher.
*/
l32i a0, a1, XT_STK_exit
ret

/*
* _zxt_timer_int
* void _zxt_timer_int(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_TIMER_INT function.
* Called on every timer interrupt.
* Manages the tick timer and calls _timer_int_handler() every tick.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
* Callable from C.
* Implemented in assembly code for performance.
*
*/
.globl _zxt_timer_int
.type _zxt_timer_int,@function
.align 4
_zxt_timer_int:

/*
* Xtensa timers work by comparing a cycle counter with a preset value.
* Once the match occurs an interrupt is generated, and the handler has to
* set a new cycle count into the comparator.
* To avoid clock drift due to interrupt latency, the new cycle count is
* computed from the old one, not from the time the interrupt was serviced.
* However, if a timer interrupt is ever serviced more than one tick late,
* it is necessary to process multiple ticks until the new cycle count is
* in the future, otherwise the next timer interrupt would not occur until
* after the cycle counter had wrapped (2^32 cycles later).
*
* do {
*     ticks++;
*     old_ccompare = read_ccompare_i();
*     write_ccompare_i( old_ccompare + divisor );
*     service one tick;
*     diff = read_ccount() - old_ccompare;
* } while ( diff > divisor );
*/

ENTRY(16)
.L_xt_timer_int_catchup:
#ifdef CONFIG_SYS_CLOCK_EXISTS

#if CONFIG_XTENSA_INTERNAL_TIMER || (CONFIG_XTENSA_TIMER_IRQ < 0)
/* Update the timer comparator for the next tick. */
#ifdef XT_CLOCK_FREQ
movi a2, XT_TICK_DIVISOR /* a2 = comparator increment */
#else
movi a3, _xt_tick_divisor
l32i a2, a3, 0 /* a2 = comparator increment */
#endif
rsr a3, XT_CCOMPARE /* a3 = old comparator value */
add a4, a3, a2 /* a4 = new comparator value */
wsr a4, XT_CCOMPARE /* update comp. and clear interrupt */
esync
#endif /* CONFIG_XTENSA_INTERNAL_TIMER || (CONFIG_XTENSA_TIMER_IRQ < 0) */


#ifdef __XTENSA_CALL0_ABI__
/* Preserve a2 and a3 across C calls. */
s32i a2, sp, 4
s32i a3, sp, 8
/* TODO: movi a2, _xt_interrupt_table */
movi a3, _timer_int_handler
/* TODO: l32i a2, a2, 0 */
callx0 a3
/* Restore a2 and a3. */
l32i a2, sp, 4
l32i a3, sp, 8
#else
/* TODO: movi a6, _xt_interrupt_table */
movi a7, _timer_int_handler
/* TODO: l32i a6, a6, 0 */
callx4 a7
#endif

#if CONFIG_XTENSA_INTERNAL_TIMER || (CONFIG_XTENSA_TIMER_IRQ < 0)
/* Check if we need to process more ticks to catch up. */
esync /* ensure comparator update complete */
rsr a4, CCOUNT /* a4 = cycle count */
sub a4, a4, a3 /* diff = ccount - old comparator */
blt a2, a4, .L_xt_timer_int_catchup /* repeat while diff > divisor */
#endif /* CONFIG_XTENSA_INTERNAL_TIMER || (CONFIG_XTENSA_TIMER_IRQ < 0) */

#endif /* CONFIG_SYS_CLOCK_EXISTS */

RET(16)

/*
* _zxt_tick_timer_init
* void _zxt_tick_timer_init(void)
*
* Initialize the timer and timer interrupt handler (_xt_tick_divisor_init()
* has already been called).
* Callable from C (obeys ABI conventions on entry).
*
*/
.globl _zxt_tick_timer_init
.type _zxt_tick_timer_init,@function
.align 4
_zxt_tick_timer_init:

ENTRY(48)
#ifdef CONFIG_SYS_CLOCK_EXISTS
#if CONFIG_XTENSA_INTERNAL_TIMER || (CONFIG_XTENSA_TIMER_IRQ < 0)

/* Set up the periodic tick timer (assume enough time to complete init). */
#ifdef XT_CLOCK_FREQ
movi a3, XT_TICK_DIVISOR
#else
movi a2, _xt_tick_divisor
l32i a3, a2, 0
#endif
rsr a2, CCOUNT /* current cycle count */
add a2, a2, a3 /* time of first timer interrupt */
wsr a2, XT_CCOMPARE /* set the comparator */

/*
* Enable the timer interrupt at the device level. Don't write directly
* to the INTENABLE register because it may be virtualized.
*/
#ifdef __XTENSA_CALL0_ABI__
movi a2, XT_TIMER_INTEN
call0 _xt_ints_on
#else
movi a6, XT_TIMER_INTEN
call4 _xt_ints_on
#endif

#endif /* CONFIG_XTENSA_INTERNAL_TIMER || (CONFIG_XTENSA_TIMER_IRQ < 0) */
#endif /* CONFIG_SYS_CLOCK_EXISTS */
RET(48)

/*
* _zxt_task_coproc_state
* void _zxt_task_coproc_state(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_CP_STATE function.
*
* May only be called when a task is running, not within an interrupt handler
* (returns 0 in that case).
* May only be called from assembly code by the 'call0' instruction.
* Does NOT obey ABI conventions.
* Returns in A15 a pointer to the base of the co-processor state save area
* for the current task.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
*/
#if XCHAL_CP_NUM > 0

.globl _zxt_task_coproc_state
.type _zxt_task_coproc_state,@function
.align 4
_zxt_task_coproc_state:
movi a2, _kernel
l32i a15, a2, KERNEL_OFFSET(nested)
bnez a15, 1f
l32i a2, a2, KERNEL_OFFSET(current)
beqz a2, 1f
l32i a15, a2, THREAD_OFFSET(cpStack)
ret

1: movi a15, 0
2: ret
#endif /* XCHAL_CP_NUM > 0 */
608
arch/xtensa/core/xtensa_context.S
Normal file
@@ -0,0 +1,608 @@
/*
* Copyright (c) 2016 Cadence Design Systems, Inc.
* SPDX-License-Identifier: Apache-2.0
*/

/*******************************************************************************

XTENSA CONTEXT SAVE AND RESTORE ROUTINES

Low-level Call0 functions for handling generic context save and restore of
registers not specifically addressed by the interrupt vectors and handlers.
Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
Except for the calls to RTOS functions, this code is generic to Xtensa.

Note that in Call0 ABI, interrupt handlers are expected to preserve the callee-
save regs (A12-A15), which is always the case if the handlers are coded in C.
However A12, A13 are made available as scratch registers for interrupt dispatch
code, so are presumed saved anyway, and are always restored even in Call0 ABI.
Only A14, A15 are truly handled as callee-save regs.

Because Xtensa is a configurable architecture, this port supports all user
generated configurations (except restrictions stated in the release notes).
This is accomplished by conditional compilation using macros and functions
defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
Only the processor state included in your configuration is saved and restored,
including any processor state added by user configuration options or TIE.

*******************************************************************************/

/* Warn nicely if this file gets named with a lowercase .s instead of .S: */
#define NOERROR #
NOERROR: .error "C preprocessor needed for this file: make sure its filename\
 ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option."


#include "xtensa_rtos.h"

#ifdef XT_USE_OVLY
#include <xtensa/overlay_os_asm.h>
#endif

.text

/*******************************************************************************

_xt_context_save

!! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the
interrupt stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_restore (which also restores A12, A13).

Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
This function preserves A12 & A13 in order to provide the caller with 2 scratch
regs that need not be saved over the call to this function. The choice of which
2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,
to avoid moving data more than necessary. Caller can assign regs accordingly.

Entry Conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Original A12, A13 have already been saved in the interrupt stack frame.
Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
point of interruption.
If windowed ABI, PS.EXCM = 1 (exceptions disabled).

Exit conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
A12, A13 as at entry (preserved).
If windowed ABI, PS.EXCM = 1 (exceptions disabled).

*******************************************************************************/

.global _xt_context_save
.type _xt_context_save,@function
.align 4
_xt_context_save:

s32i a2, sp, XT_STK_a2
s32i a3, sp, XT_STK_a3
s32i a4, sp, XT_STK_a4
s32i a5, sp, XT_STK_a5
s32i a6, sp, XT_STK_a6
s32i a7, sp, XT_STK_a7
s32i a8, sp, XT_STK_a8
s32i a9, sp, XT_STK_a9
s32i a10, sp, XT_STK_a10
s32i a11, sp, XT_STK_a11

/*
Call0 ABI callee-saved regs a12-15 do not need to be saved here.
a12-13 are the caller's responsibility so it can use them as scratch.
So only need to save a14-a15 here for Windowed ABI (not Call0).
*/
#ifndef __XTENSA_CALL0_ABI__
s32i a14, sp, XT_STK_a14
s32i a15, sp, XT_STK_a15
#endif

rsr a3, SAR
s32i a3, sp, XT_STK_sar

#if XCHAL_HAVE_LOOPS
rsr a3, LBEG
s32i a3, sp, XT_STK_lbeg
rsr a3, LEND
s32i a3, sp, XT_STK_lend
rsr a3, LCOUNT
s32i a3, sp, XT_STK_lcount
#endif

#if XT_USE_SWPRI
/* Save virtual priority mask */
movi a3, _xt_vpri_mask
l32i a3, a3, 0
s32i a3, sp, XT_STK_VPRI
#endif

#if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
mov a9, a0 /* preserve ret addr */
#endif

#ifndef __XTENSA_CALL0_ABI__
/*
To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
Need to save a9,12,13 temporarily (in frame temps) and recover originals.
Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
and underflow exceptions disabled (assured by PS.EXCM == 1).
*/
s32i a12, sp, XT_STK_tmp0 /* temp. save stuff in stack frame */
s32i a13, sp, XT_STK_tmp1
s32i a9, sp, XT_STK_tmp2

/*
Save the overlay state if we are supporting overlays. Since we just saved
three registers, we can conveniently use them here. Note that as of now,
overlays only work for windowed calling ABI.
*/
#ifdef XT_USE_OVLY
l32i a9, sp, XT_STK_PC /* recover saved PC */
_xt_overlay_get_state a9, a12, a13
s32i a9, sp, XT_STK_OVLY /* save overlay state */
#endif

l32i a12, sp, XT_STK_a12 /* recover original a9,12,13 */
l32i a13, sp, XT_STK_a13
l32i a9, sp, XT_STK_a9
addi sp, sp, XT_STK_FRMSZ /* restore the interruptee's SP */
call0 xthal_window_spill_nw /* preserves only a4,5,8,9,12,13 */
addi sp, sp, -XT_STK_FRMSZ
l32i a12, sp, XT_STK_tmp0 /* recover stuff from stack frame */
l32i a13, sp, XT_STK_tmp1
l32i a9, sp, XT_STK_tmp2
#endif

#if XCHAL_EXTRA_SA_SIZE > 0
/*
NOTE: Normally the xthal_save_extra_nw macro only affects address
registers a2-a5. It is theoretically possible for Xtensa processor
designers to write TIE that causes more address registers to be
affected, but it is generally unlikely. If that ever happens,
more registers need to be saved/restored around this macro invocation.
Here we assume a9,12,13 are preserved.
Future Xtensa tools releases might limit the regs that can be affected.
*/
addi a2, sp, XT_STK_EXTRA /* where to save it */
# if XCHAL_EXTRA_SA_ALIGN > 16
movi a3, -XCHAL_EXTRA_SA_ALIGN
and a2, a2, a3 /* align dynamically >16 bytes */
# endif
call0 xthal_save_extra_nw /* destroys a0,2,3,4,5 */
#endif

#if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
mov a0, a9 /* retrieve ret addr */
#endif

ret

/*******************************************************************************

_xt_context_restore

!! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
stack frame defined in xtensa_rtos.h .
Its counterpart is _xt_context_save (whose caller saved A12, A13).

Caller is responsible to restore PC, PS, A0, A1 (SP).

Entry Conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").

Exit conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Other processor state except PC, PS, A0, A1 (SP), is as at the point
of interruption.

*******************************************************************************/

.global _xt_context_restore
.type _xt_context_restore,@function
.align 4
_xt_context_restore:

#if XCHAL_EXTRA_SA_SIZE > 0
/*
NOTE: Normally the xthal_restore_extra_nw macro only affects address
registers a2-a5. It is theoretically possible for Xtensa processor
designers to write TIE that causes more address registers to be
affected, but it is generally unlikely. If that ever happens,
more registers need to be saved/restored around this macro invocation.
Here we only assume a13 is preserved.
Future Xtensa tools releases might limit the regs that can be affected.
*/
mov a13, a0 /* preserve ret addr */
addi a2, sp, XT_STK_EXTRA /* where to find it */
# if XCHAL_EXTRA_SA_ALIGN > 16
movi a3, -XCHAL_EXTRA_SA_ALIGN
and a2, a2, a3 /* align dynamically >16 bytes */
# endif
call0 xthal_restore_extra_nw /* destroys a0,2,3,4,5 */
mov a0, a13 /* retrieve ret addr */
#endif

#if XCHAL_HAVE_LOOPS
l32i a2, sp, XT_STK_lbeg
l32i a3, sp, XT_STK_lend
wsr a2, LBEG
l32i a2, sp, XT_STK_lcount
wsr a3, LEND
wsr a2, LCOUNT
#endif

#ifdef XT_USE_OVLY
/*
If we are using overlays, this is a good spot to check if we need
to restore an overlay for the incoming task. Here we have a bunch
of registers to spare. Note that this step is going to use a few
bytes of storage below SP (SP-20 to SP-32) if an overlay is going
to be restored.
*/
l32i a2, sp, XT_STK_pc /* retrieve PC */
l32i a3, sp, XT_STK_ps /* retrieve PS */
l32i a4, sp, XT_STK_ovly /* retrieve overlay state */
l32i a5, sp, XT_STK_a1 /* retrieve stack ptr */
_xt_overlay_check_map a2, a3, a4, a5, a6
s32i a2, sp, XT_STK_pc /* save updated PC */
s32i a3, sp, XT_STK_ps /* save updated PS */
#endif

#ifdef XT_USE_SWPRI
/* Restore virtual interrupt priority and interrupt enable */
movi a3, _xt_intdata
l32i a4, a3, 0 /* a4 = _xt_intenable */
l32i a5, sp, XT_STK_VPRI /* a5 = saved _xt_vpri_mask */
and a4, a4, a5
wsr a4, INTENABLE /* update INTENABLE */
s32i a5, a3, 4 /* restore _xt_vpri_mask */
#endif

l32i a3, sp, XT_STK_sar
l32i a2, sp, XT_STK_a2
wsr a3, SAR
l32i a3, sp, XT_STK_a3
l32i a4, sp, XT_STK_a4
l32i a5, sp, XT_STK_a5
l32i a6, sp, XT_STK_a6
l32i a7, sp, XT_STK_a7
l32i a8, sp, XT_STK_a8
l32i a9, sp, XT_STK_a9
l32i a10, sp, XT_STK_a10
l32i a11, sp, XT_STK_a11

/*
Call0 ABI callee-saved regs a12-15 do not need to be restored here.
However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
so need to be restored anyway, despite being callee-saved in Call0.
*/
l32i a12, sp, XT_STK_a12
l32i a13, sp, XT_STK_a13
#ifndef __XTENSA_CALL0_ABI__
l32i a14, sp, XT_STK_a14
l32i a15, sp, XT_STK_a15
#endif

ret


/*******************************************************************************

_xt_coproc_init

Initializes global co-processor management data, setting all co-processors
to "unowned". Leaves CPENABLE as it found it (does NOT clear it).

Called during initialization of the RTOS, before any threads run.

This may be called from normal Xtensa single-threaded application code which
might use co-processors. The Xtensa run-time initialization enables all
co-processors. They must remain enabled here, else a co-processor exception
might occur outside of a thread, which the exception handler doesn't expect.

Entry Conditions:
Xtensa single-threaded run-time environment is in effect.
No thread is yet running.

Exit conditions:
None.

Obeys ABI conventions per prototype:
void _xt_coproc_init(void)

*******************************************************************************/

#if XCHAL_CP_NUM > 0

.global _xt_coproc_init
.type _xt_coproc_init,@function
.align 4
_xt_coproc_init:
ENTRY0

/* Initialize thread co-processor ownerships to 0 (unowned). */
movi a2, _xt_coproc_owner_sa /* a2 = base of owner array */
addi a3, a2, XCHAL_CP_MAX << 2 /* a3 = top+1 of owner array */
movi a4, 0 /* a4 = 0 (unowned) */
1: s32i a4, a2, 0
addi a2, a2, 4
bltu a2, a3, 1b

RET0

#endif


/*******************************************************************************

_xt_coproc_release

Releases any and all co-processors owned by a given thread. The thread is
identified by its co-processor state save area defined in xtensa_context.h .

Must be called before a thread's co-proc save area is deleted to avoid
memory corruption when the exception handler tries to save the state.
May be called when a thread terminates or completes but does not delete
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).

Entry Conditions:
A2 = Pointer to base of co-processor state save area.

Exit conditions:
None.

Obeys ABI conventions per prototype:
void _xt_coproc_release(void * coproc_sa_base)

*******************************************************************************/

#if XCHAL_CP_NUM > 0

.global _xt_coproc_release
.type _xt_coproc_release,@function
.align 4
_xt_coproc_release:
ENTRY0 /* a2 = base of save area */

movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
addi a4, a3, XCHAL_CP_MAX << 2 /* a4 = top+1 of owner array */
movi a5, 0 /* a5 = 0 (unowned) */

rsil a6, XCHAL_EXCM_LEVEL /* lock interrupts */

1: l32i a7, a3, 0 /* a7 = owner at a3 */
bne a2, a7, 2f /* if (coproc_sa_base == owner) */
s32i a5, a3, 0 /* owner = unowned */
2: addi a3, a3, 1<<2 /* a3 = next entry in owner array */
bltu a3, a4, 1b /* repeat until end of array */

3: wsr a6, PS /* restore interrupts */

RET0

#endif


/*******************************************************************************
_xt_coproc_savecs

If there is a current thread and it has a coprocessor state save area, then
save all callee-saved state into this area. This function is called from the
solicited context switch handler. It calls a system-specific function to get
the coprocessor save area base address.

Entry conditions:
- The thread being switched out is still the current thread.
- CPENABLE state reflects which coprocessors are active.
- Registers have been saved/spilled already.

Exit conditions:
- All necessary CP callee-saved state has been saved.
- Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
*******************************************************************************/
#if XCHAL_CP_NUM > 0

.extern _xt_coproc_sa_offset /* external reference */

.global _xt_coproc_savecs
.type _xt_coproc_savecs,@function
.align 4
_xt_coproc_savecs:

/* At entry, CPENABLE should be showing which CPs are enabled. */

rsr a2, CPENABLE /* a2 = which CPs are enabled */
beqz a2, .Ldone /* quick exit if none */
mov a14, a0 /* save return address */
call0 XT_RTOS_CP_STATE /* get address of CP save area */
mov a0, a14 /* restore return address */
beqz a15, .Ldone /* if none then nothing to do */
s16i a2, a15, XT_CP_CS_ST /* save mask of CPs being stored */
movi a13, _xt_coproc_sa_offset /* array of CP save offsets */
l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */

#if XCHAL_CP0_SA_SIZE
bbci.l a2, 0, 2f /* CP 0 not enabled */
l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
add a3, a14, a15 /* a3 = save area for CP 0 */
xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
bbci.l a2, 1, 2f /* CP 1 not enabled */
l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
add a3, a14, a15 /* a3 = save area for CP 1 */
xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
bbci.l a2, 2, 2f
l32i a14, a13, 8
add a3, a14, a15
xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
bbci.l a2, 3, 2f
l32i a14, a13, 12
add a3, a14, a15
xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
bbci.l a2, 4, 2f
l32i a14, a13, 16
add a3, a14, a15
xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
bbci.l a2, 5, 2f
l32i a14, a13, 20
add a3, a14, a15
xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
bbci.l a2, 6, 2f
l32i a14, a13, 24
add a3, a14, a15
xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
bbci.l a2, 7, 2f
l32i a14, a13, 28
add a3, a14, a15
xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone:
ret
#endif
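/*
* The unrolled #if blocks above amount to the following loop (C sketch
* only; the xchal_cpN_store macros are per-CP HAL code, not C calls):
*
*     for (i = 0; i < XCHAL_CP_NUM; i++)
*         if (cpenable & (1 << i))
*             store CP i state at sa_base + _xt_coproc_sa_offset[i];
*/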


/*******************************************************************************
_xt_coproc_restorecs

Restore any callee-saved coprocessor state for the incoming thread.
This function is called from coprocessor exception handling, when giving
ownership to a thread that solicited a context switch earlier. It calls a
system-specific function to get the coprocessor save area base address.

Entry conditions:
- The incoming thread is set as the current thread.
- CPENABLE is set up correctly for all required coprocessors.
- a2 = mask of coprocessors to be restored.

Exit conditions:
- All necessary CP callee-saved state has been restored.
- CPENABLE - unchanged.
- Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
*******************************************************************************/
#if XCHAL_CP_NUM > 0

.global _xt_coproc_restorecs
.type _xt_coproc_restorecs,@function
.align 4
_xt_coproc_restorecs:

mov a14, a0 /* save return address */
call0 XT_RTOS_CP_STATE /* get address of CP save area */
mov a0, a14 /* restore return address */
beqz a15, .Ldone2 /* if none then nothing to do */
l16ui a3, a15, XT_CP_CS_ST /* a3 = which CPs have been saved */
xor a3, a3, a2 /* clear the ones being restored */
s32i a3, a15, XT_CP_CS_ST /* update saved CP mask */
movi a13, _xt_coproc_sa_offset /* array of CP save offsets */
l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */

#if XCHAL_CP0_SA_SIZE
bbci.l a2, 0, 2f /* CP 0 not enabled */
l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
add a3, a14, a15 /* a3 = save area for CP 0 */
xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
bbci.l a2, 1, 2f /* CP 1 not enabled */
l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
add a3, a14, a15 /* a3 = save area for CP 1 */
xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
bbci.l a2, 2, 2f
l32i a14, a13, 8
add a3, a14, a15
xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
bbci.l a2, 3, 2f
l32i a14, a13, 12
add a3, a14, a15
xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
bbci.l a2, 4, 2f
l32i a14, a13, 16
add a3, a14, a15
xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
bbci.l a2, 5, 2f
l32i a14, a13, 20
add a3, a14, a15
xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
bbci.l a2, 6, 2f
l32i a14, a13, 24
add a3, a14, a15
xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
bbci.l a2, 7, 2f
l32i a14, a13, 28
add a3, a14, a15
xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone2:
ret

#endif
69
arch/xtensa/core/xtensa_intr.c
Normal file
@@ -0,0 +1,69 @@
/*
* Copyright (c) 2016 Cadence Design Systems, Inc.
* SPDX-License-Identifier: Apache-2.0
*/

/*
* Xtensa-specific interrupt and exception functions for RTOS ports.
* Also see xtensa_intr_asm.S.
*/

#include <stdlib.h>

#include <xtensa/config/core.h>
#include "xtensa_rtos.h"
#include "xtensa_api.h"
#include <kernel_structs.h>

#if XCHAL_HAVE_EXCEPTIONS

/* Handler table is in xtensa_intr_asm.S */

extern xt_exc_handler _xt_exception_table[XCHAL_EXCCAUSE_NUM];


/*
* Default handler for unhandled exceptions.
*/
void xt_unhandled_exception(XtExcFrame *frame)
{
	FatalErrorHandler();
	CODE_UNREACHABLE;
}


/*
* This function registers a handler for the specified exception.
* The function returns the address of the previous handler.
* On error, it returns 0.
*/
xt_exc_handler _xt_set_exception_handler(int n, xt_exc_handler f)
{
	xt_exc_handler old;

	if (n < 0 || n >= XCHAL_EXCCAUSE_NUM)
		return 0; /* invalid exception number */

	old = _xt_exception_table[n];

	if (f) {
		_xt_exception_table[n] = f;
	} else {
		_xt_exception_table[n] = &xt_unhandled_exception;
	}

	return ((old == &xt_unhandled_exception) ? 0 : old);
}
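/*
 * Example use (hypothetical handler, for illustration only; the EXCCAUSE_*
 * constants come from the Xtensa core headers):
 *
 *     static void my_div0_handler(XtExcFrame *frame)
 *     {
 *         frame->pc += 3;    // e.g. skip over the faulting instruction
 *     }
 *
 *     old = _xt_set_exception_handler(EXCCAUSE_DIVIDE_BY_ZERO,
 *                                     my_div0_handler);
 */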

#endif

#if XCHAL_HAVE_INTERRUPTS
/*
* Default handler for unhandled interrupts.
*/
void xt_unhandled_interrupt(void *arg)
{
	ReservedInterruptHandler((unsigned int)arg);
	CODE_UNREACHABLE;
}
#endif /* XCHAL_HAVE_INTERRUPTS */
140
arch/xtensa/core/xtensa_intr_asm.S
Normal file
@@ -0,0 +1,140 @@
/*
|
||||
* Copyright (c) 2016 Cadence Design Systems, Inc.
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/******************************************************************************
|
||||
Xtensa interrupt handling data and assembly routines.
|
||||
Also see xtensa_intr.c and xtensa_vectors.S.
|
||||
******************************************************************************/
|
||||
|
||||
#include <xtensa/hal.h>
|
||||
#include <xtensa/config/core.h>
|
||||
#include "xtensa_rtos.h"
|
||||
#include "xtensa_context.h"
|
||||
|
||||
#if XCHAL_HAVE_INTERRUPTS
|
||||
|
||||
/*
|
||||
-------------------------------------------------------------------------------
|
||||
INTENABLE virtualization information.
|
||||
-------------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
.data
|
||||
.global _xt_intdata
|
||||
.align 8
|
||||
_xt_intdata:
|
||||
.global _xt_intenable
|
||||
.type _xt_intenable,@object
|
||||
.size _xt_intenable,4
|
||||
.global _xt_vpri_mask
|
||||
.type _xt_vpri_mask,@object
|
||||
.size _xt_vpri_mask,4
|
||||
|
||||
_xt_intenable: .word 0 /* Virtual INTENABLE */
|
||||
_xt_vpri_mask: .word 0xFFFFFFFF /* Virtual priority mask */
|
||||
|
||||
|
||||
#endif /* XCHAL_HAVE_INTERRUPTS */
|
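In C terms, the data block above is just two consecutive 32-bit words (a sketch under that layout assumption; the comments name the roles these words play in the routines further below):

/* C view of _xt_intdata (assumed layout: two words, 8-byte aligned) */
extern unsigned int _xt_intenable;	/* interrupts software wants enabled */
extern unsigned int _xt_vpri_mask;	/* interrupts permitted at the current
					 * virtual priority (all ones at startup)
					 */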
#if XCHAL_HAVE_EXCEPTIONS

/*
-------------------------------------------------------------------------------
  Table of C-callable exception handlers for each exception. Note that not all
  slots will be active, because some exceptions (e.g. coprocessor exceptions)
  are always handled by the OS and cannot be hooked by user handlers.
-------------------------------------------------------------------------------
*/

	.data
	.global _xt_exception_table
	.align  4

_xt_exception_table:
	.rept   XCHAL_EXCCAUSE_NUM
	.word   xt_unhandled_exception	/* handler address */
	.endr

#endif

/*
-------------------------------------------------------------------------------
  unsigned int _xt_ints_on ( unsigned int mask )

  Enables a set of interrupts. Does not simply set INTENABLE directly, but
  computes it as a function of the current virtual priority.
  Can be called from interrupt handlers.
-------------------------------------------------------------------------------
*/

	.text
	.align  4
	.global _xt_ints_on
	.type   _xt_ints_on,@function

_xt_ints_on:

	ENTRY0
#if XCHAL_HAVE_INTERRUPTS
	movi    a3, 0
	movi    a4, _xt_intdata
	xsr     a3, INTENABLE		/* Disable all interrupts */
	rsync
	l32i    a3, a4, 0		/* a3 = _xt_intenable */
	l32i    a6, a4, 4		/* a6 = _xt_vpri_mask */
	or      a5, a3, a2		/* a5 = _xt_intenable | mask */
	s32i    a5, a4, 0		/* _xt_intenable |= mask */
	and     a5, a5, a6		/* a5 = _xt_intenable & _xt_vpri_mask */
	wsr     a5, INTENABLE		/* Re-enable interrupts */
	mov     a2, a3			/* Previous mask */
#else
	movi    a2, 0			/* Return zero */
#endif
	RET0

	.size	_xt_ints_on, . - _xt_ints_on
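The same computation sketched in C, for readers who prefer it over the assembly. This is a hedged model only: the statics mirror _xt_intenable and _xt_vpri_mask, and write_intenable() is a hypothetical stand-in for the wsr-to-INTENABLE step (the real routine also holds interrupts off around the read-modify-write):

static unsigned int xt_intenable;		/* virtual INTENABLE */
static unsigned int xt_vpri_mask = 0xFFFFFFFF;	/* virtual priority mask */

static void write_intenable(unsigned int v)
{
	(void)v;	/* models: wsr v, INTENABLE */
}

unsigned int xt_ints_on_sketch(unsigned int mask)
{
	unsigned int prev = xt_intenable;

	xt_intenable = prev | mask;			/* record the request */
	write_intenable(xt_intenable & xt_vpri_mask);	/* filter by vpri */
	return prev;					/* previous virtual mask */
}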
/*
-------------------------------------------------------------------------------
  unsigned int _xt_ints_off ( unsigned int mask )

  Disables a set of interrupts. Does not simply set INTENABLE directly,
  but computes it as a function of the current virtual priority.
  Can be called from interrupt handlers.
-------------------------------------------------------------------------------
*/

	.text
	.align  4
	.global _xt_ints_off
	.type   _xt_ints_off,@function

_xt_ints_off:

	ENTRY0
#if XCHAL_HAVE_INTERRUPTS
	movi    a3, 0
	movi    a4, _xt_intdata
	xsr     a3, INTENABLE		/* Disable all interrupts */
	rsync
	l32i    a3, a4, 0		/* a3 = _xt_intenable */
	l32i    a6, a4, 4		/* a6 = _xt_vpri_mask */
	or      a5, a3, a2		/* a5 = _xt_intenable | mask */
	xor     a5, a5, a2		/* a5 = _xt_intenable & ~mask */
	s32i    a5, a4, 0		/* _xt_intenable &= ~mask */
	and     a5, a5, a6		/* a5 = _xt_intenable & _xt_vpri_mask */
	wsr     a5, INTENABLE		/* Re-enable interrupts */
	mov     a2, a3			/* Previous mask */
#else
	movi    a2, 0			/* Return zero */
#endif
	RET0

	.size	_xt_ints_off, . - _xt_ints_off
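The disable path mirrors the enable sketch above (reusing the same hypothetical stand-ins); the or/xor pair in the assembly is an and-not in disguise:

unsigned int xt_ints_off_sketch(unsigned int mask)
{
	unsigned int prev = xt_intenable;

	/* (prev | mask) ^ mask == prev & ~mask: clear the requested bits */
	xt_intenable = (prev | mask) ^ mask;
	write_intenable(xt_intenable & xt_vpri_mask);
	return prev;
}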
1937
arch/xtensa/core/xtensa_vectors.S
Normal file
File diff suppressed because it is too large
36
arch/xtensa/include/kernel_event_logger_arch.h
Normal file
@@ -0,0 +1,36 @@
/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel event logger support for Xtensa
 */

#ifndef __KERNEL_EVENT_LOGGER_ARCH_H__
#define __KERNEL_EVENT_LOGGER_ARCH_H__

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Get the identification of the current interrupt.
 *
 * This routine obtains the key of the interrupt that is currently being
 * processed, if it is called from an ISR context.
 *
 * @return The key of the interrupt that is currently being processed.
 */
static inline int _sys_current_irq_key_get(void)
{
	return 0;
}

#ifdef __cplusplus
}
#endif

#endif /* __KERNEL_EVENT_LOGGER_ARCH_H__ */
@@ -10,6 +10,7 @@
#include <offsets.h>

/* kernel */
#define KERNEL_OFFSET(field) _kernel_offset_to_##field

#define _kernel_offset_to_flags \
	(___kernel_t_arch_OFFSET + ___kernel_arch_t_flags_OFFSET)

@@ -17,6 +18,7 @@
/* end - kernel */

/* threads */
#define THREAD_OFFSET(field) _thread_offset_to_##field

#define _thread_offset_to_sp \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_topOfStack_OFFSET)

@@ -30,6 +32,9 @@
#define _thread_offset_to_preempCoprocReg \
	(___thread_t_arch_OFFSET + ___thread_arch_t_preempCoprocReg_OFFSET)

#define _thread_offset_to_cpStack \
	(_thread_offset_to_preempCoprocReg + __tPreempCoprocReg_cpStack_OFFSET)

/* end - threads */

#endif /* _offsets_short_arch__h_ */
114
arch/xtensa/include/xtensa_sys_io.h
Normal file
@@ -0,0 +1,114 @@
/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef XTENSA_SYS_IO_H
#define XTENSA_SYS_IO_H

#if !defined(_ASMLANGUAGE)

#include <sys_io.h>

/* Memory mapped registers I/O functions */

static ALWAYS_INLINE uint32_t sys_read32(mem_addr_t addr)
{
	return *(volatile uint32_t *)addr;
}

static ALWAYS_INLINE void sys_write32(uint32_t data, mem_addr_t addr)
{
	*(volatile uint32_t *)addr = data;
}

/* Memory bit manipulation functions */

static ALWAYS_INLINE void sys_set_bit(mem_addr_t addr, unsigned int bit)
{
	uint32_t temp = *(volatile uint32_t *)addr;

	*(volatile uint32_t *)addr = temp | (1 << bit);
}

static ALWAYS_INLINE void sys_clear_bit(mem_addr_t addr, unsigned int bit)
{
	uint32_t temp = *(volatile uint32_t *)addr;

	*(volatile uint32_t *)addr = temp & ~(1 << bit);
}

static ALWAYS_INLINE int sys_test_bit(mem_addr_t addr, unsigned int bit)
{
	int temp = *(volatile int *)addr;

	return (int)(temp & (1 << bit));
}

static ALWAYS_INLINE int sys_test_and_set_bit(mem_addr_t addr, unsigned int bit)
{
	int retval = (*(volatile int *)addr) & (1 << bit);

	*(volatile int *)addr = (*(volatile int *)addr) | (1 << bit);

	return retval;
}

static ALWAYS_INLINE int sys_test_and_clear_bit(mem_addr_t addr, unsigned int bit)
{
	int retval = (*(volatile int *)addr) & (1 << bit);

	*(volatile int *)addr = (*(volatile int *)addr) & ~(1 << bit);

	return retval;
}

static ALWAYS_INLINE
void sys_bitfield_set_bit(mem_addr_t addr, unsigned int bit)
{
	/* Doing memory offsets in terms of 32-bit values to prevent
	 * alignment issues
	 */
	sys_set_bit(addr + ((bit >> 5) << 2), bit & 0x1F);
}

static ALWAYS_INLINE
void sys_bitfield_clear_bit(mem_addr_t addr, unsigned int bit)
{
	sys_clear_bit(addr + ((bit >> 5) << 2), bit & 0x1F);
}

static ALWAYS_INLINE
int sys_bitfield_test_bit(mem_addr_t addr, unsigned int bit)
{
	return sys_test_bit(addr + ((bit >> 5) << 2), bit & 0x1F);
}

static ALWAYS_INLINE
int sys_bitfield_test_and_set_bit(mem_addr_t addr, unsigned int bit)
{
	int ret;

	ret = sys_bitfield_test_bit(addr, bit);
	sys_bitfield_set_bit(addr, bit);

	return ret;
}

static ALWAYS_INLINE
int sys_bitfield_test_and_clear_bit(mem_addr_t addr, unsigned int bit)
{
	int ret;

	ret = sys_bitfield_test_bit(addr, bit);
	sys_bitfield_clear_bit(addr, bit);

	return ret;
}

#endif /* !_ASMLANGUAGE */

#endif /* XTENSA_SYS_IO_H */
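A short worked example of the bit-field helpers above (hedged: the base address and the wrapper function are illustrative only, not part of this commit). Note that these helpers are plain read-modify-write sequences, not atomic operations, despite the test-and-set naming:

void bitfield_example(void)
{
	mem_addr_t base = 0x60000000;	/* hypothetical MMIO bit-field base */

	/* Bit 37 lands in the second 32-bit word:
	 * byte offset = (37 >> 5) << 2 = 4, bit position = 37 & 0x1F = 5
	 */
	sys_bitfield_set_bit(base, 37);
	if (sys_bitfield_test_bit(base, 37)) {
		sys_bitfield_clear_bit(base, 37);
	}
}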