xtensa: fix numerous checkpatch issues

Change-Id: I903f5f0692849fb9e7bf5d978b63b12bf1bd6e33
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie 2017-02-10 12:58:08 -08:00
commit 42562b9367
21 changed files with 1736 additions and 1673 deletions


@ -92,9 +92,9 @@ atomic_get:
* @brief Atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
* done using unsigned integer arithmetic. Various CPU architectures may
* impose restrictions with regards to the alignment and cache attributes of
* the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
@ -126,9 +126,9 @@ atomic_inc:
* @brief Atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
* the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
@ -369,10 +369,10 @@ atomic_xor:
*
* @brief Atomically compare-and-swap the contents of a memory location
*
* This routine performs an atomic compare-and-swap. testing that the contents of
* <target> contains <oldValue>, and if it does, setting the value of <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
* This routine performs an atomic compare-and-swap, testing that the contents
* of <target> contains <oldValue>, and if it does, setting the value of
* <target> to <newValue>. Various CPU architectures may impose restrictions
* with regards to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
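A minimal C-level sketch of the semantics these comments document, assuming the Zephyr <atomic.h> API (atomic_inc() returns the value held before the add; atomic_cas() returns nonzero only when the swap actually happened):

#include <atomic.h>

static atomic_t refcount = ATOMIC_INIT(0);

static void take_ref(void)
{
	atomic_val_t old = atomic_inc(&refcount); /* value *before* the add */

	(void)old;
}

static int try_claim(atomic_t *flag)
{
	/* Atomically: if (*flag == 0) { *flag = 1; return 1; } return 0; */
	return atomic_cas(flag, 0, 1);
}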


@ -79,7 +79,9 @@ _start:
* It is also the return address, where zero indicates
* that the frame used by _start is the bottommost frame.
*/
#if !XCHAL_HAVE_HALT || !XCHAL_HAVE_BOOTLOADER /* not needed for Xtensa TX */
/* not needed for Xtensa TX */
#if !XCHAL_HAVE_HALT || !XCHAL_HAVE_BOOTLOADER
movi a0, 0 /* keep this register zero. */
#endif
@ -91,7 +93,7 @@ _start:
* Initialize the stack pointer.
* See the "ABI and Software Conventions" chapter in the
* Xtensa ISA Reference manual for details.
*
* NOTE: Because the _start routine does not use any memory in its
* stack frame, and because all of its CALL instructions use a
* window size of 4 (or zero), the stack frame for _start can be empty.
@ -99,14 +101,17 @@ _start:
movi sp, __stack
/*
* Now that sp (a1) is set, we can set PS as per the application
* (user vector mode, enable interrupts, enable window exceptions if applicable).
* Now that sp (a1) is set, we can set PS as per the application (user
* vector mode, enable interrupts, enable window exceptions if
* applicable).
*/
#if XCHAL_HAVE_EXCEPTIONS
# ifdef __XTENSA_CALL0_ABI__
movi a3, PS_UM /* PS.WOE = 0, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
/* PS.WOE = 0, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
movi a3, PS_UM
# else
movi a3, PS_UM|PS_WOE /* PS.WOE = 1, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
/* PS.WOE = 1, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
movi a3, PS_UM|PS_WOE
# endif
wsr a3, PS
rsync
@ -168,13 +173,13 @@ _start:
addi a6, a6, 8 /* next entry */
sub a10, a9, a8 /* a10 = length, assumed a multiple of 4 */
bbci.l a10, 2, .L1zte
s32i a0, a8, 0 /* clear 4 bytes to make length multiple of 8 */
s32i a0, a8, 0 /* clear 4 bytes to make len multiple of 8 */
addi a8, a8, 4
.L1zte: bbci.l a10, 3, .L2zte
s32i a0, a8, 0 /* clear 8 bytes to make length multiple of 16 */
s32i a0, a8, 0 /* clear 8 bytes to make len multiple of 16 */
s32i a0, a8, 4
addi a8, a8, 8
.L2zte: srli a10, a10, 4 /* length is now multiple of 16, divide by 16 */
.L2zte: srli a10, a10, 4 /* len is now multiple of 16, divide by 16 */
floopnez a10, clearzte
s32i a0, a8, 0 /* clear 16 bytes at a time... */
s32i a0, a8, 4
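A C model of this zeroing sequence may help: peel off 4- and 8-byte chunks until the remaining length is a multiple of 16, then clear 16 bytes per loop pass (a sketch; len is assumed to already be a multiple of 4, as the comment above states):

#include <stdint.h>

static void zero_range(uint32_t *p, uint32_t len)
{
	if (len & 4) {          /* bbci.l a10, 2: clear 4 bytes */
		*p++ = 0;
	}
	if (len & 8) {          /* bbci.l a10, 3: clear 8 bytes */
		*p++ = 0;
		*p++ = 0;
	}
	for (uint32_t n = len >> 4; n != 0; n--) { /* 16 bytes at a time */
		*p++ = 0;
		*p++ = 0;
		*p++ = 0;
		*p++ = 0;
	}
}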
@ -189,18 +194,19 @@ _start:
/*
* We can now call C code, the C calling environment has been initialized.
* We can now call C code, the C calling environment has been
* initialized.
*
* From this point on, we use ABI-specific macros to refer to registers a0 .. a15
* (ARG#).
* From this point on, we use ABI-specific macros to refer to
* registers a0 .. a15 (ARG#).
*/
#if XCHAL_HAVE_HALT
/*
* Assume minimalist environment for memory-constrained TX cores.
* No C library or board initialization, no parameters passed to main
* (assume declared as "void main(void)") and no call to exit().
* No C library or board initialization, no parameters passed to main
* (assume declared as "void main(void)") and no call to exit().
*/
CALL main
halt
@ -220,10 +226,8 @@ _start:
*
* void __clibrary_init(int argc, char ** argv, char ** environ,
* void(*init_func)(void), void(*fini_func)(void));
* Pass an empty argv array, with an empty string as the program name
*/
* Pass an empty argv array, with an empty string as the program name. */
movi ARG1, _start_argc /* argc address */
movi ARG2, _start_argv /* argv = ["", 0] */
movi ARG3, _start_envp /* envp = [0] */
@ -238,7 +242,9 @@ _start:
movi ARG3, _start_envp /* envp = [0] */
l32i ARG1, ARG1, 0 /* argc = 1 */
CALL main
/* The return value is the same register as the first outgoing argument. */
/* The return value is the same register as the first
* outgoing argument.
*/
CALL exit /* exit with main's return value */
/* Does not return here. */
@ -253,7 +259,8 @@ _start_argv:
.word _start_null /* empty program name */
_start_null:
_start_envp:
.word 0 /* end of argv array, empty string, empty environ */
/* end of argv array, empty string, empty environ */
.word 0
_start_argc:
.word 1 /* one argument (program name) */
.text
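In C terms, the data laid out above corresponds roughly to the following (a sketch; the assembly is tighter, overlaying the empty string, the argv terminator, and envp in a single zero word):

/* Rough C equivalent of _start_argv / _start_envp / _start_argc. */
static char *argv[] = { "", (char *)0 }; /* empty program name, terminator */
static char *envp[] = { (char *)0 };     /* empty environment */
static int argc = 1;                     /* one argument: the program name */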


@ -111,12 +111,12 @@ __start:
simcall /* returns size of argv[] + its strings in a2 */
#if XCHAL_HAVE_PIF
/*
* The stack only needs 16-byte alignment.
* However, here we round up the argv size further to 128 byte multiples
* so that in most cases, variations in argv[0]'s path do not result in
* different stack allocation. Otherwise, such variations can impact
* execution timing (eg. due to cache effects etc) for the same code and data.
* If we have a PIF, it's more likely the extra required space is okay.
* The stack only needs 16-byte alignment. However, here we round up
* the argv size further to 128 byte multiples so that in most cases,
* variations in argv[0]'s path do not result in different stack
* allocation. Otherwise, such variations can impact execution timing
* (eg. due to cache effects etc) for the same code and data. If we
* have a PIF, it's more likely the extra required space is okay.
*/
addi a2, a2, 127
srli a2, a2, 7
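The addi/srli pair is the usual round-up-to-a-power-of-two idiom; in C it reads as below (a sketch; the matching shift back left falls outside this hunk):

/* Round a byte count up to the next multiple of 128 (2^7). */
static unsigned int round_up_128(unsigned int size)
{
	return ((size + 127) >> 7) << 7; /* same as (size + 127) & ~127u */
}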
@ -139,14 +139,17 @@ __start:
/*
* Now that sp (a1) is set, we can set PS as per the application
* (user vector mode, enable interrupts, enable window exceptions if applicable).
* Now that sp (a1) is set, we can set PS as per the application (user
* vector mode, enable interrupts, enable window exceptions if
* applicable).
*/
#if XCHAL_HAVE_EXCEPTIONS
# ifdef __XTENSA_CALL0_ABI__
movi a3, PS_UM /* PS.WOE = 0, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
/* PS.WOE = 0, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
movi a3, PS_UM
# else
movi a3, PS_UM|PS_WOE /* PS.WOE = 1, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
/* PS.WOE = 1, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 */
movi a3, PS_UM|PS_WOE
# endif
wsr a3, PS
rsync
@ -180,9 +183,11 @@ __start:
CALLX a4
1:
/* The new ISS simcall only appeared after RB-2007.2: */
#if !XCHAL_HAVE_BOOTLOADER && (XCHAL_HW_MAX_VERSION > XTENSA_HWVERSION_RB_2007_2) /* pre-LX2 cores only */
#if !XCHAL_HAVE_BOOTLOADER && \
(XCHAL_HW_MAX_VERSION > XTENSA_HWVERSION_RB_2007_2)
/* pre-LX2 cores only */
/*
* Clear the BSS (uninitialized data) segments.
* This code supports multiple zeroed sections (*.bss).
@ -223,7 +228,9 @@ __start:
/* Call: int _Cstart(); */
CALL _Cstart
/* The return value is the same register as the first outgoing argument. */
/* The return value is the same register as the first outgoing
* argument.
*/
CALL exit
/* Does not return here. */


@ -15,14 +15,14 @@
*
* The priority is verified if ASSERT_ON is enabled.
*
* The priority is verified if ASSERT_ON is enabled. The maximum number
* of priority levels is a little complex, as there are some hardware
* priority levels which are reserved: three for various types of exceptions,
* and possibly one additional to support zero latency interrupts.
* The priority is verified if ASSERT_ON is enabled. The maximum number of
* priority levels is a little complex, as there are some hardware priority
* levels which are reserved: three for various types of exceptions, and
* possibly one additional to support zero latency interrupts.
*
* Valid values are from 1 to 6. Interrupts of priority 1 are not masked when
* interrupts are locked system-wide, so care must be taken when using them. ISR
* installed with priority 0 interrupts cannot make kernel calls.
* interrupts are locked system-wide, so care must be taken when using them.
* ISRs installed with priority 0 interrupts cannot make kernel calls.
*
* @return N/A
*/
@ -32,5 +32,7 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
__ASSERT(prio < XCHAL_EXCM_LEVEL + 1,
"invalid priority %d! values must be less than %d\n",
prio, XCHAL_EXCM_LEVEL + 1);
/* TODO: Write code to set priority if this is ever possible on Xtensa */
/* TODO: Write code to set priority if this is ever possible on
* Xtensa
*/
}


@ -12,9 +12,6 @@
* Xtensa core should support software interrupt in order to allow using
* irq_offload feature
*/
#ifndef CONFIG_IRQ_OFFLOAD_INTNUM
#error "Please add entry for IRQ_OFFLOAD_INTNUM option to your arch/xtensa/soc/${XTENSA_CORE}/Kconfig file in order to use IRQ offload on this core."
#endif
static irq_offload_routine_t offload_routine;
static void *offload_param;


@ -10,7 +10,7 @@
#include <xtensa/xtensa-xer.h>
#include <xtensa/xdm-regs.h>
#include <xtensa/config/specreg.h>
#include <xtensa/config/system.h> /* for XSHAL_USE_ABSOLUTE_LITERALS only */
#include <xtensa/config/system.h> /* for XSHAL_USE_ABSOLUTE_LITERALS only */
#include <xtensa/xtruntime-core-state.h>
/*
@ -41,28 +41,30 @@ _ResetVector:
.size _ResetVector, . - _ResetVector
# if XCHAL_HAVE_HALT
#if XCHAL_HAVE_HALT
/*
* Xtensa TX: reset vector segment is only 4 bytes, so must place the
* unpacker code elsewhere in the memory that contains the reset vector.
* unpacker code elsewhere in the memory that contains the reset
* vector.
*/
# if XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTRAM0_VADDR
#if XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTRAM0_VADDR
.section .iram0.text, "ax"
# elif XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTROM0_VADDR
#elif XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTROM0_VADDR
.section .irom0.text, "ax"
# elif XCHAL_RESET_VECTOR_VADDR == XCHAL_URAM0_VADDR
#elif XCHAL_RESET_VECTOR_VADDR == XCHAL_URAM0_VADDR
.section .uram0.text, "ax"
# else
# warning "Xtensa TX reset vector not at start of iram0, irom0, or uram0 -- ROMing LSPs may not work"
#else
#warning "Xtensa TX reset vector not at start of iram0, irom0, or uram0 -- ROMing LSPs may not work"
.text
# endif
# endif
#endif
#endif /* XCHAL_HAVE_HALT */
.extern __memctl_default
.align 4
.literal_position /* tells the assembler/linker to place literals here */
/* tells the assembler/linker to place literals here */
.literal_position
.align 4
.global _ResetHandler
_ResetHandler:
@ -99,19 +101,29 @@ _ResetHandler:
* Note that MEMCTL may not be present depending on config.
*/
#if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
movi a2, XDM_MISC_PWRSTAT /* Read PWRSTAT */
movi a3, _xtos_pso_savearea /* Save area address - retained for later */
movi a5, CORE_STATE_SIGNATURE /* Signature for compare - retained for later */
rer a7, a2 /* PWRSTAT value - retained for later */
extui a4, a7, 1, 2 /* Now bottom 2 bits are core wakeup and cache power lost */
bnei a4, 1, .Lcold_start /* a4==1 means PSO wakeup, caches did not lose power */
l32i a4, a3, CS_SA_signature /* Load save area signature field */
/* Read PWRSTAT */
movi a2, XDM_MISC_PWRSTAT
/* Save area address - retained for later */
movi a3, _xtos_pso_savearea
/* Signature for compare - retained for later */
movi a5, CORE_STATE_SIGNATURE
/* PWRSTAT value - retained for later */
rer a7, a2
/* Now bottom 2 bits are core wakeup and cache power lost */
extui a4, a7, 1, 2
/* a4==1 means PSO wakeup, caches did not lose power */
bnei a4, 1, .Lcold_start
/* Load save area signature field */
l32i a4, a3, CS_SA_signature
sub a4, a4, a5
bnez a4, .Lcold_start /* If signature mismatch then do cold start */
/* If signature mismatch then do cold start */
bnez a4, .Lcold_start
#if XCHAL_USE_MEMCTL
l32i a4, a3, CS_SA_memctl /* Load saved MEMCTL value */
/* Load saved MEMCTL value */
l32i a4, a3, CS_SA_memctl
movi a0, ~MEMCTL_INV_EN
and a0, a4, a0 /* Clear invalidate bit */
/* Clear invalidate bit */
and a0, a4, a0
wsr a0, MEMCTL
#endif
j .Lwarm_start
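Condensed into C, the warm-start test above reads roughly as follows (a sketch; the signature constant is a placeholder for the real CORE_STATE_SIGNATURE from xtruntime-core-state.h):

#include <stdint.h>
#include <stdbool.h>

#define CORE_STATE_SIGNATURE 0x12345678u /* placeholder, not the real value */

/* True for the path that skips .Lcold_start: a PSO wakeup whose caches
 * kept power (bits 1..2 of PWRSTAT equal 1) and whose save area still
 * carries a valid signature.
 */
static bool is_warm_pso_wakeup(uint32_t pwrstat, uint32_t saved_signature)
{
	uint32_t wake = (pwrstat >> 1) & 0x3; /* extui a4, a7, 1, 2 */

	return wake == 1 && saved_signature == CORE_STATE_SIGNATURE;
}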
@ -130,17 +142,23 @@ _ResetHandler:
.Lwarm_start:
#endif
/* a0 is always 0 in this code, used to initialize lots of things */
movi a0, 0
movi a0, 0 /* a0 is always 0 in this code, used to initialize lots of things */
#if XCHAL_HAVE_INTERRUPTS /* technically this should be under !FULL_RESET, assuming hard reset */
wsr a0, INTENABLE /* make sure that interrupts are shut off (*before* we lower PS.INTLEVEL and PS.EXCM!) */
/* technically this should be under !FULL_RESET, assuming hard reset */
#if XCHAL_HAVE_INTERRUPTS
/* make sure that interrupts are shut off (*before* we lower
* PS.INTLEVEL and PS.EXCM!)
*/
wsr a0, INTENABLE
#endif
#if !XCHAL_HAVE_FULL_RESET
#if XCHAL_HAVE_CCOUNT && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0) /* pre-LX2 cores only */
wsr a0, CCOUNT /* not really necessary, but nice; best done very early */
/* pre-LX2 cores only */
#if XCHAL_HAVE_CCOUNT && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0)
/* not really necessary, but nice; best done very early */
wsr a0, CCOUNT
#endif
/*
@ -156,36 +174,50 @@ _ResetHandler:
/*
* Debug initialization
*
* NOTE: DBREAKCn must be initialized before the combination of these two things:
* any load/store, and a lowering of PS.INTLEVEL below DEBUG_LEVEL.
* The processor already resets IBREAKENABLE appropriately.
* NOTE: DBREAKCn must be initialized before the combination of these
* two things: any load/store, and a lowering of PS.INTLEVEL below
* DEBUG_LEVEL. The processor already resets IBREAKENABLE
* appropriately.
*/
#if XCHAL_HAVE_DEBUG
# if XCHAL_NUM_DBREAK
# if XCHAL_NUM_DBREAK >= 2
#if XCHAL_NUM_DBREAK
#if XCHAL_NUM_DBREAK >= 2
wsr a0, DBREAKC1
# endif
#endif
wsr a0, DBREAKC0
dsync /* wait for WSRs to DBREAKCn to complete */
# endif
#endif /* XCHAL_NUM_DBREAK */
# if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RA_2004_1 /* pre-LX cores only */
/* pre-LX cores only */
# if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RA_2004_1
/*
* Starting in Xtensa LX, ICOUNTLEVEL resets to zero (not 15), so no need to initialize it.
* Prior to that we do, otherwise we get an ICOUNT exception, 2^32 instructions after reset.
* Starting in Xtensa LX, ICOUNTLEVEL resets to zero (not 15), so no
* need to initialize it. Prior to that we do, otherwise we get an
* ICOUNT exception, 2^32 instructions after reset.
*/
rsr a2, ICOUNTLEVEL /* are we being debugged? (detected by ICOUNTLEVEL not 15, or dropped below 12) */
bltui a2, 12, 1f /* if so, avoid initializing ICOUNTLEVEL which drops single-steps through here */
wsr a0, ICOUNTLEVEL /* avoid ICOUNT exceptions */
isync /* wait for WSR to ICOUNTLEVEL to complete */
/* are we being debugged? (detected by ICOUNTLEVEL not 15, or dropped
* below 12)
*/
rsr a2, ICOUNTLEVEL
/* if so, avoid initializing ICOUNTLEVEL which drops single-steps
* through here
*/
bltui a2, 12, 1f
/* avoid ICOUNT exceptions */
wsr a0, ICOUNTLEVEL
/* wait for WSR to ICOUNTLEVEL to complete */
isync
1:
# endif
#endif
#endif /* XCHAL_HAVE_DEBUG */
#endif /* !XCHAL_HAVE_FULL_RESET */
#if XCHAL_HAVE_ABSOLUTE_LITERALS
/* Technically, this only needs to be done under !FULL_RESET, assuming hard reset: */
/* Technically, this only needs to be done under !FULL_RESET,
* assuming hard reset:
*/
wsr a0, LITBASE
rsync
#endif
@ -201,19 +233,30 @@ _ResetHandler:
* a5 - saved state signature (CORE_STATE_SIGNATURE)
* a7 - contents of PWRSTAT register
*/
l32i a4, a3, CS_SA_signature /* load save area signature */
sub a4, a4, a5 /* compare signature with expected one */
/* load save area signature */
l32i a4, a3, CS_SA_signature
/* compare signature with expected one */
sub a4, a4, a5
# if XTOS_PSO_TEST
movi a7, PWRSTAT_WAKEUP_RESET /* pretend PSO warm start with warm caches */
/* pretend PSO warm start with warm caches */
movi a7, PWRSTAT_WAKEUP_RESET
# endif
bbci.l a7, PWRSTAT_WAKEUP_RESET_SHIFT, 1f /* wakeup from PSO? (branch if not) */
/* Yes, wakeup from PSO. Check whether state was properly saved. */
addi a5, a7, - PWRSTAT_WAKEUP_RESET /* speculatively clear PSO-wakeup bit */
movnez a7, a5, a4 /* if state not saved (corrupted?), mark as cold start */
bnez a4, 1f /* if state not saved, just continue with reset */
/* Wakeup from PSO with good signature. Now check cache status: */
bbci.l a7, PWRSTAT_CACHES_LOST_POWER_SHIFT, .Lpso_restore /* if caches warm, restore now */
/* Caches got shutoff. Continue reset, we'll end up initializing caches, and check again later for PSO. */
/* wakeup from PSO? (branch if not) */
bbci.l a7, PWRSTAT_WAKEUP_RESET_SHIFT, 1f
/* Yes, wakeup from PSO. Check whether state was properly saved.
* Speculatively clear PSO-wakeup bit.
*/
addi a5, a7, - PWRSTAT_WAKEUP_RESET
/* if state not saved (corrupted?), mark as cold start */
movnez a7, a5, a4
/* if state not saved, just continue with reset */
bnez a4, 1f
/* Wakeup from PSO with good signature. Now check cache status:
* if caches warm, restore now.
*/
bbci.l a7, PWRSTAT_CACHES_LOST_POWER_SHIFT, .Lpso_restore
/* Caches got shutoff. Continue reset, we'll end up initializing
* caches, and check again later for PSO.
*/
# if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
j .Ldonesync /* skip reset sync, only done for cold start */
# endif
@ -221,46 +264,54 @@ _ResetHandler:
#endif
#if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
/* Core 0 initializes the XMP synchronization variable, if present. This operation needs to
happen as early as possible in the startup sequence so that the other cores can be released
from reset. */
/* Core 0 initializes the XMP synchronization variable, if present.
* This operation needs to happen as early as possible in the startup
* sequence so that the other cores can be released from reset.
*/
.weak _ResetSync
movi a2, _ResetSync /* address of sync variable */
rsr.prid a3 /* core and multiprocessor ID */
extui a3, a3, 0, 8 /* extract core ID (FIXME: need proper constants for PRID bits to extract) */
extui a3, a3, 0, 8 /* extract core ID (FIXME: need proper
* constants for PRID bits to extract) */
beqz a2, .Ldonesync /* skip if no sync variable */
bnez a3, .Ldonesync /* only do this on core 0 */
s32i a0, a2, 0 /* clear sync variable */
.Ldonesync:
#endif
#if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MP_RUNSTALL
/* On core 0, this releases other cores. On other cores this has no effect, because
runstall control is unconnected. */
/* On core 0, this releases other cores. On other cores this has no
* effect, because runstall control is unconnected.
*/
movi a2, XER_MPSCORE
wer a0, a2
#endif
/*
* For processors with relocatable vectors, apply any alternate
* vector base given to xt-genldscripts, which sets the
* _memmap_vecbase_reset symbol accordingly.
* For processors with relocatable vectors, apply any alternate
* vector base given to xt-genldscripts, which sets the
* _memmap_vecbase_reset symbol accordingly.
*/
#if XCHAL_HAVE_VECBASE
movi a2, _memmap_vecbase_reset /* note: absolute symbol, not a ptr */
/* note: absolute symbol, not a ptr */
movi a2, _memmap_vecbase_reset
wsr a2, vecbase
#endif
#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) /* have ATOMCTL ? */
# if XCHAL_DCACHE_IS_COHERENT
movi a3, 0x25 /* MX -- internal for writeback, RCW otherwise */
# else
movi a3, 0x15 /* non-MX -- always RCW */
# endif
/* have ATOMCTL ? */
#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
#if XCHAL_DCACHE_IS_COHERENT
/* MX -- internal for writeback, RCW otherwise */
movi a3, 0x25
#else
/* non-MX -- always RCW */
movi a3, 0x15
#endif /* XCHAL_DCACHE_IS_COHERENT */
wsr a3, ATOMCTL
#endif
#if XCHAL_HAVE_INTERRUPTS && XCHAL_HAVE_DEBUG
rsil a2, 1 /* lower PS.INTLEVEL here to make reset vector easier to debug */
/* lower PS.INTLEVEL here to make reset vector easier to debug */
rsil a2, 1
#endif
/* If either of the caches does not have dynamic way support, then
@ -275,20 +326,27 @@ _ResetHandler:
#endif
#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
/*
* Here, a7 still contains status from the power status register,
* or zero if signature check failed.
/* Here, a7 still contains status from the power status register,
* or zero if signature check failed.
*/
/* wakeup from PSO with good signature? */
bbci.l a7, PWRSTAT_WAKEUP_RESET_SHIFT, .Lcoldstart
/* Yes, wakeup from PSO. Caches had been powered down, now are
* initialized.
*/
bbci.l a7, PWRSTAT_WAKEUP_RESET_SHIFT, .Lcoldstart /* wakeup from PSO with good signature? */
/* Yes, wakeup from PSO. Caches had been powered down, now are initialized. */
.Lpso_restore:
/*
* Assume memory still initialized, so all code still unpacked etc.
* So we can just jump/call to relevant state restore code (wherever located).
/* Assume memory still initialized, so all code still unpacked etc.
* So we can just jump/call to relevant state restore code (wherever
* located).
*/
movi a2, 0 /* make shutoff routine return zero */
/* make shutoff routine return zero */
movi a2, 0
movi a3, _xtos_pso_savearea
/* Here, as below for _start, call0 is used as an unlimited-range jump. */
/* Here, as below for _start, call0 is used as an unlimited-range
* jump.
*/
call0 _xtos_core_restore_nw
/* (does not return) */
.Lcoldstart:
@ -301,80 +359,101 @@ _ResetHandler:
#endif
/*
* Now setup the memory attributes. On some cores this "enables" caches.
* We do this ahead of unpacking, so it can proceed more efficiently.
* Now setup the memory attributes. On some cores this "enables"
* caches. We do this ahead of unpacking, so it can proceed more
* efficiently.
*
* The _memmap_cacheattr_reset symbol's value (address) is defined
* by the LSP's linker script, as generated by xt-genldscripts.
* If defines 4-bit attributes for eight 512MB regions.
* The _memmap_cacheattr_reset symbol's value (address) is defined by
* the LSP's linker script, as generated by xt-genldscripts. It
* defines 4-bit attributes for eight 512MB regions.
*
* (NOTE: for cores with the older MMU v1 or v2, or without any memory
* protection mechanism, the following code has no effect.)
* (NOTE: for cores with the older MMU v1 or v2, or without any
* memory protection mechanism, the following code has no effect.)
*/
#if XCHAL_HAVE_MPU
/* If there's an empty background map, setup foreground maps to mimic region protection: */
/* If there's an empty background map, setup foreground maps to mimic
* region protection:
*/
# if XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2
.pushsection .rodata, "a"
.global _xtos_mpu_attribs
.align 4
_xtos_mpu_attribs:
.word 0x00006000+XCHAL_MPU_ENTRIES-8 * Illegal (---)
.word 0x000F7700+XCHAL_MPU_ENTRIES-8 * Writeback (rwx Cacheable Non-shareable wb rd-alloc wr-alloc)
.word 0x000D5700+XCHAL_MPU_ENTRIES-8 * WBNA (rwx Cacheable Non-shareable wb rd-alloc)
.word 0x000C4700+XCHAL_MPU_ENTRIES-8 * Writethru (rwx Cacheable Non-shareable wt rd-alloc)
.word 0x00006700+XCHAL_MPU_ENTRIES-8 * Bypass (rwx Device non-interruptible system-shareable)
/* Illegal (---) */
.word 0x00006000+XCHAL_MPU_ENTRIES-8
/* Writeback (rwx Cacheable Non-shareable wb rd-alloc wr-alloc) */
.word 0x000F7700+XCHAL_MPU_ENTRIES-8
/* WBNA (rwx Cacheable Non-shareable wb rd-alloc) */
.word 0x000D5700+XCHAL_MPU_ENTRIES-8
/* Writethru (rwx Cacheable Non-shareable wt rd-alloc) */
.word 0x000C4700+XCHAL_MPU_ENTRIES-8
/* Bypass (rwx Device non-interruptible system-shareable) */
.word 0x00006700+XCHAL_MPU_ENTRIES-8
.popsection
/*
* We assume reset state: all MPU entries zeroed and disabled.
* Otherwise we'd need a loop to zero everything.
*/
movi a2, _memmap_cacheattr_reset /* note: absolute symbol, not a ptr */
/* note: absolute symbol, not a ptr */
movi a2, _memmap_cacheattr_reset
movi a3, _xtos_mpu_attribs
movi a4, 0x20000000 /* 512 MB delta */
movi a4, 0x20000000 /* 512 MB delta */
movi a6, 8
movi a7, 1 /* MPU entry vaddr 0, with valid bit set */
movi a9, 0 /* cacheadrdis value */
wsr.cacheadrdis a9 /* enable everything temporarily while MPU updates */
movi a7, 1 /* MPU entry vaddr 0, with valid bit set */
movi a9, 0 /* cacheadrdis value */
/* enable everything temporarily while MPU updates */
wsr.cacheadrdis a9
/* Write eight MPU entries, from the last one going backwards (entries n-1 thru n-8) */
2: extui a8, a2, 28, 4 /* get next attribute nibble (msb first) */
extui a5, a8, 0, 2 /* lower two bit indicate whether cached */
slli a9, a9, 1 /* add a bit to cacheadrdis... */
addi a10, a9, 1 /* set that new bit if... */
moveqz a9, a10, a5 /* ... that region is non-cacheable */
addx4 a5, a8, a3 /* index into _xtos_mpu_attribs table */
addi a8, a8, -5 /* make valid attrib indices negative */
movgez a5, a3, a8 /* if not valid attrib, use Illegal */
l32i a5, a5, 0 /* load access rights, memtype from table entry */
/* Write eight MPU entries, from the last one going backwards
* (entries n-1 thru n-8)
*/
2: extui a8, a2, 28, 4 /* get next attribute nibble (msb first) */
extui a5, a8, 0, 2 /* lower two bit indicate whether cached */
slli a9, a9, 1 /* add a bit to cacheadrdis... */
addi a10, a9, 1 /* set that new bit if... */
moveqz a9, a10, a5 /* ... that region is non-cacheable */
addx4 a5, a8, a3 /* index into _xtos_mpu_attribs table */
addi a8, a8, -5 /* make valid attrib indices negative */
movgez a5, a3, a8 /* if not valid attrib, use Illegal */
l32i a5, a5, 0 /* load access rights, memtype from table
* entry
*/
slli a2, a2, 4
sub a7, a7, a4 /* next 512MB region (last to first) */
sub a7, a7, a4 /* next 512MB region (last to first) */
addi a6, a6, -1
add a5, a5, a6 /* add the index */
wptlb a5, a7 /* write the MPU entry */
bnez a6, 2b /* loop until done */
add a5, a5, a6 /* add the index */
wptlb a5, a7 /* write the MPU entry */
bnez a6, 2b /* loop until done */
# else
movi a9, XCHAL_MPU_BG_CACHEADRDIS /* default value of CACHEADRDIS for bgnd map */
/* default value of CACHEADRDIS for bgnd map */
movi a9, XCHAL_MPU_BG_CACHEADRDIS
# endif
wsr.cacheadrdis a9 /* update cacheadrdis */
#elif XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR \
#elif XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR \
|| XCHAL_HAVE_XLT_CACHEATTR \
|| (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
movi a2, _memmap_cacheattr_reset /* note: absolute symbol, not a ptr */
cacheattr_set /* set CACHEATTR from a2 (clobbers a3-a8) */
/* note: absolute symbol, not a ptr */
movi a2, _memmap_cacheattr_reset
/* set CACHEATTR from a2 (clobbers a3-a8) */
cacheattr_set
#endif
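A C rendering of the eight-entry loop above may make the dataflow easier to follow (a sketch under the same assumptions: one attribute nibble per 512MB region, MSB first, low two bits zero meaning non-cacheable, indices above 4 treated as Illegal; the wptlb writes are omitted):

#include <stdint.h>

static uint32_t mpu_decode(uint32_t attr_word, const uint32_t attribs[5])
{
	uint32_t cacheadrdis = 0;

	for (int i = 0; i < 8; i++) {
		uint32_t nibble = (attr_word >> 28) & 0xF; /* MSB first */
		uint32_t rights;

		cacheadrdis <<= 1;
		if ((nibble & 0x3) == 0) {
			cacheadrdis |= 1; /* region is non-cacheable */
		}

		/* Invalid attribute indices fall back to Illegal (entry 0). */
		rights = (nibble <= 4) ? attribs[nibble] : attribs[0];
		(void)rights; /* would be merged with the entry index and written */

		attr_word <<= 4;
	}
	return cacheadrdis; /* written to CACHEADRDIS after the loop */
}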
/* Now that caches are initialized, cache coherency can be enabled. */
/* Now that caches are initialized, cache coherency can be enabled. */
#if XCHAL_DCACHE_IS_COHERENT
# if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0)
/* Opt into coherence for MX (for backward compatibility / testing). */
# if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX && \
(XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0)
/* Opt into coherence for MX (for backward compatibility / testing). */
movi a3, 1
movi a2, XER_CCON
wer a3, a2
# endif
#endif
/* Enable zero-overhead loop instr buffer, and snoop responses, if configured. */
/* If HW erratum 453 fix is to be applied, then don't enable loop instr buffer. */
/* Enable zero-overhead loop instr buffer, and snoop responses, if
* configured. If HW erratum 453 fix is to be applied, then don't
* enable loop instr buffer.
*/
#if XCHAL_USE_MEMCTL && XCHAL_SNOOP_LB_MEMCTL_DEFAULT
movi a3, XCHAL_SNOOP_LB_MEMCTL_DEFAULT
rsr a2, MEMCTL
@ -420,18 +499,20 @@ unpackdone:
#if defined(XTOS_UNPACK) || defined(XTOS_MP)
/*
* If writeback caches are configured and enabled, unpacked data must be
* written out to memory before trying to execute it:
* If writeback caches are configured and enabled, unpacked data must
* be written out to memory before trying to execute it:
*/
dcache_writeback_all a2, a3, a4, 0
icache_sync a2 /* ensure data written back is visible to i-fetch */
/* ensure data written back is visible to i-fetch */
icache_sync a2
/*
* Note: no need to invalidate the i-cache after the above, because we
* already invalidated it further above and did not execute anything within
* unpacked regions afterwards. [Strictly speaking, if an unpacked region
* follows this code very closely, it's possible for cache-ahead to have
* cached a bit of that unpacked region, so in the future we may need to
* invalidate the entire i-cache here again anyway.]
* Note: no need to invalidate the i-cache after the above, because
* we already invalidated it further above and did not execute
* anything within unpacked regions afterwards. [Strictly speaking,
* if an unpacked region follows this code very closely, it's possible
* for cache-ahead to have cached a bit of that unpacked region, so in
* the future we may need to invalidate the entire i-cache here again
* anyway.]
*/
#endif
@ -446,40 +527,44 @@ unpackdone:
/*
* Switch from PC-relative to absolute (litbase-relative) L32R mode.
* Set LITBASE to 256 kB beyond the start of the literals in .lit4
* (aligns to the nearest 4 kB boundary, LITBASE does not have bits 1..11)
* and set the enable bit (_lit4_start is assumed 4-byte aligned).
* (aligns to the nearest 4 kB boundary, LITBASE does not have bits
* 1..11) and set the enable bit (_lit4_start is assumed 4-byte
* aligned).
*/
movi a2, _lit4_start + 0x40001
wsr a2, LITBASE
rsync
#endif /* have and use absolute literals */
.end no-absolute-literals /* we can now start using absolute literals */
/* we can now start using absolute literals */
.end no-absolute-literals
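For reference, the LITBASE value set above can be sketched in C like this (an assumption-laden sketch: _lit4_start is the linker-provided start of the .lit4 literals, the window offset is 256 KB, and bit 0 is the enable bit):

#include <stdint.h>

extern char _lit4_start[]; /* provided by the LSP linker script */

/* 256 KB (0x40000) past the literals, plus the enable bit: +0x40001. */
static inline uint32_t litbase_value(void)
{
	return (uint32_t)(uintptr_t)_lit4_start + 0x40000u + 1u;
}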
/* Technically, this only needs to be done pre-LX2, assuming hard reset: */
/* Technically, this only needs to be done pre-LX2, assuming hard
* reset:
*/
# if XCHAL_HAVE_WINDOWED && defined(__XTENSA_WINDOWED_ABI__)
/* Windowed register init, so we can call windowed code (eg. C code). */
/* Windowed register init, so we can call windowed code (eg. C code). */
movi a1, 1
wsr a1, WINDOWSTART
/*
* The processor always clears WINDOWBASE at reset, so no need to clear it here.
* It resets WINDOWSTART to 1 starting with LX2.0/X7.0 (RB-2006.0).
* However, assuming hard reset is not yet always practical, so do this anyway:
* The processor always clears WINDOWBASE at reset, so no need to
* clear it here. It resets WINDOWSTART to 1 starting with LX2.0/X7.0
* (RB-2006.0). However, assuming hard reset is not yet always
* practical, so do this anyway:
*/
wsr a0, WINDOWBASE
rsync
movi a0, 0 /* possibly a different a0, clear it */
# endif
#if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0 /* only pre-LX2 needs this */
/* only pre-LX2 needs this */
#if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0
/* Coprocessor option initialization */
# if XCHAL_HAVE_CP
/*
*movi a2, XCHAL_CP_MASK // enable existing CPs
* To allow creating new coprocessors using TC that are not known
* at GUI build time without having to explicitly enable them,
* all CPENABLE bits must be set, even though they may not always
* correspond to a coprocessor.
* To allow creating new coprocessors using TC that are not known
* at GUI build time without having to explicitly enable them,
* all CPENABLE bits must be set, even though they may not always
* correspond to a coprocessor.
*/
movi a2, 0xFF /* enable *all* bits, to allow dynamic TIE */
wsr a2, CPENABLE
@ -490,9 +575,14 @@ unpackdone:
* rounding mode, so that floating point ops give predictable results)
*/
# if XCHAL_HAVE_FP && !XCHAL_HAVE_VECTORFPU2005
# define FCR 232 /* floating-point control register (user register number) */
# define FSR 233 /* floating-point status register (user register number) */
rsync /* wait for WSR to CPENABLE to complete before accessing FP coproc state */
/* floating-point control register (user register number) */
# define FCR 232
/* floating-point status register (user register number) */
# define FSR 233
/* wait for WSR to CPENABLE to complete before accessing FP coproc
* state
*/
rsync
wur a0, FCR /* clear FCR (default rounding mode, round-nearest) */
wur a0, FSR /* clear FSR */
# endif
@ -561,25 +651,32 @@ unpackdone:
/*
* Complete reset initialization outside the vector,
* to avoid requiring a vector that is larger than necessary.
* This 2nd-stage startup code sets up the C Run-Time (CRT) and calls main().
* Complete reset initialization outside the vector, to avoid
* requiring a vector that is larger than necessary. This 2nd-stage
* startup code sets up the C Run-Time (CRT) and calls main().
*
* Here we use call0 not because we expect any return, but
* because the assembler/linker dynamically sizes call0 as
* needed (with -mlongcalls) which it doesn't with j or jx.
* Note: This needs to be call0 regardless of the selected ABI.
* Here we use call0 not because we expect any return, but because the
* assembler/linker dynamically sizes call0 as needed (with
* -mlongcalls) which it doesn't with j or jx. Note: This needs to
* be call0 regardless of the selected ABI.
*/
call0 _start /* jump to _start (in crt1-*.S) */
/* does not return */
#else /* XCHAL_HAVE_HALT */
j _start /* jump to _start (in crt1-*.S) */
/* (TX has max 64kB IRAM, so J always in range) */
j _start /* jump to _start (in crt1-*.S) */
/* (TX has max 64kB IRAM, so J always in range) */
/* Paranoia -- double-check requirements / assumptions of this Xtensa TX code: */
# if !defined(__XTENSA_CALL0_ABI__) || !XCHAL_HAVE_FULL_RESET || XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_CCOUNT || XCHAL_DTLB_ARF_WAYS || XCHAL_HAVE_DEBUG || XCHAL_HAVE_S32C1I || XCHAL_HAVE_ABSOLUTE_LITERALS || XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE || XCHAL_HAVE_PIF || XCHAL_HAVE_WINDOWED
/* Paranoia -- double-check requirements / assumptions of this Xtensa
* TX code:
*/
# if !defined(__XTENSA_CALL0_ABI__) || !XCHAL_HAVE_FULL_RESET \
|| XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_CCOUNT \
|| XCHAL_DTLB_ARF_WAYS || XCHAL_HAVE_DEBUG \
|| XCHAL_HAVE_S32C1I || XCHAL_HAVE_ABSOLUTE_LITERALS \
|| XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE || XCHAL_HAVE_PIF \
|| XCHAL_HAVE_WINDOWED
# error "Halt architecture (Xtensa TX) requires: call0 ABI, all flops reset, no exceptions or interrupts, no TLBs, no debug, no S32C1I, no LITBASE, no cache, no PIF, no windowed regs"
# endif


@ -15,9 +15,7 @@
#include <offsets_short.h>
.extern _kernel
/**
unsigned int _Swap (unsigned int basepri);
*/
/* unsigned int _Swap (unsigned int basepri); */
.globl _Swap
.type _Swap,@function
.align 4
@ -30,34 +28,37 @@ _Swap:
s32i a0, sp, XT_SOL_pc
s32i a2, sp, XT_SOL_ps
#ifdef __XTENSA_CALL0_ABI__
s32i a12, sp, XT_SOL_a12 /* save callee-saved registers */
s32i a12, sp, XT_SOL_a12 /* save callee-saved registers */
s32i a13, sp, XT_SOL_a13
s32i a14, sp, XT_SOL_a14
s32i a15, sp, XT_SOL_a15
#else
/* Spill register windows. Calling xthal_window_spill() causes extra */
/* spills and reloads, so we will set things up to call the _nw version */
/* instead to save cycles. */
movi a6, ~(PS_WOE_MASK|PS_INTLEVEL_MASK) /* spills a4-a7 if needed */
and a2, a2, a6 /* clear WOE, INTLEVEL */
addi a2, a2, XCHAL_EXCM_LEVEL /* set INTLEVEL */
/* Spill register windows. Calling xthal_window_spill() causes extra
* spills and reloads, so we will set things up to call the _nw version
* instead to save cycles.
*/
/* spills a4-a7 if needed */
movi a6, ~(PS_WOE_MASK|PS_INTLEVEL_MASK)
and a2, a2, a6 /* clear WOE, INTLEVEL */
addi a2, a2, XCHAL_EXCM_LEVEL /* set INTLEVEL */
wsr a2, PS
rsync
call0 xthal_window_spill_nw
l32i a2, sp, XT_SOL_ps /* restore PS */
l32i a2, sp, XT_SOL_ps /* restore PS */
addi a2, a2, XCHAL_EXCM_LEVEL
wsr a2, PS
#endif
#if XCHAL_CP_NUM > 0
/* Save coprocessor callee-saved state (if any). At this point CPENABLE */
/* should still reflect which CPs were in use (enabled). */
/* Save coprocessor callee-saved state (if any). At this point CPENABLE
* should still reflect which CPs were in use (enabled).
*/
call0 _xt_coproc_savecs
#endif
movi a2, _kernel
movi a3, 0
l32i a4, a2, KERNEL_OFFSET(current) /* a4 := _kernel->current */
s32i a3, sp, XT_SOL_exit /* 0 to flag as solicited frame */
s32i a3, sp, XT_SOL_exit /* 0 to flag as solicited frame */
s32i sp, a4, THREAD_OFFSET(sp) /* current->arch.topOfStack := sp */
/*
* Set _Swap()'s default return code to -EAGAIN. This eliminates the
@ -69,7 +70,8 @@ _Swap:
#if XCHAL_CP_NUM > 0
/* Clear CPENABLE, also in task's co-processor state save area. */
movi a3, 0
l32i a4, a4, THREAD_OFFSET(cpStack) /* a4 := current->arch.preempCoprocReg.cpStack */
/* a4 := current->arch.preempCoprocReg.cpStack */
l32i a4, a4, THREAD_OFFSET(cpStack)
wsr a3, CPENABLE
beqz a4, 1f
s16i a3, a4, XT_CPENABLE /* clear saved cpenable */
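At the C level, the bookkeeping _Swap has performed by this point amounts to roughly the following (a sketch with illustrative structure layouts; the real offsets come from offsets_short.h):

#include <stdint.h>
#include <errno.h>

struct sol_frame {            /* illustrative solicited-frame layout */
	uint32_t exit;        /* 0 marks a solicited (cooperative) frame */
	int32_t retval;       /* what _Swap() returns unless overwritten */
};

struct thread_stub {          /* illustrative thread bookkeeping */
	struct sol_frame *sp; /* current->arch.topOfStack */
};

static void swap_bookkeeping(struct thread_stub *current,
			     struct sol_frame *frame)
{
	frame->exit = 0;         /* flag as solicited frame */
	frame->retval = -EAGAIN; /* default _Swap() return code */
	current->sp = frame;     /* publish outgoing thread's stack pointer */
}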


@ -89,7 +89,9 @@ void _new_thread(char *pStack, size_t stackSize,
/* Align stack end to maximum alignment requirement. */
char *stackEnd = (char *)ROUND_DOWN(pStack + stackSize,
(XCHAL_TOTAL_SA_ALIGN < 16 ? 16 : XCHAL_TOTAL_SA_ALIGN));
/* TCS is located at top of stack while frames are located at end of it */
/* TCS is located at top of stack while frames are located at end
* of it
*/
struct tcs *tcs = (struct tcs *)(pStack);
#if XCHAL_CP_NUM > 0
uint32_t *cpSA;
@ -111,20 +113,32 @@ void _new_thread(char *pStack, size_t stackSize,
(XCHAL_TOTAL_SA_ALIGN < 16 ? 16 : XCHAL_TOTAL_SA_ALIGN));
#ifdef CONFIG_DEBUG
printk("cpStack = %p\n", tcs->arch.preempCoprocReg.cpStack);
printk("cpAsa = %p\n", *(void **)(tcs->arch.preempCoprocReg.cpStack + XT_CP_ASA));
printk("cpAsa = %p\n",
*(void **)(tcs->arch.preempCoprocReg.cpStack + XT_CP_ASA));
#endif
#endif
/* Thread's first frame alignment is granted as both operands are aligned */
XtExcFrame *pInitCtx = (XtExcFrame *)(stackEnd - (XT_XTRA_SIZE - XT_CP_SIZE));
/* Thread's first frame alignment is granted as both operands are
* aligned
*/
XtExcFrame *pInitCtx =
(XtExcFrame *)(stackEnd - (XT_XTRA_SIZE - XT_CP_SIZE));
#ifdef CONFIG_DEBUG
printk("pInitCtx = %p\n", pInitCtx);
#endif
/* Explicitly initialize certain saved registers */
pInitCtx->pc = (uint32_t)_thread_entry; /* task entrypoint */
pInitCtx->a1 = (uint32_t)pInitCtx + XT_STK_FRMSZ; /* physical top of stack frame */
pInitCtx->exit = (uint32_t)_xt_user_exit; /* user exception exit dispatcher */
/* Set initial PS to int level 0, EXCM disabled, user mode. */
/* Also set entry point argument arg. */
/* task entrypoint */
pInitCtx->pc = (uint32_t)_thread_entry;
/* physical top of stack frame */
pInitCtx->a1 = (uint32_t)pInitCtx + XT_STK_FRMSZ;
/* user exception exit dispatcher */
pInitCtx->exit = (uint32_t)_xt_user_exit;
/* Set initial PS to int level 0, EXCM disabled, user mode.
* Also set entry point argument arg.
*/
#ifdef __XTENSA_CALL0_ABI__
pInitCtx->a2 = (uint32_t)pEntry;
pInitCtx->a3 = (uint32_t)p1;
@ -132,7 +146,9 @@ void _new_thread(char *pStack, size_t stackSize,
pInitCtx->a5 = (uint32_t)p3;
pInitCtx->ps = PS_UM | PS_EXCM;
#else
/* For windowed ABI set also WOE and CALLINC (pretend task is 'call4'). */
/* For windowed ABI set also WOE and CALLINC
* (pretend task is 'call4')
*/
pInitCtx->a6 = (uint32_t)pEntry;
pInitCtx->a7 = (uint32_t)p1;
pInitCtx->a8 = (uint32_t)p2;
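For reference, ROUND_DOWN as used above behaves like the usual power-of-two mask (a sketch; the real macro lives in Zephyr's utility headers):

/* Power-of-two round-down, as used to align stackEnd. */
#define ROUND_DOWN(x, align) \
	((unsigned long)(x) & ~((unsigned long)(align) - 1))

/* e.g. with pStack + stackSize == 0x2000123C and a 16-byte requirement:
 *   ROUND_DOWN(0x2000123C, 16) == 0x20001230
 * The first exception frame then starts XT_XTRA_SIZE - XT_CP_SIZE bytes
 * below that aligned end.
 */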


@ -46,10 +46,14 @@ _zxt_dispatch:
#endif
l32i a0, sp, XT_SOL_pc
#if XCHAL_CP_NUM > 0
/* Ensure wsr.CPENABLE is complete (should be, it was cleared on entry). */
/* Ensure wsr.CPENABLE is complete (should be, it was cleared on
* entry).
*/
rsync
#endif
/* As soons as PS is restored, interrupts can happen. No need to sync PS. */
/* As soon as PS is restored, interrupts can happen. No need to sync
* PS.
*/
wsr a3, PS
#ifdef __XTENSA_CALL0_ABI__
addi sp, sp, XT_SOL_FRMSZ
@ -72,7 +76,9 @@ _zxt_dispatch:
*/
call0 _xt_context_restore
/* In Call0 ABI, restore callee-saved regs (A12, A13 already restored). */
/* In Call0 ABI, restore callee-saved regs (A12, A13 already
* restored).
*/
#ifdef __XTENSA_CALL0_ABI__
l32i a14, sp, XT_STK_a14
l32i a15, sp, XT_STK_a15
@ -84,26 +90,24 @@ _zxt_dispatch:
#endif
/*
* Must return via the exit dispatcher corresponding to the entrypoint from
* which this was called. Interruptee's A0, A1, PS, PC are restored and
* the interrupt stack frame is deallocated in the exit dispatcher.
* Must return via the exit dispatcher corresponding to the entrypoint
* from which this was called. Interruptee's A0, A1, PS, PC are
* restored and the interrupt stack frame is deallocated in the exit
* dispatcher.
*/
l32i a0, sp, XT_STK_exit
ret
/*
*******************************************************************************
* _zxt_int_enter
* void _zxt_int_enter(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_ENTER function for
* freeRTOS. Saves the rest of the interrupt context (not already saved).
* May only be called from assembly code by the 'call0' instruction, with
* interrupts disabled.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
*******************************************************************************
*/
* _zxt_int_enter
* void _zxt_int_enter(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_ENTER function for
* freeRTOS. Saves the rest of the interrupt context (not already saved).
* May only be called from assembly code by the 'call0' instruction, with
* interrupts disabled.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*/
.globl _zxt_int_enter
.type _zxt_int_enter,@function
.align 4
@ -120,10 +124,10 @@ _zxt_int_enter:
call0 _xt_context_save
/*
* Save interrupted task's SP in TCB only if not nesting.
* Manage nesting directly rather than call the generic IntEnter() (in
* windowed ABI we can't call a C function here anyway because PS.EXCM is
* still set).
* Save interrupted task's SP in TCB only if not nesting. Manage
* nesting directly rather than call the generic IntEnter() (in
* windowed ABI we can't call a C function here anyway because PS.EXCM
* is still set).
*/
movi a2, _kernel /* a2 := _kernel */
l32i a3, a2, KERNEL_OFFSET(nested) /* a3 := _kernel->nested */
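The nesting discipline shared by _zxt_int_enter and _zxt_int_exit, expressed in C (a sketch; the field names are illustrative and the real code runs with interrupts masked):

struct tcs_stub {
	void *sp;                  /* saved stack pointer (TCB field) */
};

struct kernel_stub {               /* illustrative stand-in for _kernel */
	int nested;                /* interrupt nesting depth */
	struct tcs_stub *current;  /* interrupted thread */
};

static void int_enter(struct kernel_stub *k, void *interruptee_sp)
{
	if (k->nested++ == 0) {
		k->current->sp = interruptee_sp; /* outermost entry only */
	}
}

static int int_exit_still_nested(struct kernel_stub *k)
{
	return --k->nested != 0; /* nonzero after decrement: still nested */
}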
@ -140,17 +144,16 @@ _zxt_int_enter:
ret
/*
* _zxt_int_exit
* void _zxt_int_exit(void)
* _zxt_int_exit
* void _zxt_int_exit(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_EXIT function for
* Zephyr. If required, calls vPortYieldFromInt() to perform task context
* switching, restore the (possibly) new task's context, and return to the
* exit dispatcher saved in the task's stack frame at XT_STK_EXIT.
* May only be called from assembly code by the 'call0' instruction. Does not
* return to caller.
* See the description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*
* switching, restore the (possibly) new task's context, and return to the exit
* dispatcher saved in the task's stack frame at XT_STK_EXIT. May only be
* called from assembly code by the 'call0' instruction. Does not return to
* caller. See the description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
*/
.globl _zxt_int_exit
.type _zxt_int_exit,@function
@ -159,15 +162,16 @@ _zxt_int_exit:
rsil a0, XCHAL_EXCM_LEVEL /* lock out interrupts */
movi a2, _kernel
l32i a3, a2, KERNEL_OFFSET(nested) /* _kernel->nested */
addi a3, a3, -1 /* decrement nesting count */
s32i a3, a2, KERNEL_OFFSET(nested) /* save nesting count */
bnez a3, .Lnesting /* !=0 after decr so still nested */
l32i a3, a2, KERNEL_OFFSET(nested) /* _kernel->nested */
addi a3, a3, -1 /* decrement nesting count */
s32i a3, a2, KERNEL_OFFSET(nested) /* save nesting count */
bnez a3, .Lnesting /* !=0 after decr so still nested */
/*
* When using call0 ABI callee-saved registers a12-15 need to be saved
* before enabling preemption. They were already saved by _zxt_int_enter().
*/
* before enabling preemption. They were already saved by
* _zxt_int_enter().
*/
#ifdef __XTENSA_CALL0_ABI__
s32i a14, a1, XT_STK_a14
s32i a15, a1, XT_STK_a15
@ -210,22 +214,22 @@ _zxt_int_exit:
/*
* Must return via the exit dispatcher corresponding to the entrypoint
* from which this was called. Interruptee's A0, A1, PS, PC are restored
* and the interrupt stack frame is deallocated in the exit dispatcher.
* from which this was called. Interruptee's A0, A1, PS, PC are
* restored and the interrupt stack frame is deallocated in the exit
* dispatcher.
*/
l32i a0, sp, XT_STK_exit
ret
/*
* _zxt_timer_int
* void _zxt_timer_int(void)
* _zxt_timer_int
* void _zxt_timer_int(void)
*
* Implements Xtensa RTOS porting layer's XT_RTOS_TIMER_INT function.
* Called every timer interrupt.
* Manages the tick timer and calls xPortSysTickHandler() every tick.
* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.
* Callable from C.
* Implemented in assmebly code for performance.
* Implements Xtensa RTOS porting layer's XT_RTOS_TIMER_INT function. Called
* every timer interrupt. Manages the tick timer and calls
* xPortSysTickHandler() every tick. See the detailed description of the
* XT_RTOS_ENTER macro in xtensa_rtos.h. Callable from C. Implemented in
* assembly code for performance.
*
*/
.globl _zxt_timer_int
@ -235,14 +239,14 @@ _zxt_timer_int:
/*
* Xtensa timers work by comparing a cycle counter with a preset value.
* Once the match occurs an interrupt is generated, and the handler has to
* set a new cycle count into the comparator.
* To avoid clock drift due to interrupt latency, the new cycle count is
* computed from the old, not the time the interrupt was serviced. However
* if a timer interrupt is ever serviced more than one tick late, it is
* necessary to process multiple ticks until the new cycle count is in the
* future, otherwise the next timer interrupt would not occur until after
* the cycle counter had wrapped (2^32 cycles later).
* Once the match occurs an interrupt is generated, and the handler has
* to set a new cycle count into the comparator. To avoid clock drift
* due to interrupt latency, the new cycle count is computed from the
* old, not the time the interrupt was serviced. However if a timer
* interrupt is ever serviced more than one tick late, it is necessary
* to process multiple ticks until the new cycle count is in the
* future, otherwise the next timer interrupt would not occur until
* after the cycle counter had wrapped (2^32 cycles later).
*
* do {
* ticks++;
@ -303,10 +307,10 @@ _zxt_timer_int:
RET(16)
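The do/while sketched in the comment above, modeled in C (plain variables stand in for the CCOUNT and CCOMPARE special registers, which the real code accesses with rsr/wsr):

#include <stdint.h>

static uint32_t ccount;   /* stand-in for the free-running cycle counter */
static uint32_t ccompare; /* stand-in for the timer comparator register */

static uint32_t service_ticks(uint32_t divisor)
{
	uint32_t ticks = 0;

	do {
		ticks++;
		ccompare += divisor; /* advance from the old value, not "now" */
	} while ((int32_t)(ccount - ccompare) >= 0); /* catch up if late */

	return ticks;
}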
/*
* _zxt_tick_timer_init
* void _zxt_tick_timer_init(void)
* _zxt_tick_timer_init
* void _zxt_tick_timer_init(void)
*
* Initialize timer and timer interrrupt handler (_xt_tick_divisor_init() has
* Initialize timer and timer interrupt handler (_xt_tick_divisor_init() has
* already been called).
* Callable from C (obeys ABI conventions on entry).
*
@ -320,7 +324,9 @@ _zxt_tick_timer_init:
#ifdef CONFIG_SYS_CLOCK_EXISTS
#if CONFIG_XTENSA_INTERNAL_TIMER || (CONFIG_XTENSA_TIMER_IRQ < 0)
/* Set up the periodic tick timer (assume enough time to complete init). */
/* Set up the periodic tick timer (assume enough time to complete
* init).
*/
#ifdef XT_CLOCK_FREQ
movi a3, XT_TICK_DIVISOR
#else
@ -348,8 +354,8 @@ _zxt_tick_timer_init:
RET(48)
/*
* _zxt_task_coproc_state
* void _zxt_task_coproc_state(void)
* _zxt_task_coproc_state
* void _zxt_task_coproc_state(void)
*
* Implements the Xtensa RTOS porting layer's XT_RTOS_CP_STATE function.
*


@ -3,29 +3,28 @@
* SPDX-License-Identifier: Apache-2.0
*/
/*******************************************************************************
XTENSA CONTEXT SAVE AND RESTORE ROUTINES
Low-level Call0 functions for handling generic context save and restore of
registers not specifically addressed by the interrupt vectors and handlers.
Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
Except for the calls to RTOS functions, this code is generic to Xtensa.
Note that in Call0 ABI, interrupt handlers are expected to preserve the callee-
save regs (A12-A15), which is always the case if the handlers are coded in C.
However A12, A13 are made available as scratch registers for interrupt dispatch
code, so are presumed saved anyway, and are always restored even in Call0 ABI.
Only A14, A15 are truly handled as callee-save regs.
Because Xtensa is a configurable architecture, this port supports all user
generated configurations (except restrictions stated in the release notes).
This is accomplished by conditional compilation using macros and functions
defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
Only the processor state included in your configuration is saved and restored,
including any processor state added by user configuration options or TIE.
*******************************************************************************/
/* XTENSA CONTEXT SAVE AND RESTORE ROUTINES
*
* Low-level Call0 functions for handling generic context save and restore of
* registers not specifically addressed by the interrupt vectors and handlers.
* Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
* Except for the calls to RTOS functions, this code is generic to Xtensa.
*
* Note that in Call0 ABI, interrupt handlers are expected to preserve the
* callee-save regs (A12-A15), which is always the case if the handlers are
* coded in C. However A12, A13 are made available as scratch registers for
* interrupt dispatch code, so are presumed saved anyway, and are always
* restored even in Call0 ABI. Only A14, A15 are truly handled as callee-save
* regs.
*
* Because Xtensa is a configurable architecture, this port supports all user
* generated configurations (except restrictions stated in the release notes).
* This is accomplished by conditional compilation using macros and functions
* defined in the Xtensa HAL (hardware adaptation layer) for your
* configuration. Only the processor state included in your configuration is
* saved and restored, including any processor state added by user
* configuration options or TIE.
*/
/* Warn nicely if this file gets named with a lowercase .s instead of .S: */
#define NOERROR #
@ -41,37 +40,36 @@ NOERROR: .error "C preprocessor needed for this file: make sure its filename\
.text
/*******************************************************************************
_xt_context_save
!! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the
interrupt stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_restore (which also restores A12, A13).
Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
This function preserves A12 & A13 in order to provide the caller with 2 scratch
regs that need not be saved over the call to this function. The choice of which
2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,
to avoid moving data more than necessary. Caller can assign regs accordingly.
Entry Conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Original A12, A13 have already been saved in the interrupt stack frame.
Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
point of interruption.
If windowed ABI, PS.EXCM = 1 (exceptions disabled).
Exit conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
A12, A13 as at entry (preserved).
If windowed ABI, PS.EXCM = 1 (exceptions disabled).
*******************************************************************************/
/*
* _xt_context_save
*
* !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
*
* Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in
* the interrupt stack frame defined in xtensa_rtos.h. Its counterpart is
* _xt_context_restore (which also restores A12, A13).
*
* Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
* This function preserves A12 & A13 in order to provide the caller with 2
* scratch regs that need not be saved over the call to this function. The
* choice of which 2 regs to provide is governed by xthal_window_spill_nw and
* xthal_save_extra_nw, to avoid moving data more than necessary. Caller can
* assign regs accordingly.
*
* Entry Conditions:
* A0 = Return address in caller.
* A1 = Stack pointer of interrupted thread or handler ("interruptee").
* Original A12, A13 have already been saved in the interrupt stack frame.
* Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
* point of interruption.
* If windowed ABI, PS.EXCM = 1 (exceptions disabled).
*
* Exit conditions:
* A0 = Return address in caller.
* A1 = Stack pointer of interrupted thread or handler ("interruptee").
* A12, A13 as at entry (preserved).
* If windowed ABI, PS.EXCM = 1 (exceptions disabled).
*/
.global _xt_context_save
.type _xt_context_save,@function
@ -89,11 +87,10 @@ _xt_context_save:
s32i a10, sp, XT_STK_a10
s32i a11, sp, XT_STK_a11
/*
Call0 ABI callee-saved regs a12-15 do not need to be saved here.
a12-13 are the caller's responsibility so it can use them as scratch.
So only need to save a14-a15 here for Windowed ABI (not Call0).
*/
/* Call0 ABI callee-saved regs a12-15 do not need to be saved here. a12-13
* are the caller's responsibility so it can use them as scratch. So only
* need to save a14-a15 here for Windowed ABI (not Call0).
*/
#ifndef __XTENSA_CALL0_ABI__
s32i a14, sp, XT_STK_a14
s32i a15, sp, XT_STK_a15
@ -123,21 +120,20 @@ _xt_context_save:
#endif
#ifndef __XTENSA_CALL0_ABI__
/*
To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
Need to save a9,12,13 temporarily (in frame temps) and recover originals.
Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
and underflow exceptions disabled (assured by PS.EXCM == 1).
*/
/* To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
* Need to save a9,12,13 temporarily (in frame temps) and recover
* originals. Interrupts need to be disabled below XCHAL_EXCM_LEVEL and
* window overflow and underflow exceptions disabled (assured by PS.EXCM ==
* 1).
*/
s32i a12, sp, XT_STK_tmp0 /* temp. save stuff in stack frame */
s32i a13, sp, XT_STK_tmp1
s32i a9, sp, XT_STK_tmp2
/*
Save the overlay state if we are supporting overlays. Since we just saved
three registers, we can conveniently use them here. Note that as of now,
overlays only work for windowed calling ABI.
*/
/* Save the overlay state if we are supporting overlays. Since we just
* saved three registers, we can conveniently use them here. Note that as
* of now, overlays only work for windowed calling ABI.
*/
#ifdef XT_USE_OVLY
l32i a9, sp, XT_STK_PC /* recover saved PC */
_xt_overlay_get_state a9, a12, a13
@ -156,15 +152,14 @@ _xt_context_save:
#endif
#if XCHAL_EXTRA_SA_SIZE > 0
/*
NOTE: Normally the xthal_save_extra_nw macro only affects address
registers a2-a5. It is theoretically possible for Xtensa processor
designers to write TIE that causes more address registers to be
affected, but it is generally unlikely. If that ever happens,
more registers need to be saved/restored around this macro invocation.
Here we assume a9,12,13 are preserved.
Future Xtensa tools releases might limit the regs that can be affected.
*/
/* NOTE: Normally the xthal_save_extra_nw macro only affects address
* registers a2-a5. It is theoretically possible for Xtensa processor
* designers to write TIE that causes more address registers to be
* affected, but it is generally unlikely. If that ever happens, more
* registers need to be saved/restored around this macro invocation. Here
* we assume a9,12,13 are preserved. Future Xtensa tools releases might
* limit the regs that can be affected.
*/
addi a2, sp, XT_STK_EXTRA /* where to save it */
# if XCHAL_EXTRA_SA_ALIGN > 16
movi a3, -XCHAL_EXTRA_SA_ALIGN
@ -179,30 +174,27 @@ _xt_context_save:
ret
/*******************************************************************************
_xt_context_restore
!! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
stack frame defined in xtensa_rtos.h .
Its counterpart is _xt_context_save (whose caller saved A12, A13).
Caller is responsible to restore PC, PS, A0, A1 (SP).
Entry Conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Exit conditions:
A0 = Return address in caller.
A1 = Stack pointer of interrupted thread or handler ("interruptee").
Other processor state except PC, PS, A0, A1 (SP), is as at the point
of interruption.
*******************************************************************************/
/*_xt_context_restore
*
* !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
*
* Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
* ABI, A14, A15 which are preserved by all interrupt handlers) from an
* interrupt stack frame defined in xtensa_rtos.h. Its counterpart is
* _xt_context_save (whose caller saved A12, A13).
*
* Caller is responsible for restoring PC, PS, A0, A1 (SP).
*
* Entry Conditions:
* A0 = Return address in caller.
* A1 = Stack pointer of interrupted thread or handler ("interruptee").
*
* Exit conditions:
* A0 = Return address in caller.
* A1 = Stack pointer of interrupted thread or handler ("interruptee").
* Other processor state except PC, PS, A0, A1 (SP), is as at the point
* of interruption.
*/
.global _xt_context_restore
.type _xt_context_restore,@function
@ -210,15 +202,14 @@ Exit conditions:
_xt_context_restore:
#if XCHAL_EXTRA_SA_SIZE > 0
/*
NOTE: Normally the xthal_restore_extra_nw macro only affects address
registers a2-a5. It is theoretically possible for Xtensa processor
designers to write TIE that causes more address registers to be
affected, but it is generally unlikely. If that ever happens,
more registers need to be saved/restored around this macro invocation.
Here we only assume a13 is preserved.
Future Xtensa tools releases might limit the regs that can be affected.
*/
/* NOTE: Normally the xthal_restore_extra_nw macro only affects address
* registers a2-a5. It is theoretically possible for Xtensa processor
* designers to write TIE that causes more address registers to be
* affected, but it is generally unlikely. If that ever happens, more
* registers need to be saved/restored around this macro invocation. Here
* we only assume a13 is preserved. Future Xtensa tools releases might
* limit the regs that can be affected.
*/
mov a13, a0 /* preserve ret addr */
addi a2, sp, XT_STK_EXTRA /* where to find it */
# if XCHAL_EXTRA_SA_ALIGN > 16
@ -239,12 +230,11 @@ _xt_context_restore:
#endif
#ifdef XT_USE_OVLY
/*
If we are using overlays, this is a good spot to check if we need
to restore an overlay for the incoming task. Here we have a bunch
of registers to spare. Note that this step is going to use a few
bytes of storage below SP (SP-20 to SP-32) if an overlay is going
to be restored.
/* If we are using overlays, this is a good spot to check if we need to
* restore an overlay for the incoming task. Here we have a bunch of
* registers to spare. Note that this step is going to use a few bytes of
* storage below SP (SP-20 to SP-32) if an overlay is going to be
* restored.
*/
l32i a2, sp, XT_STK_pc /* retrieve PC */
l32i a3, sp, XT_STK_ps /* retrieve PS */
@ -279,10 +269,10 @@ _xt_context_restore:
l32i a11, sp, XT_STK_a11
/*
Call0 ABI callee-saved regs a12-15 do not need to be restored here.
However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
so need to be restored anyway, despite being callee-saved in Call0.
*/
* Call0 ABI callee-saved regs a12-15 do not need to be restored here.
* However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
* so need to be restored anyway, despite being callee-saved in Call0.
*/
l32i a12, sp, XT_STK_a12
l32i a13, sp, XT_STK_a13
#ifndef __XTENSA_CALL0_ABI__
@ -293,31 +283,28 @@ _xt_context_restore:
ret
/*******************************************************************************
_xt_coproc_init
Initializes global co-processor management data, setting all co-processors
to "unowned". Leaves CPENABLE as it found it (does NOT clear it).
Called during initialization of the RTOS, before any threads run.
This may be called from normal Xtensa single-threaded application code which
might use co-processors. The Xtensa run-time initialization enables all
co-processors. They must remain enabled here, else a co-processor exception
might occur outside of a thread, which the exception handler doesn't expect.
Entry Conditions:
Xtensa single-threaded run-time environment is in effect.
No thread is yet running.
Exit conditions:
None.
Obeys ABI conventions per prototype:
void _xt_coproc_init(void)
*******************************************************************************/
/* _xt_coproc_init
*
* Initializes global co-processor management data, setting all co-processors
* to "unowned". Leaves CPENABLE as it found it (does NOT clear it).
*
* Called during initialization of the RTOS, before any threads run.
*
* This may be called from normal Xtensa single-threaded application code which
* might use co-processors. The Xtensa run-time initialization enables all
* co-processors. They must remain enabled here, else a co-processor exception
* might occur outside of a thread, which the exception handler doesn't expect.
*
* Entry Conditions:
* Xtensa single-threaded run-time environment is in effect.
* No thread is yet running.
*
* Exit conditions:
* None.
*
* Obeys ABI conventions per prototype:
* void _xt_coproc_init(void)
*/
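
For illustration, a minimal C sketch of an RTOS init path using the
prototype above; the init function name is an assumption, and
XCHAL_CP_NUM comes from the core's HAL headers:

#include <xtensa/config/core-isa.h>   /* defines XCHAL_CP_NUM */

extern void _xt_coproc_init(void);

void rtos_early_init(void)            /* hypothetical init hook */
{
#if XCHAL_CP_NUM > 0
    /* Mark all co-processors "unowned"; CPENABLE is left alone. */
    _xt_coproc_init();
#endif
}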
#if XCHAL_CP_NUM > 0
@ -340,29 +327,26 @@ _xt_coproc_init:
#endif
/*******************************************************************************
_xt_coproc_release
Releases any and all co-processors owned by a given thread. The thread is
identified by it's co-processor state save area defined in xtensa_context.h .
Must be called before a thread's co-proc save area is deleted to avoid
memory corruption when the exception handler tries to save the state.
May be called when a thread terminates or completes but does not delete
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).
Entry Conditions:
A2 = Pointer to base of co-processor state save area.
Exit conditions:
None.
Obeys ABI conventions per prototype:
void _xt_coproc_release(void * coproc_sa_base)
*******************************************************************************/
/* _xt_coproc_release
*
* Releases any and all co-processors owned by a given thread. The thread is
* identified by its co-processor state save area defined in xtensa_context.h.
*
* Must be called before a thread's co-proc save area is deleted to avoid
* memory corruption when the exception handler tries to save the state.
* May be called when a thread terminates or completes but does not delete
* the co-proc save area, to avoid the exception handler having to save the
* thread's co-proc state before another thread can use it (optimization).
*
* Entry Conditions:
* A2 = Pointer to base of co-processor state save area.
*
* Exit conditions:
* None.
*
* Obeys ABI conventions per prototype:
* void _xt_coproc_release(void * coproc_sa_base)
*/
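
A hedged sketch of the thread-teardown call site implied above; the
thread structure, its cp_save_area member, and free_thread_memory()
are hypothetical names, not part of this port:

struct my_thread {                 /* hypothetical TCB */
    void *cp_save_area;            /* base of per-thread CP save area */
};

extern void _xt_coproc_release(void *coproc_sa_base);
extern void free_thread_memory(struct my_thread *t);

void thread_destroy(struct my_thread *t)
{
    /* Disown any co-processors first, so the lazy-switch handler
     * never saves state into memory that is about to be freed.
     */
    _xt_coproc_release(t->cp_save_area);
    free_thread_memory(t);
}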
#if XCHAL_CP_NUM > 0
@ -391,25 +375,25 @@ _xt_coproc_release:
#endif
/*******************************************************************************
_xt_coproc_savecs
/* _xt_coproc_savecs
*
* If there is a current thread and it has a coprocessor state save area, then
* save all callee-saved state into this area. This function is called from the
* solicited context switch handler. It calls a system-specific function to get
* the coprocessor save area base address.
*
* Entry conditions:
* - The thread being switched out is still the current thread.
* - CPENABLE state reflects which coprocessors are active.
* - Registers have been saved/spilled already.
*
* Exit conditions:
* - All necessary CP callee-saved state has been saved.
* - Registers a2-a7, a13-a15 have been trashed.
*
* Must be called from assembly code only, using CALL0.
*/
If there is a current thread and it has a coprocessor state save area, then
save all callee-saved state into this area. This function is called from the
solicited context switch handler. It calls a system-specific function to get
the coprocessor save area base address.
Entry conditions:
- The thread being switched out is still the current thread.
- CPENABLE state reflects which coprocessors are active.
- Registers have been saved/spilled already.
Exit conditions:
- All necessary CP callee-saved state has been saved.
- Registers a2-a7, a13-a15 have been trashed.
Must be called from assembly code only, using CALL0.
*******************************************************************************/
#if XCHAL_CP_NUM > 0
.extern _xt_coproc_sa_offset /* external reference */
@ -435,7 +419,8 @@ _xt_coproc_savecs:
bbci.l a2, 0, 2f /* CP 0 not enabled */
l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
add a3, a14, a15 /* a3 = save area for CP 0 */
xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -443,7 +428,8 @@ _xt_coproc_savecs:
bbci.l a2, 1, 2f /* CP 1 not enabled */
l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
add a3, a14, a15 /* a3 = save area for CP 1 */
xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -451,7 +437,8 @@ _xt_coproc_savecs:
bbci.l a2, 2, 2f
l32i a14, a13, 8
add a3, a14, a15
xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -459,7 +446,8 @@ _xt_coproc_savecs:
bbci.l a2, 3, 2f
l32i a14, a13, 12
add a3, a14, a15
xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -467,7 +455,8 @@ _xt_coproc_savecs:
bbci.l a2, 4, 2f
l32i a14, a13, 16
add a3, a14, a15
xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -475,7 +464,8 @@ _xt_coproc_savecs:
bbci.l a2, 5, 2f
l32i a14, a13, 20
add a3, a14, a15
xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -483,7 +473,8 @@ _xt_coproc_savecs:
bbci.l a2, 6, 2f
l32i a14, a13, 24
add a3, a14, a15
xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -491,7 +482,8 @@ _xt_coproc_savecs:
bbci.l a2, 7, 2f
l32i a14, a13, 28
add a3, a14, a15
xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -500,26 +492,27 @@ _xt_coproc_savecs:
#endif
/*******************************************************************************
_xt_coproc_restorecs
/*
* _xt_coproc_restorecs
*
* Restore any callee-saved coprocessor state for the incoming thread.
* This function is called from coprocessor exception handling, when giving
* ownership to a thread that solicited a context switch earlier. It calls a
* system-specific function to get the coprocessor save area base address.
*
* Entry conditions:
* - The incoming thread is set as the current thread.
* - CPENABLE is set up correctly for all required coprocessors.
* - a2 = mask of coprocessors to be restored.
*
* Exit conditions:
* - All necessary CP callee-saved state has been restored.
* - CPENABLE - unchanged.
* - Registers a2-a7, a13-a15 have been trashed.
*
* Must be called from assembly code only, using CALL0.
*/
Restore any callee-saved coprocessor state for the incoming thread.
This function is called from coprocessor exception handling, when giving
ownership to a thread that solicited a context switch earlier. It calls a
system-specific function to get the coprocessor save area base address.
Entry conditions:
- The incoming thread is set as the current thread.
- CPENABLE is set up correctly for all required coprocessors.
- a2 = mask of coprocessors to be restored.
Exit conditions:
- All necessary CP callee-saved state has been restored.
- CPENABLE - unchanged.
- Registers a2-a7, a13-a15 have been trashed.
Must be called from assembly code only, using CALL0.
*******************************************************************************/
#if XCHAL_CP_NUM > 0
.global _xt_coproc_restorecs
@ -541,7 +534,8 @@ _xt_coproc_restorecs:
bbci.l a2, 0, 2f /* CP 0 not enabled */
l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
add a3, a14, a15 /* a3 = save area for CP 0 */
xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -549,7 +543,8 @@ _xt_coproc_restorecs:
bbci.l a2, 1, 2f /* CP 1 not enabled */
l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
add a3, a14, a15 /* a3 = save area for CP 1 */
xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -557,7 +552,8 @@ _xt_coproc_restorecs:
bbci.l a2, 2, 2f
l32i a14, a13, 8
add a3, a14, a15
xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -565,7 +561,8 @@ _xt_coproc_restorecs:
bbci.l a2, 3, 2f
l32i a14, a13, 12
add a3, a14, a15
xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -573,7 +570,8 @@ _xt_coproc_restorecs:
bbci.l a2, 4, 2f
l32i a14, a13, 16
add a3, a14, a15
xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -581,7 +579,8 @@ _xt_coproc_restorecs:
bbci.l a2, 5, 2f
l32i a14, a13, 20
add a3, a14, a15
xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -589,7 +588,8 @@ _xt_coproc_restorecs:
bbci.l a2, 6, 2f
l32i a14, a13, 24
add a3, a14, a15
xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif
@ -597,7 +597,8 @@ _xt_coproc_restorecs:
bbci.l a2, 7, 2f
l32i a14, a13, 28
add a3, a14, a15
xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 \
select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

File diff suppressed because it is too large

View file

@ -75,9 +75,9 @@ typedef struct _caller_saved _caller_saved_t;
*/
struct _callee_saved {
/*
* The following registers are considered non-volatile, i.e. callee-saved,
* but their values are pushed onto the stack rather than stored in the
* TCS structure:
* The following registers are considered non-volatile, i.e.
* callee-saved, but their values are pushed onto the stack rather than
* stored in the TCS structure:
*/
uint32_t retval; /* a2 */
XtExcFrame *topOfStack; /* a1 = sp */
@ -96,8 +96,8 @@ typedef struct __esf __esf_t;
typedef struct s_coopCoprocReg {
/*
* This structure intentionally left blank. Coprocessor's registers are all
* 'volatile' and saved using the lazy context switch mechanism.
* This structure intentionally left blank. Coprocessor's registers are
* all 'volatile' and saved using the lazy context switch mechanism.
*/
} tCoopCoprocReg;
@ -106,13 +106,13 @@ typedef struct s_coopCoprocReg {
* The following structure defines the set of 'volatile' x87 FPU/MMX/SSE
* registers. These registers need not be preserved by a called C function.
* Given that they are not preserved across function calls, they must be
* save/restored (along with s_coopCoprocReg) when a preemptive context
* switch occurs.
* saved/restored (along with s_coopCoprocReg) when a preemptive context switch
* occurs.
*/
typedef struct s_preempCoprocReg {
/*
* This structure intentionally left blank, as for now coprocessor's stack
* is positioned at top of the stack.
* This structure intentionally left blank, as for now coprocessor's
* stack is positioned at top of the stack.
*/
#if XCHAL_CP_NUM > 0
char *cpStack;
@ -128,9 +128,8 @@ typedef struct s_preempCoprocReg {
struct _thread_arch {
/*
* See the above flag definitions above for valid bit settings. This
* field must remain near the start of struct tcs, specifically
* before any #ifdef'ed fields since the host tools currently use a
* fixed
* field must remain near the start of struct tcs, specifically before
* any #ifdef'ed fields since the host tools currently use a fixed
* offset to read the 'flags' field.
*/
uint32_t flags;
@ -138,28 +137,37 @@ struct _thread_arch {
void *custom_data; /* available for custom use */
#endif
#if defined(CONFIG_THREAD_MONITOR)
struct __thread_entry *entry; /* thread entry and parameters description */
struct tcs *next_thread; /* next item in list of ALL fiber+tasks */
/* thread entry and parameters description */
struct __thread_entry *entry;
/* next item in list of ALL fiber+tasks */
struct tcs *next_thread;
#endif
#ifdef CONFIG_ERRNO
int errno_var;
#endif
/*
* The location of all floating point related structures/fields MUST be
* located at the end of struct tcs. This way only the
* fibers/tasks that actually utilize non-integer capabilities need to
* account for the increased memory required for storing FP state when
* sizing stacks.
* located at the end of struct tcs. This way only the fibers/tasks
* that actually utilize non-integer capabilities need to account for
* the increased memory required for storing FP state when sizing
* stacks.
*
* Given that stacks "grow down" on Xtensa, and the TCS is located
* at the start of a thread's "workspace" memory, the stacks of
* Given that stacks "grow down" on Xtensa, and the TCS is located at
* the start of a thread's "workspace" memory, the stacks of
* fibers/tasks that do not utilize floating point instruction can
* effectively consume the memory occupied by the 'tCoopCoprocReg' and
* 'tPreempCoprocReg' structures without ill effect.
* effectively consume the memory occupied by the 'tCoopCoprocReg'
* and 'tPreempCoprocReg' structures without ill effect.
*
* TODO: Move Xtensa coprocessor's stack here to get rid of extra
* indirection
*/
/* TODO: Move Xtensa coprocessor's stack here to get rid of extra indirection */
tCoopCoprocReg coopCoprocReg; /* non-volatile coprocessor's register storage */
tPreempCoprocReg preempCoprocReg; /* volatile coprocessor's register storage */
/* non-volatile coprocessor's register storage */
tCoopCoprocReg coopCoprocReg;
/* volatile coprocessor's register storage */
tPreempCoprocReg preempCoprocReg;
};
typedef struct _thread_arch _thread_arch_t;

View file

@ -21,7 +21,7 @@ extern "C" {
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
extern void FatalErrorHandler(void);
extern void ReservedInterruptHandler(unsigned int);
extern void ReservedInterruptHandler(unsigned int intNo);
/* Defined in xtensa_context.S */
extern void _xt_coproc_init(void);
@ -30,9 +30,9 @@ extern void _xt_coproc_init(void);
*
* @brief Performs architecture-specific initialization
*
* This routine performs architecture-specific initialization of the nanokernel.
* Trivial stuff is done inline; more complex initialization is done via
* function calls.
* This routine performs architecture-specific initialization of the
* nanokernel. Trivial stuff is done inline; more complex initialization is
* done via function calls.
*
* @return N/A
*/
@ -40,7 +40,9 @@ static ALWAYS_INLINE void nanoArchInit(void)
{
_kernel.nested = 0;
#if XCHAL_CP_NUM > 0
/* Initialize co-processor management for tasks. Leave CPENABLE alone. */
/* Initialize co-processor management for tasks.
* Leave CPENABLE alone.
*/
_xt_coproc_init();
#endif
}

View file

@ -3,10 +3,6 @@
* SPDX-License-Identifier: Apache-2.0
*/
/******************************************************************************
Xtensa-specific API for RTOS ports.
******************************************************************************/
#ifndef __XTENSA_API_H__
#define __XTENSA_API_H__
@ -21,82 +17,55 @@ typedef void (*xt_handler)(void *);
/* Typedef for C-callable exception handler function */
typedef void (*xt_exc_handler)(XtExcFrame *);
/*
-------------------------------------------------------------------------------
Call this function to set a handler for the specified exception.
n - Exception number (type)
f - Handler function address, NULL to uninstall handler.
The handler will be passed a pointer to the exception frame, which is created
on the stack of the thread that caused the exception.
If the handler returns, the thread context will be restored and the faulting
instruction will be retried. Any values in the exception frame that are
modified by the handler will be restored as part of the context. For details
of the exception frame structure see xtensa_context.h.
-------------------------------------------------------------------------------
*/
* Call this function to set a handler for the specified exception.
*
* n - Exception number (type)
* f - Handler function address, NULL to uninstall handler.
*
* The handler will be passed a pointer to the exception frame, which is created
* on the stack of the thread that caused the exception.
*
* If the handler returns, the thread context will be restored and the faulting
* instruction will be retried. Any values in the exception frame that are
* modified by the handler will be restored as part of the context. For details
* of the exception frame structure see xtensa_context.h.
*
* FIXME: Remove this API entirely
*/
extern xt_exc_handler _xt_set_exception_handler(int n, xt_exc_handler f);
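
For example, a hedged C sketch of installing a handler; the cause
number and log_fault() are assumptions, only _xt_set_exception_handler
and XtExcFrame come from this header:

#include "xtensa_api.h"

#define MY_EXC_NUM 3                  /* hypothetical EXCCAUSE value */

extern void log_fault(long pc, long cause);   /* hypothetical logger */

static void my_exc_handler(XtExcFrame *frame)
{
    /* Frame fields may be modified and are restored with the thread
     * context; the faulting instruction is retried on return.
     */
    log_fault(frame->pc, frame->exccause);
}

void install_exc_handler(void)
{
    xt_exc_handler old;

    old = _xt_set_exception_handler(MY_EXC_NUM, my_exc_handler);
    (void)old;                        /* previous handler, if any */
}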
/*
-------------------------------------------------------------------------------
Call this function to set a handler for the specified interrupt.
n - Interrupt number.
f - Handler function address, NULL to uninstall handler.
arg - Argument to be passed to handler.
-------------------------------------------------------------------------------
*/
#if XT_RTOS_IS_ZEPHYR_OS
#define _xt_set_interrupt_handler _irq_handler_set /* Map it to Zephyr's one */
#else
extern xt_handler _xt_set_interrupt_handler(int n, xt_handler f, void * arg);
#endif
/*
-------------------------------------------------------------------------------
Call this function to enable the specified interrupts.
mask - Bit mask of interrupts to be enabled.
-------------------------------------------------------------------------------
*/
* Call this function to enable the specified interrupts.
*
* mask - Bit mask of interrupts to be enabled.
*/
extern void _xt_ints_on(unsigned int mask);
/*
-------------------------------------------------------------------------------
Call this function to disable the specified interrupts.
mask - Bit mask of interrupts to be disabled.
-------------------------------------------------------------------------------
*/
* Call this function to disable the specified interrupts.
*
* mask - Bit mask of interrupts to be disabled.
*/
extern void _xt_ints_off(unsigned int mask);
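
A short usage sketch for the two mask functions; the interrupt numbers
are placeholders for core-specific sources:

#include "xtensa_api.h"

#define MY_IRQ_A 5                    /* hypothetical interrupt lines */
#define MY_IRQ_B 9

void update_shared_state(void)
{
    unsigned int mask = (1U << MY_IRQ_A) | (1U << MY_IRQ_B);

    _xt_ints_off(mask);               /* mask just these two sources */
    /* ... touch data shared with those handlers ... */
    _xt_ints_on(mask);                /* unmask them again */
}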
/*
-------------------------------------------------------------------------------
Call this function to set the specified (s/w) interrupt.
-------------------------------------------------------------------------------
*/
* Call this function to set the specified (s/w) interrupt.
*/
static inline void _xt_set_intset(unsigned int arg)
{
xthal_set_intset(arg);
xthal_set_intset(arg);
}
/*
-------------------------------------------------------------------------------
Call this function to clear the specified (s/w or edge-triggered)
interrupt.
-------------------------------------------------------------------------------
*/
/* Call this function to clear the specified (s/w or edge-triggered)
* interrupt.
*/
static inline void _xt_set_intclear(unsigned int arg)
{
xthal_set_intclear(arg);
xthal_set_intclear(arg);
}
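
And a matching sketch for the two inline wrappers above; the software
interrupt number is a placeholder for a core-specific one:

#define MY_SW_IRQ 7                   /* hypothetical s/w interrupt */

void poke_sw_interrupt(void)
{
    _xt_set_intset(1U << MY_SW_IRQ);   /* pend the s/w interrupt */
    /* ... its handler runs once that level is unmasked ... */
    _xt_set_intclear(1U << MY_SW_IRQ); /* or withdraw it by hand */
}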

View file

@ -3,16 +3,6 @@
* SPDX-License-Identifier: Apache-2.0
*/
/*******************************************************************************
Configuration-specific information for Xtensa build. This file must be
included in FreeRTOSConfig.h to properly set up the config-dependent
parameters correctly.
NOTE: To enable thread-safe C library support, XT_USE_THREAD_SAFE_CLIB must
be defined to be > 0 somewhere above or on the command line.
*******************************************************************************/
#ifndef XTENSA_CONFIG_H
#define XTENSA_CONFIG_H
@ -26,55 +16,55 @@ extern "C" {
#include "xtensa_context.h"
/*-----------------------------------------------------------------------------
* STACK REQUIREMENTS
*
* This section defines the minimum stack size, and the extra space required to
* be allocated for saving coprocessor state and/or C library state information
* (if thread safety is enabled for the C library). The sizes are in bytes.
*
* Stack sizes for individual tasks should be derived from these minima based on
* the maximum call depth of the task and the maximum level of interrupt nesting.
* A minimum stack size is defined by XT_STACK_MIN_SIZE. This minimum is based
* on the requirement for a task that calls nothing else but can be interrupted.
* This assumes that interrupt handlers do not call more than a few levels deep.
* If this is not true, i.e. one or more interrupt handlers make deep calls then
* the minimum must be increased.
*
* If the Xtensa processor configuration includes coprocessors, then space is
* allocated to save the coprocessor state on the stack.
*
* If thread safety is enabled for the C runtime library, (XT_USE_THREAD_SAFE_CLIB
* is defined) then space is allocated to save the C library context in the TCB.
*
* Allocating insufficient stack space is a common source of hard-to-find errors.
* During development, it is best to enable the FreeRTOS stack checking features.
*
* Usage:
*
* XT_USE_THREAD_SAFE_CLIB -- Define this to a nonzero value to enable thread-safe
* use of the C library. This will require extra stack
* space to be allocated for tasks that use the C library
* reentrant functions. See below for more information.
*
* NOTE: The Xtensa toolchain supports multiple C libraries and not all of them
* support thread safety. Check your core configuration to see which C library
* was chosen for your system.
*
* XT_STACK_MIN_SIZE -- The minimum stack size for any task. It is recommended
* that you do not use a stack smaller than this for any
* task. In case you want to use stacks smaller than this
* size, you must verify that the smaller size(s) will work
* under all operating conditions.
*
* XT_STACK_EXTRA -- The amount of extra stack space to allocate for a task
* that does not make C library reentrant calls. Add this
* to the amount of stack space required by the task itself.
*
* XT_STACK_EXTRA_CLIB -- The amount of space to allocate for C library state.
*
-----------------------------------------------------------------------------*/
/*
* STACK REQUIREMENTS
*
* This section defines the minimum stack size, and the extra space required to
* be allocated for saving coprocessor state and/or C library state information
* (if thread safety is enabled for the C library). The sizes are in bytes.
*
* Stack sizes for individual tasks should be derived from these minima based
* on the maximum call depth of the task and the maximum level of interrupt
* nesting. A minimum stack size is defined by XT_STACK_MIN_SIZE. This minimum
* is based on the requirement for a task that calls nothing else but can be
* interrupted. This assumes that interrupt handlers do not call more than a
* few levels deep. If this is not true, i.e. one or more interrupt handlers
* make deep calls then the minimum must be increased.
*
* If the Xtensa processor configuration includes coprocessors, then space is
* allocated to save the coprocessor state on the stack.
*
* If thread safety is enabled for the C runtime library,
* (XT_USE_THREAD_SAFE_CLIB is defined) then space is allocated to save the C
* library context in the TCB.
*
* Allocating insufficient stack space is a common source of hard-to-find
* errors. During development, it is best to enable the FreeRTOS stack
* checking features.
*
* Usage:
*
* XT_USE_THREAD_SAFE_CLIB -- Define this to a nonzero value to enable
* thread-safe use of the C library. This will require extra stack space to be
* allocated for tasks that use the C library reentrant functions. See below
* for more information.
*
* NOTE: The Xtensa toolchain supports multiple C libraries and not all of them
* support thread safety. Check your core configuration to see which C library
* was chosen for your system.
*
* XT_STACK_MIN_SIZE -- The minimum stack size for any task. It is
* recommended that you do not use a stack smaller than this for any task. In
* case you want to use stacks smaller than this size, you must verify that the
* smaller size(s) will work under all operating conditions.
*
* XT_STACK_EXTRA -- The amount of extra stack space to allocate for a
* task that does not make C library reentrant calls. Add this to the amount of
* stack space required by the task itself.
*
* XT_STACK_EXTRA_CLIB -- The amount of space to allocate for C library
* state.
*/
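
For illustration, a stack-size computation following the rules above;
the call-depth estimate is an application-specific guess, and the
XT_STACK_* macros are the ones defined later in this header:

#define MY_TASK_CALL_DEPTH_BYTES 0x180    /* assumed worst case */

#define MY_TASK_STACK_SIZE \
    (XT_STACK_MIN_SIZE + MY_TASK_CALL_DEPTH_BYTES)

A task that also calls C library reentrant functions would add
XT_STACK_EXTRA_CLIB on top of this when thread-safe C library support
is enabled.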
/* Extra space required for interrupt/exception hooks. */
#ifdef XT_INTEXC_HOOKS
@ -87,44 +77,47 @@ extern "C" {
#define STK_INTEXC_EXTRA 0
#endif
/* Check C library thread safety support and compute size of C library save area. */
/* Check C library thread safety support and compute size of C library save
* area.
*/
#if XT_USE_THREAD_SAFE_CLIB > 0u
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
#define XT_HAVE_THREAD_SAFE_CLIB 0
#error Thread-safe operation is not yet supported for the XCLIB C library.
#define XT_HAVE_THREAD_SAFE_CLIB 0
#error "Thread-safe operation is not yet supported for the XCLIB C library."
#elif XSHAL_CLIB == XTHAL_CLIB_NEWLIB
#define XT_HAVE_THREAD_SAFE_CLIB 1
#define XT_HAVE_THREAD_SAFE_CLIB 1
#if !defined __ASSEMBLER__
#include <sys/reent.h>
#define XT_CLIB_CONTEXT_AREA_SIZE ((sizeof(struct _reent) + 15) & (-16))
#define XT_CLIB_GLOBAL_PTR _impure_ptr
#define XT_CLIB_CONTEXT_AREA_SIZE ((sizeof(struct _reent) + 15) & (-16))
#define XT_CLIB_GLOBAL_PTR _impure_ptr
#endif
#else
#define XT_HAVE_THREAD_SAFE_CLIB 0
#error The selected C runtime library is not thread safe.
#define XT_HAVE_THREAD_SAFE_CLIB 0
#error "The selected C runtime library is not thread safe."
#endif
#else
#define XT_CLIB_CONTEXT_AREA_SIZE 0
#define XT_CLIB_CONTEXT_AREA_SIZE 0
#endif
/*------------------------------------------------------------------------------
Extra size -- interrupt frame plus coprocessor save area plus hook space.
NOTE: Make sure XT_INTEXC_HOOKS is undefined unless you really need the hooks.
------------------------------------------------------------------------------*/
/* Extra size -- interrupt frame plus coprocessor save area plus hook space.
*
* NOTE: Make sure XT_INTEXC_HOOKS is undefined unless you really need the
* hooks.
*/
#ifdef __XTENSA_CALL0_ABI__
#define XT_XTRA_SIZE (XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x10 + XT_CP_SIZE)
#define XT_XTRA_SIZE (XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x10 + XT_CP_SIZE)
#else
#define XT_XTRA_SIZE (XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x20 + XT_CP_SIZE)
#define XT_XTRA_SIZE (XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x20 + XT_CP_SIZE)
#endif
/*------------------------------------------------------------------------------
Space allocated for user code -- function calls and local variables.
NOTE: This number can be adjusted to suit your needs. You must verify that the
amount of space you reserve is adequate for the worst-case conditions in your
application.
NOTE: The windowed ABI requires more stack, since space has to be reserved
for spilling register windows.
------------------------------------------------------------------------------*/
/*
* Space allocated for user code -- function calls and local variables.
*
* NOTE: This number can be adjusted to suit your needs. You must verify that
* the amount of space you reserve is adequate for the worst-case conditions in
* your application. NOTE: The windowed ABI requires more stack, since space
* has to be reserved for spilling register windows.
*/
#ifdef __XTENSA_CALL0_ABI__
#define XT_USER_SIZE 0x200
#else
@ -132,7 +125,8 @@ extern "C" {
#endif
/* Minimum recommended stack size. */
#define XT_STACK_MIN_SIZE ((XT_XTRA_SIZE + XT_USER_SIZE) / sizeof(unsigned char))
#define XT_STACK_MIN_SIZE \
((XT_XTRA_SIZE + XT_USER_SIZE) / sizeof(unsigned char))
/* OS overhead with and without C library thread context. */
#define XT_STACK_EXTRA (XT_XTRA_SIZE)

View file

@ -3,20 +3,18 @@
* SPDX-License-Identifier: Apache-2.0
*/
/*******************************************************************************
XTENSA CONTEXT FRAMES AND MACROS FOR RTOS ASSEMBLER SOURCES
This header contains definitions and macros for use primarily by Xtensa
RTOS assembly coded source files. It includes and uses the Xtensa hardware
abstraction layer (HAL) to deal with config specifics. It may also be
included in C source files.
!! Supports only Xtensa Exception Architecture 2 (XEA2). XEA1 not supported. !!
NOTE: The Xtensa architecture requires stack pointer alignment to 16 bytes.
*******************************************************************************/
/*
* XTENSA CONTEXT FRAMES AND MACROS FOR RTOS ASSEMBLER SOURCES
*
* This header contains definitions and macros for use primarily by Xtensa RTOS
* assembly coded source files. It includes and uses the Xtensa hardware
* abstraction layer (HAL) to deal with config specifics. It may also be
* included in C source files.
*
* Supports only Xtensa Exception Architecture 2 (XEA2). XEA1 not supported.
*
* NOTE: The Xtensa architecture requires stack pointer alignment to 16 bytes.
*/
#ifndef XTENSA_CONTEXT_H
#define XTENSA_CONTEXT_H
@ -34,67 +32,65 @@ NOTE: The Xtensa architecture requires stack pointer alignment to 16 bytes.
/* Align a value up to nearest n-byte boundary, where n is a power of 2. */
#define ALIGNUP(n, val) (((val) + (n)-1) & -(n))
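
A couple of compile-time checks, purely illustrative, showing what the
macro computes (C11 _Static_assert assumed available):

#if !defined __ASSEMBLER__
_Static_assert(ALIGNUP(16, 36) == 48, "36 rounds up to 48");
_Static_assert(ALIGNUP(16, 48) == 48, "aligned values are unchanged");
#endif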
/*
-------------------------------------------------------------------------------
INTERRUPT/EXCEPTION STACK FRAME FOR A THREAD OR NESTED INTERRUPT
A stack frame of this structure is allocated for any interrupt or exception.
It goes on the current stack. If the RTOS has a system stack for handling
interrupts, every thread stack must allow space for just one interrupt stack
frame, then nested interrupt stack frames go on the system stack.
The frame includes basic registers (explicit) and "extra" registers introduced
by user TIE or the use of the MAC16 option in the user's Xtensa config.
The frame size is minimized by omitting regs not applicable to user's config.
For Windowed ABI, this stack frame includes the interruptee's base save area,
another base save area to manage gcc nested functions, and a little temporary
space to help manage the spilling of the register windows.
-------------------------------------------------------------------------------
*/
* INTERRUPT/EXCEPTION STACK FRAME FOR A THREAD OR NESTED INTERRUPT
*
* A stack frame of this structure is allocated for any interrupt or exception.
* It goes on the current stack. If the RTOS has a system stack for handling
* interrupts, every thread stack must allow space for just one interrupt stack
* frame, then nested interrupt stack frames go on the system stack.
*
* The frame includes basic registers (explicit) and "extra" registers
* introduced by user TIE or the use of the MAC16 option in the user's Xtensa
* config. The frame size is minimized by omitting regs not applicable to
* user's config.
*
* For Windowed ABI, this stack frame includes the interruptee's base save
* area, another base save area to manage gcc nested functions, and a little
* temporary space to help manage the spilling of the register windows.
*/
STRUCT_BEGIN
STRUCT_FIELD (long, 4, XT_STK_, exit) /* exit point for dispatch */
STRUCT_FIELD (long, 4, XT_STK_, pc) /* return PC */
STRUCT_FIELD (long, 4, XT_STK_, ps) /* return PS */
STRUCT_FIELD (long, 4, XT_STK_, a0)
STRUCT_FIELD (long, 4, XT_STK_, a1) /* stack pointer before interrupt */
STRUCT_FIELD (long, 4, XT_STK_, a2)
STRUCT_FIELD (long, 4, XT_STK_, a3)
STRUCT_FIELD (long, 4, XT_STK_, a4)
STRUCT_FIELD (long, 4, XT_STK_, a5)
STRUCT_FIELD (long, 4, XT_STK_, a6)
STRUCT_FIELD (long, 4, XT_STK_, a7)
STRUCT_FIELD (long, 4, XT_STK_, a8)
STRUCT_FIELD (long, 4, XT_STK_, a9)
STRUCT_FIELD (long, 4, XT_STK_, a10)
STRUCT_FIELD (long, 4, XT_STK_, a11)
STRUCT_FIELD (long, 4, XT_STK_, a12)
STRUCT_FIELD (long, 4, XT_STK_, a13)
STRUCT_FIELD (long, 4, XT_STK_, a14)
STRUCT_FIELD (long, 4, XT_STK_, a15)
STRUCT_FIELD (long, 4, XT_STK_, sar)
STRUCT_FIELD (long, 4, XT_STK_, exccause)
STRUCT_FIELD (long, 4, XT_STK_, excvaddr)
STRUCT_FIELD(long, 4, XT_STK_, exit) /* exit point for dispatch */
STRUCT_FIELD(long, 4, XT_STK_, pc) /* return PC */
STRUCT_FIELD(long, 4, XT_STK_, ps) /* return PS */
STRUCT_FIELD(long, 4, XT_STK_, a0)
STRUCT_FIELD(long, 4, XT_STK_, a1) /* stack pointer before irq */
STRUCT_FIELD(long, 4, XT_STK_, a2)
STRUCT_FIELD(long, 4, XT_STK_, a3)
STRUCT_FIELD(long, 4, XT_STK_, a4)
STRUCT_FIELD(long, 4, XT_STK_, a5)
STRUCT_FIELD(long, 4, XT_STK_, a6)
STRUCT_FIELD(long, 4, XT_STK_, a7)
STRUCT_FIELD(long, 4, XT_STK_, a8)
STRUCT_FIELD(long, 4, XT_STK_, a9)
STRUCT_FIELD(long, 4, XT_STK_, a10)
STRUCT_FIELD(long, 4, XT_STK_, a11)
STRUCT_FIELD(long, 4, XT_STK_, a12)
STRUCT_FIELD(long, 4, XT_STK_, a13)
STRUCT_FIELD(long, 4, XT_STK_, a14)
STRUCT_FIELD(long, 4, XT_STK_, a15)
STRUCT_FIELD(long, 4, XT_STK_, sar)
STRUCT_FIELD(long, 4, XT_STK_, exccause)
STRUCT_FIELD(long, 4, XT_STK_, excvaddr)
#if XCHAL_HAVE_LOOPS
STRUCT_FIELD (long, 4, XT_STK_, lbeg)
STRUCT_FIELD (long, 4, XT_STK_, lend)
STRUCT_FIELD (long, 4, XT_STK_, lcount)
STRUCT_FIELD(long, 4, XT_STK_, lbeg)
STRUCT_FIELD(long, 4, XT_STK_, lend)
STRUCT_FIELD(long, 4, XT_STK_, lcount)
#endif
#ifndef __XTENSA_CALL0_ABI__
/* Temporary space for saving stuff during window spill */
STRUCT_FIELD (long, 4, XT_STK_, tmp0)
STRUCT_FIELD (long, 4, XT_STK_, tmp1)
STRUCT_FIELD (long, 4, XT_STK_, tmp2)
STRUCT_FIELD(long, 4, XT_STK_, tmp0)
STRUCT_FIELD(long, 4, XT_STK_, tmp1)
STRUCT_FIELD(long, 4, XT_STK_, tmp2)
#endif
#ifdef XT_USE_SWPRI
/* Storage for virtual priority mask */
STRUCT_FIELD (long, 4, XT_STK_, vpri)
STRUCT_FIELD(long, 4, XT_STK_, vpri)
#endif
#ifdef XT_USE_OVLY
/* Storage for overlay state */
STRUCT_FIELD (long, 4, XT_STK_, ovly)
STRUCT_FIELD(long, 4, XT_STK_, ovly)
#endif
STRUCT_END(XtExcFrame)
@ -111,63 +107,61 @@ STRUCT_END(XtExcFrame)
#define XT_STK_EXTRA ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1)
#else
/* If need more alignment than stack, add space for dynamic alignment */
#define XT_STK_EXTRA (ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1) + XCHAL_EXTRA_SA_ALIGN)
#define XT_STK_EXTRA (ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1) \
+ XCHAL_EXTRA_SA_ALIGN)
#endif
#define XT_STK_NEXT2 (XT_STK_EXTRA + XCHAL_EXTRA_SA_SIZE)
#else
#define XT_STK_NEXT2 XT_STK_NEXT1
#define XT_STK_NEXT2 XT_STK_NEXT1
#endif
/*
-------------------------------------------------------------------------------
This is the frame size. Add space for 4 registers (interruptee's base save
area) and some space for gcc nested functions if any.
-------------------------------------------------------------------------------
*/
* This is the frame size. Add space for 4 registers (interruptee's base save
* area) and some space for gcc nested functions if any.
*/
#define XT_STK_FRMSZ (ALIGNUP(0x10, XT_STK_NEXT2) + 0x20)
/*
-------------------------------------------------------------------------------
SOLICITED STACK FRAME FOR A THREAD
A stack frame of this structure is allocated whenever a thread enters the
RTOS kernel intentionally (and synchronously) to submit to thread scheduling.
It goes on the current thread's stack.
The solicited frame only includes registers that are required to be preserved
by the callee according to the compiler's ABI conventions, some space to save
the return address for returning to the caller, and the caller's PS register.
For Windowed ABI, this stack frame includes the caller's base save area.
Note on XT_SOL_EXIT field:
It is necessary to distinguish a solicited from an interrupt stack frame.
This field corresponds to XT_STK_EXIT in the interrupt stack frame and is
always at the same offset (0). It can be written with a code (usually 0)
to distinguish a solicted frame from an interrupt frame. An RTOS port may
opt to ignore this field if it has another way of distinguishing frames.
-------------------------------------------------------------------------------
*/
* SOLICITED STACK FRAME FOR A THREAD
*
* A stack frame of this structure is allocated whenever a thread enters the
* RTOS kernel intentionally (and synchronously) to submit to thread
* scheduling. It goes on the current thread's stack.
*
* The solicited frame only includes registers that are required to be
* preserved by the callee according to the compiler's ABI conventions, some
* space to save the return address for returning to the caller, and the
* caller's PS register. For Windowed ABI, this stack frame includes the
* caller's base save area.
*
* Note on XT_SOL_EXIT field:
*
* It is necessary to distinguish a solicited from an interrupt stack frame.
* This field corresponds to XT_STK_EXIT in the interrupt stack frame and is
* always at the same offset (0). It can be written with a code (usually 0) to
* distinguish a solicited frame from an interrupt frame. An RTOS port may opt
* to ignore this field if it has another way of distinguishing frames.
*/
STRUCT_BEGIN
STRUCT_FIELD (long, 4, XT_SOL_, exit)
STRUCT_FIELD (long, 4, XT_SOL_, pc)
STRUCT_FIELD (long, 4, XT_SOL_, ps)
STRUCT_FIELD (long, 4, XT_SOL_, next)
STRUCT_FIELD(long, 4, XT_SOL_, exit)
STRUCT_FIELD(long, 4, XT_SOL_, pc)
STRUCT_FIELD(long, 4, XT_SOL_, ps)
STRUCT_FIELD(long, 4, XT_SOL_, next)
#ifdef __XTENSA_CALL0_ABI__
STRUCT_FIELD (long, 4, XT_SOL_, a12) /* should be on 16-byte alignment */
STRUCT_FIELD (long, 4, XT_SOL_, a13)
STRUCT_FIELD (long, 4, XT_SOL_, a14)
STRUCT_FIELD (long, 4, XT_SOL_, a15)
STRUCT_FIELD(long, 4, XT_SOL_, a12) /* should be on 16-byte alignment */
STRUCT_FIELD(long, 4, XT_SOL_, a13)
STRUCT_FIELD(long, 4, XT_SOL_, a14)
STRUCT_FIELD(long, 4, XT_SOL_, a15)
#else
STRUCT_FIELD (long, 4, XT_SOL_, a0) /* should be on 16-byte alignment */
STRUCT_FIELD (long, 4, XT_SOL_, a1)
STRUCT_FIELD (long, 4, XT_SOL_, a2)
STRUCT_FIELD (long, 4, XT_SOL_, a3)
STRUCT_FIELD(long, 4, XT_SOL_, a0) /* should be on 16-byte alignment */
STRUCT_FIELD(long, 4, XT_SOL_, a1)
STRUCT_FIELD(long, 4, XT_SOL_, a2)
STRUCT_FIELD(long, 4, XT_SOL_, a3)
#endif
STRUCT_END(XtSolFrame)
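
For illustration, since XT_SOL_exit and XT_STK_exit share offset 0, C
code holding a saved stack pointer could classify a frame roughly like
this, assuming the port writes 0 into solicited frames as described:

static inline int frame_is_solicited(const long *frame_base)
{
    return frame_base[0] == 0;    /* exit word: 0 => solicited */
}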
@ -176,66 +170,68 @@ STRUCT_END(XtSolFrame)
/*
-------------------------------------------------------------------------------
CO-PROCESSOR STATE SAVE AREA FOR A THREAD
The RTOS must provide an area per thread to save the state of co-processors
when that thread does not have control. Co-processors are context-switched
lazily (on demand) only when a new thread uses a co-processor instruction,
otherwise a thread retains ownership of the co-processor even when it loses
control of the processor. An Xtensa co-processor exception is triggered when
any co-processor instruction is executed by a thread that is not the owner,
and the context switch of that co-processor is then peformed by the handler.
Ownership represents which thread's state is currently in the co-processor.
Co-processors may not be used by interrupt or exception handlers. If an
co-processor instruction is executed by an interrupt or exception handler,
the co-processor exception handler will trigger a kernel panic and freeze.
This restriction is introduced to reduce the overhead of saving and restoring
co-processor state (which can be quite large) and in particular remove that
overhead from interrupt handlers.
The co-processor state save area may be in any convenient per-thread location
such as in the thread control block or above the thread stack area. It need
not be in the interrupt stack frame since interrupts don't use co-processors.
Along with the save area for each co-processor, two bitmasks with flags per
co-processor (laid out as in the CPENABLE reg) help manage context-switching
co-processors as efficiently as possible:
XT_CPENABLE
The contents of a non-running thread's CPENABLE register.
It represents the co-processors owned (and whose state is still needed)
by the thread. When a thread is preempted, its CPENABLE is saved here.
When a thread solicits a context-swtich, its CPENABLE is cleared - the
compiler has saved the (caller-saved) co-proc state if it needs to.
When a non-running thread loses ownership of a CP, its bit is cleared.
When a thread runs, it's XT_CPENABLE is loaded into the CPENABLE reg.
Avoids co-processor exceptions when no change of ownership is needed.
XT_CPSTORED
A bitmask with the same layout as CPENABLE, a bit per co-processor.
Indicates whether the state of each co-processor is saved in the state
save area. When a thread enters the kernel, only the state of co-procs
still enabled in CPENABLE is saved. When the co-processor exception
handler assigns ownership of a co-processor to a thread, it restores
the saved state only if this bit is set, and clears this bit.
XT_CP_CS_ST
A bitmask with the same layout as CPENABLE, a bit per co-processor.
Indicates whether callee-saved state is saved in the state save area.
Callee-saved state is saved by itself on a solicited context switch,
and restored when needed by the coprocessor exception handler.
Unsolicited switches will cause the entire coprocessor to be saved
when necessary.
XT_CP_ASA
Pointer to the aligned save area. Allows it to be aligned more than
the overall save area (which might only be stack-aligned or TCB-aligned).
Especially relevant for Xtensa cores configured with a very large data
path that requires alignment greater than 16 bytes (ABI stack alignment).
-------------------------------------------------------------------------------
*/
* CO-PROCESSOR STATE SAVE AREA FOR A THREAD
*
* The RTOS must provide an area per thread to save the state of co-processors
* when that thread does not have control. Co-processors are context-switched
* lazily (on demand) only when a new thread uses a co-processor instruction,
* otherwise a thread retains ownership of the co-processor even when it loses
* control of the processor. An Xtensa co-processor exception is triggered when
* any co-processor instruction is executed by a thread that is not the owner,
* and the context switch of that co-processor is then performed by the handler.
* Ownership represents which thread's state is currently in the co-processor.
*
* Co-processors may not be used by interrupt or exception handlers. If a
* co-processor instruction is executed by an interrupt or exception handler,
* the co-processor exception handler will trigger a kernel panic and freeze.
* This restriction is introduced to reduce the overhead of saving and
* restoring co-processor state (which can be quite large) and in particular
* remove that overhead from interrupt handlers.
*
* The co-processor state save area may be in any convenient per-thread
* location such as in the thread control block or above the thread stack area.
* It need not be in the interrupt stack frame since interrupts don't use
* co-processors.
*
* Along with the save area for each co-processor, two bitmasks with flags per
* co-processor (laid out as in the CPENABLE reg) help manage context-switching
* co-processors as efficiently as possible:
*
* XT_CPENABLE
*
* The contents of a non-running thread's CPENABLE register. It represents the
* co-processors owned (and whose state is still needed) by the thread. When a
* thread is preempted, its CPENABLE is saved here. When a thread solicits a
* context-switch, its CPENABLE is cleared - the compiler has saved the
* (caller-saved) co-proc state if it needs to. When a non-running thread
* loses ownership of a CP, its bit is cleared. When a thread runs, its
* XT_CPENABLE is loaded into the CPENABLE reg. Avoids co-processor exceptions
* when no change of ownership is needed.
*
* XT_CPSTORED
*
* A bitmask with the same layout as CPENABLE, a bit per co-processor.
* Indicates whether the state of each co-processor is saved in the state save
* area. When a thread enters the kernel, only the state of co-procs still
* enabled in CPENABLE is saved. When the co-processor exception handler
* assigns ownership of a co-processor to a thread, it restores the saved state
* only if this bit is set, and clears this bit.
*
* XT_CP_CS_ST
*
* A bitmask with the same layout as CPENABLE, a bit per co-processor.
* Indicates whether callee-saved state is saved in the state save area.
* Callee-saved state is saved by itself on a solicited context switch, and
* restored when needed by the coprocessor exception handler. Unsolicited
* switches will cause the entire coprocessor to be saved when necessary.
*
* XT_CP_ASA
*
* Pointer to the aligned save area. Allows it to be aligned more than the
* overall save area (which might only be stack-aligned or TCB-aligned).
* Especially relevant for Xtensa cores configured with a very large data path
* that requires alignment greater than 16 bytes (ABI stack alignment).
*/
#if XCHAL_CP_NUM > 0
@ -251,11 +247,20 @@ STRUCT_END(XtSolFrame)
#define XT_CP_SA_SIZE ALIGNUP(16, XT_CP7_SA + XCHAL_CP7_SA_SIZE)
/* Offsets within the overall save area: */
#define XT_CPENABLE 0 /* (2 bytes) coprocessors active for this thread */
#define XT_CPSTORED 2 /* (2 bytes) coprocessors saved for this thread */
#define XT_CP_CS_ST 4 /* (2 bytes) coprocessor callee-saved regs stored for this thread */
#define XT_CP_ASA 8 /* (4 bytes) ptr to aligned save area */
/* Overall size allows for dynamic alignment: */
/* (2 bytes) coprocessors active for this thread */
#define XT_CPENABLE 0
/* (2 bytes) coprocessors saved for this thread */
#define XT_CPSTORED 2
/* (2 bytes) coprocessor callee-saved regs stored for this thread */
#define XT_CP_CS_ST 4
/* (4 bytes) ptr to aligned save area */
#define XT_CP_ASA 8
/* Overall size allows for dynamic alignment: */
#define XT_CP_SIZE ALIGNUP(XCHAL_TOTAL_SA_ALIGN, 12 + XT_CP_SA_SIZE)
#else
#define XT_CP_SIZE 0
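
A hedged sketch of C accessors for the bitmask fields above, assuming
'sa' points at the start of a thread's co-processor save area:

#include <stdint.h>

#if XCHAL_CP_NUM > 0
static inline uint16_t cp_enabled_mask(const char *sa)
{
    return *(const uint16_t *)(sa + XT_CPENABLE);
}

static inline uint16_t cp_stored_mask(const char *sa)
{
    return *(const uint16_t *)(sa + XT_CPSTORED);
}
#endif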
@ -263,46 +268,46 @@ STRUCT_END(XtSolFrame)
/*
-------------------------------------------------------------------------------
MACROS TO HANDLE ABI SPECIFICS OF FUNCTION ENTRY AND RETURN
Convenient where the frame size requirements are the same for both ABIs.
ENTRY(sz), RET(sz) are for framed functions (have locals or make calls).
ENTRY0, RET0 are for frameless functions (no locals, no calls).
where size = size of stack frame in bytes (must be >0 and aligned to 16).
For framed functions the frame is created and the return address saved at
base of frame (Call0 ABI) or as determined by hardware (Windowed ABI).
For frameless functions, there is no frame and return address remains in a0.
Note: Because CPP macros expand to a single line, macros requiring multi-line
expansions are implemented as assembler macros.
-------------------------------------------------------------------------------
*/
* MACROS TO HANDLE ABI SPECIFICS OF FUNCTION ENTRY AND RETURN
*
* Convenient where the frame size requirements are the same for both ABIs.
* ENTRY(sz), RET(sz) are for framed functions (have locals or make calls).
* ENTRY0, RET0 are for frameless functions (no locals, no calls).
*
* where size = size of stack frame in bytes (must be >0 and aligned to 16).
* For framed functions the frame is created and the return address saved at
* base of frame (Call0 ABI) or as determined by hardware (Windowed ABI). For
* frameless functions, there is no frame and return address remains in
* a0.
*
* Note: Because CPP macros expand to a single line, macros requiring
* multi-line expansions are implemented as assembler macros.
*/
#ifdef __ASSEMBLER__
#ifdef __XTENSA_CALL0_ABI__
/* Call0 */
#define ENTRY(sz) entry1 sz
.macro entry1 size=0x10
addi sp, sp, -\size
s32i a0, sp, 0
.endm
#define ENTRY0
#define RET(sz) ret1 sz
.macro ret1 size=0x10
l32i a0, sp, 0
addi sp, sp, \size
ret
.endm
#define RET0 ret
/* Call0 */
#define ENTRY(sz) entry1 sz
.macro entry1 size=0x10
addi sp, sp, -\size
s32i a0, sp, 0
.endm
#define ENTRY0
#define RET(sz) ret1 sz
.macro ret1 size=0x10
l32i a0, sp, 0
addi sp, sp, \size
ret
.endm
#define RET0 ret
#else
/* Windowed */
#define ENTRY(sz) entry sp, sz
#define ENTRY0 entry sp, 0x10
#define RET(sz) retw
#define RET0 retw
#endif
#endif
/* Windowed */
#define ENTRY(sz) entry sp, sz
#define ENTRY0 entry sp, 0x10
#define RET(sz) retw
#define RET0 retw
#endif /* __XTENSA_CALL0_ABI__ */
#endif /* __ASSEMBLER__ */
#endif /* XTENSA_CONTEXT_H */

View file

@ -54,7 +54,8 @@
#ifndef XT_TIMER_INDEX
#if defined configXT_TIMER_INDEX
#define XT_TIMER_INDEX configXT_TIMER_INDEX /* Index of hardware timer to be used */
/* Index of hardware timer to be used */
#define XT_TIMER_INDEX configXT_TIMER_INDEX
#endif
#endif
@ -105,8 +106,7 @@
* Called after minimal context has been saved, with interrupts disabled.
* RTOS port can call0 _xt_context_save to save the rest of the context.
* May only be called from assembly code by the 'call0' instruction.
*/
// void XT_RTOS_INT_ENTER(void)
*/
#define XT_RTOS_INT_ENTER _zxt_int_enter
/*
@ -133,14 +133,14 @@
/*
* Return in a15 the base address of the co-processor state save area for the
* thread that triggered a co-processor exception, or 0 if no thread was
* running.
* The state save area is structured as defined in xtensa_context.h and has size
* XT_CP_SIZE. Co-processor instructions should only be used in thread code,
* never in interrupt handlers or the RTOS kernel. May only be called from
* assembly code and by the 'call0' instruction. A result of 0 indicates an
* unrecoverable error.
* running. The state save area is structured as defined in xtensa_context.h
* and has size XT_CP_SIZE. Co-processor instructions should only be used in
* thread code, never in interrupt handlers or the RTOS kernel. May only be
* called from assembly code and by the 'call0' instruction. A result of 0
* indicates an unrecoverable error.
*
* The implementation may use only a2-4, a15 (all other regs must be preserved).
* The implementation may use only a2-4, a15 (all other regs must be
* preserved).
*/
#define XT_RTOS_CP_STATE _zxt_task_coproc_state
@ -149,28 +149,28 @@
* HOOKS TO DYNAMICALLY INSTALL INTERRUPT AND EXCEPTION HANDLERS PER LEVEL.
*
* This Xtensa RTOS port provides hooks for dynamically installing exception
* and interrupt handlers to facilitate automated testing where each test
* case can install its own handler for user exceptions and each interrupt
* priority (level). This consists of an array of function pointers indexed
* by interrupt priority, with index 0 being the user exception handler hook.
* Each entry in the array is initially 0, and may be replaced by a function
* pointer of type XT_INTEXC_HOOK. A handler may be uninstalled by installing 0.
* and interrupt handlers to facilitate automated testing where each test case
* can install its own handler for user exceptions and each interrupt priority
* (level). This consists of an array of function pointers indexed by interrupt
* priority, with index 0 being the user exception handler hook. Each entry in
* the array is initially 0, and may be replaced by a function pointer of type
* XT_INTEXC_HOOK. A handler may be uninstalled by installing 0.
*
* The handler for low and medium priority obeys ABI conventions so may be coded
* in C. For the exception handler, the cause is the contents of the EXCCAUSE
* reg, and the result is -1 if handled, else the cause (still needs handling).
* For interrupt handlers, the cause is a mask of pending enabled interrupts at
* that level, and the result is the same mask with the bits for the handled
* interrupts cleared (those not cleared still need handling). This allows a
* test case to either pre-handle or override the default handling for the
* exception or interrupt level (see xtensa_vectors.S).
* The handler for low and medium priority obeys ABI conventions so may be
* coded in C. For the exception handler, the cause is the contents of the
* EXCCAUSE reg, and the result is -1 if handled, else the cause (still needs
* handling). For interrupt handlers, the cause is a mask of pending enabled
* interrupts at that level, and the result is the same mask with the bits for
* the handled interrupts cleared (those not cleared still need handling). This
* allows a test case to either pre-handle or override the default handling for
* the exception or interrupt level (see xtensa_vectors.S).
*
* High priority handlers (including NMI) must be coded in assembly, are always
* called by 'call0' regardless of ABI, must preserve all registers except a0,
* and must not use or modify the interrupted stack. The hook argument 'cause'
* is not passed and the result is ignored, so as not to burden the caller with
* saving and restoring a2 (it assumes only one interrupt per level - see the
* discussion in high priority interrupts in xtensa_vectors.S). The handler
* is not passed and the result is ignored, so as not to burden the caller
* with saving and restoring a2 (it assumes only one interrupt per level - see
* the discussion in high priority interrupts in xtensa_vectors.S). The handler
* therefore should be coded to prototype 'void h(void)' even though it plugs
* into an array of handlers of prototype 'unsigned h(unsigned)'.
*
@ -180,7 +180,7 @@
#define XT_INTEXC_HOOK_NUM (1 + XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI)
#ifndef __ASSEMBLER__
typedef unsigned (*XT_INTEXC_HOOK)(unsigned cause);
typedef unsigned int (*XT_INTEXC_HOOK)(unsigned int cause);
extern volatile XT_INTEXC_HOOK _xt_intexc_hooks[XT_INTEXC_HOOK_NUM];
#endif
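For a concrete (hypothetical) usage matching the declarations above, a test
case could install a level-1 interrupt hook and later uninstall it by writing
0; the handler and function names below are illustrative:

static unsigned int test_l1_hook(unsigned int cause)
{
	/* 'cause' is the mask of pending enabled level-1 interrupts;
	 * return the bits left unhandled (none, in this sketch).
	 */
	return 0;
}

void test_install(void)
{
	_xt_intexc_hooks[1] = test_l1_hook;	/* index 0 = exceptions, 1..n = levels */
}

void test_uninstall(void)
{
	_xt_intexc_hooks[1] = 0;		/* uninstall by installing 0 */
}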
@ -188,8 +188,9 @@ extern volatile XT_INTEXC_HOOK _xt_intexc_hooks[XT_INTEXC_HOOK_NUM];
/*
* CONVENIENCE INCLUSIONS.
*
* Ensures RTOS specific files need only include this one Xtensa-generic header.
* These headers are included last so they can use the RTOS definitions above.
* Ensures RTOS specific files need only include this one Xtensa-generic
* header. These headers are included last so they can use the RTOS
* definitions above.
*/
#include "xtensa_context.h"

View file

@ -96,23 +96,20 @@
/*
* Set processor clock frequency, used to determine clock divisor for timer
* tick.
* User should BE SURE TO ADJUST THIS for the Xtensa platform being used.
* If using a supported board via the board-independent API defined in xtbsp.h,
* this may be left undefined and frequency and tick divisor will be computed
* and cached during run-time initialization.
* tick. User should BE SURE TO ADJUST THIS for the Xtensa platform being
* used. If using a supported board via the board-independent API defined in
* xtbsp.h, this may be left undefined and frequency and tick divisor will be
* computed and cached during run-time initialization.
*
* NOTE ON SIMULATOR:
* Under the Xtensa instruction set simulator, the frequency can only be
* estimated
* because it depends on the speed of the host and the version of the simulator.
* Also because it runs much slower than hardware, it is not possible to achieve
* real-time performance for most applications under the simulator. A frequency
* too low does not allow enough time between timer interrupts, starving
* threads.
* To obtain a more convenient but non-real-time tick duration on the simulator,
* compile with xt-xcc option "-DXT_SIMULATOR".
* Adjust this frequency to taste (it's not real-time anyway!).
* NOTE ON SIMULATOR: Under the Xtensa instruction set simulator, the frequency
* can only be estimated because it depends on the speed of the host and the
* version of the simulator. Also because it runs much slower than hardware,
* it is not possible to achieve real-time performance for most applications
* under the simulator. A frequency too low does not allow enough time between
* timer interrupts, starving threads. To obtain a more convenient but
* non-real-time tick duration on the simulator, compile with xt-xcc option
* "-DXT_SIMULATOR". Adjust this frequency to taste (it's not real-time
* anyway!).
*/
#if defined(XT_SIMULATOR) && !defined(XT_CLOCK_FREQ)
#define XT_CLOCK_FREQ CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC
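A minimal sketch for fixed-frequency hardware, assuming a hypothetical 40 MHz
board (the value below is illustrative, not a default from this port):

#ifndef XT_CLOCK_FREQ
#define XT_CLOCK_FREQ	40000000	/* Hz; BE SURE TO ADJUST for the target */
#endif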
@ -142,8 +139,8 @@
#if CONFIG_XTENSA_INTERNAL_TIMER || (CONFIG_XTENSA_TIMER_IRQ < 0)
#ifndef __ASSEMBLER__
extern unsigned _xt_tick_divisor;
extern void _xt_tick_divisor_init(void);
extern unsigned int _xt_tick_divisor;
extern void _xt_tick_divisor_init(void);
#endif
#endif // Internal/External timer
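The expected boot-time flow, sketched under the assumption that the tick
timer is driven from CCOUNT/CCOMPARE0 (the inline asm below is illustrative,
not this port's actual helpers):

void start_tick_timer(void)
{
	unsigned int ccount;

	_xt_tick_divisor_init();	/* compute divisor from the clock frequency */
	__asm__ volatile("rsr %0, ccount" : "=a" (ccount));
	ccount += _xt_tick_divisor;	/* schedule the first tick one period out */
	__asm__ volatile("wsr %0, ccompare0; isync" : : "a" (ccount));
}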

View file

@ -16,6 +16,7 @@
static int console_out(int c)
{
char buf[16];
register int a2 __asm__ ("a2") = SYS_write;
register int a3 __asm__ ("a3") = 1;
register char *a4 __asm__ ("a4") = buf;
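For context, the pinned registers above typically feed the simulator's
simcall interface roughly as sketched below; the byte count in a5 and the
output binding are assumptions abbreviated from the full driver, not part of
this hunk:

register int a5 __asm__ ("a5") = 1;	/* byte count: one character (assumed) */
register int ret __asm__ ("a2");	/* simcall return value comes back in a2 */

buf[0] = (char)c;
__asm__ volatile ("simcall"
		  : "=a" (ret)
		  : "a" (a2), "a" (a3), "a" (a4), "a" (a5));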

View file

@ -72,8 +72,8 @@
#define PERFOPT_ALIGN .balign 4
#elif defined(CONFIG_NIOS2) || defined(CONFIG_RISCV32) || defined(CONFIG_XTENSA)
#elif defined(CONFIG_NIOS2) || defined(CONFIG_RISCV32) || \
defined(CONFIG_XTENSA)
#define PERFOPT_ALIGN .balign 4
#else

View file

@ -136,7 +136,8 @@ A##a:
#if defined(_ASMLANGUAGE) && !defined(_LINKER)
#if defined(CONFIG_ARM) || defined(CONFIG_NIOS2) || defined(CONFIG_RISCV32) || defined(CONFIG_XTENSA)
#if defined(CONFIG_ARM) || defined(CONFIG_NIOS2) || defined(CONFIG_RISCV32) \
|| defined(CONFIG_XTENSA)
#define GTEXT(sym) .global sym; .type sym, %function
#define GDATA(sym) .global sym; .type sym, %object
#define WTEXT(sym) .weak sym; .type sym, %function