xtensa: Add exception/interrupt vectors in asm2 mode

This adds vectors for all interrupt levels defined by core-isa.h.

Modify the entry code a little bit to select correct linker sections
(levels 1, 6 and 7 get special names for... no particularly good
reason) and to construct the interrupted PS value correctly (no EPS1
register for exceptions since they had to have interrupted level 0
code and thus differ only in the EXCM bit).

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
Andy Ross 2017-12-11 15:11:55 -08:00 committed by Anas Nashif
commit bf2139331c
4 changed files with 291 additions and 20 deletions

View file

@ -4,6 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include <xtensa-asm2-s.h>
#include <offsets.h>
/*
* xtensa_save_high_regs
@ -207,3 +208,60 @@ xtensa_switch:
j _restore_context
_switch_restore_pc:
retw
#ifdef CONFIG_XTENSA_ASM2
/* Define our entry handler to load the struct kernel_t from the
* MISC0 special register, and to find the nest and irq_stack values
* at the precomputed offsets.
*/
/* Common entry stub shared by all the vectors below.  The
 * EXCINT_HANDLER macro (from xtensa-asm2-s.h) expands code that loads
 * the struct kernel_t pointer out of the MISC0 special register and
 * locates the nesting counter and the interrupt stack via the two
 * precomputed field offsets passed here.
 */
.align 4
_handle_excint:
EXCINT_HANDLER MISC0, ___kernel_t_nested_OFFSET, ___kernel_t_irq_stack_OFFSET
/* Define the actual vectors for the hardware-defined levels with
* DEF_EXCINT. These load a C handler address and jump to our handler
* above.
*/
/* Level 1 is the architectural exception vector, shared with
 * low-priority interrupts; it therefore gets the combined
 * exception/interrupt C handler rather than a plain interrupt one.
 */
DEF_EXCINT 1, _handle_excint, xtensa_excint1_c

/* Levels 2..7 are emitted only when the core actually implements
 * them; XCHAL_NMILEVEL (from core-isa.h) is the highest level present.
 */
#if XCHAL_NMILEVEL >= 2
DEF_EXCINT 2, _handle_excint, xtensa_int2_c
#endif

#if XCHAL_NMILEVEL >= 3
DEF_EXCINT 3, _handle_excint, xtensa_int3_c
#endif

#if XCHAL_NMILEVEL >= 4
DEF_EXCINT 4, _handle_excint, xtensa_int4_c
#endif

#if XCHAL_NMILEVEL >= 5
DEF_EXCINT 5, _handle_excint, xtensa_int5_c
#endif

#if XCHAL_NMILEVEL >= 6
DEF_EXCINT 6, _handle_excint, xtensa_int6_c
#endif

#if XCHAL_NMILEVEL >= 7
DEF_EXCINT 7, _handle_excint, xtensa_int7_c
#endif

/* In theory you can have levels up to 15, but known hardware only uses 7. */
#if XCHAL_NMILEVEL > 7
#error More interrupts than expected.
#endif
/* We don't actually use "kernel mode" currently. Populate the vector
* out of simple caution in case app code clears the UM bit by mistake.
*/
.pushsection .KernelExceptionVector.text, "ax"
.global _KernelExceptionVector
_KernelExceptionVector:
	/* Exceptions taken with PS.UM clear ("kernel mode") land here;
	 * defensively forward them to the normal level 1 vector since
	 * this mode is not used intentionally.
	 */
	j _Level1Vector
.popsection
#endif /* CONFIG_XTENSA_ASM2 */

View file

@ -3,10 +3,13 @@
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <misc/printk.h>
#include <string.h>
#include <xtensa-asm2.h>
#include <kernel.h>
#include <ksched.h>
#include <kernel_structs.h>
#include <_soc_inthandlers.h>
void *xtensa_init_stack(int *stack_top,
void (*entry)(void *, void *, void *),
@ -49,16 +52,169 @@ void *xtensa_init_stack(int *stack_top,
return &bsa[-9];
}
/* This is a kernel hook, just a wrapper around other APIs. Build
* only if we're using asm2 as the core OS interface and not just as
* utilities/testables.
*/
#ifdef CONFIG_XTENSA_ASM2
/* Kernel hook: prepares @thread to begin execution at @entry(p1, p2,
 * p3) with priority @prio, using @stack/@sz as its stack region.  The
 * initial register frame is built by xtensa_init_stack() and the
 * resulting stack pointer stored as the thread's switch handle.
 */
void _new_thread(struct k_thread *thread, k_thread_stack_t *stack, size_t sz,
		 k_thread_entry_t entry, void *p1, void *p2, void *p3,
		 int prio, unsigned int opts)
{
	char *base = K_THREAD_STACK_BUFFER(stack);
	char *top = base + sz;

	/* Initial SP must be word aligned for the save/restore code */
	__ASSERT((((size_t)top) & 3) == 0, "Misaligned stack");

	_new_thread_init(thread, base, sz, prio, opts);

	/* NOTE(review): two back-to-back assignments to switch_handle;
	 * the first is dead and looks like a merged old/new line pair
	 * from a diff (it also passes char* where int* is expected) --
	 * confirm against the upstream file.
	 */
	thread->switch_handle = xtensa_init_stack(top, entry, p1, p2, p3);
	thread->switch_handle = xtensa_init_stack((void *)top, entry,
						  p1, p2, p3);
}
#endif
#ifdef CONFIG_XTENSA_ASM2
/* Handler of last resort for interrupts with no registered handler:
 * reports the pending-interrupt set and the current enable mask to
 * the console, then escalates to the fatal error handler.
 */
void _irq_spurious(void *arg)
{
	int pending, enabled;

	ARG_UNUSED(arg);

	/* Snapshot the INTERRUPT and INTENABLE special registers for
	 * the diagnostic message.
	 */
	__asm__ volatile("rsr.interrupt %0" : "=r"(pending));
	__asm__ volatile("rsr.intenable %0" : "=r"(enabled));

	printk(" ** Spurious INTERRUPT(s) %p, INTENABLE = %p\n",
	       (void *)pending, (void *)enabled);

	_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
}
#endif
/* Dumps the register context saved on an interrupted stack to the
 * console for debugging.
 *
 * The word at the top of @stack points at the "base save area" (BSA),
 * which holds A0-A3, PC, PS, SAR (and the loop registers on cores
 * that have them) at the BSA_*_OFF offsets.  Additional registers
 * A4-A15 sit below the BSA in groups of four, addressed here with
 * negative indices; the (bsa - stack) distance indicates how many
 * such quads were actually saved -- presumably spilled by the asm2
 * entry code, confirm against the save-area layout in xtensa-asm2-s.h.
 */
static void dump_stack(int *stack)
{
	int *bsa = *(int **)stack;

	/* bsa + BASE_SAVE_AREA_SIZE reconstructs the interrupted SP */
	printk(" ** A0 %p SP %p A2 %p A3 %p\n",
	       (void *)bsa[BSA_A0_OFF/4], ((char *)bsa) + BASE_SAVE_AREA_SIZE,
	       (void *)bsa[BSA_A2_OFF/4], (void *)bsa[BSA_A3_OFF/4]);

	if (bsa - stack > 4) {
		printk(" ** A4 %p A5 %p A6 %p A7 %p\n",
		       (void *)bsa[-4], (void *)bsa[-3],
		       (void *)bsa[-2], (void *)bsa[-1]);
	}

	if (bsa - stack > 8) {
		printk(" ** A8 %p A9 %p A10 %p A11 %p\n",
		       (void *)bsa[-8], (void *)bsa[-7],
		       (void *)bsa[-6], (void *)bsa[-5]);
	}

	if (bsa - stack > 12) {
		printk(" ** A12 %p A13 %p A14 %p A15 %p\n",
		       (void *)bsa[-12], (void *)bsa[-11],
		       (void *)bsa[-10], (void *)bsa[-9]);
	}

#if XCHAL_HAVE_LOOPS
	/* Zero-overhead loop state only exists on cores with loops */
	printk(" ** LBEG %p LEND %p LCOUNT %p\n",
	       (void *)bsa[BSA_LBEG_OFF/4],
	       (void *)bsa[BSA_LEND_OFF/4],
	       (void *)bsa[BSA_LCOUNT_OFF/4]);
#endif

	printk(" ** SAR %p\n", (void *)bsa[BSA_SAR_OFF/4]);
}
/* Use #ifdef for consistency with the other guards in this file:
 * Kconfig booleans are simply undefined when disabled, and "#if" on
 * an undefined macro provokes -Wundef warnings.
 */
#ifdef CONFIG_XTENSA_ASM2
/* Scheduling point on the way out of an interrupt or exception.
 * Records @interrupted_stack as the outgoing thread's switch handle,
 * selects the next ready thread, and returns that thread's saved
 * stack pointer for the assembly exit path to restore.  The _kernel
 * update is done under an interrupt lock.
 */
static inline void *restore_stack(void *interrupted_stack)
{
	int key = irq_lock();

	_kernel.current->switch_handle = interrupted_stack;
	_kernel.current = _get_next_ready_thread();

	void *ret = _kernel.current->switch_handle;

	irq_unlock(key);

	return ret;
}
#endif
/* The wrapper code lives here instead of in the python script that
* generates _xtensa_handle_one_int*(). Seems cleaner, still kind of
* ugly.
*/
/* Defines xtensa_int<l>_c(): the C-level handler for interrupt level
 * <l>.  It snapshots the pending-interrupt mask from the INTERRUPT
 * special register, dispatches each pending line through the
 * generated _xtensa_handle_one_int<l>() selector (acknowledging it
 * via INTCLEAR and clearing it from the local mask) until none
 * remain, then returns the stack to resume via restore_stack().
 */
#define DEF_INT_C_HANDLER(l) \
void *xtensa_int##l##_c(void *interrupted_stack) \
{ \
	int irqs, m; \
	__asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
	\
	while ((m = _xtensa_handle_one_int##l(irqs))) { \
		irqs ^= m; \
		__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
	} \
	return restore_stack(interrupted_stack); \
}

/* One handler per hardware interrupt level.  NOTE(review): these are
 * instantiated unconditionally even though the matching vectors are
 * guarded by XCHAL_NMILEVEL -- confirm the generated
 * _soc_inthandlers.h defines selectors for every level.
 */
DEF_INT_C_HANDLER(2)
DEF_INT_C_HANDLER(3)
DEF_INT_C_HANDLER(4)
DEF_INT_C_HANDLER(5)
DEF_INT_C_HANDLER(6)
DEF_INT_C_HANDLER(7)

/* Level 1 is declared inline: it is invoked from xtensa_excint1_c()
 * below rather than hooked directly to a vector.
 */
static inline DEF_INT_C_HANDLER(1)
/* C handler for level 1 exceptions/interrupts. Hooked from the
* DEF_EXCINT 1 vector declaration in assembly code. This one looks
* different because exceptions and interrupts land at the same
* vector; other interrupt levels have their own vectors.
*/
/* Combined C handler for level 1, where exceptions and low-priority
 * interrupts share one vector.  Dispatches on the EXCCAUSE special
 * register: level 1 interrupts are forwarded to xtensa_int1_c(),
 * SYSCALL is logged and skipped, and anything else is treated as a
 * fatal exception.
 *
 * @param interrupted_stack stack pointer of the interrupted context;
 *        the word at its top points at the base save area (BSA).
 * @return stack pointer of the context to restore (possibly a
 *         different thread's, via restore_stack()).
 */
void *xtensa_excint1_c(int *interrupted_stack)
{
	int cause, vaddr, *bsa = *(int **)interrupted_stack;

	__asm__ volatile("rsr.exccause %0" : "=r"(cause));

	if (cause == EXCCAUSE_LEVEL1_INTERRUPT) {
		/* Not an exception at all: a level 1 interrupt arrived
		 * through the shared vector.
		 */
		return xtensa_int1_c(interrupted_stack);
	} else if (cause == EXCCAUSE_SYSCALL) {
		/* Just report it to the console for now */
		printk(" ** SYSCALL PS %p PC %p\n",
		       (void *)bsa[BSA_PS_OFF/4], (void *)bsa[BSA_PC_OFF/4]);
		dump_stack(interrupted_stack);

		/* Xtensa exceptions don't automatically advance PC,
		 * have to skip the SYSCALL instruction manually or
		 * else it will just loop forever (SYSCALL is a 3-byte
		 * opcode, hence += 3)
		 */
		bsa[BSA_PC_OFF/4] += 3;
	} else {
		/* EXCVADDR holds the faulting address for memory
		 * exceptions; read it only on this path.
		 */
		__asm__ volatile("rsr.excvaddr %0" : "=r"(vaddr));

		/* Wouldn't hurt to translate EXCCAUSE to a string for
		 * the user...
		 */
		printk(" ** FATAL EXCEPTION\n");
		printk(" ** EXCCAUSE %d PS %p PC %p VADDR %p\n",
		       cause, (void *)bsa[BSA_PS_OFF/4],
		       (void *)bsa[BSA_PC_OFF/4], (void *)vaddr);
		dump_stack(interrupted_stack);

		/* FIXME: legacy xtensa port reported "HW" exception
		 * for all unhandled exceptions, which seems incorrect
		 * as these are software errors.  Should clean this
		 * up.
		 */
		_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
	}

	return restore_stack(interrupted_stack);
}

View file

@ -26,6 +26,8 @@ extern void ReservedInterruptHandler(unsigned int intNo);
/* Defined in xtensa_context.S */
extern void _xt_coproc_init(void);
extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
/**
*
* @brief Performs architecture-specific initialization
@ -39,7 +41,23 @@ extern void _xt_coproc_init(void);
static ALWAYS_INLINE void kernel_arch_init(void)
{
_kernel.nested = 0;
#if XCHAL_CP_NUM > 0
#if CONFIG_XTENSA_ASM2
_kernel.irq_stack = (K_THREAD_STACK_BUFFER(_interrupt_stack) +
CONFIG_ISR_STACK_SIZE);
/* The asm2 scheme keeps the kernel pointer in MISC0 for easy
* access. That saves 4 bytes of immediate value to store the
* address when compared to the legacy scheme. But in SMP
* this record is a per-CPU thing and having it stored in a SR
* already is a big win.
*/
void *cpuptr = &_kernel;
__asm__ volatile("wsr.MISC0 %0; rsync" : : "r"(cpuptr));
#endif
#if !defined(CONFIG_XTENSA_ASM2) && XCHAL_CP_NUM > 0
/* Initialize co-processor management for threads.
* Leave CPENABLE alone.
*/

View file

@ -231,12 +231,34 @@ _xstack_returned_\@:
l32i a2, a1, 0
l32i a2, a2, BSA_SCRATCH_OFF
/* There's a gotcha with level 1 handlers: the INTLEVEL field
* gets left at zero and not set like high priority interrupts
* do. That works fine for exceptions, but for L1 interrupts,
* when we unmask EXCM below, the CPU will just fire the
* interrupt again and get stuck in a loop blasting save
* frames down the stack to the bottom of memory. It would be
* good to put this code into the L1 handler only, but there's
* not enough room in the vector without some work there to
* squash it some. Next choice would be to make this a macro
* argument and expand two versions of this handler. An
* optimization FIXME, I guess.
*/
rsr.PS a0
movi a3, PS_INTLEVEL_MASK
and a0, a0, a3
bnez a0, _not_l1
rsr.PS a0
movi a3, PS_INTLEVEL(1)
or a0, a0, a3
wsr.PS a0
_not_l1:
/* Unmask EXCM bit so C code can spill/fill in window
* exceptions. Note interrupts are already fully masked by
* INTLEVEL, so this is safe.
*/
rsr.PS a0
movi a3, ~16
movi a3, ~(PS_EXCM_MASK)
and a0, a0, a3
wsr.PS a0
rsync
@ -297,17 +319,21 @@ _restore_\@:
* entry code (defined via EXCINT_HANDLER) and a C handler for this
* particular level.
*
* FIXME: needs special handling for exceptions (level 1): it's "EPC"
* and not "EPC1" (though IIRC the assembler makes this work).
* And there is no EPS: instead PS is simply the interrupted PS
* with EXCM flipped from 0 to 1.
*
* FIXME: needs better locking. The hardware will NOT mask out "high
* priority" exceptions on arrival here, so we have to do it ourselves
* with RSIL.
* Note that the linker sections for some levels get special names for
* no particularly good reason. Only level 1 has any code generation
* difference, because it is the legacy exception level that predates
* the EPS/EPC registers.
*/
.macro DEF_EXCINT LVL, ENTRY_SYM, C_HANDLER_SYM
.if \LVL == 1
.pushsection .UserExceptionVector.text, "ax"
.elseif \LVL == XCHAL_DEBUGLEVEL
.pushsection .DebugExceptionVector.text, "ax"
.elseif \LVL == XCHAL_NMILEVEL
.pushsection .NMIExceptionVector.text, "ax"
.else
.pushsection .Level\LVL\()InterruptVector.text, "ax"
.endif
.global _Level\LVL\()Vector
_Level\LVL\()Vector:
addi a1, a1, -BASE_SAVE_AREA_SIZE
@ -315,8 +341,21 @@ _Level\LVL\()Vector:
s32i a2, a1, BSA_A2_OFF
s32i a3, a1, BSA_A3_OFF
/* Level "1" is the exception handler, which uses a different
* calling convention. No special register holds the
* interrupted PS, instead we just assume that the CPU has
* turned on the EXCM bit and set INTLEVEL.
*/
.if \LVL == 1
rsr.PS a0
movi a2, ~(PS_EXCM_MASK | PS_INTLEVEL_MASK)
and a0, a0, a2
s32i a0, a1, BSA_PS_OFF
.else
rsr.EPS\LVL a0
s32i a0, a1, BSA_PS_OFF
.endif
rsr.EPC\LVL a0
s32i a0, a1, BSA_PC_OFF