nios2: implement irq_offload()

For this implementation, a non-NULL value in the global
_offload_routine tells the exception code to enter the IRQ
handling path even if no bits are set in ipending. The 'trap'
instruction gets us into the exception handling code.

Change-Id: Iac96adba0eaf24b54ac28678a31c26517867a4d2
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie 2016-06-23 13:49:50 -07:00
commit d4a209d484
4 changed files with 55 additions and 4 deletions
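
For readers unfamiliar with the API: irq_offload() lets thread-level code (typically test cases) run a routine synchronously in interrupt context. A minimal, hypothetical usage sketch follows; the names are invented for illustration, and only irq_offload() and its signature come from this patch:

#include <irq_offload.h>

static volatile int offload_ran;

static void offload_handler(void *param)
{
        /* Executes in the exception/IRQ path entered via the 'trap' instruction */
        offload_ran = (int)(long)param;
}

void offload_demo(void)
{
        irq_offload(offload_handler, (void *)1);
        /* irq_offload() returns only after offload_handler has run */
}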

View file

@@ -27,6 +27,10 @@ GTEXT(_exception_enter_fault)
 GTEXT(_Fault)
 GTEXT(_Swap)
 GTEXT(_exception_try_muldiv)
+#ifdef CONFIG_IRQ_OFFLOAD
+GTEXT(_irq_do_offload)
+GTEXT(_offload_routine)
+#endif
 
 /* Allows use of r1/at register, otherwise reserved for assembler use */
 .set noat
@@ -89,6 +93,7 @@ SECTION_FUNC(exception.entry, _exception)
        andi r15, et, 1
        beq r15, zero, not_interrupt
 
+BRANCH_LABEL(is_interrupt)
        /* If we get here, this is an interrupt */
 
        /* Grab a reference to _nanokernel in r10 so we can determine the
@@ -166,8 +171,6 @@ BRANCH_LABEL(on_irq_stack)
 BRANCH_LABEL(not_interrupt)
 
-#if 0 /* TODO enable multiply / divide exception handing */
        /* Since this wasn't an interrupt we're not going to restart the
         * faulting instruction. If it's an unimplemented math instruction,
         * the muldiv code will handle it, else we just give up and _Fault.
@@ -176,6 +179,17 @@ BRANCH_LABEL(not_interrupt)
         */
        stw ea, 72(sp)
 
+#ifdef CONFIG_IRQ_OFFLOAD
+       /* Check the contents of _offload_routine. If non-NULL, jump into
+        * the interrupt code anyway.
+        */
+       movhi r10, %hi(_offload_routine)
+       ori r10, r10, %lo(_offload_routine)
+       ldw r11, (r10)
+       bne r11, zero, is_interrupt
+#endif
+
+#if 0 /* TODO: implement multiply/divide handling */
        /* Could be an unimplemented instruction we have to emulate.
         * Smaller Nios II cores don't have multiply or divide instructions.
         * This code comes back to either _exception_enter_fault or

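In C terms, the check added to the not_interrupt path above amounts to roughly the following. This is an illustrative model, not code from the tree: the typedef mirrors what irq_offload.h presumably provides, and 'ipending' stands in for the hardware's own indication that an interrupt is pending.

#include <stdbool.h>
#include <stdint.h>

typedef void (*irq_offload_routine_t)(void *parameter);

extern volatile irq_offload_routine_t _offload_routine;

/* Take the interrupt path if a hardware IRQ is pending, or if
 * irq_offload() has armed a routine (the movhi/ori/ldw/bne sequence
 * added above).
 */
static bool should_take_irq_path(uint32_t ipending)
{
#ifdef CONFIG_IRQ_OFFLOAD
        if (_offload_routine != NULL) {
                return true;    /* branch to is_interrupt even with ipending == 0 */
        }
#endif
        return ipending != 0;
}
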
View file

@@ -95,6 +95,10 @@ void _enter_irq(uint32_t ipending)
 {
        int index;
 
+#ifdef CONFIG_IRQ_OFFLOAD
+       _irq_do_offload();
+#endif
+
        while (ipending) {
                _IsrTableEntry_t *ite;

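Note that _irq_do_offload() runs unconditionally on every interrupt entry and is a no-op unless irq_offload() has armed a routine. A rough sketch of the resulting dispatch order (hypothetical function name, ISR lookup abbreviated):

#include <stdint.h>

void _irq_do_offload(void);     /* implemented in the new irq_offload.c below */

/* When entered via 'trap' with ipending == 0, only the offload hook runs;
 * otherwise each pending hardware IRQ line is dispatched afterwards.
 */
void _enter_irq_sketch(uint32_t ipending)
{
#ifdef CONFIG_IRQ_OFFLOAD
        _irq_do_offload();              /* returns immediately if nothing is armed */
#endif

        while (ipending) {
                int index = __builtin_ctz(ipending);   /* lowest pending line */

                /* ... look up the ISR registered for 'index' and call it ... */

                ipending &= ~(1u << index);             /* mark that line serviced */
        }
}
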
View file

@@ -18,8 +18,37 @@
 #include <nano_private.h>
 #include <irq_offload.h>
 
-void irq_offload(irq_offload_routine_t routine, void *parameter)
+volatile irq_offload_routine_t _offload_routine;
+static volatile void *offload_param;
+
+/* Called by _enter_irq if it was passed 0 for ipending.
+ * Just in case the offload routine itself generates an unhandled
+ * exception, clear the offload_routine global before executing.
+ */
+void _irq_do_offload(void)
 {
-       /* STUB */
+       irq_offload_routine_t tmp;
+
+       if (!_offload_routine) {
+               return;
+       }
+
+       tmp = _offload_routine;
+       _offload_routine = NULL;
+       tmp((void *)offload_param);
+}
+
+void irq_offload(irq_offload_routine_t routine, void *parameter)
+{
+       int key;
+
+       key = irq_lock();
+       _offload_routine = routine;
+       offload_param = parameter;
+       __asm__ volatile ("trap");
+       irq_unlock(key);
 }

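Two details of the implementation above are worth noting. _irq_do_offload() copies _offload_routine into tmp and clears the global before invoking it, so if the offloaded routine itself takes an unhandled exception the offload is not re-entered. And because irq_offload() arms the globals and issues 'trap' with interrupts locked, only one offload can be in flight at a time and the call is synchronous: the routine has already run in interrupt context by the time irq_unlock() is reached.
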
View file

@@ -197,6 +197,10 @@ static ALWAYS_INLINE int _IS_IN_ISR(void)
        return 1;
 }
 
+#ifdef CONFIG_IRQ_OFFLOAD
+void _irq_do_offload(void);
+#endif
+
 #endif /* _ASMLANGUAGE */
 
 #endif /* _NANO_PRIVATE_H */