First commit

Signed-off-by: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
Inaky Perez-Gonzalez 2015-04-10 16:44:37 -07:00
commit 8ddf82cf70
1063 changed files with 163901 additions and 0 deletions

arch/arm/core/atomic.s (new file)
@@ -0,0 +1,429 @@
/* armAtomic.s - ARM atomic operations library */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This library provides routines to perform a number of atomic operations
on a memory location: get, set, clear, add, subtract, increment, decrement,
bitwise OR, bitwise XOR, bitwise AND, bitwise NAND, and compare-and-swap.
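
For illustration only, a typical call sequence from C might look like the
following sketch (the counter variable is hypothetical, and atomic_t is assumed
to be initializable to zero; the prototypes are the ones documented with each
routine below):

    atomic_t count = 0;
    atomic_val_t old;

    old = atomic_inc(&count);        /@ returns the value before the increment @/
    (void) atomic_add(&count, 5);    /@ count += 5, atomically @/
    if (atomic_cas(&count, 6, 0)) {
        /@ swap succeeded: count was 6 and is now 0 @/
    }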
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
_ASM_FILE_PROLOGUE
/* exports */
GTEXT(atomic_set)
GTEXT(atomic_get)
GTEXT(atomic_add)
GTEXT(atomic_nand)
GTEXT(atomic_and)
GTEXT(atomic_or)
GTEXT(atomic_xor)
GTEXT(atomic_clear)
GTEXT(atomic_dec)
GTEXT(atomic_inc)
GTEXT(atomic_sub)
GTEXT(atomic_cas)
/*******************************************************************************
*
* atomic_clear - atomically clear a memory location
*
* This routine atomically clears the contents of <target> and returns the old
* value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to clear @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear)
MOV r1, #0
/* fall through into atomic_set */
/*******************************************************************************
*
* atomic_set - atomically set a memory location
*
* This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
LDREX r2, [r0] /* load old value and mark exclusive access */
STREX r12, r1, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_set /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/******************************************************************************
*
* atomic_get - atomically get the value of a memory location
*
* This routine atomically retrieves the value in *target
*
* long atomic_get
* (
* atomic_t * target /@ address of atom to be retrieved @/
* )
*
* RETURNS: value read from address target.
*
*/
SECTION_FUNC(TEXT, atomic_get)
LDR r0, [r0]
MOV pc, lr
/*******************************************************************************
*
* atomic_inc - atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_inc
* (
* atomic_t *target, /@ memory location to increment @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc)
MOV r1, #1
/* fall through into atomic_add */
/*******************************************************************************
*
* atomic_add - atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_add
* (
* atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
LDREX r2, [r0] /* load old value and mark exclusive access */
ADD r3, r2, r1 /* add word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_add /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_dec - atomically decrement a memory location
*
* This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_dec
* (
* atomic_t *target, /@ memory location to decrement @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_dec)
MOV r1, #1
/* fall through into atomic_sub */
/*******************************************************************************
*
* atomic_sub - atomically subtract a value from a memory location
*
* This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_sub
* (
* atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub)
LDREX r2, [r0] /* load old value and mark exclusive access */
SUB r3, r2, r1 /* subtract word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_sub /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/******************************************************************************
*
* atomic_nand - atomically perform a bitwise NAND on a memory location
*
* This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_nand
* (
* atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/
* )
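*
* Expressed in C (illustrative), the value stored back into <target> is:
*
*     *target = ~(*target & value);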
*
*/
SECTION_FUNC(TEXT, atomic_nand)
LDREX r2, [r0] /* load old value and mark exclusive access */
AND r3, r2, r1 /* AND word */
MVN r3, r3 /* invert */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_nand /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/******************************************************************************
*
* atomic_and - atomically perform a bitwise AND on a memory location
*
* This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_and)
LDREX r2, [r0] /* load old value and mark exclusive access */
AND r3, r2, r1 /* AND word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_and /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_or - atomically perform a bitwise OR on memory location
*
* This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_or)
LDREX r2, [r0] /* load old value and mark exclusive access */
ORR r3, r2, r1 /* OR word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_or /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_xor - atomically perform a bitwise XOR on a memory location
*
* This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_xor)
LDREX r2, [r0] /* load old value and mark exclusive access */
EOR r3, r2, r1 /* XOR word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_xor /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/*******************************************************************************
*
* atomic_cas - atomically compare-and-swap the contents of a memory location
*
* This routine performs an atomic compare-and-swap, testing that the contents of
* <target> contains <oldValue>, and if it does, setting the value of <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* RETURNS: 1 if the swap is actually executed, 0 otherwise.
*
* ERRNO: N/A
*
* int atomic_cas
* (
* atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/
* )
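*
* e.g. a lock-free read-modify-write loop built on atomic_cas() (illustrative
* only; <counter> and the doubling update are hypothetical):
*
*     atomic_val_t old, new;
*     do {
*         old = atomic_get(&counter);
*         new = old * 2;
*     } while (!atomic_cas(&counter, old, new));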
*
*/
SECTION_FUNC(TEXT, atomic_cas)
LDREX r3, [r0] /* load the value and mark exclusive access */
CMP r3, r1 /* if (*target != oldValue) */
ITT NE
MOVNE r0, #0 /* return FALSE */
MOVNE pc, lr
STREX r12, r2, [r0] /* try to store if equal */
TEQ r12, #0 /* store successful? */
BNE atomic_cas /* if not, retry */
MOV r0, #1 /* return TRUE if swap occurred */
MOV pc, lr

arch/arm/core/basepri.s (new file)
@@ -0,0 +1,95 @@
/* basepri.s - ARM Cortex-M interrupt locking via BASEPRI */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Provide irq_lock() and irq_unlock() via the BASEPRI register. This
allows locking up to a certain interrupt priority. VxMicro locks out priorities
2 and lower (higher numbered priorities), in essence leaving priorities 0 and 1
unlocked. This achieves two purposes:
1. The service call exception is installed at priority 0, allowing it to be
invoked with interrupts locked. This is needed since 'svc #0' is the
implementation of _Swap(), which is invoked with interrupts locked in the
common implementation of nanokernel objects.
2. Zero Interrupt Latency (ZLI) is achievable via this by allowing certain
interrupts to set their priority to 1, thus being allowed in when interrupts
are locked for regular interrupts.
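
For illustration, a driver whose ISR must run even while irq_lock() is in effect
could raise its interrupt to priority 1 via irq_priority_set() (added in this
same commit, in arch/arm/core/irq_manage.c); the IRQ number below is a
hypothetical placeholder, and such an ISR may not make kernel calls:

    irq_priority_set(MY_ZLI_IRQ, 1);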
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(irq_lock)
GTEXT(irq_unlock)
/*******************************************************************************
*
* irq_lock - lock interrupts
*
* Prevent exceptions with a priority lower than the two highest priority levels from
* interrupting the CPU.
*
* This function can be called recursively: it returns a key that can be used to
* restore interrupt locking to the previous level.
*
* RETURNS: a key to return to the previous interrupt locking level
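*
* e.g. (illustrative; the key type follows the usage of irq_lock_inline() in
* irq_manage.c from this same commit):
*
*     int key = irq_lock();
*     /@ critical section: only priority 0 and 1 exceptions can preempt @/
*     irq_unlock(key);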
*/
SECTION_FUNC(TEXT,irq_lock)
movs.n r1, #_EXC_IRQ_DEFAULT_PRIO
mrs r0, BASEPRI
msr BASEPRI, r1
bx lr
/*******************************************************************************
*
* irq_unlock - unlock interrupts
*
* Return the state of interrupt locking to a previous level, passed in via the
* <key> parameter, obtained from a previous call to irq_lock().
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT,irq_unlock)
msr BASEPRI, r0
bx lr
.end

arch/arm/core/cpu_idle.s (new file)
@@ -0,0 +1,198 @@
/* cpu_idle.s - ARM CORTEX-M3 power management */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
*/
#define _ASMLANGUAGE
#include <offsets.h>
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
#ifdef CONFIG_TICKLESS_IDLE
#include <nanok.h>
#endif
_ASM_FILE_PROLOGUE
GTEXT(_CpuIdleInit)
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
GTEXT(_NanoIdleValGet)
GTEXT(_NanoIdleValClear)
#endif
GTEXT(nano_cpu_idle)
GTEXT(nano_cpu_atomic_idle)
#define _SCR_INIT_BITS _SCB_SCR_SEVONPEND
/*******************************************************************************
*
* _CpuIdleInit - initialization of CPU idle
*
* Only called by nanoArchInit(). Sets SEVONPEND bit once for the system's
* duration.
*
* RETURNS: N/A
*
* C function prototype:
*
* void _CpuIdleInit (void);
*/
SECTION_FUNC(TEXT, _CpuIdleInit)
ldr r1, =_SCB_SCR
movs.n r2, #_SCR_INIT_BITS
str r2, [r1]
bx lr
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
/*******************************************************************************
*
* _NanoIdleValGet - get the kernel idle setting
*
* Returns the nanokernel idle setting, in ticks. Only called by __systick().
*
* RETURNS: the requested number of ticks for the kernel to be idle
*
* C function prototype:
*
* int32_t _NanoIdleValGet (void);
*/
SECTION_FUNC(TEXT, _NanoIdleValGet)
ldr r0, =_NanoKernel
ldr r0, [r0, #__tNANO_idle_OFFSET]
bx lr
/*******************************************************************************
*
* _NanoIdleValClear - clear the kernel idle setting
*
* Sets the nanokernel idle setting to 0. Only called by __systick().
*
* RETURNS: N/A
*
* C function prototype:
*
* void _NanoIdleValClear (void);
*/
SECTION_FUNC(TEXT, _NanoIdleValClear)
ldr r0, =_NanoKernel
eors.n r1, r1
str r1, [r0, #__tNANO_idle_OFFSET]
bx lr
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
/*******************************************************************************
*
* nano_cpu_idle - power save idle routine for ARM Cortex-M
*
* This function will be called by the nanokernel idle loop or possibly within
* an implementation of _SysPowerSaveIdle in the microkernel when the
* '_SysPowerSaveFlag' variable is non-zero. The ARM 'wfi' instruction
* will be issued, causing a low-power consumption sleep mode.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_idle (void);
*/
SECTION_FUNC(TEXT, nano_cpu_idle)
/* clear BASEPRI so wfi is awakened by incoming interrupts */
eors.n r0, r0
msr BASEPRI, r0
wfi
bx lr
/*******************************************************************************
*
* nano_cpu_atomic_idle - atomically re-enable interrupts and enter low power mode
*
* This function is utilized by the nanokernel object "wait" APIs for task
* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
* and nano_task_fifo_get_wait().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
* in nano_task_lifo_get_wait(), for example, of the race condition that occurs
* if this requirement is not met.
*
* 2) After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'imask' input parameter.
*
* RETURNS: N/A
*
* C function prototype:
*
* void nano_cpu_atomic_idle (unsigned int imask);
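*
* e.g. the check-then-wait pattern this routine exists for (illustrative only;
* data_available() is a hypothetical wait condition, and the real "wait" APIs
* wrap this in a loop):
*
*     unsigned int imask = irq_lock();
*     if (!data_available()) {
*         /@ re-enable interrupts and sleep in a single atomic step @/
*         nano_cpu_atomic_idle(imask);
*     } else {
*         irq_unlock(imask);
*     }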
*/
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
/*
* r0: interrupt mask from caller
* r1: zero, for setting BASEPRI (needs a register)
*/
eors.n r1, r1
/*
* Lock PRIMASK while sleeping: wfe will still get interrupted by incoming
* interrupts but the CPU will not service them right away.
*/
cpsid i
/*
* No need to set SEVONPEND, it's set once in _CpuIdleInit() and never
* touched again.
*/
/* unlock BASEPRI so wfe gets interrupted by incoming interrupts */
msr BASEPRI, r1
wfe
msr BASEPRI, r0
cpsie i
bx lr

arch/arm/core/exc_exit.s (new file)
@@ -0,0 +1,128 @@
/* exc_exit.s - ARM CORTEX-M3 exception/interrupt exit API */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Provides functions for performing kernel handling when exiting exceptions or
interrupts that are installed directly in the vector table (i.e. that are not
wrapped around by _IsrWrapper()).
*/
#define _ASMLANGUAGE
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(_ExcExit)
GTEXT(_IntExit)
GDATA(_NanoKernel)
#if CONFIG_GDB_INFO
#define _EXIT_EXC_IF_FIBER_PREEMPTED beq _ExcExitWithGdbStub
#else
.macro _EXIT_EXC_IF_FIBER_PREEMPTED
it eq
bxeq lr
.endm
#endif
#define _EXIT_EXC_IF_FIBER_NOT_READY _EXIT_EXC_IF_FIBER_PREEMPTED
/*******************************************************************************
*
* _IntExit - kernel housekeeping when exiting interrupt handler installed
* directly in vector table
*
* VxMicro allows installing interrupt handlers (ISRs) directly into the vector
* table to get the lowest interrupt latency possible. This allows the ISR to be
* invoked directly without going through a software interrupt table. However,
* upon exiting the ISR, some kernel work must still be performed, namely
* possible context switching. While ISRs connected in the software interrupt
* table do this automatically via a wrapper, ISRs connected directly in the
* vector table must invoke _IntExit() as the *very last* action before
* returning.
*
* e.g.
*
* void myISR(void)
* {
* printk("in %s\n", __FUNCTION__);
* doStuff();
* _IntExit();
* }
*
* RETURNS: N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
/* _IntExit falls through to _ExcExit (they are aliases of each other) */
/*******************************************************************************
*
* _ExcExit - kernel housekeeping when exiting exception handler installed
* directly in vector table
*
* See _IntExit().
*
* RETURNS: N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
ldr r1, =_NanoKernel
/* is the current thread preemptible (task) ? */
ldr r2, [r1, #__tNANO_flags_OFFSET]
ands.w r2, #PREEMPTIBLE
_EXIT_EXC_IF_FIBER_PREEMPTED
/* is there a fiber ready ? */
ldr r2, [r1, #__tNANO_fiber_OFFSET]
cmp r2, #0
_EXIT_EXC_IF_FIBER_NOT_READY
/* context switch required, pend the PendSV exception */
ldr r1, =_SCS_ICSR
ldr r2, =_SCS_ICSR_PENDSV
str r2, [r1]
_ExcExitWithGdbStub:
_GDB_STUB_EXC_EXIT
bx lr

arch/arm/core/fault.c (new file)
@@ -0,0 +1,414 @@
/* fault.c - common fault handler for ARM Cortex-M */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Common fault handler for ARM Cortex-M processors.
*/
#include <toolchain.h>
#include <sections.h>
#include <cputype.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <nanok.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PR_EXC(...) printk(__VA_ARGS__)
#else
#define PR_EXC(...)
#endif /* CONFIG_PRINTK */
#if (CONFIG_FAULT_DUMP > 0)
#define FAULT_DUMP(esf, fault) _FaultDump(esf, fault)
#else
#define FAULT_DUMP(esf, fault) \
do { \
(void) esf; \
(void) fault; \
} while ((0))
#endif
#if (CONFIG_FAULT_DUMP == 1)
/*******************************************************************************
*
* _FaultDump - dump information regarding fault (FAULT_DUMP == 1)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
* (short form).
*
* e.g. (precise bus error escalated to hard fault):
*
* Fault! EXC #3, Thread: 0x200000dc, instr: 0x000011d3
* HARD FAULT: Escalation (see below)!
* MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
* BFAR: 0xff001234
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _FaultDump(const NANO_ESF *esf, int fault)
{
int escalation = 0;
PR_EXC("Fault! EXC #%d, Thread: %x, instr @ %x\n",
fault,
context_self_get(),
esf->pc);
if (3 == fault) { /* hard fault */
escalation = _ScbHardFaultIsForced();
PR_EXC("HARD FAULT: %s\n",
escalation ? "Escalation (see below)!"
: "Bus fault on vector table read\n");
}
PR_EXC("MMFSR: %x, BFSR: %x, UFSR: %x\n",
__scs.scb.cfsr.byte.mmfsr.val,
__scs.scb.cfsr.byte.bfsr.val,
__scs.scb.cfsr.byte.ufsr.val);
if (_ScbMemFaultIsMmfarValid()) {
PR_EXC("MMFAR: %x\n", _ScbMemFaultAddrGet());
if (escalation) {
_ScbMemFaultMmfarReset();
}
}
if (_ScbBusFaultIsBfarValid()) {
PR_EXC("BFAR: %x\n", _ScbBusFaultAddrGet());
if (escalation) {
_ScbBusFaultBfarReset();
}
}
/* clear UFSR sticky bits */
_ScbUsageFaultAllFaultsReset();
}
#endif
#if (CONFIG_FAULT_DUMP == 2)
/*******************************************************************************
*
* _FaultContextShow - dump context information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _FaultContextShow(const NANO_ESF *esf)
{
PR_EXC(" Executing context ID (thread): 0x%x\n"
" Faulting instruction address: 0x%x\n",
context_self_get(),
esf->pc);
}
/*******************************************************************************
*
* _MpuFault - dump MPU fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _MpuFault(const NANO_ESF *esf,
int fromHardFault)
{
PR_EXC("***** MPU FAULT *****\n");
_FaultContextShow(esf);
if (_ScbMemFaultIsStacking()) {
PR_EXC(" Stacking error\n");
} else if (_ScbMemFaultIsUnstacking()) {
PR_EXC(" Unstacking error\n");
} else if (_ScbMemFaultIsDataAccessViolation()) {
PR_EXC(" Data Access Violation\n");
if (_ScbMemFaultIsMmfarValid()) {
PR_EXC(" Address: 0x%x\n", _ScbMemFaultAddrGet());
if (fromHardFault) {
_ScbMemFaultMmfarReset();
}
}
} else if (_ScbMemFaultIsInstrAccessViolation()) {
PR_EXC(" Instruction Access Violation\n");
}
}
/*******************************************************************************
*
* _BusFault - dump bus fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _BusFault(const NANO_ESF *esf,
int fromHardFault)
{
PR_EXC("***** BUS FAULT *****\n");
_FaultContextShow(esf);
if (_ScbBusFaultIsStacking()) {
PR_EXC(" Stacking error\n");
} else if (_ScbBusFaultIsUnstacking()) {
PR_EXC(" Unstacking error\n");
} else if (_ScbBusFaultIsPrecise()) {
PR_EXC(" Precise data bus error\n");
if (_ScbBusFaultIsBfarValid()) {
PR_EXC(" Address: 0x%x\n", _ScbBusFaultAddrGet());
if (fromHardFault) {
_ScbBusFaultBfarReset();
}
}
/* it's possible to have both a precise and imprecise fault */
if (_ScbBusFaultIsImprecise()) {
PR_EXC(" Imprecise data bus error\n");
}
} else if (_ScbBusFaultIsImprecise()) {
PR_EXC(" Imprecise data bus error\n");
} else if (_ScbBusFaultIsInstrBusErr()) {
PR_EXC(" Instruction bus error\n");
}
}
/*******************************************************************************
*
* _UsageFault - dump usage fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _UsageFault(const NANO_ESF *esf)
{
PR_EXC("***** USAGE FAULT *****\n");
_FaultContextShow(esf);
/* bits are sticky: they stack and must be reset */
if (_ScbUsageFaultIsDivByZero()) {
PR_EXC(" Division by zero\n");
}
if (_ScbUsageFaultIsUnaligned()) {
PR_EXC(" Unaligned memory access\n");
}
if (_ScbUsageFaultIsNoCp()) {
PR_EXC(" No coprocessor instructions\n");
}
if (_ScbUsageFaultIsInvalidPcLoad()) {
PR_EXC(" Illegal load of EXC_RETURN into PC\n");
}
if (_ScbUsageFaultIsInvalidState()) {
PR_EXC(" Illegal use of the EPSR\n");
}
if (_ScbUsageFaultIsUndefinedInstr()) {
PR_EXC(" Attempt to execute undefined instruction\n");
}
_ScbUsageFaultAllFaultsReset();
}
/*******************************************************************************
*
* _HardFault - dump hard fault information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _HardFault(const NANO_ESF *esf)
{
PR_EXC("***** HARD FAULT *****\n");
if (_ScbHardFaultIsBusErrOnVectorRead()) {
PR_EXC(" Bus fault on vector table read\n");
} else if (_ScbHardFaultIsForced()) {
PR_EXC(" Fault escalation (see below)\n");
if (_ScbIsMemFault()) {
_MpuFault(esf, 1);
} else if (_ScbIsBusFault()) {
_BusFault(esf, 1);
} else if (_ScbIsUsageFault()) {
_UsageFault(esf);
}
}
}
/*******************************************************************************
*
* _DebugMonitor - dump debug monitor exception information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _DebugMonitor(const NANO_ESF *esf)
{
PR_EXC("***** Debug monitor exception (not implemented) *****\n");
}
/*******************************************************************************
*
* _ReservedException - dump reserved exception information
*
* See _FaultDump() for example.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _ReservedException(const NANO_ESF *esf,
int fault)
{
PR_EXC("***** %s %d) *****\n",
fault < 16 ? "Reserved Exception (" : "Spurious interrupt (IRQ ",
fault - 16);
}
/*******************************************************************************
*
* _FaultDump - dump information regarding fault (FAULT_DUMP == 2)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
* (long form).
*
* e.g. (precise bus error escalated to hard fault):
*
* Executing context ID (thread): 0x200000dc
* Faulting instruction address: 0x000011d3
* ***** HARD FAULT *****
* Fault escalation (see below)
* ***** BUS FAULT *****
* Precise data bus error
* Address: 0xff001234
*
* RETURNS: N/A
*
* \NOMANUAL
*/
static void _FaultDump(const NANO_ESF *esf, int fault)
{
switch (fault) {
case 3:
_HardFault(esf);
break;
case 4:
_MpuFault(esf, 0);
break;
case 5:
_BusFault(esf, 0);
break;
case 6:
_UsageFault(esf);
break;
case 12:
_DebugMonitor(esf);
break;
default:
_ReservedException(esf, fault);
break;
}
}
#endif /* FAULT_DUMP == 2 */
/*******************************************************************************
*
* _Fault - fault handler
*
* This routine is called when fatal error conditions are detected by hardware
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* Since the ESF can be on either the MSP or the PSP depending on whether an
* exception or interrupt was already being handled, this routine is passed a
* pointer to both and has to find out on which stack the ESF is present.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
void _Fault(
const NANO_ESF *msp, /* pointer to potential ESF on MSP */
const NANO_ESF *psp /* pointer to potential ESF on PSP */
)
{
const NANO_ESF *esf = _ScbIsNestedExc() ? msp : psp;
int fault = _ScbActiveVectorGet();
FAULT_DUMP(esf, fault);
_SysFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
}
/*******************************************************************************
*
* _FaultInit - initialization of fault handling
*
* Turns on the desired hardware faults.
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _FaultInit(void)
{
_ScbDivByZeroFaultEnable();
_ScbUnalignedFaultEnable();
}

arch/arm/core/fault_s.s (new file)
@@ -0,0 +1,101 @@
/* fault_s.s - fault handlers for ARM Cortex-M */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Fault handlers for ARM Cortex-M processors.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(_Fault)
GTEXT(__hard_fault)
GTEXT(__mpu_fault)
GTEXT(__bus_fault)
GTEXT(__usage_fault)
GTEXT(__debug_monitor)
GTEXT(__reserved)
/*******************************************************************************
*
* __fault - fault handler installed in the fault and reserved vectors
*
* Entry point for the hard fault, MPU fault, bus fault, usage fault, debug
* monitor and reserved exceptions.
*
* Save the values of the MSP and PSP in r0 and r1 respectively, so that they
* become the first and second parameters to the _Fault() C function that will
* handle the rest.
* This has to be done because at this point we do not know if the fault
* happened while handling an exception or not, and thus the ESF could be on
* either stack. _Fault() will find out where the ESF resides.
*
* Provides these symbols:
*
* __hard_fault
* __mpu_fault
* __bus_fault
* __usage_fault
* __debug_monitor
* __reserved
*/
SECTION_SUBSEC_FUNC(TEXT,__fault,__hard_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__mpu_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__bus_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__usage_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__debug_monitor)
SECTION_SUBSEC_FUNC(TEXT,__fault,__reserved)
_GDB_STUB_EXC_ENTRY
/* force unlock interrupts */
eors.n r0, r0
msr BASEPRI, r0
mrs r0, MSP
mrs r1, PSP
push {lr}
bl _Fault
_GDB_STUB_EXC_EXIT
pop {pc}
.end

arch/arm/core/ffs.s (new file)
@@ -0,0 +1,93 @@
/* ffs.s - ARM find first set assembly routines */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This library implements find_last_set() and find_first_set(), which return the
most and least significant bit set, respectively.
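
For example (illustrative values, with bit indices counted from 1):

    find_first_set(0x00000010)  returns 5
    find_last_set(0x00000010)   returns 5
    find_first_set(0x80000001)  returns 1
    find_last_set(0x80000001)   returns 32
    find_first_set(0)           returns 0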
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
_ASM_FILE_PROLOGUE
/* Exports */
GTEXT(find_last_set)
GTEXT(find_first_set)
/*******************************************************************************
*
* find_last_set - find first set bit (searching from the most significant bit)
*
* This routine finds the first bit set in the argument passed to it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: most significant bit set
*/
SECTION_FUNC(TEXT, find_last_set)
cmp r0, #0
itt ne
clzne r0, r0
rsbne r0, r0, #32
mov pc, lr
/*******************************************************************************
*
* find_first_set - find first set bit (searching from the least significant bit)
*
* This routine finds the first bit set in the argument passed to it and
* returns the index of that bit. Bits are numbered starting
* at 1 from the least significant bit. A return value of zero indicates that
* the value passed is zero.
*
* RETURNS: least significant bit set
*/
SECTION_FUNC(TEXT, find_first_set)
rsb r1, r0, #0
ands r0, r1, r0 /* r0 = x & (-x): only LSB set */
itt ne
clzne r0, r0 /* count leading zeroes */
rsbne r0, r0, #32
mov pc, lr

arch/arm/core/gdb_stub.s (new file)
@@ -0,0 +1,163 @@
/* gdb_stub.s - extra work performed upon exception entry/exit for GDB */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Prep work done when entering exceptions consists of saving the callee-saved
registers before they get used by exception handlers, and recording the fact
that we are running in an exception.
Upon exception exit, it must be recorded that the task is not in an exception
anymore.
*/
#define _ASMLANGUAGE
#include <offsets.h>
#include <toolchain.h>
#include <sections.h>
#include <nanok.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
/*******************************************************************************
*
* _GdbStubExcEntry - exception entry extra work when GDB_INFO is enabled
*
* During normal system operation, the callee-saved registers are saved lazily
* only when a context switch is required. To allow looking at the current
* thread's registers while debugging an exception/interrupt, they must be saved
* upon entry since the handler could be using them: thus, looking at the CPU
* registers would show the current system state and not the current *thread*'s
* state.
*
* Also, record the fact that the thread is currently interrupted so that VQEMU
* looks into the CCS and not the CPU registers to obtain the current thread's
* register values.
*
* NOTE:
* - must be called with interrupts locked
* - cannot use r0 without saving it first
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _GdbStubExcEntry)
ldr r1, =_NanoKernel
ldr r2, [r1, #__tNANO_flags_OFFSET]
/* already in an exception, do not update the registers */
ands r3, r2, #EXC_ACTIVE
it ne
bxne lr
orrs r2, #EXC_ACTIVE
str r2, [r1, #__tNANO_flags_OFFSET]
ldr r1, [r1, #__tNANO_current_OFFSET]
str r2, [r1, #__tCCS_flags_OFFSET]
/* save callee-saved + psp in CCS */
adds r1, #__tCCS_preempReg_OFFSET
mrs ip, PSP
stmia r1, {v1-v8, ip}
bx lr
/*******************************************************************************
*
* _GdbStubExcExit - exception exit extra clean up when GDB_INFO is enabled
*
* Record the fact that the thread is not interrupted anymore so that VQEMU
* looks at the CPU registers and not into the CCS to obtain the current
* thread's register values. Only do this if this is not a nested exception.
*
* NOTE:
* - must be called with interrupts locked
* - cannot use r0 without saving it first
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _GdbStubExcExit)
/* if we're nested (ie. !RETTOBASE), do not reset EXC_ACTIVE */
ldr r1, =_SCS_ICSR
ldr r1, [r1]
ands r1, #_SCS_ICSR_RETTOBASE
it eq
bxeq lr
ldr r1, =_NanoKernel
ldr r2, [r1, #__tNANO_flags_OFFSET]
bic r2, #EXC_ACTIVE
str r2, [r1, #__tNANO_flags_OFFSET]
ldr r1, [r1, #__tNANO_current_OFFSET]
str r2, [r1, #__tCCS_flags_OFFSET]
bx lr
/*******************************************************************************
*
* _GdbStubIrqVectorTableEntry - stub for ISRs installed directly in
* vector table
*
* VxMicro on Cortex-M3/4 allows users to configure the kernel such that
* ISRs are installed directly in the vector table for maximum efficiency.
*
* When OS-awareness is enabled, a stub must be inserted to invoke
* _GdbStubExcEntry() before the user ISR runs, to save the current task's
* registers. This stub thus gets inserted in the vector table instead of the
* user's ISR. The user's IRQ vector table gets pushed after the vector table
* automatically by the linker script: this is all transparent to the user.
* This stub must also act as a demuxer that finds the running exception and
* invokes the user's real ISR.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _GdbStubIrqVectorTableEntry)
_GDB_STUB_EXC_ENTRY
mrs r0, IPSR /* get exception number */
sub r0, r0, #16 /* get IRQ number */
ldr r1, =_IrqVectorTable
/* grab real ISR at address: r1 + (r0 << 2) (table is 4-byte wide) */
ldr r1, [r1, r0, LSL #2]
/* jump to ISR, no return: ISR is responsible for calling _IntExit */
bx r1


arch/arm/core/gdb_stub_irq_vector_table.c (new file)
@@ -0,0 +1,52 @@
/* gdb_stub_irq_vector_table.c - stubs for IRQ part of vector table */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
When GDB is enabled, the static IRQ vector table needs to install the
_GdbStubIrqVectorTableEntry stub to do some work before calling the
user-installed ISRs.
*/
#include <toolchain.h>
#include <sections.h>
#include <nanokernel/cpu.h>
typedef void (*vth)(void); /* Vector Table Handler */
#if defined(CONFIG_GDB_INFO) && !defined(CONFIG_SW_ISR_TABLE)
vth __gdb_stub_irq_vector_table _GdbStubIrqVectorTable[CONFIG_NUM_IRQS] = {
[0 ...(CONFIG_NUM_IRQS - 1)] = _GdbStubIrqVectorTableEntry
};
#endif /* CONFIG_GDB_INFO && !CONFIG_SW_ISR_TABLE */

arch/arm/core/irq_init.c (new file)
@@ -0,0 +1,65 @@
/* irq_init.c - ARM Cortex-M interrupt initialization */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module initializes the ARM Cortex-M interrupt subsystem. On reset, every
IRQ priority is 0; _IntLibInit() re-programs them to _EXC_IRQ_DEFAULT_PRIO so
that interrupt locking via BASEPRI works as expected.
*/
#include <toolchain.h>
#include <sections.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
/*******************************************************************************
*
* _IntLibInit - initialize interrupts
*
* Ensures all interrupts have their priority set to _EXC_IRQ_DEFAULT_PRIO rather
* than 0, their value coming out of reset. This ensures that
* interrupt locking via BASEPRI works as expected.
*
* RETURNS: N/A
*/
void _IntLibInit(void)
{
int irq = 0;
for (; irq < CONFIG_NUM_IRQS; irq++) {
_NvicIrqPrioSet(irq, _EXC_IRQ_DEFAULT_PRIO);
}
}

arch/arm/core/irq_manage.c (new file)
@@ -0,0 +1,189 @@
/* irq_manage.c - ARM CORTEX-M3 interrupt management */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Interrupt management: enabling/disabling and dynamic ISR connecting/replacing.
SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <misc/__assert.h>
#include <toolchain.h>
#include <sections.h>
#include <sw_isr_table.h>
extern void __reserved(void);
/*******************************************************************************
*
* irq_handler_set - replace an interrupt handler by another
*
* An interrupt's ISR can be replaced at runtime. Care must be taken that the
* interrupt is disabled before doing this.
*
* This routine will hang if <old> is not found in the table and ASSERT_ON is
* enabled.
*
* RETURNS: N/A
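*
* e.g. swapping handlers on a quiesced interrupt line (illustrative only; MY_IRQ,
* old_isr, new_isr and new_arg are hypothetical):
*
*     irq_disable(MY_IRQ);
*     irq_handler_set(MY_IRQ, old_isr, new_isr, new_arg);
*     irq_enable(MY_IRQ);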
*/
void irq_handler_set(unsigned int irq,
void (*old)(void *arg),
void (*new)(void *arg),
void *arg)
{
int key = irq_lock_inline();
__ASSERT(old == _IsrTable[irq].isr, "expected ISR not found in table");
if (old == _IsrTable[irq].isr) {
_IsrTable[irq].isr = new;
_IsrTable[irq].arg = arg;
}
irq_unlock_inline(key);
}
/*******************************************************************************
*
* irq_enable - enable an interrupt line
*
* Clear possible pending interrupts on the line, and enable the interrupt
* line. After this call, the CPU will receive interrupts for the specified
* <irq>.
*
* RETURNS: N/A
*/
void irq_enable(unsigned int irq)
{
/* before enabling interrupts, ensure that interrupt is cleared */
_NvicIrqUnpend(irq);
_NvicIrqEnable(irq);
}
/*******************************************************************************
*
* irq_disable - disable an interrupt line
*
* Disable an interrupt line. After this call, the CPU will stop receiving
* interrupts for the specified <irq>.
*
* RETURNS: N/A
*/
void irq_disable(unsigned int irq)
{
_NvicIrqDisable(irq);
}
/*******************************************************************************
*
* irq_priority_set - set an interrupt's priority
*
* Valid values are from 1 to 255. Interrupts of priority 1 are not masked when
* interrupts are locked system-wide, so care must be taken when using them. ISRs
* installed on priority 1 interrupts cannot make kernel calls.
*
* Priority 0 is reserved for kernel usage and cannot be used.
*
* The priority is verified if ASSERT_ON is enabled.
*
* RETURNS: N/A
*/
void irq_priority_set(unsigned int irq,
unsigned int prio)
{
__ASSERT(prio > 0 && prio < 256, "invalid priority!");
_NvicIrqPrioSet(irq, _EXC_PRIO(prio));
}
/*******************************************************************************
*
* _SpuriousIRQ - spurious interrupt handler
*
* Installed in all dynamic interrupt slots at boot time. Throws an error if
* called.
*
* See __reserved().
*
* RETURNS: N/A
*/
void _SpuriousIRQ(void *unused)
{
ARG_UNUSED(unused);
__reserved();
}
/*******************************************************************************
*
* irq_connect - connect an ISR to an interrupt line
*
* <isr> is connected to interrupt line <irq> (exception #<irq>+16). No prior
* ISR can have been connected on <irq> interrupt line since the system booted.
*
* This routine will hang if another ISR was connected for interrupt line <irq>
* and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently.
*
* RETURNS: the interrupt line number
*/
int irq_connect(unsigned int irq,
unsigned int prio,
void (*isr)(void *arg),
void *arg)
{
irq_handler_set(irq, _SpuriousIRQ, isr, arg);
irq_priority_set(irq, prio);
return irq;
}
/*******************************************************************************
*
* irq_disconnect - disconnect an ISR from an interrupt line
*
* Interrupt line <irq> (exception #<irq>+16) is disconnected from its ISR and
* the latter is replaced by _SpuriousIRQ(). irq_disable() should have
* been called before invoking this routine.
*
* RETURNS: N/A
*/
void irq_disconnect(unsigned int irq)
{
irq_handler_set(irq, _IsrTable[irq].isr, _SpuriousIRQ, NULL);
}
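Taken together, the dynamic connection API above is typically used as sketched
below; MY_DEV_IRQ, MY_DEV_PRIO, my_dev_isr and my_dev_data are hypothetical
names used only for illustration.

static void my_dev_init(void)
{
	irq_connect(MY_DEV_IRQ, MY_DEV_PRIO, my_dev_isr, &my_dev_data);
	irq_enable(MY_DEV_IRQ);
}

static void my_dev_shutdown(void)
{
	irq_disable(MY_DEV_IRQ);     /* stop taking the interrupt */
	irq_disconnect(MY_DEV_IRQ);  /* slot reverts to _SpuriousIRQ */
}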

111
arch/arm/core/isr_wrapper.s Normal file
View file

@ -0,0 +1,111 @@
/* isr_wrapper.s - ARM CORTEX-M3 wrapper for ISRs with parameter */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Wrapper installed in vector table for handling dynamic interrupts that accept
a parameter.
*/
#define _ASMLANGUAGE
#include <offsets.h>
#include <toolchain.h>
#include <sections.h>
#include <sw_isr_table.h>
#include <nanok.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GDATA(_IsrTable)
GTEXT(_IsrWrapper)
GTEXT(_IntExit)
/*******************************************************************************
*
* _IsrWrapper - wrapper around ISRs when inserted in software ISR table
*
* When inserted in the vector table, _IsrWrapper() demuxes the ISR table using
* the running interrupt number as the index, and invokes the registered ISR
* with its corresponding argument. When returning from the ISR, it determines
* if a context switch needs to happen (see documentation for __pendsv()) and
* pends the PendSV exception if so: the latter will perform the context switch
* itself.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, _IsrWrapper)
_GDB_STUB_EXC_ENTRY
push {lr} /* lr is now the first item on the stack */
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
/*
* All interrupts are disabled when handling idle wakeup.
* For tickless idle, this ensures that the calculation and programming of
* the device for the next timer deadline is not interrupted.
* For non-tickless idle, this ensures that the clearing of the kernel idle
* state is not interrupted.
* In each case, _SysPowerSaveIdleExit is called with interrupts disabled.
*/
cpsid i /* PRIMASK = 1 */
/* is this a wakeup from idle ? */
ldr r2, =_NanoKernel
ldr r0, [r2, #__tNANO_idle_OFFSET] /* requested idle duration, in ticks */
cmp r0, #0
ittt ne
movne r1, #0
strne r1, [r2, #__tNANO_idle_OFFSET] /* clear kernel idle state */
blxne _SysPowerSaveIdleExit
cpsie i /* re-enable interrupts (PRIMASK = 0) */
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
mrs r0, IPSR /* get exception number */
sub r0, r0, #16 /* get IRQ number */
	lsl r0, r0, #3 /* table entries are 8 bytes wide */
ldr r1, =_IsrTable
	add r1, r1, r0 /* table entry: ISRs must have their lsb set to stay
* in thumb mode */
ldmia r1,{r0,r3} /* arg in r0, ISR in r3 */
blx r3 /* call ISR */
pop {lr}
/* exception return is done in _IntExit(), including _GDB_STUB_EXC_EXIT */
b _IntExit
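The demux step performed by _IsrWrapper above is roughly equivalent to the
following C sketch (illustrative only; the exception number comes from the
IPSR register, which has no direct C expression, and the entry layout is
defined in sw_isr_table.h):

void isr_demux_sketch(unsigned int ipsr)
{
	unsigned int irq = ipsr - 16;            /* exception number -> IRQ number */

	_IsrTable[irq].isr(_IsrTable[irq].arg);  /* registered ISR, with its argument */
}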

View file

@ -0,0 +1,77 @@
/* nano_fiber_abort.c - ARM Cortex-M fiber_abort() routine */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
The ARM Cortex-M architecture provides its own fiber_abort() to deal with
different CPU modes (handler vs thread) when a fiber aborts. When its entry
point returns or when it aborts itself, the CPU is in thread mode and must
call _Swap() (which triggers a service call), but when in handler mode, the
CPU must exit handler mode to cause the context switch, and thus must queue
the PendSV exception.
*/
#ifdef CONFIG_MICROKERNEL
#include <microkernel/k_struct.h>
#include <microkernel.h>
#endif
#include <nanok.h>
#include <toolchain.h>
#include <sections.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
extern void _NanoFiberSwap(void);
/*******************************************************************************
*
* fiber_abort - abort the currently executing fiber
*
* Possible reasons for a fiber aborting:
*
* - the fiber explicitly aborts itself by calling this routine
* - the fiber implicitly aborts by returning from its entry point
* - the fiber encounters a fatal exception
*
* RETURNS: N/A
*/
void fiber_abort(void)
{
_ContextExitRtn(_NanoKernel.current);
if (_ScbIsInThreadMode()) {
_NanoFiberSwap();
} else {
_ScbPendsvSet();
}
}

151
arch/arm/core/nanocontext.c Normal file
View file

@ -0,0 +1,151 @@
/* nanocontext.c - new context creation for ARM Cortex-M */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
Core nanokernel fiber related primitives for the ARM Cortex-M processor
architecture.
*/
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <toolchain.h>
#include <nanok.h>
#include <nanocontextentry.h>
tNANO _NanoKernel = {0};
#if defined(CONFIG_HOST_TOOLS_SUPPORT)
#define TOOLS_SUPPORT_INIT(pCcs) toolsSupportInit(pCcs)
#else
#define TOOLS_SUPPORT_INIT(pCcs) \
	do { /* do nothing */ \
	} while (0)
#endif
#if defined(CONFIG_HOST_TOOLS_SUPPORT)
/*******************************************************************************
*
* toolsSupportInit - initialize host-tools support when needed
*
* Currently only inserts the new context in the list of active contexts.
*
* RETURNS: N/A
*/
static ALWAYS_INLINE void toolsSupportInit(struct s_CCS *pCcs /* context */
)
{
unsigned int key;
/*
* Add the newly initialized context to head of the list of contexts.
* This singly linked list of contexts maintains ALL the contexts in the
* system: both tasks and fibers regardless of whether they are
* runnable.
*/
key = irq_lock();
pCcs->activeLink = _NanoKernel.contexts;
_NanoKernel.contexts = pCcs;
irq_unlock(key);
}
#endif /* CONFIG_HOST_TOOLS_SUPPORT */
/*******************************************************************************
*
* _NewContext - initialize a new context (thread) from its stack space
*
* The control structure (CCS) is put at the low end of the stack memory. An
* initial context, to be "restored" by __pendsv(), is put at the other end of
* the stack; the space it occupies can be reused by the stack once it is no
* longer needed.
*
* The initial context is an exception stack frame (ESF) since exiting the
* PendSV exception will want to pop an ESF. Interestingly, even though the lsb
* of an instruction address to jump to must always be set because the CPU
* always runs in thumb mode, the ESF expects the real address of the
* instruction, with the lsb *not* set (instructions are always aligned on
* 16-bit halfwords). Since the compiler automatically sets the lsb of function
* addresses, we have to clear it manually before storing it in the 'pc' field
* of the ESF.
*
* <options> is currently unused.
*
* RETURNS: N/A
*/
void *_NewContext(
char *pStackMem, /* stack memory */
unsigned stackSize, /* stack size in bytes */
_ContextEntry pEntry, /* entry point */
void *parameter1, /* entry point first param */
void *parameter2, /* entry point second param */
void *parameter3, /* entry point third param */
int priority, /* context priority (-1 for tasks) */
unsigned options /* misc options (future) */
)
{
char *stackEnd = pStackMem + stackSize;
struct __esf *pInitCtx;
tCCS *pCcs = (void *)ROUND_UP(pStackMem, sizeof(uint32_t));
/* carve the context entry struct from the "base" of the stack */
pInitCtx = (struct __esf *)(STACK_ROUND_DOWN(stackEnd) -
sizeof(struct __esf));
pInitCtx->pc = ((uint32_t)_ContextEntryRtn) & 0xfffffffe;
pInitCtx->a1 = (uint32_t)pEntry;
pInitCtx->a2 = (uint32_t)parameter1;
pInitCtx->a3 = (uint32_t)parameter2;
pInitCtx->a4 = (uint32_t)parameter3;
pInitCtx->xpsr =
0x01000000UL; /* clear all, thumb bit is 1, even if RO */
pCcs->link = NULL;
pCcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
pCcs->prio = priority;
#ifdef CONFIG_CONTEXT_CUSTOM_DATA
/* Initialize custom data field (value is opaque to kernel) */
pCcs->custom_data = NULL;
#endif
pCcs->preempReg.psp = (uint32_t)pInitCtx;
pCcs->basepri = 0;
/* initial values in all other registers/CCS entries are irrelevant */
TOOLS_SUPPORT_INIT(pCcs);
return pCcs;
}
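A hedged sketch of how _NewContext() might be invoked when carving a fiber out
of a raw stack buffer; my_stack, my_fiber_entry and my_arg are assumed names,
and the real callers live in the architecture-independent fiber/task creation
code.

static char my_stack[1024];
static int my_arg;

static void my_fiber_setup(void)
{
	tCCS *ccs = _NewContext(my_stack, sizeof(my_stack),
				my_fiber_entry,       /* assumed entry of type _ContextEntry */
				&my_arg, NULL, NULL,  /* the three entry-point parameters */
				5,                    /* fiber priority; -1 would mean task */
				0);                   /* options, currently unused */

	ARG_UNUSED(ccs);
}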

132
arch/arm/core/nanofatal.c Normal file
View file

@ -0,0 +1,132 @@
/* nanofatal.c - nanokernel fatal error handler for ARM Cortex-M */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module provides the _NanoFatalErrorHandler() routine for ARM Cortex-M.
*/
/* includes */
#include <toolchain.h>
#include <sections.h>
#include <cputype.h>
#include <nanokernel.h>
#include <nanokernel/cpu.h>
#include <nanok.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PR_EXC(...) printk(__VA_ARGS__)
#else
#define PR_EXC(...)
#endif /* CONFIG_PRINTK */
/* globals */
/*
* Define a default ESF for use with _NanoFatalErrorHandler() in the event
* the caller does not have a NANO_ESF to pass
*/
const NANO_ESF __defaultEsf = {0xdeaddead, /* a1 */
0xdeaddead, /* a2 */
0xdeaddead, /* a3 */
0xdeaddead, /* a4 */
0xdeaddead, /* ip */
0xdeaddead, /* lr */
0xdeaddead, /* pc */
0xdeaddead, /* xpsr */
};
/*******************************************************************************
*
* _NanoFatalErrorHandler - nanokernel fatal error handler
*
* This routine is called when fatal error conditions are detected by software
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <__defaultEsf>.
*
* RETURNS: This function does not return.
*
* \NOMANUAL
*/
FUNC_NORETURN void _NanoFatalErrorHandler(
unsigned int reason, /* reason that handler was called */
const NANO_ESF *pEsf /* pointer to exception stack frame */
)
{
switch (reason) {
case _NANO_ERR_INVALID_TASK_EXIT:
PR_EXC("***** Invalid Exit Software Error! *****\n");
break;
#if defined(CONFIG_STACK_CANARIES)
case _NANO_ERR_STACK_CHK_FAIL:
PR_EXC("***** Stack Check Fail! *****\n");
break;
#endif /* CONFIG_STACK_CANARIES */
#ifdef CONFIG_ENHANCED_SECURITY
case _NANO_ERR_INVALID_STRING_OP:
PR_EXC("**** Invalid string operation! ****\n");
break;
#endif /* CONFIG_ENHANCED_SECURITY */
default:
PR_EXC("**** Unknown Fatal Error %d! ****\n", reason);
break;
}
PR_EXC("Current context ID = 0x%x\n"
"Faulting instruction address = 0x%x\n",
context_self_get(),
pEsf->pc);
/*
	 * Now that the error has been reported, invoke the user-implemented
	 * policy to respond to the error. Which responses are appropriate
	 * to the various errors is a decision left to the customer.
*/
_SysFatalErrorHandler(reason, pEsf);
for (;;)
;
}
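As a sketch of the calling convention described in the header above, a
software-detected error that has no hardware-generated ESF can report itself
with the default frame (illustrative fragment only):

/* Report a software-detected fatal error using the default ESF. */
_NanoFatalErrorHandler(_NANO_ERR_INVALID_TASK_EXIT, &__defaultEsf);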

View file

@ -0,0 +1,104 @@
/* offsets.c - ARM nano kernel structure member offset definition file */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module is responsible for the generation of the absolute symbols whose
values represent the member offsets for various ARM nanokernel
structures.
All of the absolute symbols defined by this module will be present in the
final microkernel or nanokernel ELF image (due to the linker's reference to
the _OffsetAbsSyms symbol).
INTERNAL
It is NOT necessary to define the offset for every member of a structure.
Typically, only those members that are accessed by assembly language routines
are defined; however, it doesn't hurt to define all fields for the sake of
completeness.
*/
#include <genOffset.h>
#include <nanok.h>
#include <offsets/common.h>
/* ARM-specific tNANO structure member offsets */
GEN_OFFSET_SYM(tNANO, flags);
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
GEN_OFFSET_SYM(tNANO, idle);
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
/* ARM-specific tCCS structure member offsets */
GEN_OFFSET_SYM(tCCS, basepri);
#ifdef CONFIG_CONTEXT_CUSTOM_DATA
GEN_OFFSET_SYM(tCCS, custom_data);
#endif
/* ARM-specific ESF structure member offsets */
GEN_OFFSET_SYM(tESF, a1);
GEN_OFFSET_SYM(tESF, a2);
GEN_OFFSET_SYM(tESF, a3);
GEN_OFFSET_SYM(tESF, a4);
GEN_OFFSET_SYM(tESF, ip);
GEN_OFFSET_SYM(tESF, lr);
GEN_OFFSET_SYM(tESF, pc);
GEN_OFFSET_SYM(tESF, xpsr);
/* size of the entire tESF structure */
GEN_ABSOLUTE_SYM(__tESF_SIZEOF, sizeof(tESF));
/* ARM-specific preempt registers structure member offsets */
GEN_OFFSET_SYM(tPreempt, v1);
GEN_OFFSET_SYM(tPreempt, v2);
GEN_OFFSET_SYM(tPreempt, v3);
GEN_OFFSET_SYM(tPreempt, v4);
GEN_OFFSET_SYM(tPreempt, v5);
GEN_OFFSET_SYM(tPreempt, v6);
GEN_OFFSET_SYM(tPreempt, v7);
GEN_OFFSET_SYM(tPreempt, v8);
GEN_OFFSET_SYM(tPreempt, psp);
/* size of the entire preempt registers structure */
GEN_ABSOLUTE_SYM(__tPreempt_SIZEOF, sizeof(tPreempt));
/* size of the tCCS structure sans save area for floating point regs */
GEN_ABSOLUTE_SYM(__tCCS_NOFLOAT_SIZEOF, sizeof(tCCS));
GEN_ABS_SYM_END
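The macros themselves come from genOffset.h, which is not part of this hunk; a
plausible sketch of the underlying mechanism, given here purely as an
assumption, emits an absolute assembler symbol whose value is
offsetof(type, member), so that assembly files can reference names such as
__tNANO_current_OFFSET directly.

#include <stddef.h>   /* offsetof */
#include <nanok.h>    /* tNANO, tCCS definitions assumed to come from here */

/* Assumed sketch; the real GEN_OFFSET_SYM may differ in detail. */
#define MY_GEN_OFFSET_SYM(type, member)                               \
	__asm__(".globl __" #type "_" #member "_OFFSET\n\t"          \
		".equ __" #type "_" #member "_OFFSET, %c0"           \
		: : "n"(offsetof(type, member)))

void my_offsets_dummy(void)   /* never called; only hosts the asm statements */
{
	MY_GEN_OFFSET_SYM(tNANO, current);
}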

223
arch/arm/core/swap.s Normal file
View file

@ -0,0 +1,223 @@
/* swap.s - thread context switching for ARM Cortex-M */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
This module implements the routines necessary for thread context switching
on ARM Cortex-M3/M4 CPUs.
*/
#define _ASMLANGUAGE
#include <nanok.h>
#include <offsets.h>
#include <toolchain.h>
#include <nanokernel/cpu.h>
_ASM_FILE_PROLOGUE
GTEXT(_Swap)
GTEXT(__svc)
GTEXT(__pendsv)
GDATA(_NanoKernel)
/*******************************************************************************
*
* __pendsv - PendSV exception handler, handling context switches
*
* The PendSV exception is the only context in the system that can perform
* context switching. When an execution context finds out it has to switch
* contexts, it pends the PendSV exception.
*
* When PendSV is pended, the decision that a context switch must happen has
* already been taken. In other words, when __pendsv() runs, we *know* we have
* to swap *something*.
*
* The scheduling algorithm is simple: schedule the head of the runnable FIBER
* context list, which is represented by _NanoKernel.fiber. If there are no
* runnable FIBER contexts, then schedule the TASK context represented by
* _NanoKernel.task. The _NanoKernel.task field will never be NULL.
*/
SECTION_FUNC(TEXT, __pendsv)
_GDB_STUB_EXC_ENTRY
/* load _Nanokernel into r1 and current tCCS into r2 */
ldr r1, =_NanoKernel
ldr r2, [r1, #__tNANO_current_OFFSET]
/* addr of callee-saved regs in CCS in r0 */
add r0, r2, #__tCCS_preempReg_OFFSET
/* save callee-saved + psp in CCS */
mrs ip, PSP
stmia r0, {v1-v8, ip}
/*
* Prepare to clear PendSV with interrupts unlocked, but
* don't clear it yet. PendSV must not be cleared until
* the new thread is context-switched in since all decisions
* to pend PendSV have been taken with the current kernel
* state and this is what we're handling currently.
*/
ldr ip, =_SCS_ICSR
ldr r3, =_SCS_ICSR_UNPENDSV
/* protect the kernel state while we play with the thread lists */
movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
msr BASEPRI, r0
/* find out incoming context (fiber or task) */
/* is there a fiber ready ? */
ldr r2, [r1, #__tNANO_fiber_OFFSET]
cmp r2, #0
/*
* if so, remove fiber from list
* else, the task is the thread we're switching in
*/
itte ne
ldrne.w r0, [r2, #__tCCS_link_OFFSET] /* then */
strne.w r0, [r1, #__tNANO_fiber_OFFSET] /* then */
ldreq.w r2, [r1, #__tNANO_task_OFFSET] /* else */
/* r2 contains the new thread */
ldr r0, [r2, #__tCCS_flags_OFFSET]
str r0, [r1, #__tNANO_flags_OFFSET]
str r2, [r1, #__tNANO_current_OFFSET]
/*
	 * Clear PendSV so that if another interrupt comes in and decides,
	 * based on the new kernel state now that the new thread is being
	 * context-switched in, that it needs to reschedule, that new PendSV
	 * will take effect; previously pended PendSVs must not take effect,
	 * since they were based on the previous kernel state, which has
	 * already been handled.
*/
/* _SCS_ICSR is still in ip and _SCS_ICSR_UNPENDSV in r3 */
str r3, [ip, #0]
/* restore BASEPRI for the incoming thread */
ldr r0, [r2, #__tCCS_basepri_OFFSET]
mov ip, #0
str ip, [r2, #__tCCS_basepri_OFFSET]
msr BASEPRI, r0
/* load callee-saved + psp from CCS */
add r0, r2, #__tCCS_preempReg_OFFSET
ldmia r0, {v1-v8, ip}
msr PSP, ip
_GDB_STUB_EXC_EXIT
/* exc return */
bx lr
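In C terms, the fiber-or-task selection just performed reduces to roughly the
sketch below (illustrative only; it omits the register save/restore and the
BASEPRI/PendSV manipulation shown above):

tCCS *next = _NanoKernel.fiber;          /* head of the runnable fiber list */

if (next != NULL) {
	_NanoKernel.fiber = next->link;  /* dequeue that fiber */
} else {
	next = _NanoKernel.task;         /* fall back to the task, never NULL */
}
_NanoKernel.flags = next->flags;
_NanoKernel.current = next;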
/*******************************************************************************
*
* __svc - service call handler
*
* The service call (svc) is only used in _Swap() to enter handler mode so we
* can go through the PendSV exception to perform a context switch.
*
* RETURNS: N/A
*/
SECTION_FUNC(TEXT, __svc)
_GDB_STUB_EXC_ENTRY
/*
* Unlock interrupts:
* - in an SVC call, so protected against context switches
* - allow PendSV, since it's running at prio 0xff
*/
eors.n r0, r0
msr BASEPRI, r0
/* set PENDSV bit, pending the PendSV exception */
ldr r1, =_SCS_ICSR
ldr r2, =_SCS_ICSR_PENDSV
str r2, [r1, #0]
_GDB_STUB_EXC_EXIT
/* handler mode exit, to PendSV */
bx lr
/*******************************************************************************
*
* _Swap - initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
* a cooperative context switch. Prior to invoking _Swap(), the caller
* disables interrupts via irq_lock() and the return 'key' is passed as a
* parameter to _Swap(). The 'key' actually represents the BASEPRI register
* prior to disabling interrupts via the BASEPRI mechanism.
*
* _Swap() itself does not do much.
*
* It simply stores the intlock key (the BASEPRI value) parameter into
* current->basepri, and then triggers a service call exception (svc) to set up
* the PendSV exception, which does the heavy lifting of context switching.
* This is the only place we have to save BASEPRI since the other paths to
* __pendsv all come from handling an interrupt, which means we know the
* interrupts were not locked: in that case the BASEPRI value is 0.
*
* Given that _Swap() is called to effect a cooperative context switch,
* only the caller-saved integer registers need to be saved in the tCCS of the
* outgoing context. This is all performed by the hardware, which stores it in
* its exception stack frame, created when handling the svc exception.
*
* RETURNS: may contain a return value set up by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int basepri);
*
*/
SECTION_FUNC(TEXT, _Swap)
ldr r1, =_NanoKernel
ldr r2, [r1, #__tNANO_current_OFFSET]
str r0, [r2, #__tCCS_basepri_OFFSET]
svc #0
/* r0 contains the return value if needed */
bx lr
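The calling protocol documented in the _Swap() header above looks roughly like
this from C (a sketch only; the real callers are the nanokernel wait and yield
primitives):

unsigned int key = irq_lock();   /* key is the caller's BASEPRI value */

/* ... update the kernel queues so __pendsv picks the desired thread ... */

unsigned int ret = _Swap(key);   /* returns once this context is switched back
				  * in; ret may carry a value stored via
				  * fiberRtnValueSet() */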

View file

@ -0,0 +1,86 @@
/* task_abort.c - ARM Cortex-M _TaskAbort() routine */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of Wind River Systems nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
DESCRIPTION
The ARM Cortex-M architecture provides its own _TaskAbort() to deal with
different CPU modes (handler vs thread) when a task aborts. When its entry
point returns or when it aborts itself, the CPU is in thread mode and must
call the equivalent of task_abort(<self>), but when in handler mode, the
CPU must queue a packet to K_swapper(), then exit handler mode to queue the
PendSV exception and cause the immediate context switch to K_swapper.
*/
#ifdef CONFIG_MICROKERNEL
#include <toolchain.h>
#include <sections.h>
#include <minik.h>
#include <nanok.h>
#include <microkernel.h>
#include <nanokernel.h>
#include <misc/__assert.h>
static struct k_args cmdpacket;
/*******************************************************************************
*
* _TaskAbort - abort the current task
*
* Possible reasons for a task aborting:
*
* - the task explicitly aborts itself by calling this routine
* - the task implicitly aborts by returning from its entry point
* - the task encounters a fatal exception
*
* RETURNS: N/A
*
* \NOMANUAL
*/
void _TaskAbort(void)
{
const int taskAbortCode = 1;
if (_ScbIsInThreadMode()) {
_task_ioctl(K_Task->Ident, taskAbortCode);
} else {
cmdpacket.Comm = TSKOP;
cmdpacket.Args.g1.task = K_Task->Ident;
cmdpacket.Args.g1.opt = taskAbortCode;
cmdpacket.Srce = 0;
K_Task->Args = &cmdpacket;
nano_isr_stack_push(&K_Args, (uint32_t) &cmdpacket);
_ScbPendsvSet();
}
}
#endif /* CONFIG_MICROKERNEL */