Rename _NanoKernel to _nanokernel
Updating the global variable's name to follow a consistent naming convention.
Change accomplished with the following script:

#!/bin/bash
echo "Searching for ${1} to replace with ${2}"
find ./ \( -name "*.[chs]" -o -name "sysgen.py" -o -name "*.kconf" -o -name "*.arch" \) \
    ! -path "./host/src/genIdt/*" \
    ! -path "*/outdir/*" | xargs sed -i 's/\b'${1}'\b/'${2}'/g';

Change-Id: Icf5900c057f3412d3c7725c07176fe125c374958
Signed-off-by: Yonattan Louise <yonattan.a.louise.mendoza@intel.com>
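The script takes the old and new symbol names as its two positional parameters. A hypothetical invocation for this commit (assuming the script above is saved as rename.sh and marked executable; the file name is illustrative, not part of the commit) would be:

    # hypothetical usage: ./rename.sh <old-symbol> <new-symbol>
    ./rename.sh _NanoKernel _nanokernel

The \b word boundaries in the sed expression restrict the substitution to the whole symbol, so longer identifiers that merely contain the string are left untouched.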
commit 4d19693b33
parent 7770ec2db1

29 changed files with 111 additions and 111 deletions

@@ -53,7 +53,7 @@ struct init_stack_frame {
 	uint32_t r0;
 };
 
-tNANO _NanoKernel = {0};
+tNANO _nanokernel = {0};
 
 #if defined(CONFIG_CONTEXT_MONITOR)
 #define CONTEXT_MONITOR_INIT(pCcs) context_monitor_init(pCcs)
@@ -85,8 +85,8 @@ static ALWAYS_INLINE void context_monitor_init(struct s_CCS *pCcs /* context */
  */
 
 	key = irq_lock_inline();
-	pCcs->next_context = _NanoKernel.contexts;
-	_NanoKernel.contexts = pCcs;
+	pCcs->next_context = _nanokernel.contexts;
+	_nanokernel.contexts = pCcs;
 	irq_unlock_inline(key);
 }
 #endif /* CONFIG_CONTEXT_MONITOR */

@@ -83,7 +83,7 @@ SECTION_FUNC(TEXT, _firq_enter)
  */
 
 	/* save LP_START/LP_COUNT/LP_END variables */
-	mov_s r1, _NanoKernel
+	mov_s r1, _nanokernel
 
 	/* cannot store lp_count directly to memory */
 	mov r2, lp_count
@@ -106,12 +106,12 @@ SECTION_FUNC(TEXT, _firq_enter)
 
 SECTION_FUNC(TEXT, _firq_exit)
 
-	mov_s r1, _NanoKernel
+	mov_s r1, _nanokernel
 	ld_s r2, [r1, __tNANO_current_OFFSET]
 
 #ifndef CONFIG_FIRQ_NO_LPCC
 
-	/* assumption: r1 contains _NanoKernel, r2 contains the current thread */
+	/* assumption: r1 contains _nanokernel, r2 contains the current thread */
 
 	/* restore LP_START/LP_COUNT/LP_END variables */
 
@@ -185,7 +185,7 @@ _firq_reschedule:
 
 	st ilink, [sp, __tISF_pc_OFFSET] /* ilink into pc */
 
-	mov_s r1, _NanoKernel
+	mov_s r1, _nanokernel
 	ld r2, [r1, __tNANO_current_OFFSET]
 
 	_save_callee_saved_regs

@@ -88,12 +88,12 @@ IRQ stack frame layout:
 
 The context switch code adopts this standard so that it is easier to follow:
 
-- r1 contains _NanoKernel ASAP and is not overwritten over the lifespan of
+- r1 contains _nanokernel ASAP and is not overwritten over the lifespan of
   the functions.
-- r2 contains _NanoKernel.current ASAP, and the incoming thread when we
+- r2 contains _nanokernel.current ASAP, and the incoming thread when we
   transition from outgoing context to incoming context
 
-Not loading _NanoKernel into r0 allows loading _NanoKernel without stomping on
+Not loading _nanokernel into r0 allows loading _nanokernel without stomping on
 the parameter in r0 in _Swap().
 
 

@@ -64,7 +64,7 @@ GTEXT(_rirq_exit)
 
 SECTION_FUNC(TEXT, _rirq_enter)
 
-	mov r1, _NanoKernel
+	mov r1, _nanokernel
 	ld r2, [r1, __tNANO_current_OFFSET]
 #if CONFIG_NUM_REGULAR_IRQ_PRIO_LEVELS == 1
 	st sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
@@ -84,7 +84,7 @@ SECTION_FUNC(TEXT, _rirq_enter)
 
 SECTION_FUNC(TEXT, _rirq_exit)
 
-	mov r1, _NanoKernel
+	mov r1, _nanokernel
 	ld r2, [r1, __tNANO_current_OFFSET]
 
 #if CONFIG_NUM_REGULAR_IRQ_PRIO_LEVELS > 1

@@ -49,7 +49,7 @@ See isr_wrapper.s for details.
 
 GTEXT(_Swap)
 
-GDATA(_NanoKernel)
+GDATA(_nanokernel)
 
 /*******************************************************************************
 *
@@ -87,7 +87,7 @@ SECTION_FUNC(TEXT, _Swap)
 
 	/* interrupts are locked, interrupt key is in r0 */
 
-	mov r1, _NanoKernel
+	mov r1, _nanokernel
 	ld r2, [r1, __tNANO_current_OFFSET]
 
 	/* save intlock key */

@@ -173,7 +173,7 @@ typedef struct firq_regs tFirqRegs;
 
 struct s_CCS {
 	struct s_CCS *link; /* node in singly-linked list
-			     * _NanoKernel.fibers */
+			     * _nanokernel.fibers */
 	uint32_t flags; /* bitmask of flags above */
 	uint32_t intlock_key; /* interrupt key when relinquishing control */
 	int relinquish_cause; /* one of the _CAUSE_xxxx definitions above */
@@ -210,14 +210,14 @@ struct s_NANO {
 
 	/*
 	 * FIRQ stack pointer is installed once in the second bank's SP, so
-	 * there is no need to track it in _NanoKernel.
+	 * there is no need to track it in _nanokernel.
 	 */
 
 	struct firq_regs firq_regs;
 };
 
 typedef struct s_NANO tNANO;
-extern tNANO _NanoKernel;
+extern tNANO _nanokernel;
 
 #ifdef CONFIG_CPU_ARCV2
 #include <v2/cache.h>

@@ -69,7 +69,7 @@ static ALWAYS_INLINE void _irq_setup(void)
 	nano_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL;
 	_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
 
-	_NanoKernel.rirq_sp = _interrupt_stack + CONFIG_ISR_STACK_SIZE;
+	_nanokernel.rirq_sp = _interrupt_stack + CONFIG_ISR_STACK_SIZE;
 	_firq_stack_setup();
 }
 

@@ -92,7 +92,7 @@ SECTION_FUNC(TEXT, _CpuIdleInit)
  */
 
 SECTION_FUNC(TEXT, _NanoIdleValGet)
-	ldr r0, =_NanoKernel
+	ldr r0, =_nanokernel
 	ldr r0, [r0, #__tNANO_idle_OFFSET]
 	bx lr
 
@@ -110,7 +110,7 @@ SECTION_FUNC(TEXT, _NanoIdleValGet)
  */
 
 SECTION_FUNC(TEXT, _NanoIdleValClear)
-	ldr r0, =_NanoKernel
+	ldr r0, =_nanokernel
 	eors.n r1, r1
 	str r1, [r0, #__tNANO_idle_OFFSET]
 	bx lr

@@ -49,7 +49,7 @@ _ASM_FILE_PROLOGUE
 
 GTEXT(_ExcExit)
 GTEXT(_IntExit)
-GDATA(_NanoKernel)
+GDATA(_nanokernel)
 
 #if CONFIG_GDB_INFO
 #define _EXIT_EXC_IF_FIBER_PREEMPTED beq _ExcExitWithGdbStub
@@ -104,7 +104,7 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
 
 SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
 
-	ldr r1, =_NanoKernel
+	ldr r1, =_nanokernel
 
 	/* is the current thread preemptible (task) ? */
 	ldr r2, [r1, #__tNANO_flags_OFFSET]

@@ -75,7 +75,7 @@ _ASM_FILE_PROLOGUE
 
 SECTION_FUNC(TEXT, _GdbStubExcEntry)
 
-	ldr r1, =_NanoKernel
+	ldr r1, =_nanokernel
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
 
 	/* already in an exception, do not update the registers */
@@ -119,7 +119,7 @@ SECTION_FUNC(TEXT, _GdbStubExcExit)
 	it eq
 	bxeq lr
 
-	ldr r1, =_NanoKernel
+	ldr r1, =_nanokernel
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
 
 	bic r2, #EXC_ACTIVE

@@ -85,7 +85,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 	cpsid i /* PRIMASK = 1 */
 
 	/* is this a wakeup from idle ? */
-	ldr r2, =_NanoKernel
+	ldr r2, =_nanokernel
 	ldr r0, [r2, #__tNANO_idle_OFFSET] /* requested idle duration, in ticks */
 	cmp r0, #0
 	ittt ne

@@ -68,7 +68,7 @@ extern void _nano_fiber_swap(void);
 
 void fiber_abort(void)
 {
-	_context_exit(_NanoKernel.current);
+	_context_exit(_nanokernel.current);
 	if (_ScbIsInThreadMode()) {
 		_nano_fiber_swap();
 	} else {

@@ -42,7 +42,7 @@ architecture.
 #include <nanok.h>
 #include <nanocontextentry.h>
 
-tNANO _NanoKernel = {0};
+tNANO _nanokernel = {0};
 
 #if defined(CONFIG_CONTEXT_MONITOR)
 #define CONTEXT_MONITOR_INIT(pCcs) _context_monitor_init(pCcs)
@@ -75,8 +75,8 @@ static ALWAYS_INLINE void _context_monitor_init(struct s_CCS *pCcs /* context */
  */
 
 	key = irq_lock();
-	pCcs->next_context = _NanoKernel.contexts;
-	_NanoKernel.contexts = pCcs;
+	pCcs->next_context = _nanokernel.contexts;
+	_nanokernel.contexts = pCcs;
 	irq_unlock(key);
 }
 #endif /* CONFIG_CONTEXT_MONITOR */

@@ -49,7 +49,7 @@ GTEXT(_Swap)
 GTEXT(__svc)
 GTEXT(__pendsv)
 
-GDATA(_NanoKernel)
+GDATA(_nanokernel)
 
 /*******************************************************************************
 *
@@ -64,9 +64,9 @@ GDATA(_NanoKernel)
 * to swap *something*.
 *
 * The scheduling algorithm is simple: schedule the head of the runnable FIBER
-* context list, which is represented by _NanoKernel.fiber. If there are no
+* context list, which is represented by _nanokernel.fiber. If there are no
 * runnable FIBER contexts, then schedule the TASK context represented by
-* _NanoKernel.task. The _NanoKernel.task field will never be NULL.
+* _nanokernel.task. The _nanokernel.task field will never be NULL.
 */
 
 SECTION_FUNC(TEXT, __pendsv)
@@ -74,7 +74,7 @@ SECTION_FUNC(TEXT, __pendsv)
 	_GDB_STUB_EXC_ENTRY
 
 	/* load _Nanokernel into r1 and current tCCS into r2 */
-	ldr r1, =_NanoKernel
+	ldr r1, =_nanokernel
 	ldr r2, [r1, #__tNANO_current_OFFSET]
 
 	/* addr of callee-saved regs in CCS in r0 */
@@ -213,7 +213,7 @@ SECTION_FUNC(TEXT, __svc)
 
 SECTION_FUNC(TEXT, _Swap)
 
-	ldr r1, =_NanoKernel
+	ldr r1, =_nanokernel
 	ldr r2, [r1, #__tNANO_current_OFFSET]
 	str r0, [r2, #__tCCS_basepri_OFFSET]
 

@@ -125,7 +125,7 @@ typedef struct preempt tPreempt;
 
 #ifndef _ASMLANGUAGE
 struct s_CCS {
-	struct s_CCS *link; /* singly-linked list in _NanoKernel.fibers */
+	struct s_CCS *link; /* singly-linked list in _nanokernel.fibers */
 	uint32_t flags;
 	uint32_t basepri;
 	int prio;
@@ -159,7 +159,7 @@ struct s_NANO {
 };
 
 typedef struct s_NANO tNANO;
-extern tNANO _NanoKernel;
+extern tNANO _nanokernel;
 
 #endif /* _ASMLANGUAGE */
 
@@ -168,7 +168,7 @@ extern void _FaultInit(void);
 extern void _CpuIdleInit(void);
 static ALWAYS_INLINE void nanoArchInit(void)
 {
-	_NanoKernel.flags = FIBER;
+	_nanokernel.flags = FIBER;
 	_InterruptStackSetup();
 	_ExcSetup();
 	_FaultInit();

@@ -159,7 +159,7 @@ SECTION_FUNC(TEXT, _ExcEnt)
 
 #if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
 
-	movl _NanoKernel + __tNANO_current_OFFSET, %ecx
+	movl _nanokernel + __tNANO_current_OFFSET, %ecx
 
 	incl __tCCS_excNestCount_OFFSET(%ecx) /* inc exception nest count */
 
@@ -241,7 +241,7 @@ SECTION_FUNC(TEXT, _ExcExit)
 
 #if defined(CONFIG_SUPPORT_FP_SHARING) || defined(CONFIG_GDB_INFO)
 
-	movl _NanoKernel + __tNANO_current_OFFSET, %ecx
+	movl _nanokernel + __tNANO_current_OFFSET, %ecx
 
 	/*
 	 * Must lock interrupts to prevent outside interference.

@@ -80,7 +80,7 @@ entering and exiting a C interrupt handler.
 *
 * This function is called from the interrupt stub created by irq_connect()
 * to inform the VxMicro kernel of an interrupt. This routine increments
-* _NanoKernel.nested (to support interrupt nesting), switches to the
+* _nanokernel.nested (to support interrupt nesting), switches to the
 * base of the interrupt stack, if not already on the interrupt stack, and then
 * saves the volatile integer registers onto the stack. Finally, control is
 * returned back to the interrupt stub code (which will then invoke the
@@ -176,9 +176,9 @@ SECTION_FUNC(TEXT, _IntEnt)
 #endif
 
 
-	/* load %ecx with &_NanoKernel */
+	/* load %ecx with &_nanokernel */
 
-	movl $_NanoKernel, %ecx
+	movl $_nanokernel, %ecx
 
 	/* switch to the interrupt stack for the non-nested case */
 
@@ -246,7 +246,7 @@ BRANCH_LABEL(_HandleIdle)
 *
 * This function is called from the interrupt stub created by irq_connect()
 * to inform the VxMicro kernel that the processing of an interrupt has
-* completed. This routine decrements _NanoKernel.nested (to support interrupt
+* completed. This routine decrements _nanokernel.nested (to support interrupt
 * nesting), restores the volatile integer registers, and then switches
 * back to the interrupted context's stack, if this isn't a nested interrupt.
 *
@@ -273,7 +273,7 @@ SECTION_FUNC(TEXT, _IntExit)
 
 	/* determine whether exiting from a nested interrupt */
 
-	movl $_NanoKernel, %ecx
+	movl $_nanokernel, %ecx
 	decl __tNANO_nested_OFFSET(%ecx) /* dec interrupt nest count */
 	jne nestedInterrupt /* 'iret' if nested case */
 
@@ -281,7 +281,7 @@ SECTION_FUNC(TEXT, _IntExit)
 	/*
 	 * Determine whether the execution of the ISR requires a context
 	 * switch. If the interrupted context is PREEMPTIBLE and
-	 * _NanoKernel.fiber is non-NULL, a _Swap() needs to occur.
+	 * _nanokernel.fiber is non-NULL, a _Swap() needs to occur.
 	 */
 
 	movl __tNANO_current_OFFSET (%ecx), %eax
@@ -335,7 +335,7 @@ SECTION_FUNC(TEXT, _IntExit)
 	 * since it has served its purpose.
 	 */
 
-	movl _NanoKernel + __tNANO_current_OFFSET, %eax
+	movl _nanokernel + __tNANO_current_OFFSET, %eax
 	andl $~INT_ACTIVE, __tCCS_flags_OFFSET (%eax)
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
 

@@ -51,7 +51,7 @@ processor architecture.
 
 /* the one and only nanokernel control structure */
 
-tNANO _NanoKernel = {0};
+tNANO _nanokernel = {0};
 
 /* forward declaration */
 
@@ -199,8 +199,8 @@ static void _NewContextInternal(
 	 */
 
 	imask = irq_lock();
-	ccs->next_context = _NanoKernel.contexts;
-	_NanoKernel.contexts = ccs;
+	ccs->next_context = _nanokernel.contexts;
+	_nanokernel.contexts = ccs;
 	irq_unlock(imask);
 }
 #endif /* CONFIG_CONTEXT_MONITOR */
@@ -382,7 +382,7 @@ void *_NewContext(
 
 void _NanoEssentialContextSet(void)
 {
-	_NanoKernel.current->flags |= ESSENTIAL;
+	_nanokernel.current->flags |= ESSENTIAL;
 }
 
 /*******************************************************************************
@@ -400,5 +400,5 @@ void _NanoEssentialContextSet(void)
 
 void _NanoEssentialContextClear(void)
 {
-	_NanoKernel.current->flags &= ~ESSENTIAL;
+	_nanokernel.current->flags &= ~ESSENTIAL;
 }

@@ -214,7 +214,7 @@ void _FpEnable(tCCS *ccs,
 	 * preserved).
 	 */
 
-	fp_owner = _NanoKernel.current_fp;
+	fp_owner = _nanokernel.current_fp;
 	if (fp_owner) {
 		if (fp_owner->flags & INT_OR_EXC_MASK) {
 			_FpCtxSave(fp_owner);
@@ -227,7 +227,7 @@ void _FpEnable(tCCS *ccs,
 
 	/* Associate the new FP context with the specified task/fiber */
 
-	if (ccs == _NanoKernel.current) {
+	if (ccs == _nanokernel.current) {
 		/*
 		 * When enabling FP support for self, just claim ownership of
 		 *the FPU
@@ -237,14 +237,14 @@ void _FpEnable(tCCS *ccs,
 		 *CCS.)
 		 */
 
-		_NanoKernel.current_fp = ccs;
+		_nanokernel.current_fp = ccs;
 	} else {
 		/*
 		 * When enabling FP support for someone else, assign ownership
 		 * of the FPU to them (unless we need it ourselves).
 		 */
 
-		if ((_NanoKernel.current->flags & USE_FP) != USE_FP) {
+		if ((_nanokernel.current->flags & USE_FP) != USE_FP) {
 			/*
 			 * We are not FP-capable, so mark FPU as owned by the
 			 * context
@@ -253,7 +253,7 @@ void _FpEnable(tCCS *ccs,
 			 * FP access by setting CR0[TS] to its original state.
 			 */
 
-			_NanoKernel.current_fp = ccs;
+			_nanokernel.current_fp = ccs;
 #ifdef CONFIG_AUTOMATIC_FP_ENABLING
 			_FpAccessDisable();
 #endif /* CONFIG_AUTOMATIC_FP_ENABLING */
@@ -362,15 +362,15 @@ void _FpDisable(tCCS *ccs)
 
 	ccs->flags &= ~(USE_FP | USE_SSE);
 
-	if (ccs == _NanoKernel.current) {
+	if (ccs == _nanokernel.current) {
 #ifdef CONFIG_AUTOMATIC_FP_ENABLING
 		_FpAccessDisable();
 #endif /* CONFIG_AUTOMATIC_FP_ENABLING */
 
-		_NanoKernel.current_fp = (tCCS *)0;
+		_nanokernel.current_fp = (tCCS *)0;
 	} else {
-		if (_NanoKernel.current_fp == ccs)
-			_NanoKernel.current_fp = (tCCS *)0;
+		if (_nanokernel.current_fp == ccs)
+			_nanokernel.current_fp = (tCCS *)0;
 	}
 
 	irq_unlock_inline(imask);
@@ -453,7 +453,7 @@ void _FpNotAvailableExcHandler(NANO_ESF * pEsf /* not used */
 	enableOption = USE_FP;
 #endif
 
-	_FpEnable(_NanoKernel.current, enableOption);
+	_FpEnable(_nanokernel.current, enableOption);
 }
 
 #endif /* CONFIG_AUTOMATIC_FP_ENABLING */

@@ -95,9 +95,9 @@ save frame on the stack.
 * potential security leaks.
 *
 * The scheduling algorithm is simple: schedule the head of the runnable
-* FIBER context list, which is represented by _NanoKernel.fiber. If there are
+* FIBER context list, which is represented by _nanokernel.fiber. If there are
 * no runnable FIBER contexts, then schedule the TASK context represented
-* by _NanoKernel.task. The _NanoKernel.task field will never be NULL.
+* by _nanokernel.task. The _nanokernel.task field will never be NULL.
 *
 * RETURNS: may contain a return value setup by a call to fiberRtnValueSet()
 *
@@ -108,7 +108,7 @@ save frame on the stack.
 */
 
 SECTION_FUNC(TEXT, _Swap)
-	movl $_NanoKernel, %eax
+	movl $_nanokernel, %eax
 
 	/*
 	 * Push all non-volatile registers onto the stack; do not copy
@@ -139,7 +139,7 @@ SECTION_FUNC(TEXT, _Swap)
 
 	/*
 	 * Determine what FIBER or TASK context needs to be swapped in.
-	 * Note that the %eax still contains &_NanoKernel.
+	 * Note that the %eax still contains &_nanokernel.
 	 */
 
 	movl __tNANO_fiber_OFFSET (%eax), %ecx
@@ -155,7 +155,7 @@ SECTION_FUNC(TEXT, _Swap)
 
 	/*
 	 * There are no FIBER context in the run queue, thus swap in the
-	 * TASK context specified via _NanoKernel.task. The 'task' field
+	 * TASK context specified via _nanokernel.task. The 'task' field
 	 * will _never_ be NULL.
 	 */
 
@@ -168,7 +168,7 @@ BRANCH_LABEL(swapTask)
 	/*
 	 * At this point, the %ecx register contains the 'tCCS *' of
 	 * the TASK or FIBER to be swapped in, and %eax still
-	 * contains &_NanoKernel.
+	 * contains &_nanokernel.
 	 */
 
 BRANCH_LABEL(restoreContext)
@@ -338,7 +338,7 @@ BRANCH_LABEL(CROHandlingDone)
 
 
 
-	/* update _NanoKernel.current to reflect incoming context */
+	/* update _nanokernel.current to reflect incoming context */
 
 	movl %ecx, __tNANO_current_OFFSET (%eax)
 

@@ -777,10 +777,10 @@ typedef struct s_NANO {
 
 /*
 * There is only a single instance of the s_NANO structure, given that there
-* is only a single nanokernel in the system: _NanoKernel
+* is only a single nanokernel in the system: _nanokernel
 */
 
-extern tNANO _NanoKernel;
+extern tNANO _nanokernel;
 
 /* inline function definitions */
 
@@ -805,12 +805,12 @@ static inline void nanoArchInit(void)
 	extern void *__DummyExcEnt;
 	extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE];
 
-	_NanoKernel.nested = 0;
+	_nanokernel.nested = 0;
 
 #ifdef CONFIG_NO_ISRS
-	_NanoKernel.common_isp = (char *)NULL;
+	_nanokernel.common_isp = (char *)NULL;
 #else /* notdef CONFIG_NO_ISRS */
-	_NanoKernel.common_isp = (char *)STACK_ROUND_DOWN(
+	_nanokernel.common_isp = (char *)STACK_ROUND_DOWN(
 		&_interrupt_stack[CONFIG_ISR_STACK_SIZE - 1]);
 #endif /* notdef CONFIG_NO_ISRS */
 
@@ -932,7 +932,7 @@ static inline void _IntLibInit(void)
 }
 #endif
 
-#define _IS_IN_ISR() (_NanoKernel.nested != 0)
+#define _IS_IN_ISR() (_nanokernel.nested != 0)
 
 #endif /* _ASMLANGUAGE */
 

@@ -112,7 +112,7 @@ FUNC_NORETURN void K_swapper(int parameter1, /* not used */
 	/* indicate that failure of this fiber may be fatal to the entire system
 	 */
 
-	_NanoKernel.current->flags |= ESSENTIAL;
+	_nanokernel.current->flags |= ESSENTIAL;
 
 	while (1) { /* forever */
 		pArgs = (struct k_args *)nano_fiber_stack_pop_wait(
@@ -140,7 +140,7 @@ FUNC_NORETURN void K_swapper(int parameter1, /* not used */
 			/* check if another fiber (of equal or greater priority)
 			 * needs to run */
 
-			if (_NanoKernel.fiber) {
+			if (_nanokernel.fiber) {
 				fiber_yield();
 			}
 		} while (nano_fiber_stack_pop(&_k_command_stack, (void *)&pArgs));
@@ -160,7 +160,7 @@ FUNC_NORETURN void K_swapper(int parameter1, /* not used */
 #endif
 
 		_k_current_task = pNextTask;
-		_NanoKernel.task = (tCCS *)pNextTask->workspace;
+		_nanokernel.task = (tCCS *)pNextTask->workspace;
 
 #ifdef CONFIG_TASK_MONITOR
 		if (_k_monitor_mask & MON_TSWAP) {

@@ -56,9 +56,9 @@ data structure.
 
 void nano_cpu_set_idle(int32_t ticks)
 {
-	extern tNANO _NanoKernel;
+	extern tNANO _nanokernel;
 
-	_NanoKernel.idle = ticks;
+	_nanokernel.idle = ticks;
 }
 
 #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */

@@ -63,7 +63,7 @@ static inline tCCS *_nano_wait_q_remove_no_check(struct _nano_queue *wait_q)
 	}
 	ccs->link = 0;
 
-	_insert_ccs((tCCS **)&_NanoKernel.fiber, ccs);
+	_insert_ccs((tCCS **)&_nanokernel.fiber, ccs);
 	return ccs;
 }
 
@@ -79,8 +79,8 @@ static inline tCCS *_nano_wait_q_remove(struct _nano_queue *wait_q)
 /* put current fiber on specified wait queue */
 static inline void _nano_wait_q_put(struct _nano_queue *wait_q)
 {
-	((tCCS *)wait_q->tail)->link = _NanoKernel.current;
-	wait_q->tail = _NanoKernel.current;
+	((tCCS *)wait_q->tail)->link = _nanokernel.current;
+	wait_q->tail = _nanokernel.current;
 }
 
 #endif /* _kernel_nanokernel_include_wait_q__h_ */

@@ -54,7 +54,7 @@ for the context's "program" to use as it sees fit.
 void context_custom_data_set(void *value /* new value */
 			     )
 {
-	_NanoKernel.current->custom_data = value;
+	_nanokernel.current->custom_data = value;
 }
 
 /*******************************************************************************
@@ -69,6 +69,6 @@ void context_custom_data_set(void *value /* new value */
 
 void *context_custom_data_get(void)
 {
-	return _NanoKernel.current->custom_data;
+	return _nanokernel.current->custom_data;
 }
 #endif /* CONFIG_CONTEXT_CUSTOM_DATA */

@@ -122,7 +122,7 @@ void _stack_push_non_preemptible(
 	if (ccs) {
 		stack->fiber = 0;
 		fiberRtnValueSet(ccs, data);
-		_insert_ccs((tCCS **)&_NanoKernel.fiber, ccs);
+		_insert_ccs((tCCS **)&_nanokernel.fiber, ccs);
 	} else {
 		*(stack->next) = data;
 		stack->next++;
@@ -156,7 +156,7 @@ void nano_task_stack_push(
 	if (ccs) {
 		stack->fiber = 0;
 		fiberRtnValueSet(ccs, data);
-		_insert_ccs((tCCS **)&_NanoKernel.fiber, ccs);
+		_insert_ccs((tCCS **)&_nanokernel.fiber, ccs);
 		_Swap(imask);
 		return;
 	} else {
@@ -240,7 +240,7 @@ uint32_t nano_fiber_stack_pop_wait(
 	imask = irq_lock_inline();
 
 	if (stack->next == stack->base) {
-		stack->fiber = _NanoKernel.current;
+		stack->fiber = _nanokernel.current;
		data = (uint32_t)_Swap(imask);
 	} else {
 		stack->next--;

@@ -68,12 +68,12 @@ void _context_exit(tCCS *pContext)
 	 * fibers regardless of whether they are runnable.
 	 */
 
-	if (pContext == _NanoKernel.contexts) {
-		_NanoKernel.contexts = _NanoKernel.contexts->next_context;
+	if (pContext == _nanokernel.contexts) {
+		_nanokernel.contexts = _nanokernel.contexts->next_context;
 	} else {
 		tCCS *pPrevContext;
 
-		pPrevContext = _NanoKernel.contexts;
+		pPrevContext = _nanokernel.contexts;
 		while (pContext != pPrevContext->next_context) {
 			pPrevContext = pPrevContext->next_context;
 		}
@@ -127,7 +127,7 @@ FUNC_NORETURN void _context_entry(
 	 * so if it has nothing left to do just let it idle forever
 	 */
 
-	while (((_NanoKernel.current)->flags & TASK) == TASK) {
+	while (((_nanokernel.current)->flags & TASK) == TASK) {
 		nano_cpu_idle();
 	}
 #endif /* CONFIG_NANOKERNEL */
@@ -141,7 +141,7 @@ FUNC_NORETURN void _context_entry(
 	/* Gracefully terminate the currently executing context */
 
 #ifdef CONFIG_MICROKERNEL
-	if (((_NanoKernel.current)->flags & TASK) == TASK) {
+	if (((_nanokernel.current)->flags & TASK) == TASK) {
 		extern FUNC_NORETURN void _TaskAbort(void);
 		_TaskAbort();
 	} else

@@ -91,7 +91,7 @@ void _insert_ccs(tCCS **queue, tCCS *ccs)
 
 nano_context_id_t context_self_get(void)
 {
-	return _NanoKernel.current;
+	return _nanokernel.current;
 }
 
 /*******************************************************************************
@@ -108,7 +108,7 @@ nano_context_type_t context_type_get(void)
 	if (_IS_IN_ISR())
 		return NANO_CTX_ISR;
 
-	if ((_NanoKernel.current->flags & TASK) == TASK)
+	if ((_nanokernel.current->flags & TASK) == TASK)
 		return NANO_CTX_TASK;
 
 	return NANO_CTX_FIBER;
@@ -128,7 +128,7 @@ nano_context_type_t context_type_get(void)
 int _context_essential_check(tCCS *pCtx /* pointer to context */
 			     )
 {
-	return ((pCtx == NULL) ? _NanoKernel.current : pCtx)->flags & ESSENTIAL;
+	return ((pCtx == NULL) ? _nanokernel.current : pCtx)->flags & ESSENTIAL;
 }
 
 /* currently the fiber and task implementations are identical */
@@ -149,7 +149,7 @@ FUNC_ALIAS(_fiber_start, fiber_start, void);
 * Given that this routine is _not_ ISR-callable, the following code is used
 * to differentiate between a task and fiber context:
 *
-* if ((_NanoKernel.current->flags & TASK) == TASK)
+* if ((_nanokernel.current->flags & TASK) == TASK)
 *
 * Given that the _fiber_start() primitive is not considered real-time
 * performance critical, a runtime check to differentiate between a calling
@@ -191,14 +191,14 @@ void _fiber_start(char *pStack,
 
 	/* insert thew newly crafted CCS into the fiber runnable context list */
 
-	_insert_ccs((tCCS **)&_NanoKernel.fiber, ccs);
+	_insert_ccs((tCCS **)&_nanokernel.fiber, ccs);
 
 	/*
 	 * Simply return to the caller if the current context is FIBER,
 	 * otherwise swap into the newly created fiber context
 	 */
 
-	if ((_NanoKernel.current->flags & TASK) == TASK)
+	if ((_nanokernel.current->flags & TASK) == TASK)
 		_Swap(imask);
 	else
 		irq_unlock(imask);
@@ -222,15 +222,15 @@ void fiber_yield(void)
 {
 	unsigned int imask = irq_lock_inline();
 
-	if ((_NanoKernel.fiber != (tCCS *)NULL) &&
-	    (_NanoKernel.current->prio >= _NanoKernel.fiber->prio)) {
+	if ((_nanokernel.fiber != (tCCS *)NULL) &&
+	    (_nanokernel.current->prio >= _nanokernel.fiber->prio)) {
 		/*
 		 * Reinsert current context into the list of runnable contexts,
 		 * and
 		 * then swap to the context at the head of the fiber list.
 		 */
 
-		_insert_ccs(&(_NanoKernel.fiber), _NanoKernel.current);
+		_insert_ccs(&(_nanokernel.fiber), _nanokernel.current);
 		_Swap(imask);
 	} else
 		irq_unlock_inline(imask);
@@ -290,7 +290,7 @@ FUNC_NORETURN void fiber_abort(void)
 {
 	/* Do normal context exit cleanup, then give up CPU control */
 
-	_context_exit(_NanoKernel.current);
+	_context_exit(_nanokernel.current);
 	_nano_fiber_swap();
 }
 #endif

@@ -90,9 +90,9 @@ extern void main(int argc, char *argv[], char *envp[]);
 * main () -> kernel_init () -> task_fiber_start(... K_swapper ...)
 *
 * The _nano_init() routine initializes a context for the main() routine
-* (aka background context which is a task context)), and sets _NanoKernel.task
-* to the 'tCCS *' for the new context. The _NanoKernel.current field is set to
-* the provided <dummyOutContext> tCCS, however _NanoKernel.fiber is set to
+* (aka background context which is a task context)), and sets _nanokernel.task
+* to the 'tCCS *' for the new context. The _nanokernel.current field is set to
+* the provided <dummyOutContext> tCCS, however _nanokernel.fiber is set to
 * NULL.
 *
 * Thus the subsequent invocation of _nano_fiber_swap() depicted above results
@@ -117,7 +117,7 @@ void _nano_init(tCCS *dummyOutContext, int argc, char *argv[], char *envp[])
 	 * needed to identify it as a dummy context.
 	 */
 
-	_NanoKernel.current = dummyOutContext;
+	_nanokernel.current = dummyOutContext;
 
 	dummyOutContext->link =
 		(tCCS *)NULL; /* context not inserted into list */
@@ -145,7 +145,7 @@ void _nano_init(tCCS *dummyOutContext, int argc, char *argv[], char *envp[])
 	 * 'main'.
 	 */
 
-	_NanoKernel.task =
+	_nanokernel.task =
 		_NewContext(_k_init_and_idle_task_stack, /* pStackMem */
 			    CONFIG_MAIN_STACK_SIZE, /* stackSize */
 			    (_ContextEntry)main, /* pEntry */
@@ -159,13 +159,13 @@ void _nano_init(tCCS *dummyOutContext, int argc, char *argv[], char *envp[])
 	/* indicate that failure of this task may be fatal to the entire system
 	 */
 
-	_NanoKernel.task->flags |= ESSENTIAL;
+	_nanokernel.task->flags |= ESSENTIAL;
 
 #if defined(CONFIG_MICROKERNEL)
 	/* fill in microkernel's TCB, which is the last element in _k_task_list[]
 	 */
 
-	_k_task_list[_k_task_count].workspace = (char *)_NanoKernel.task;
+	_k_task_list[_k_task_count].workspace = (char *)_nanokernel.task;
 	_k_task_list[_k_task_count].worksize = CONFIG_MAIN_STACK_SIZE;
 #endif
 
@@ -175,9 +175,9 @@ void _nano_init(tCCS *dummyOutContext, int argc, char *argv[], char *envp[])
 	 * as the currently executing fiber context.
 	 */
 
-	_NanoKernel.fiber = NULL;
+	_nanokernel.fiber = NULL;
 #ifdef CONFIG_FP_SHARING
-	_NanoKernel.current_fp = NULL;
+	_nanokernel.current_fp = NULL;
 #endif /* CONFIG_FP_SHARING */
 
 	nanoArchInit();