Rename _NanoKernel to _nanokernel
Update the global variable's name to follow a consistent naming
convention. The change was made with the following script:

    #!/bin/bash
    echo "Searching for ${1} to replace with ${2}"
    find ./ \( -name "*.[chs]" -o -name "sysgen.py" -o -name "*.kconf" -o -name "*.arch" \) \
        ! -path "./host/src/genIdt/*" \
        ! -path "*/outdir/*" | xargs sed -i 's/\b'${1}'\b/'${2}'/g';

Change-Id: Icf5900c057f3412d3c7725c07176fe125c374958
Signed-off-by: Yonattan Louise <yonattan.a.louise.mendoza@intel.com>
parent 7770ec2db1
commit 4d19693b33
29 changed files with 111 additions and 111 deletions
@@ -53,7 +53,7 @@ struct init_stack_frame {
 	uint32_t r0;
 };
 
-tNANO _NanoKernel = {0};
+tNANO _nanokernel = {0};
 
 #if defined(CONFIG_CONTEXT_MONITOR)
 #define CONTEXT_MONITOR_INIT(pCcs) context_monitor_init(pCcs)
@@ -85,8 +85,8 @@ static ALWAYS_INLINE void context_monitor_init(struct s_CCS *pCcs /* context */
 	 */
 
 	key = irq_lock_inline();
-	pCcs->next_context = _NanoKernel.contexts;
-	_NanoKernel.contexts = pCcs;
+	pCcs->next_context = _nanokernel.contexts;
+	_nanokernel.contexts = pCcs;
 	irq_unlock_inline(key);
 }
 #endif /* CONFIG_CONTEXT_MONITOR */
@@ -83,7 +83,7 @@ SECTION_FUNC(TEXT, _firq_enter)
 	 */
 
 	/* save LP_START/LP_COUNT/LP_END variables */
-	mov_s r1, _NanoKernel
+	mov_s r1, _nanokernel
 
 	/* cannot store lp_count directly to memory */
 	mov r2, lp_count
@@ -106,12 +106,12 @@ SECTION_FUNC(TEXT, _firq_enter)
 
 SECTION_FUNC(TEXT, _firq_exit)
 
-	mov_s r1, _NanoKernel
+	mov_s r1, _nanokernel
 	ld_s r2, [r1, __tNANO_current_OFFSET]
 
 #ifndef CONFIG_FIRQ_NO_LPCC
 
-	/* assumption: r1 contains _NanoKernel, r2 contains the current thread */
+	/* assumption: r1 contains _nanokernel, r2 contains the current thread */
 
 	/* restore LP_START/LP_COUNT/LP_END variables */
 
@@ -185,7 +185,7 @@ _firq_reschedule:
 
 	st ilink, [sp, __tISF_pc_OFFSET] /* ilink into pc */
 
-	mov_s r1, _NanoKernel
+	mov_s r1, _nanokernel
 	ld r2, [r1, __tNANO_current_OFFSET]
 
 	_save_callee_saved_regs
@@ -88,12 +88,12 @@ IRQ stack frame layout:
 
 The context switch code adopts this standard so that it is easier to follow:
 
-  - r1 contains _NanoKernel ASAP and is not overwritten over the lifespan of
+  - r1 contains _nanokernel ASAP and is not overwritten over the lifespan of
     the functions.
-  - r2 contains _NanoKernel.current ASAP, and the incoming thread when we
+  - r2 contains _nanokernel.current ASAP, and the incoming thread when we
     transition from outgoing context to incoming context
 
-Not loading _NanoKernel into r0 allows loading _NanoKernel without stomping on
+Not loading _nanokernel into r0 allows loading _nanokernel without stomping on
 the parameter in r0 in _Swap().
 
 
@@ -64,7 +64,7 @@ GTEXT(_rirq_exit)
 
 SECTION_FUNC(TEXT, _rirq_enter)
 
-	mov r1, _NanoKernel
+	mov r1, _nanokernel
 	ld r2, [r1, __tNANO_current_OFFSET]
 #if CONFIG_NUM_REGULAR_IRQ_PRIO_LEVELS == 1
 	st sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
@@ -84,7 +84,7 @@ SECTION_FUNC(TEXT, _rirq_enter)
 
 SECTION_FUNC(TEXT, _rirq_exit)
 
-	mov r1, _NanoKernel
+	mov r1, _nanokernel
 	ld r2, [r1, __tNANO_current_OFFSET]
 
 #if CONFIG_NUM_REGULAR_IRQ_PRIO_LEVELS > 1
@@ -49,7 +49,7 @@ See isr_wrapper.s for details.
 
 GTEXT(_Swap)
 
-GDATA(_NanoKernel)
+GDATA(_nanokernel)
 
 /*******************************************************************************
 *
@@ -87,7 +87,7 @@ SECTION_FUNC(TEXT, _Swap)
 
 	/* interrupts are locked, interrupt key is in r0 */
 
-	mov r1, _NanoKernel
+	mov r1, _nanokernel
 	ld r2, [r1, __tNANO_current_OFFSET]
 
 	/* save intlock key */
@@ -173,7 +173,7 @@ typedef struct firq_regs tFirqRegs;
 
 struct s_CCS {
 	struct s_CCS *link;      /* node in singly-linked list
-	                          * _NanoKernel.fibers */
+	                          * _nanokernel.fibers */
 	uint32_t flags;          /* bitmask of flags above */
 	uint32_t intlock_key;    /* interrupt key when relinquishing control */
 	int relinquish_cause;    /* one of the _CAUSE_xxxx definitions above */
@@ -210,14 +210,14 @@ struct s_NANO {
 
 	/*
 	 * FIRQ stack pointer is installed once in the second bank's SP, so
-	 * there is no need to track it in _NanoKernel.
+	 * there is no need to track it in _nanokernel.
 	 */
 
 	struct firq_regs firq_regs;
 };
 
 typedef struct s_NANO tNANO;
-extern tNANO _NanoKernel;
+extern tNANO _nanokernel;
 
 #ifdef CONFIG_CPU_ARCV2
 #include <v2/cache.h>
@@ -69,7 +69,7 @@ static ALWAYS_INLINE void _irq_setup(void)
 	nano_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL;
 	_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
 
-	_NanoKernel.rirq_sp = _interrupt_stack + CONFIG_ISR_STACK_SIZE;
+	_nanokernel.rirq_sp = _interrupt_stack + CONFIG_ISR_STACK_SIZE;
 	_firq_stack_setup();
 }
 
@@ -92,7 +92,7 @@ SECTION_FUNC(TEXT, _CpuIdleInit)
 	 */
 
 SECTION_FUNC(TEXT, _NanoIdleValGet)
-	ldr r0, =_NanoKernel
+	ldr r0, =_nanokernel
 	ldr r0, [r0, #__tNANO_idle_OFFSET]
 	bx lr
 
@@ -110,7 +110,7 @@ SECTION_FUNC(TEXT, _NanoIdleValGet)
 	 */
 
 SECTION_FUNC(TEXT, _NanoIdleValClear)
-	ldr r0, =_NanoKernel
+	ldr r0, =_nanokernel
 	eors.n r1, r1
 	str r1, [r0, #__tNANO_idle_OFFSET]
 	bx lr
@@ -49,7 +49,7 @@ _ASM_FILE_PROLOGUE
 
 GTEXT(_ExcExit)
 GTEXT(_IntExit)
-GDATA(_NanoKernel)
+GDATA(_nanokernel)
 
 #if CONFIG_GDB_INFO
 #define _EXIT_EXC_IF_FIBER_PREEMPTED beq _ExcExitWithGdbStub
@@ -104,7 +104,7 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
 
 SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
 
-	ldr r1, =_NanoKernel
+	ldr r1, =_nanokernel
 
 	/* is the current thread preemptible (task) ? */
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
@@ -75,7 +75,7 @@ _ASM_FILE_PROLOGUE
 
 SECTION_FUNC(TEXT, _GdbStubExcEntry)
 
-	ldr r1, =_NanoKernel
+	ldr r1, =_nanokernel
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
 
 	/* already in an exception, do not update the registers */
@@ -119,7 +119,7 @@ SECTION_FUNC(TEXT, _GdbStubExcExit)
 	it eq
 	bxeq lr
 
-	ldr r1, =_NanoKernel
+	ldr r1, =_nanokernel
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
 
 	bic r2, #EXC_ACTIVE
@@ -85,7 +85,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 	cpsid i /* PRIMASK = 1 */
 
 	/* is this a wakeup from idle ? */
-	ldr r2, =_NanoKernel
+	ldr r2, =_nanokernel
 	ldr r0, [r2, #__tNANO_idle_OFFSET] /* requested idle duration, in ticks */
 	cmp r0, #0
 	ittt ne
@@ -68,7 +68,7 @@ extern void _nano_fiber_swap(void);
 
 void fiber_abort(void)
 {
-	_context_exit(_NanoKernel.current);
+	_context_exit(_nanokernel.current);
 	if (_ScbIsInThreadMode()) {
 		_nano_fiber_swap();
 	} else {
@@ -42,7 +42,7 @@ architecture.
 #include <nanok.h>
 #include <nanocontextentry.h>
 
-tNANO _NanoKernel = {0};
+tNANO _nanokernel = {0};
 
 #if defined(CONFIG_CONTEXT_MONITOR)
 #define CONTEXT_MONITOR_INIT(pCcs) _context_monitor_init(pCcs)
@@ -75,8 +75,8 @@ static ALWAYS_INLINE void _context_monitor_init(struct s_CCS *pCcs /* context */
 	 */
 
 	key = irq_lock();
-	pCcs->next_context = _NanoKernel.contexts;
-	_NanoKernel.contexts = pCcs;
+	pCcs->next_context = _nanokernel.contexts;
+	_nanokernel.contexts = pCcs;
 	irq_unlock(key);
 }
 #endif /* CONFIG_CONTEXT_MONITOR */
@@ -49,7 +49,7 @@ GTEXT(_Swap)
 GTEXT(__svc)
 GTEXT(__pendsv)
 
-GDATA(_NanoKernel)
+GDATA(_nanokernel)
 
 /*******************************************************************************
 *
@@ -64,9 +64,9 @@ GDATA(_NanoKernel)
 * to swap *something*.
 *
 * The scheduling algorithm is simple: schedule the head of the runnable FIBER
-* context list, which is represented by _NanoKernel.fiber. If there are no
+* context list, which is represented by _nanokernel.fiber. If there are no
 * runnable FIBER contexts, then schedule the TASK context represented by
-* _NanoKernel.task. The _NanoKernel.task field will never be NULL.
+* _nanokernel.task. The _nanokernel.task field will never be NULL.
 */
 
 SECTION_FUNC(TEXT, __pendsv)
@@ -74,7 +74,7 @@ SECTION_FUNC(TEXT, __pendsv)
 	_GDB_STUB_EXC_ENTRY
 
 	/* load _Nanokernel into r1 and current tCCS into r2 */
-	ldr r1, =_NanoKernel
+	ldr r1, =_nanokernel
 	ldr r2, [r1, #__tNANO_current_OFFSET]
 
 	/* addr of callee-saved regs in CCS in r0 */
@@ -213,7 +213,7 @@ SECTION_FUNC(TEXT, __svc)
 
 SECTION_FUNC(TEXT, _Swap)
 
-	ldr r1, =_NanoKernel
+	ldr r1, =_nanokernel
 	ldr r2, [r1, #__tNANO_current_OFFSET]
 	str r0, [r2, #__tCCS_basepri_OFFSET]
 
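The scheduling rule documented in the __pendsv comment above is easier to see outside of assembly. Here is a minimal C sketch of the same pick logic; the field names come from this diff, but pick_next() itself is hypothetical, and the real implementation is the ARM assembly being patched here:

    /* Hypothetical C rendering of the __pendsv scheduling rule above;
     * assumes the kernel's tNANO/tCCS declarations are in scope. */
    static tCCS *pick_next(tNANO *nano)
    {
            tCCS *next = nano->fiber;         /* head of runnable fiber list */

            if (next != NULL) {
                    nano->fiber = next->link; /* dequeue the scheduled fiber */
            } else {
                    next = nano->task;        /* task slot is never NULL */
            }
            return next;
    }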
@@ -125,7 +125,7 @@ typedef struct preempt tPreempt;
 
 #ifndef _ASMLANGUAGE
 struct s_CCS {
-	struct s_CCS *link; /* singly-linked list in _NanoKernel.fibers */
+	struct s_CCS *link; /* singly-linked list in _nanokernel.fibers */
 	uint32_t flags;
 	uint32_t basepri;
 	int prio;
@@ -159,7 +159,7 @@ struct s_NANO {
 };
 
 typedef struct s_NANO tNANO;
-extern tNANO _NanoKernel;
+extern tNANO _nanokernel;
 
 #endif /* _ASMLANGUAGE */
 
@@ -168,7 +168,7 @@ extern void _FaultInit(void);
 extern void _CpuIdleInit(void);
 static ALWAYS_INLINE void nanoArchInit(void)
 {
-	_NanoKernel.flags = FIBER;
+	_nanokernel.flags = FIBER;
 	_InterruptStackSetup();
 	_ExcSetup();
 	_FaultInit();
@@ -159,7 +159,7 @@ SECTION_FUNC(TEXT, _ExcEnt)
 
 #if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
 
-	movl _NanoKernel + __tNANO_current_OFFSET, %ecx
+	movl _nanokernel + __tNANO_current_OFFSET, %ecx
 
 	incl __tCCS_excNestCount_OFFSET(%ecx) /* inc exception nest count */
 
@@ -241,7 +241,7 @@ SECTION_FUNC(TEXT, _ExcExit)
 
 #if defined(CONFIG_SUPPORT_FP_SHARING) || defined(CONFIG_GDB_INFO)
 
-	movl _NanoKernel + __tNANO_current_OFFSET, %ecx
+	movl _nanokernel + __tNANO_current_OFFSET, %ecx
 
 	/*
 	 * Must lock interrupts to prevent outside interference.
@@ -80,7 +80,7 @@ entering and exiting a C interrupt handler.
 *
 * This function is called from the interrupt stub created by irq_connect()
 * to inform the VxMicro kernel of an interrupt. This routine increments
-* _NanoKernel.nested (to support interrupt nesting), switches to the
+* _nanokernel.nested (to support interrupt nesting), switches to the
 * base of the interrupt stack, if not already on the interrupt stack, and then
 * saves the volatile integer registers onto the stack. Finally, control is
 * returned back to the interrupt stub code (which will then invoke the
@@ -176,9 +176,9 @@ SECTION_FUNC(TEXT, _IntEnt)
 #endif
 
 
-	/* load %ecx with &_NanoKernel */
+	/* load %ecx with &_nanokernel */
 
-	movl $_NanoKernel, %ecx
+	movl $_nanokernel, %ecx
 
 	/* switch to the interrupt stack for the non-nested case */
 
@@ -246,7 +246,7 @@ BRANCH_LABEL(_HandleIdle)
 *
 * This function is called from the interrupt stub created by irq_connect()
 * to inform the VxMicro kernel that the processing of an interrupt has
-* completed. This routine decrements _NanoKernel.nested (to support interrupt
+* completed. This routine decrements _nanokernel.nested (to support interrupt
 * nesting), restores the volatile integer registers, and then switches
 * back to the interrupted context's stack, if this isn't a nested interrupt.
 *
@@ -273,7 +273,7 @@ SECTION_FUNC(TEXT, _IntExit)
 
 	/* determine whether exiting from a nested interrupt */
 
-	movl $_NanoKernel, %ecx
+	movl $_nanokernel, %ecx
 	decl __tNANO_nested_OFFSET(%ecx) /* dec interrupt nest count */
 	jne nestedInterrupt /* 'iret' if nested case */
 
@@ -281,7 +281,7 @@ SECTION_FUNC(TEXT, _IntExit)
 	/*
 	 * Determine whether the execution of the ISR requires a context
	 * switch. If the interrupted context is PREEMPTIBLE and
-	 * _NanoKernel.fiber is non-NULL, a _Swap() needs to occur.
+	 * _nanokernel.fiber is non-NULL, a _Swap() needs to occur.
 	 */
 
 	movl __tNANO_current_OFFSET (%ecx), %eax
@@ -335,7 +335,7 @@ SECTION_FUNC(TEXT, _IntExit)
 	 * since it has served its purpose.
 	 */
 
-	movl _NanoKernel + __tNANO_current_OFFSET, %eax
+	movl _nanokernel + __tNANO_current_OFFSET, %eax
 	andl $~INT_ACTIVE, __tCCS_flags_OFFSET (%eax)
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
 
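The _IntEnt/_IntExit documentation above boils down to a nesting-counter protocol around _nanokernel.nested. A compact C sketch of that protocol follows, assuming the kernel's declarations are in scope; switch_to_interrupt_stack() is a stand-in for the stack juggling done in assembly, and PREEMPTIBLE stands for the flag test the comment describes:

    /* Sketch of the interrupt nesting protocol described above; helper
     * names are stand-ins for the x86 assembly in this file. */
    void int_enter_sketch(void)
    {
            if (++_nanokernel.nested == 1) {
                    switch_to_interrupt_stack(); /* only on first entry */
            }
    }

    void int_exit_sketch(int imask)
    {
            if (--_nanokernel.nested == 0 &&
                (_nanokernel.current->flags & PREEMPTIBLE) &&
                _nanokernel.fiber != NULL) {
                    _Swap(imask); /* reschedule on final, preemptible exit */
            }
    }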
@@ -51,7 +51,7 @@ processor architecture.
 
 /* the one and only nanokernel control structure */
 
-tNANO _NanoKernel = {0};
+tNANO _nanokernel = {0};
 
 /* forward declaration */
 
@@ -199,8 +199,8 @@ static void _NewContextInternal(
 	 */
 
 	imask = irq_lock();
-	ccs->next_context = _NanoKernel.contexts;
-	_NanoKernel.contexts = ccs;
+	ccs->next_context = _nanokernel.contexts;
+	_nanokernel.contexts = ccs;
 	irq_unlock(imask);
 }
 #endif /* CONFIG_CONTEXT_MONITOR */
@@ -382,7 +382,7 @@ void *_NewContext(
 
 void _NanoEssentialContextSet(void)
 {
-	_NanoKernel.current->flags |= ESSENTIAL;
+	_nanokernel.current->flags |= ESSENTIAL;
 }
 
 /*******************************************************************************
@@ -400,5 +400,5 @@ void _NanoEssentialContextSet(void)
 
 void _NanoEssentialContextClear(void)
 {
-	_NanoKernel.current->flags &= ~ESSENTIAL;
+	_nanokernel.current->flags &= ~ESSENTIAL;
 }
@@ -214,7 +214,7 @@ void _FpEnable(tCCS *ccs,
 	 * preserved).
 	 */
 
-	fp_owner = _NanoKernel.current_fp;
+	fp_owner = _nanokernel.current_fp;
 	if (fp_owner) {
 		if (fp_owner->flags & INT_OR_EXC_MASK) {
 			_FpCtxSave(fp_owner);
@@ -227,7 +227,7 @@ void _FpEnable(tCCS *ccs,
 
 	/* Associate the new FP context with the specified task/fiber */
 
-	if (ccs == _NanoKernel.current) {
+	if (ccs == _nanokernel.current) {
 		/*
 		 * When enabling FP support for self, just claim ownership of
		 *the FPU
@@ -237,14 +237,14 @@ void _FpEnable(tCCS *ccs,
 		 *CCS.)
 		 */
 
-		_NanoKernel.current_fp = ccs;
+		_nanokernel.current_fp = ccs;
 	} else {
 		/*
 		 * When enabling FP support for someone else, assign ownership
		 * of the FPU to them (unless we need it ourselves).
 		 */
 
-		if ((_NanoKernel.current->flags & USE_FP) != USE_FP) {
+		if ((_nanokernel.current->flags & USE_FP) != USE_FP) {
 			/*
 			 * We are not FP-capable, so mark FPU as owned by the
			 * context
@@ -253,7 +253,7 @@ void _FpEnable(tCCS *ccs,
 			 * FP access by setting CR0[TS] to its original state.
 			 */
 
-			_NanoKernel.current_fp = ccs;
+			_nanokernel.current_fp = ccs;
 #ifdef CONFIG_AUTOMATIC_FP_ENABLING
 			_FpAccessDisable();
 #endif /* CONFIG_AUTOMATIC_FP_ENABLING */
@@ -362,15 +362,15 @@ void _FpDisable(tCCS *ccs)
 
 	ccs->flags &= ~(USE_FP | USE_SSE);
 
-	if (ccs == _NanoKernel.current) {
+	if (ccs == _nanokernel.current) {
 #ifdef CONFIG_AUTOMATIC_FP_ENABLING
 		_FpAccessDisable();
 #endif /* CONFIG_AUTOMATIC_FP_ENABLING */
 
-		_NanoKernel.current_fp = (tCCS *)0;
+		_nanokernel.current_fp = (tCCS *)0;
 	} else {
-		if (_NanoKernel.current_fp == ccs)
-			_NanoKernel.current_fp = (tCCS *)0;
+		if (_nanokernel.current_fp == ccs)
+			_nanokernel.current_fp = (tCCS *)0;
 	}
 
 	irq_unlock_inline(imask);
@@ -453,7 +453,7 @@ void _FpNotAvailableExcHandler(NANO_ESF * pEsf /* not used */
 	enableOption = USE_FP;
 #endif
 
-	_FpEnable(_NanoKernel.current, enableOption);
+	_FpEnable(_nanokernel.current, enableOption);
 }
 
 #endif /* CONFIG_AUTOMATIC_FP_ENABLING */
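Taken together, these hunks implement lazy FPU ownership: _nanokernel.current_fp names the context whose registers are currently live in the FPU, and under CONFIG_AUTOMATIC_FP_ENABLING the CR0[TS] device-not-available fault hands the FPU to whoever touches it first. A condensed sketch of that flow, paraphrased from the code above rather than quoted from it:

    /* Lazy FPU hand-off, paraphrased from _FpNotAvailableExcHandler and
     * _FpEnable above; not a verbatim excerpt. */
    void fp_fault_sketch(void)
    {
            /* an FP instruction trapped because CR0[TS] was set */
            _FpEnable(_nanokernel.current, USE_FP);
            /* _FpEnable saves the previous owner's registers if they are
             * still live, then records the new owner:
             *   _nanokernel.current_fp = _nanokernel.current; */
    }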
@@ -95,9 +95,9 @@ save frame on the stack.
 * potential security leaks.
 *
 * The scheduling algorithm is simple: schedule the head of the runnable
-* FIBER context list, which is represented by _NanoKernel.fiber. If there are
+* FIBER context list, which is represented by _nanokernel.fiber. If there are
 * no runnable FIBER contexts, then schedule the TASK context represented
-* by _NanoKernel.task. The _NanoKernel.task field will never be NULL.
+* by _nanokernel.task. The _nanokernel.task field will never be NULL.
 *
 * RETURNS: may contain a return value setup by a call to fiberRtnValueSet()
 *
@@ -108,7 +108,7 @@ save frame on the stack.
 */
 
 SECTION_FUNC(TEXT, _Swap)
-	movl $_NanoKernel, %eax
+	movl $_nanokernel, %eax
 
 	/*
 	 * Push all non-volatile registers onto the stack; do not copy
@@ -139,7 +139,7 @@ SECTION_FUNC(TEXT, _Swap)
 
 	/*
 	 * Determine what FIBER or TASK context needs to be swapped in.
-	 * Note that the %eax still contains &_NanoKernel.
+	 * Note that the %eax still contains &_nanokernel.
 	 */
 
 	movl __tNANO_fiber_OFFSET (%eax), %ecx
@@ -155,7 +155,7 @@ SECTION_FUNC(TEXT, _Swap)
 
 	/*
 	 * There are no FIBER context in the run queue, thus swap in the
-	 * TASK context specified via _NanoKernel.task. The 'task' field
+	 * TASK context specified via _nanokernel.task. The 'task' field
 	 * will _never_ be NULL.
 	 */
 
@@ -168,7 +168,7 @@ BRANCH_LABEL(swapTask)
 	/*
 	 * At this point, the %ecx register contains the 'tCCS *' of
	 * the TASK or FIBER to be swapped in, and %eax still
-	 * contains &_NanoKernel.
+	 * contains &_nanokernel.
 	 */
 
 BRANCH_LABEL(restoreContext)
@@ -338,7 +338,7 @@ BRANCH_LABEL(CROHandlingDone)
 
 
 
-	/* update _NanoKernel.current to reflect incoming context */
+	/* update _nanokernel.current to reflect incoming context */
 
 	movl %ecx, __tNANO_current_OFFSET (%eax)
 
@@ -777,10 +777,10 @@ typedef struct s_NANO {
 
 /*
 * There is only a single instance of the s_NANO structure, given that there
-* is only a single nanokernel in the system: _NanoKernel
+* is only a single nanokernel in the system: _nanokernel
 */
 
-extern tNANO _NanoKernel;
+extern tNANO _nanokernel;
 
 /* inline function definitions */
 
@@ -805,12 +805,12 @@ static inline void nanoArchInit(void)
 	extern void *__DummyExcEnt;
 	extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE];
 
-	_NanoKernel.nested = 0;
+	_nanokernel.nested = 0;
 
 #ifdef CONFIG_NO_ISRS
-	_NanoKernel.common_isp = (char *)NULL;
+	_nanokernel.common_isp = (char *)NULL;
 #else /* notdef CONFIG_NO_ISRS */
-	_NanoKernel.common_isp = (char *)STACK_ROUND_DOWN(
+	_nanokernel.common_isp = (char *)STACK_ROUND_DOWN(
 		&_interrupt_stack[CONFIG_ISR_STACK_SIZE - 1]);
 #endif /* notdef CONFIG_NO_ISRS */
 
@@ -932,7 +932,7 @@ static inline void _IntLibInit(void)
 }
 #endif
 
-#define _IS_IN_ISR() (_NanoKernel.nested != 0)
+#define _IS_IN_ISR() (_nanokernel.nested != 0)
 
 #endif /* _ASMLANGUAGE */
 
@@ -112,7 +112,7 @@ FUNC_NORETURN void K_swapper(int parameter1, /* not used */
 	/* indicate that failure of this fiber may be fatal to the entire system
	 */
 
-	_NanoKernel.current->flags |= ESSENTIAL;
+	_nanokernel.current->flags |= ESSENTIAL;
 
 	while (1) { /* forever */
 		pArgs = (struct k_args *)nano_fiber_stack_pop_wait(
@@ -140,7 +140,7 @@ FUNC_NORETURN void K_swapper(int parameter1, /* not used */
 			/* check if another fiber (of equal or greater priority)
			 * needs to run */
 
-			if (_NanoKernel.fiber) {
+			if (_nanokernel.fiber) {
 				fiber_yield();
 			}
 		} while (nano_fiber_stack_pop(&_k_command_stack, (void *)&pArgs));
@@ -160,7 +160,7 @@ FUNC_NORETURN void K_swapper(int parameter1, /* not used */
 #endif
 
 		_k_current_task = pNextTask;
-		_NanoKernel.task = (tCCS *)pNextTask->workspace;
+		_nanokernel.task = (tCCS *)pNextTask->workspace;
 
 #ifdef CONFIG_TASK_MONITOR
 		if (_k_monitor_mask & MON_TSWAP) {
@@ -56,9 +56,9 @@ data structure.
 
 void nano_cpu_set_idle(int32_t ticks)
 {
-	extern tNANO _NanoKernel;
+	extern tNANO _nanokernel;
 
-	_NanoKernel.idle = ticks;
+	_nanokernel.idle = ticks;
 }
 
 #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
@@ -63,7 +63,7 @@ static inline tCCS *_nano_wait_q_remove_no_check(struct _nano_queue *wait_q)
 	}
 	ccs->link = 0;
 
-	_insert_ccs((tCCS **)&_NanoKernel.fiber, ccs);
+	_insert_ccs((tCCS **)&_nanokernel.fiber, ccs);
 	return ccs;
 }
 
@@ -79,8 +79,8 @@ static inline tCCS *_nano_wait_q_remove(struct _nano_queue *wait_q)
 /* put current fiber on specified wait queue */
 static inline void _nano_wait_q_put(struct _nano_queue *wait_q)
 {
-	((tCCS *)wait_q->tail)->link = _NanoKernel.current;
-	wait_q->tail = _NanoKernel.current;
+	((tCCS *)wait_q->tail)->link = _nanokernel.current;
+	wait_q->tail = _nanokernel.current;
 }
 
 #endif /* _kernel_nanokernel_include_wait_q__h_ */
@@ -54,7 +54,7 @@ for the context's "program" to use as it sees fit.
 void context_custom_data_set(void *value /* new value */
			     )
 {
-	_NanoKernel.current->custom_data = value;
+	_nanokernel.current->custom_data = value;
 }
 
 /*******************************************************************************
@@ -69,6 +69,6 @@ void context_custom_data_set(void *value /* new value */
 
 void *context_custom_data_get(void)
 {
-	return _NanoKernel.current->custom_data;
+	return _nanokernel.current->custom_data;
 }
 #endif /* CONFIG_CONTEXT_CUSTOM_DATA */
@@ -122,7 +122,7 @@ void _stack_push_non_preemptible(
 	if (ccs) {
 		stack->fiber = 0;
 		fiberRtnValueSet(ccs, data);
-		_insert_ccs((tCCS **)&_NanoKernel.fiber, ccs);
+		_insert_ccs((tCCS **)&_nanokernel.fiber, ccs);
 	} else {
 		*(stack->next) = data;
 		stack->next++;
@@ -156,7 +156,7 @@ void nano_task_stack_push(
 	if (ccs) {
 		stack->fiber = 0;
 		fiberRtnValueSet(ccs, data);
-		_insert_ccs((tCCS **)&_NanoKernel.fiber, ccs);
+		_insert_ccs((tCCS **)&_nanokernel.fiber, ccs);
 		_Swap(imask);
 		return;
 	} else {
@@ -240,7 +240,7 @@ uint32_t nano_fiber_stack_pop_wait(
 	imask = irq_lock_inline();
 
 	if (stack->next == stack->base) {
-		stack->fiber = _NanoKernel.current;
+		stack->fiber = _nanokernel.current;
 		data = (uint32_t)_Swap(imask);
 	} else {
 		stack->next--;
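The push and pop halves above form a handshake: a pop on an empty stack parks the calling fiber in stack->fiber and swaps away, and the next push finds it there, delivers the word through fiberRtnValueSet(), and re-queues the fiber. A schematic of that pairing, assuming the kernel's declarations are in scope (struct nano_stack is a stand-in name for the actual stack type):

    /* Schematic of the empty-stack handshake, paraphrased from the
     * hunks above; not a verbatim excerpt. */
    static uint32_t pop_when_empty(struct nano_stack *stack, int imask)
    {
            stack->fiber = _nanokernel.current; /* park the pending fiber */
            return (uint32_t)_Swap(imask);      /* woken by a later push */
    }

    static void push_wakes_pender(struct nano_stack *stack, uint32_t data)
    {
            tCCS *ccs = stack->fiber;

            if (ccs) {
                    stack->fiber = 0;
                    fiberRtnValueSet(ccs, data); /* pender's _Swap() result */
                    _insert_ccs((tCCS **)&_nanokernel.fiber, ccs);
            }
    }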
@@ -68,12 +68,12 @@ void _context_exit(tCCS *pContext)
 	 * fibers regardless of whether they are runnable.
 	 */
 
-	if (pContext == _NanoKernel.contexts) {
-		_NanoKernel.contexts = _NanoKernel.contexts->next_context;
+	if (pContext == _nanokernel.contexts) {
+		_nanokernel.contexts = _nanokernel.contexts->next_context;
 	} else {
 		tCCS *pPrevContext;
 
-		pPrevContext = _NanoKernel.contexts;
+		pPrevContext = _nanokernel.contexts;
 		while (pContext != pPrevContext->next_context) {
 			pPrevContext = pPrevContext->next_context;
 		}
@@ -127,7 +127,7 @@ FUNC_NORETURN void _context_entry(
 	 * so if it has nothing left to do just let it idle forever
 	 */
 
-	while (((_NanoKernel.current)->flags & TASK) == TASK) {
+	while (((_nanokernel.current)->flags & TASK) == TASK) {
 		nano_cpu_idle();
 	}
 #endif /* CONFIG_NANOKERNEL */
@@ -141,7 +141,7 @@ FUNC_NORETURN void _context_entry(
 	/* Gracefully terminate the currently executing context */
 
 #ifdef CONFIG_MICROKERNEL
-	if (((_NanoKernel.current)->flags & TASK) == TASK) {
+	if (((_nanokernel.current)->flags & TASK) == TASK) {
 		extern FUNC_NORETURN void _TaskAbort(void);
 		_TaskAbort();
 	} else
@@ -91,7 +91,7 @@ void _insert_ccs(tCCS **queue, tCCS *ccs)
 
 nano_context_id_t context_self_get(void)
 {
-	return _NanoKernel.current;
+	return _nanokernel.current;
 }
 
 /*******************************************************************************
@@ -108,7 +108,7 @@ nano_context_type_t context_type_get(void)
 	if (_IS_IN_ISR())
 		return NANO_CTX_ISR;
 
-	if ((_NanoKernel.current->flags & TASK) == TASK)
+	if ((_nanokernel.current->flags & TASK) == TASK)
 		return NANO_CTX_TASK;
 
 	return NANO_CTX_FIBER;
@@ -128,7 +128,7 @@ nano_context_type_t context_type_get(void)
 int _context_essential_check(tCCS *pCtx /* pointer to context */
			     )
 {
-	return ((pCtx == NULL) ? _NanoKernel.current : pCtx)->flags & ESSENTIAL;
+	return ((pCtx == NULL) ? _nanokernel.current : pCtx)->flags & ESSENTIAL;
 }
 
 /* currently the fiber and task implementations are identical */
@@ -149,7 +149,7 @@ FUNC_ALIAS(_fiber_start, fiber_start, void);
 * Given that this routine is _not_ ISR-callable, the following code is used
 * to differentiate between a task and fiber context:
 *
-* if ((_NanoKernel.current->flags & TASK) == TASK)
+* if ((_nanokernel.current->flags & TASK) == TASK)
 *
 * Given that the _fiber_start() primitive is not considered real-time
 * performance critical, a runtime check to differentiate between a calling
@@ -191,14 +191,14 @@ void _fiber_start(char *pStack,
 
 	/* insert thew newly crafted CCS into the fiber runnable context list */
 
-	_insert_ccs((tCCS **)&_NanoKernel.fiber, ccs);
+	_insert_ccs((tCCS **)&_nanokernel.fiber, ccs);
 
 	/*
 	 * Simply return to the caller if the current context is FIBER,
	 * otherwise swap into the newly created fiber context
 	 */
 
-	if ((_NanoKernel.current->flags & TASK) == TASK)
+	if ((_nanokernel.current->flags & TASK) == TASK)
 		_Swap(imask);
 	else
 		irq_unlock(imask);
@@ -222,15 +222,15 @@ void fiber_yield(void)
 {
 	unsigned int imask = irq_lock_inline();
 
-	if ((_NanoKernel.fiber != (tCCS *)NULL) &&
-	    (_NanoKernel.current->prio >= _NanoKernel.fiber->prio)) {
+	if ((_nanokernel.fiber != (tCCS *)NULL) &&
+	    (_nanokernel.current->prio >= _nanokernel.fiber->prio)) {
 		/*
 		 * Reinsert current context into the list of runnable contexts,
		 * and
		 * then swap to the context at the head of the fiber list.
 		 */
 
-		_insert_ccs(&(_NanoKernel.fiber), _NanoKernel.current);
+		_insert_ccs(&(_nanokernel.fiber), _nanokernel.current);
 		_Swap(imask);
 	} else
 		irq_unlock_inline(imask);
@@ -290,7 +290,7 @@ FUNC_NORETURN void fiber_abort(void)
 {
 	/* Do normal context exit cleanup, then give up CPU control */
 
-	_context_exit(_NanoKernel.current);
+	_context_exit(_nanokernel.current);
 	_nano_fiber_swap();
 }
 #endif
@@ -90,9 +90,9 @@ extern void main(int argc, char *argv[], char *envp[]);
 * main () -> kernel_init () -> task_fiber_start(... K_swapper ...)
 *
 * The _nano_init() routine initializes a context for the main() routine
-* (aka background context which is a task context)), and sets _NanoKernel.task
-* to the 'tCCS *' for the new context. The _NanoKernel.current field is set to
-* the provided <dummyOutContext> tCCS, however _NanoKernel.fiber is set to
+* (aka background context which is a task context)), and sets _nanokernel.task
+* to the 'tCCS *' for the new context. The _nanokernel.current field is set to
+* the provided <dummyOutContext> tCCS, however _nanokernel.fiber is set to
 * NULL.
 *
 * Thus the subsequent invocation of _nano_fiber_swap() depicted above results
@@ -117,7 +117,7 @@ void _nano_init(tCCS *dummyOutContext, int argc, char *argv[], char *envp[])
 	 * needed to identify it as a dummy context.
 	 */
 
-	_NanoKernel.current = dummyOutContext;
+	_nanokernel.current = dummyOutContext;
 
 	dummyOutContext->link =
 		(tCCS *)NULL; /* context not inserted into list */
@@ -145,7 +145,7 @@ void _nano_init(tCCS *dummyOutContext, int argc, char *argv[], char *envp[])
 	 * 'main'.
 	 */
 
-	_NanoKernel.task =
+	_nanokernel.task =
 		_NewContext(_k_init_and_idle_task_stack, /* pStackMem */
			    CONFIG_MAIN_STACK_SIZE, /* stackSize */
			    (_ContextEntry)main, /* pEntry */
@@ -159,13 +159,13 @@ void _nano_init(tCCS *dummyOutContext, int argc, char *argv[], char *envp[])
 	/* indicate that failure of this task may be fatal to the entire system
	 */
 
-	_NanoKernel.task->flags |= ESSENTIAL;
+	_nanokernel.task->flags |= ESSENTIAL;
 
 #if defined(CONFIG_MICROKERNEL)
 	/* fill in microkernel's TCB, which is the last element in _k_task_list[]
	 */
 
-	_k_task_list[_k_task_count].workspace = (char *)_NanoKernel.task;
+	_k_task_list[_k_task_count].workspace = (char *)_nanokernel.task;
 	_k_task_list[_k_task_count].worksize = CONFIG_MAIN_STACK_SIZE;
 #endif
 
@@ -175,9 +175,9 @@ void _nano_init(tCCS *dummyOutContext, int argc, char *argv[], char *envp[])
 	 * as the currently executing fiber context.
 	 */
 
-	_NanoKernel.fiber = NULL;
+	_nanokernel.fiber = NULL;
 #ifdef CONFIG_FP_SHARING
-	_NanoKernel.current_fp = NULL;
+	_nanokernel.current_fp = NULL;
 #endif /* CONFIG_FP_SHARING */
 
 	nanoArchInit();
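For orientation, the _nano_init() boot hand-off that these last hunks touch can be condensed into a few lines of C. This is a paraphrase of the flow documented above, with the _NewContext() arguments elided, not the literal code:

    /* Condensed _nano_init() flow, paraphrased from the hunks above. */
    void nano_init_sketch(tCCS *dummyOutContext)
    {
            _nanokernel.current = dummyOutContext; /* dummy outgoing context */
            _nanokernel.task = _NewContext(/* stack, main(), ... */);
            _nanokernel.task->flags |= ESSENTIAL;  /* failure is fatal */
            _nanokernel.fiber = NULL;              /* no runnable fibers yet */
            nanoArchInit();
            /* the subsequent _nano_fiber_swap() finds no runnable fibers
             * and switches into _nanokernel.task */
    }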