clarify use of term 'context'
The term 'context' is vague and overloaded. Its usage for 'an execution
context' is now referred to as such, in both comments and some APIs' names.
When the execution context can only be a fiber or a task (i.e. not an ISR),
it is referred to as a 'thread', again in comments and everywhere in the
code.

APIs that had their names changed:

- nano_context_id_t is now nano_thread_id_t
- context_self_get() is now sys_thread_self_get()
- context_type_get() is now sys_execution_context_type_get()
- context_custom_data_set/get() are now sys_thread_custom_data_set/get()

The 'context' prefix namespace does not have to be reserved by the kernel
anymore.

The Context Control Structure (CCS) data structure is now the Thread
Control Structure (TCS):

- struct ccs is now struct tcs
- tCCS is now tTCS

Change-Id: I7526a76c5b01e7c86333078e2d2e77c9feef5364
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
parent 849865046b
commit 0dcad8331b
96 changed files with 1082 additions and 1086 deletions
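To make the renames concrete, here is a minimal call-site sketch in C. The API
and type names are taken from the rename list in the commit message; the fiber
body, its arguments, the custom-data payload, and the <nanokernel.h> include
are illustrative assumptions, not code from this patch.

/* Illustrative sketch only: exercises the renamed APIs listed above. */
#include <nanokernel.h>

static int my_counter; /* hypothetical per-thread payload */

static void my_fiber(int arg1, int arg2) /* hypothetical fiber body */
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	/* was: nano_context_id_t id = context_self_get(); */
	nano_thread_id_t id = sys_thread_self_get();

	ARG_UNUSED(id);

	/* was: context_custom_data_set(&my_counter); */
	sys_thread_custom_data_set(&my_counter);

	/* was: context_type_get() */
	if (sys_execution_context_type_get() != NANO_CTX_ISR) {
		/* running in a thread (fiber or task), not in an ISR */
	}
}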
@@ -1,4 +1,4 @@
-/* context.c - new context creation for ARCv2 */
+/* thread.c - new thread creation for ARCv2 */
 
 /*
  * Copyright (c) 2014 Wind River Systems, Inc.
@@ -57,52 +57,50 @@ struct init_stack_frame {
 
 tNANO _nanokernel = {0};
 
-#if defined(CONFIG_CONTEXT_MONITOR)
-#define CONTEXT_MONITOR_INIT(pCcs) context_monitor_init(pCcs)
+#if defined(CONFIG_THREAD_MONITOR)
+#define THREAD_MONITOR_INIT(tcs) thread_monitor_init(tcs)
 #else
-#define CONTEXT_MONITOR_INIT(pCcs) \
+#define THREAD_MONITOR_INIT(tcs) \
 	do {/* do nothing */ \
 	} while ((0))
 #endif
 
-#if defined(CONFIG_CONTEXT_MONITOR)
+#if defined(CONFIG_THREAD_MONITOR)
 /*
- * @brief Initialize context monitoring support
+ * @brief Initialize thread monitoring support
  *
- * Currently only inserts the new context in the list of active contexts.
+ * Currently only inserts the new thread in the list of active threads.
  *
  * @return N/A
  */
 
-static ALWAYS_INLINE void context_monitor_init(struct ccs *pCcs /* context */
-					       )
+static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
 {
 	unsigned int key;
 
 	/*
-	 * Add the newly initialized context to head of the list of contexts.
-	 * This singly linked list of contexts maintains ALL the contexts in the
-	 * system: both tasks and fibers regardless of whether they are
-	 * runnable.
+	 * Add the newly initialized thread to head of the list of threads. This
+	 * singly linked list of threads maintains ALL the threads in the system:
+	 * both tasks and fibers regardless of whether they are runnable.
 	 */
 
 	key = irq_lock();
-	pCcs->next_context = _nanokernel.contexts;
-	_nanokernel.contexts = pCcs;
+	tcs->next_thread = _nanokernel.threads;
+	_nanokernel.threads = tcs;
 	irq_unlock(key);
 }
-#endif /* CONFIG_CONTEXT_MONITOR */
+#endif /* CONFIG_THREAD_MONITOR */
 
 /*
- * @brief Initialize a new context (thread) from its stack space
+ * @brief Initialize a new thread from its stack space
  *
- * The control structure (CCS) is put at the lower address of the stack. An
+ * The control structure (TCS) is put at the lower address of the stack. An
  * initial context, to be "restored" by __return_from_coop(), is put at
  * the other end of the stack, and thus reusable by the stack when not
  * needed anymore.
  *
  * The initial context is a basic stack frame that contains arguments for
- * _context_entry() return address, that points at _context_entry()
+ * _thread_entry() return address, that points at _thread_entry()
  * and status register.
  *
  * <options> is currently unused.
@@ -110,10 +108,10 @@ static ALWAYS_INLINE void context_monitor_init(struct ccs *pCcs /* context */
  * @return N/A
  */
 
-void _NewContext(
+void _new_thread(
 	char *pStackMem, /* pointer to aligned stack memory */
 	unsigned stackSize, /* stack size in bytes */
-	_ContextEntry pEntry, /* context (thread) entry point routine */
+	_thread_entry_t pEntry, /* thread entry point routine */
 	void *parameter1, /* first param to entry point */
 	void *parameter2, /* second param to entry point */
 	void *parameter3, /* third param to entry point */
@@ -124,18 +122,18 @@ void _NewContext(
 	char *stackEnd = pStackMem + stackSize;
 	struct init_stack_frame *pInitCtx;
 
-	tCCS *pCcs = (tCCS *) pStackMem;
+	struct tcs *tcs = (struct tcs *) pStackMem;
 
 #ifdef CONFIG_INIT_STACKS
 	memset(pStackMem, 0xaa, stackSize);
 #endif
 
-	/* carve the context entry struct from the "base" of the stack */
+	/* carve the thread entry struct from the "base" of the stack */
 
 	pInitCtx = (struct init_stack_frame *)(STACK_ROUND_DOWN(stackEnd) -
 		sizeof(struct init_stack_frame));
 
-	pInitCtx->pc = ((uint32_t)_ContextEntryWrapper);
+	pInitCtx->pc = ((uint32_t)_thread_entry_wrapper);
 	pInitCtx->r0 = (uint32_t)pEntry;
 	pInitCtx->r1 = (uint32_t)parameter1;
 	pInitCtx->r2 = (uint32_t)parameter2;
@@ -149,14 +147,14 @@ void _NewContext(
 	 */
 	pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
 
-	pCcs->link = NULL;
-	pCcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
-	pCcs->prio = priority;
+	tcs->link = NULL;
+	tcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
+	tcs->prio = priority;
 
-#ifdef CONFIG_CONTEXT_CUSTOM_DATA
+#ifdef CONFIG_THREAD_CUSTOM_DATA
 	/* Initialize custom data field (value is opaque to kernel) */
 
-	pCcs->custom_data = NULL;
+	tcs->custom_data = NULL;
 #endif
 
 	/*
@@ -165,13 +163,13 @@ void _NewContext(
 	 * dst[31:6] dst[5] dst[4] dst[3:0]
 	 * 26'd0 1 STATUS32.IE STATUS32.E[3:0]
 	 */
-	pCcs->intlock_key = 0x3F;
-	pCcs->relinquish_cause = _CAUSE_COOP;
-	pCcs->preempReg.sp = (uint32_t)pInitCtx - __tCalleeSaved_SIZEOF;
+	tcs->intlock_key = 0x3F;
+	tcs->relinquish_cause = _CAUSE_COOP;
+	tcs->preempReg.sp = (uint32_t)pInitCtx - __tCalleeSaved_SIZEOF;
 
-	_nano_timeout_ccs_init(pCcs);
+	_nano_timeout_tcs_init(tcs);
 
-	/* initial values in all other registers/CCS entries are irrelevant */
+	/* initial values in all other registers/TCS entries are irrelevant */
 
-	CONTEXT_MONITOR_INIT(pCcs);
+	THREAD_MONITOR_INIT(tcs);
 }
@@ -1,7 +1,7 @@
-/* ctx_wrapper.S - wrapper for _context_entry */
+/* thread_entry_wrapper.S - wrapper for _thread_entry */
 
 /*
- * Copyright (c) 2014 Wind River Systems, Inc.
+ * Copyright (c) 2014-2015 Wind River Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -32,8 +32,7 @@
 
 /*
  * DESCRIPTION
- * Wrapper for _context_entry routine when called from
- * the initial context
+ * Wrapper for _thread_entry routine when called from the initial context.
  */
 
 #define _ASMLANGUAGE
@@ -41,24 +40,23 @@
 #include <toolchain.h>
 #include <sections.h>
 
-GTEXT(_ContextEntryWrapper)
-GTEXT(_context_entry)
+GTEXT(_thread_entry_wrapper)
+GTEXT(_thread_entry)
 
 /*
- * @brief Wrapper for _context_entry
+ * @brief Wrapper for _thread_entry
  *
- * The routine pops parameters for the _context_entry from
- * stack frame, prepared by the _NewContext() routine
+ * The routine pops parameters for the _thread_entry from stack frame, prepared
+ * by the _new_thread() routine.
 *
 * @return N/A
 */
 
-SECTION_FUNC(TEXT, _ContextEntryWrapper)
+SECTION_FUNC(TEXT, _thread_entry_wrapper)
 
	pop_s r3
	pop_s r2
	pop_s r1
	pop_s r0
-	j _context_entry
+	j _thread_entry
	nop
@@ -147,7 +147,7 @@ SECTION_FUNC(TEXT, _firq_exit)
 .balign 4
 _check_if_current_is_the_task:
 
-	ld r0, [r2, __tCCS_flags_OFFSET]
+	ld r0, [r2, __tTCS_flags_OFFSET]
	and.f r0, r0, PREEMPTIBLE
	bnz.nd _check_if_a_fiber_is_ready
	rtie
@@ -190,12 +190,12 @@ _firq_reschedule:
 
	_save_callee_saved_regs
 
-	st _CAUSE_FIRQ, [r2, __tCCS_relinquish_cause_OFFSET]
+	st _CAUSE_FIRQ, [r2, __tTCS_relinquish_cause_OFFSET]
 
	ld r2, [r1, __tNANO_fiber_OFFSET]
 
	st r2, [r1, __tNANO_current_OFFSET]
-	ld r3, [r2, __tCCS_link_OFFSET]
+	ld r3, [r2, __tTCS_link_OFFSET]
	st r3, [r1, __tNANO_fiber_OFFSET]
 
	/*
@@ -204,7 +204,7 @@ _firq_reschedule:
	 */
	_load_callee_saved_regs
 
-	ld r3, [r2, __tCCS_relinquish_cause_OFFSET]
+	ld r3, [r2, __tTCS_relinquish_cause_OFFSET]
 
	breq.nd r3, _CAUSE_RIRQ, _firq_return_from_rirq
	nop
@@ -216,8 +216,8 @@ _firq_reschedule:
 .balign 4
 _firq_return_from_coop:
 
-	ld r3, [r2, __tCCS_intlock_key_OFFSET]
-	st 0, [r2, __tCCS_intlock_key_OFFSET]
+	ld r3, [r2, __tTCS_intlock_key_OFFSET]
+	st 0, [r2, __tTCS_intlock_key_OFFSET]
 
	/* pc into ilink */
	pop_s r0
@@ -235,7 +235,7 @@ _firq_return_from_coop:
	or.nz r0, r0, _ARC_V2_STATUS32_IE
	sr r0, [_ARC_V2_STATUS32_P0]
 
-	ld r0, [r2, __tCCS_return_value_OFFSET]
+	ld r0, [r2, __tTCS_return_value_OFFSET]
	rtie
 
 .balign 4
@@ -92,9 +92,9 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
		PR_EXC("**** Unknown Fatal Error %d! ****\n", reason);
		break;
	}
-	PR_EXC("Current context ID = 0x%x\n"
+	PR_EXC("Current thread ID = 0x%x\n"
	       "Faulting instruction address = 0x%x\n",
-	       context_self_get(),
+	       sys_thread_self_get(),
	       _arc_v2_aux_reg_read(_ARC_V2_ERET));
 
	/*
@@ -1,7 +1,7 @@
 /* isr_wrapper.S - wrapper around ISRs with logic for context switching */
 
 /*
- * Copyright (c) 2014 Wind River Systems, Inc.
+ * Copyright (c) 2014-2015 Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -91,7 +91,7 @@ The context switch code adopts this standard so that it is easier to follow:
 - r1 contains _nanokernel ASAP and is not overwritten over the lifespan of
   the functions.
 - r2 contains _nanokernel.current ASAP, and the incoming thread when we
-  transition from outgoing context to incoming context
+  transition from outgoing thread to incoming thread
 
 Not loading _nanokernel into r0 allows loading _nanokernel without stomping on
 the parameter in r0 in _Swap().
@@ -120,11 +120,11 @@ done upfront, and the rest is done when needed:
 o RIRQ
 
   All needed registers to run C code in the ISR are saved automatically
-  on the outgoing context's stack: loop, status32, pc, and the caller-
+  on the outgoing thread's stack: loop, status32, pc, and the caller-
   saved GPRs. That stack frame layout is pre-determined. If returning
   to a fiber, the stack is popped and no registers have to be saved by
   the kernel. If a context switch is required, the callee-saved GPRs
-  are then saved in the context control structure (CCS).
+  are then saved in the thread control structure (TCS).
 
 o FIRQ
 
@@ -137,7 +137,7 @@ o FIRQ
   CPU switches back to bank 0 for the GPRs. If a context switch is
   needed, at this point only are all the registers saved. First, a
   stack frame with the same layout as the automatic RIRQ one is created
-  and then the callee-saved GPRs are saved in the CCS. status32_p0 and
+  and then the callee-saved GPRs are saved in the TCS. status32_p0 and
   ilink are saved in this case, not status32 and pc.
 
 To create the stack frame, the FIRQ handling code must first go back to using
@@ -148,7 +148,7 @@ o FIRQ
 o coop
 
   When a coop context switch is done, the callee-saved registers are
-  saved in the CCS. The other GPRs do not need to be saved, since the
+  saved in the TCS. The other GPRs do not need to be saved, since the
   compiler has already placed them on the stack.
 
 For restoring the contexts, there are six cases. In all cases, the
@@ -52,19 +52,19 @@ completeness.
 #include <nano_private.h>
 #include <nano_offsets.h>
 
-/* ARCv2-specific tCCS structure member offsets */
+/* ARCv2-specific tNANO structure member offsets */
 GEN_OFFSET_SYM(tNANO, rirq_sp);
 GEN_OFFSET_SYM(tNANO, firq_regs);
 #ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
 GEN_OFFSET_SYM(tNANO, idle);
 #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
 
-/* ARCv2-specific tCCS structure member offsets */
-GEN_OFFSET_SYM(tCCS, intlock_key);
-GEN_OFFSET_SYM(tCCS, relinquish_cause);
-GEN_OFFSET_SYM(tCCS, return_value);
-#ifdef CONFIG_CONTEXT_CUSTOM_DATA
-GEN_OFFSET_SYM(tCCS, custom_data);
+/* ARCv2-specific struct tcs structure member offsets */
+GEN_OFFSET_SYM(tTCS, intlock_key);
+GEN_OFFSET_SYM(tTCS, relinquish_cause);
+GEN_OFFSET_SYM(tTCS, return_value);
+#ifdef CONFIG_THREAD_CUSTOM_DATA
+GEN_OFFSET_SYM(tTCS, custom_data);
 #endif
 
 
@@ -120,7 +120,7 @@ GEN_OFFSET_SYM(tFirqRegs, lp_start);
 GEN_OFFSET_SYM(tFirqRegs, lp_end);
 GEN_ABSOLUTE_SYM(__tFirqRegs_SIZEOF, sizeof(tFirqRegs));
 
-/* size of the tCCS structure sans save area for floating point regs */
-GEN_ABSOLUTE_SYM(__tCCS_NOFLOAT_SIZEOF, sizeof(tCCS));
+/* size of the struct tcs structure sans save area for floating point regs */
+GEN_ABSOLUTE_SYM(__tTCS_NOFLOAT_SIZEOF, sizeof(tTCS));
 
 GEN_ABS_SYM_END
@@ -67,7 +67,7 @@ SECTION_FUNC(TEXT, _rirq_enter)
	mov r1, _nanokernel
	ld r2, [r1, __tNANO_current_OFFSET]
 #if CONFIG_NUM_REGULAR_IRQ_PRIO_LEVELS == 1
-	st sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
+	st sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
	ld sp, [r1, __tNANO_rirq_sp_OFFSET]
 #else
 #error regular irq nesting is not implemented
@@ -104,7 +104,7 @@ SECTION_FUNC(TEXT, _rirq_exit)
 
	cmp r0, r3
	brgt.nd _rirq_return_from_rirq
-	ld sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
+	ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
 #endif
 
	/*
@@ -116,15 +116,15 @@ SECTION_FUNC(TEXT, _rirq_exit)
	 * b) needs to load it to restore the interrupted context.
	 */
 
-	ld r0, [r2, __tCCS_flags_OFFSET]
+	ld r0, [r2, __tTCS_flags_OFFSET]
	and.f r0, r0, PREEMPTIBLE
	bz.d _rirq_no_reschedule
-	ld sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
+	ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
 
	ld r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
	cmp r0, 0
	bz.d _rirq_no_reschedule
-	ld sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
+	ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
 
 .balign 4
 _rirq_reschedule:
@@ -132,12 +132,12 @@ _rirq_reschedule:
	/* _save_callee_saved_regs expects outgoing thread in r2 */
	_save_callee_saved_regs
 
-	st _CAUSE_RIRQ, [r2, __tCCS_relinquish_cause_OFFSET]
+	st _CAUSE_RIRQ, [r2, __tTCS_relinquish_cause_OFFSET]
 
	/* incoming fiber is in r0: it becomes the new 'current' */
	mov r2, r0
	st r2, [r1, __tNANO_current_OFFSET]
-	ld r3, [r2, __tCCS_link_OFFSET]
+	ld r3, [r2, __tTCS_link_OFFSET]
	st r3, [r1, __tNANO_fiber_OFFSET]
 
	/*
@@ -146,7 +146,7 @@ _rirq_reschedule:
	 */
	_load_callee_saved_regs
 
-	ld r3, [r2, __tCCS_relinquish_cause_OFFSET]
+	ld r3, [r2, __tTCS_relinquish_cause_OFFSET]
 
	breq.nd r3, _CAUSE_RIRQ, _rirq_return_from_rirq
	nop
@@ -162,8 +162,8 @@ _rirq_return_from_coop:
 
	/* update status32.ie (explanation in firq_exit:_firq_return_from_coop) */
	ld r0, [sp, 4]
-	ld r3, [r2, __tCCS_intlock_key_OFFSET]
-	st 0, [r2, __tCCS_intlock_key_OFFSET]
+	ld r3, [r2, __tTCS_intlock_key_OFFSET]
+	st 0, [r2, __tTCS_intlock_key_OFFSET]
	cmp r3, 0
	or.ne r0, r0, _ARC_V2_STATUS32_IE
	st r0, [sp, 4]
@@ -174,7 +174,7 @@ _rirq_return_from_coop:
	 * b) a real value will be pushed in r0 */
 
	/* push return value on stack */
-	ld r0, [r2, __tCCS_return_value_OFFSET]
+	ld r0, [r2, __tTCS_return_value_OFFSET]
	push_s r0
 
	/*
@@ -1,7 +1,7 @@
 /* swap.S - thread context switching */
 
 /*
- * Copyright (c) 2014 Wind River Systems, Inc.
+ * Copyright (c) 2014-2015 Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -56,20 +56,20 @@ GDATA(_nanokernel)
 * @brief Initiate a cooperative context switch
 *
 * The _Swap() routine is invoked by various nanokernel services to effect
- * a cooperative context context switch. Prior to invoking _Swap(), the caller
+ * a cooperative context switch. Prior to invoking _Swap(), the caller
 * disables interrupts via nanoCpuIntLock() and the return 'key' is passed as a
 * parameter to _Swap(). The key is in fact the value stored in the register
 * operand of a CLRI instruction.
 *
 * It stores the intlock key parameter into current->intlock_key.
 *
- * Given that _Swap() is called to effect a cooperative context context switch,
+ * Given that _Swap() is called to effect a cooperative context switch,
 * the caller-saved integer registers are saved on the stack by the function
 * call preamble to _Swap(). This creates a custom stack frame that will be
 * popped when returning from _Swap(), but is not suitable for handling a return
 * from an exception. Thus, the fact that the thread is pending because of a
 * cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in
- * the relinquish_cause of the context's tCCS. The _IrqExit()/_FirqExit() code
+ * the relinquish_cause of the thread's tTCS. The _IrqExit()/_FirqExit() code
 * will take care of doing the right thing to restore the thread status.
 *
 * When _Swap() is invoked, we know the decision to perform a context switch or
@@ -91,8 +91,8 @@ SECTION_FUNC(TEXT, _Swap)
	ld r2, [r1, __tNANO_current_OFFSET]
 
	/* save intlock key */
-	st r0, [r2, __tCCS_intlock_key_OFFSET]
-	st _CAUSE_COOP, [r2, __tCCS_relinquish_cause_OFFSET]
+	st r0, [r2, __tTCS_intlock_key_OFFSET]
+	st _CAUSE_COOP, [r2, __tTCS_relinquish_cause_OFFSET]
 
	/*
	 * Save status32 and blink on the stack before the callee-saved registers.
@@ -110,8 +110,8 @@ SECTION_FUNC(TEXT, _Swap)
 .balign 4
 _swap_to_a_fiber:
 
-	ld r3, [r2, __tCCS_link_OFFSET]
-	b.d _finish_swapping_to_context /* always execute delay slot */
+	ld r3, [r2, __tTCS_link_OFFSET]
+	b.d _finish_swapping_to_thread /* always execute delay slot */
	st r3, [r1, __tNANO_fiber_OFFSET] /* delay slot */
 
 .balign 4
@@ -122,14 +122,14 @@ _swap_to_the_task:
	/* fall through */
 
 .balign 4
-_finish_swapping_to_context:
+_finish_swapping_to_thread:
 
 
-	/* entering here, r2 contains the new current context */
+	/* entering here, r2 contains the new current thread */
 #if 0
	/* don't save flags in tNANO: slower, error-prone, and might not even give
	 * a speed boost where it's supposed to */
-	ld r3, [r2, __tCCS_flags_OFFSET]
+	ld r3, [r2, __tTCS_flags_OFFSET]
	st r3, [r1, __tNANO_flags_OFFSET]
 #endif
 
@@ -138,7 +138,7 @@ _finish_swapping_to_context:
 
	_load_callee_saved_regs
 
-	ld r3, [r2, __tCCS_relinquish_cause_OFFSET]
+	ld r3, [r2, __tTCS_relinquish_cause_OFFSET]
 
	breq.nd r3, _CAUSE_RIRQ, _swap_return_from_rirq
	nop
@@ -150,9 +150,9 @@ _finish_swapping_to_context:
 .balign 4
 _swap_return_from_coop:
 
-	ld r1, [r2, __tCCS_intlock_key_OFFSET]
-	st 0, [r2, __tCCS_intlock_key_OFFSET]
-	ld r0, [r2, __tCCS_return_value_OFFSET]
+	ld r1, [r2, __tTCS_intlock_key_OFFSET]
+	st 0, [r2, __tTCS_intlock_key_OFFSET]
+	ld r0, [r2, __tTCS_return_value_OFFSET]
 
	/*
	 * Adjust the stack here in case we go to _return_from_exc: this allows
@@ -63,14 +63,14 @@
	st fp, [sp, __tCalleeSaved_fp_OFFSET]
	st r30, [sp, __tCalleeSaved_r30_OFFSET]
 
-	/* save stack pointer in tCCS */
-	st sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
+	/* save stack pointer in struct tcs */
+	st sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
 .endm
 
 /* entering this macro, current is in r2 */
 .macro _load_callee_saved_regs
-	/* restore stack pointer from tCCS */
-	ld sp, [r2, __tCCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
+	/* restore stack pointer from struct tcs */
+	ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
 
	ld r13, [sp, __tCalleeSaved_r13_OFFSET]
	ld r14, [sp, __tCalleeSaved_r14_OFFSET]
@@ -165,8 +165,8 @@
	 *
	 * The pc and status32 values will still be on the stack. We cannot
	 * pop them yet because the callers of _pop_irq_stack_frame must reload
-	 * status32 differently depending on the context they are running in
-	 * (_Swap(), firq or exception).
+	 * status32 differently depending on the execution context they are running
+	 * in (_Swap(), firq or exception).
	 */
	add_s sp, sp, __tISF_SIZEOF
@@ -67,7 +67,7 @@ static inline void nonEssentialTaskAbort(void)
 * This routine implements the corrective action to be taken when the system
 * detects a fatal error.
 *
- * This sample implementation attempts to abort the current context and allow
+ * This sample implementation attempts to abort the current thread and allow
 * the system to continue executing, which may permit the system to continue
 * functioning with degraded capabilities.
 *
@@ -85,12 +85,12 @@ void _SysFatalErrorHandler(
	const NANO_ESF *pEsf /* pointer to exception stack frame */
	)
 {
-	nano_context_type_t curCtx = context_type_get();
+	nano_context_type_t curCtx = sys_execution_context_type_get();
 
	ARG_UNUSED(reason);
	ARG_UNUSED(pEsf);
 
-	if ((curCtx == NANO_CTX_ISR) || _context_essential_check(NULL)) {
+	if ((curCtx == NANO_CTX_ISR) || _is_thread_essential(NULL)) {
		PRINTK("Fatal fault in %s ! Spinning...\n",
		       NANO_CTX_ISR == curCtx
			       ? "ISR"
@@ -24,7 +24,7 @@ CONFIG_XIP=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 # CONFIG_NANO_TIMERS is not set
@@ -143,16 +143,16 @@ typedef struct firq_regs tFirqRegs;
 
 #endif /* _ASMLANGUAGE */
 
-/* Bitmask definitions for the tCCS->flags bit field */
+/* Bitmask definitions for the struct tcs->flags bit field */
 
 #define FIBER 0x000
-#define TASK 0x001 /* 1 = task context, 0 = fiber context */
+#define TASK 0x001 /* 1 = task, 0 = fiber */
 
-#define INT_ACTIVE 0x002 /* 1 = context is executing interrupt handler */
-#define EXC_ACTIVE 0x004 /* 1 = context is executing exception handler */
-#define USE_FP 0x010 /* 1 = context uses floating point unit */
-#define PREEMPTIBLE 0x020 /* 1 = preemptible context */
-#define ESSENTIAL 0x200 /* 1 = system context that must not abort */
+#define INT_ACTIVE 0x002 /* 1 = execution context is interrupt handler */
+#define EXC_ACTIVE 0x004 /* 1 = execution context is exception handler */
+#define USE_FP 0x010 /* 1 = thread uses floating point unit */
+#define PREEMPTIBLE 0x020 /* 1 = preemptible thread */
+#define ESSENTIAL 0x200 /* 1 = system thread that must not abort */
 #define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
 
 /* stacks */
@@ -165,7 +165,7 @@ typedef struct firq_regs tFirqRegs;
 #define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
 
 /*
- * Reason a context has relinquished control: fibers can only be in the NONE
+ * Reason a thread has relinquished control: fibers can only be in the NONE
 * or COOP state, tasks can be in one of the four.
 */
 #define _CAUSE_NONE 0
@@ -175,21 +175,21 @@ typedef struct firq_regs tFirqRegs;
 
 #ifndef _ASMLANGUAGE
 
-struct ccs {
-	struct ccs *link; /* node in singly-linked list
+struct tcs {
+	struct tcs *link; /* node in singly-linked list
			   * _nanokernel.fibers */
	uint32_t flags; /* bitmask of flags above */
	uint32_t intlock_key; /* interrupt key when relinquishing control */
	int relinquish_cause; /* one of the _CAUSE_xxxx definitions above */
	unsigned int return_value; /* return value from _Swap */
	int prio; /* fiber priority, -1 for a task */
-#ifdef CONFIG_CONTEXT_CUSTOM_DATA
+#ifdef CONFIG_THREAD_CUSTOM_DATA
	void *custom_data; /* available for custom use */
 #endif
	struct coop coopReg;
	struct preempt preempReg;
-#ifdef CONFIG_CONTEXT_MONITOR
-	struct ccs *next_context; /* next item in list of ALL fiber+tasks */
+#ifdef CONFIG_THREAD_MONITOR
+	struct tcs *next_thread; /* next item in list of ALL fiber+tasks */
 #endif
 #ifdef CONFIG_NANO_TIMEOUTS
	struct _nano_timeout nano_timeout;
@@ -197,16 +197,16 @@ struct ccs {
 };
 
 struct s_NANO {
-	tCCS *fiber; /* singly linked list of runnable fiber contexts */
-	tCCS *task; /* current task the nanokernel knows about */
-	tCCS *current; /* currently scheduled context (fiber or task) */
+	struct tcs *fiber; /* singly linked list of runnable fibers */
+	struct tcs *task; /* current task the nanokernel knows about */
+	struct tcs *current; /* currently scheduled thread (fiber or task) */
 
-#ifdef CONFIG_CONTEXT_MONITOR
-	tCCS *contexts; /* singly linked list of ALL fiber+tasks */
+#ifdef CONFIG_THREAD_MONITOR
+	struct tcs *threads; /* singly linked list of ALL fiber+tasks */
 #endif
 
 #ifdef CONFIG_FP_SHARING
-	tCCS *current_fp; /* context (fiber or task) that owns the FP regs */
+	struct tcs *current_fp; /* thread (fiber or task) that owns the FP regs */
 #endif
 
 #ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
@@ -246,14 +246,14 @@ static ALWAYS_INLINE void nanoArchInit(void)
 *
 * The register used to store the return value from a function call invocation
 * to <value>. It is assumed that the specified <fiber> is pending, and thus
- * the fiber's context is stored in its tCCS structure.
+ * the fiber's context is stored in its struct tcs structure.
 *
 * @return N/A
 *
 * \NOMANUAL
 */
 
-static ALWAYS_INLINE void fiberRtnValueSet(tCCS *fiber, unsigned int value)
+static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber, unsigned int value)
 {
	fiber->return_value = value;
 }
@@ -275,7 +275,7 @@ static ALWAYS_INLINE int _IS_IN_ISR(void)
 }
 
 extern void nanoCpuAtomicIdle(unsigned int);
-extern void _ContextEntryWrapper(void);
+extern void _thread_entry_wrapper(void);
 
 static inline void _IntLibInit(void)
 {
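Given the renamed fields above (threads, next_thread), a debug-style walk of
the thread monitor list looks like the following. This is an illustrative
sketch only, assuming CONFIG_THREAD_MONITOR is enabled; the helper
dump_all_threads() is hypothetical and not part of this patch.

/* Illustrative sketch: iterate every thread (fiber or task) in the
 * system via the CONFIG_THREAD_MONITOR list renamed by this commit.
 */
static void dump_all_threads(void)
{
	struct tcs *t;

	for (t = _nanokernel.threads; t != NULL; t = t->next_thread) {
		/* prio is -1 for the task, >= 0 for fibers */
		PRINTK("thread %p, flags 0x%x, prio %d\n",
		       t, t->flags, t->prio);
	}
}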
@@ -21,7 +21,7 @@ CONFIG_XIP=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 # CONFIG_NANO_TIMERS is not set
@@ -21,7 +21,7 @@ CONFIG_XIP=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 # CONFIG_NANO_TIMERS is not set
@@ -21,7 +21,7 @@ CONFIG_XIP=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 CONFIG_NANO_TIMERS=y
@@ -21,7 +21,7 @@ CONFIG_XIP=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 CONFIG_NANO_TIMERS=y
@@ -1,4 +1,4 @@
-/* context.c - new context creation for ARM Cortex-M */
+/* thread.c - new thread creation for ARM Cortex-M */
 
 /*
  * Copyright (c) 2013-2014 Wind River Systems, Inc.
@@ -47,48 +47,47 @@ architecture.
 
 tNANO _nanokernel = {0};
 
-#if defined(CONFIG_CONTEXT_MONITOR)
-#define CONTEXT_MONITOR_INIT(pCcs) _context_monitor_init(pCcs)
+#if defined(CONFIG_THREAD_MONITOR)
+#define THREAD_MONITOR_INIT(tcs) _thread_monitor_init(tcs)
 #else
-#define CONTEXT_MONITOR_INIT(pCcs) \
+#define THREAD_MONITOR_INIT(tcs) \
	do {/* do nothing */ \
	} while ((0))
 #endif
 
-#if defined(CONFIG_CONTEXT_MONITOR)
+#if defined(CONFIG_THREAD_MONITOR)
 /**
 *
- * @brief Initialize context monitoring support
+ * @brief Initialize thread monitoring support
 *
- * Currently only inserts the new context in the list of active contexts.
+ * Currently only inserts the new thread in the list of active threads.
 *
 * @return N/A
 */
 
-static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */
+static ALWAYS_INLINE void _thread_monitor_init(struct tcs *tcs /* thread */
				)
 {
	unsigned int key;
 
	/*
-	 * Add the newly initialized context to head of the list of contexts.
-	 * This singly linked list of contexts maintains ALL the contexts in the
-	 * system: both tasks and fibers regardless of whether they are
-	 * runnable.
+	 * Add the newly initialized thread to head of the list of threads. This
+	 * singly linked list of threads maintains ALL the threads in the system:
+	 * both tasks and fibers regardless of whether they are runnable.
	 */
 
	key = irq_lock();
-	pCcs->next_context = _nanokernel.contexts;
-	_nanokernel.contexts = pCcs;
+	tcs->next_thread = _nanokernel.threads;
+	_nanokernel.threads = tcs;
	irq_unlock(key);
 }
-#endif /* CONFIG_CONTEXT_MONITOR */
+#endif /* CONFIG_THREAD_MONITOR */
 
 /**
 *
- * @brief Initialize a new context (thread) from its stack space
+ * @brief Initialize a new thread from its stack space
 *
- * The control structure (CCS) is put at the lower address of the stack. An
+ * The control structure (TCS) is put at the lower address of the stack. An
 * initial context, to be "restored" by __pendsv(), is put at the other end of
 * the stack, and thus reusable by the stack when not needed anymore.
 *
@@ -105,31 +104,31 @@ static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */
 * @return N/A
 */
 
-void _NewContext(
+void _new_thread(
	char *pStackMem, /* aligned stack memory */
	unsigned stackSize, /* stack size in bytes */
-	_ContextEntry pEntry, /* entry point */
+	_thread_entry_t pEntry, /* entry point */
	void *parameter1, /* entry point first param */
	void *parameter2, /* entry point second param */
	void *parameter3, /* entry point third param */
-	int priority, /* context priority (-1 for tasks) */
+	int priority, /* thread priority (-1 for tasks) */
	unsigned options /* misc options (future) */
	)
 {
	char *stackEnd = pStackMem + stackSize;
	struct __esf *pInitCtx;
-	tCCS *pCcs = (tCCS *) pStackMem;
+	struct tcs *tcs = (struct tcs *) pStackMem;
 
 #ifdef CONFIG_INIT_STACKS
	memset(pStackMem, 0xaa, stackSize);
 #endif
 
-	/* carve the context entry struct from the "base" of the stack */
+	/* carve the thread entry struct from the "base" of the stack */
 
	pInitCtx = (struct __esf *)(STACK_ROUND_DOWN(stackEnd) -
		sizeof(struct __esf));
 
-	pInitCtx->pc = ((uint32_t)_context_entry) & 0xfffffffe;
+	pInitCtx->pc = ((uint32_t)_thread_entry) & 0xfffffffe;
	pInitCtx->a1 = (uint32_t)pEntry;
	pInitCtx->a2 = (uint32_t)parameter1;
	pInitCtx->a3 = (uint32_t)parameter2;
@@ -137,22 +136,22 @@ void _NewContext(
	pInitCtx->xpsr =
		0x01000000UL; /* clear all, thumb bit is 1, even if RO */
 
-	pCcs->link = NULL;
-	pCcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
-	pCcs->prio = priority;
+	tcs->link = NULL;
+	tcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
+	tcs->prio = priority;
 
-#ifdef CONFIG_CONTEXT_CUSTOM_DATA
+#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
 
-	pCcs->custom_data = NULL;
+	tcs->custom_data = NULL;
 #endif
 
-	pCcs->preempReg.psp = (uint32_t)pInitCtx;
-	pCcs->basepri = 0;
+	tcs->preempReg.psp = (uint32_t)pInitCtx;
+	tcs->basepri = 0;
 
-	_nano_timeout_ccs_init(pCcs);
+	_nano_timeout_tcs_init(tcs);
 
-	/* initial values in all other registers/CCS entries are irrelevant */
+	/* initial values in all other registers/TCS entries are irrelevant */
 
-	CONTEXT_MONITOR_INIT(pCcs);
+	THREAD_MONITOR_INIT(tcs);
 }
@@ -146,9 +146,9 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
 *
 * @brief Atomically re-enable interrupts and enter low power mode
 *
- * This function is utilized by the nanokernel object "wait" APIs for task
- * contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
- * and nano_task_fifo_get_wait().
+ * This function is utilized by the nanokernel object "wait" APIs for tasks,
+ * e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(),
+ * nano_task_stack_pop_wait(), and nano_task_fifo_get_wait().
 *
 * INTERNAL
 * The requirements for nano_cpu_atomic_idle() are as follows:
@@ -104,9 +104,9 @@ FUNC_NORETURN void _NanoFatalErrorHandler(
		PR_EXC("**** Unknown Fatal Error %d! ****\n", reason);
		break;
	}
-	PR_EXC("Current context ID = 0x%x\n"
+	PR_EXC("Current thread ID = 0x%x\n"
	       "Faulting instruction address = 0x%x\n",
-	       context_self_get(),
+	       sys_thread_self_get(),
	       pEsf->pc);
 
	/*
@@ -84,7 +84,7 @@ void _FaultDump(const NANO_ESF *esf, int fault)
 
	PR_EXC("Fault! EXC #%d, Thread: %x, instr @ %x\n",
	       fault,
-	       context_self_get(),
+	       sys_thread_self_get(),
	       esf->pc);
 
	if (3 == fault) { /* hard fault */
@@ -120,7 +120,7 @@ void _FaultDump(const NANO_ESF *esf, int fault)
 #if (CONFIG_FAULT_DUMP == 2)
 /**
 *
- * @brief Dump context information
+ * @brief Dump thread information
 *
 * See _FaultDump() for example.
 *
@@ -129,11 +129,11 @@ void _FaultDump(const NANO_ESF *esf, int fault)
 * \NOMANUAL
 */
 
-static void _FaultContextShow(const NANO_ESF *esf)
+static void _FaultThreadShow(const NANO_ESF *esf)
 {
-	PR_EXC(" Executing context ID (thread): 0x%x\n"
+	PR_EXC(" Executing thread ID (thread): 0x%x\n"
	       " Faulting instruction address: 0x%x\n",
-	       context_self_get(),
+	       sys_thread_self_get(),
	       esf->pc);
 }
 
@@ -153,7 +153,7 @@ static void _MpuFault(const NANO_ESF *esf,
 {
	PR_EXC("***** MPU FAULT *****\n");
 
-	_FaultContextShow(esf);
+	_FaultThreadShow(esf);
 
	if (_ScbMemFaultIsStacking()) {
		PR_EXC(" Stacking error\n");
@@ -188,7 +188,7 @@ static void _BusFault(const NANO_ESF *esf,
 {
	PR_EXC("***** BUS FAULT *****\n");
 
-	_FaultContextShow(esf);
+	_FaultThreadShow(esf);
 
	if (_ScbBusFaultIsStacking()) {
		PR_EXC(" Stacking error\n");
@@ -228,7 +228,7 @@ static void _UsageFault(const NANO_ESF *esf)
 {
	PR_EXC("***** USAGE FAULT *****\n");
 
-	_FaultContextShow(esf);
+	_FaultThreadShow(esf);
 
	/* bits are sticky: they stack and must be reset */
	if (_ScbUsageFaultIsDivByZero()) {
@@ -325,7 +325,7 @@ static void _ReservedException(const NANO_ESF *esf,
 *
 * eg. (precise bus error escalated to hard fault):
 *
- * Executing context ID (thread): 0x200000dc
+ * Executing thread ID (thread): 0x200000dc
 * Faulting instruction address: 0x000011d3
 * ***** HARD FAULT *****
 * Fault escalation (see below)
@@ -66,7 +66,7 @@ the PendSV exception.
 
 void fiber_abort(void)
 {
-	_context_exit(_nanokernel.current);
+	_thread_exit(_nanokernel.current);
	if (_ScbIsInThreadMode()) {
		_nano_fiber_swap();
	} else {
@@ -63,7 +63,7 @@ _ASM_FILE_PROLOGUE
 * state.
 *
 * Also, record the fact that the thread is currently interrupted so that VQEMU
- * looks into the CCS and not the CPU registers to obtain the current thread's
+ * looks into the TCS and not the CPU registers to obtain the current thread's
 * register values.
 *
 * NOTE:
@@ -86,10 +86,10 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
	orrs r2, #EXC_ACTIVE
	str r2, [r1, #__tNANO_flags_OFFSET]
	ldr r1, [r1, #__tNANO_current_OFFSET]
-	str r2, [r1, #__tCCS_flags_OFFSET]
+	str r2, [r1, #__tTCS_flags_OFFSET]
 
-	/* save callee-saved + psp in CCS */
-	adds r1, #__tCCS_preempReg_OFFSET
+	/* save callee-saved + psp in TCS */
+	adds r1, #__tTCS_preempReg_OFFSET
	mrs ip, PSP
	stmia r1, {v1-v8, ip}
 
@@ -100,7 +100,7 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
 * @brief Exception exit extra clean up when GDB_INFO is enabled
 *
 * Record the fact that the thread is not interrupted anymore so that VQEMU
- * looks at the CPU registers and not into the CCS to obtain the current
+ * looks at the CPU registers and not into the TCS to obtain the current
 * thread's register values. Only do this if this is not a nested exception.
 *
 * NOTE:
@@ -125,7 +125,7 @@ SECTION_FUNC(TEXT, _GdbStubExcExit)
	bic r2, #EXC_ACTIVE
	str r2, [r1, #__tNANO_flags_OFFSET]
	ldr r1, [r1, #__tNANO_current_OFFSET]
-	str r2, [r1, #__tCCS_flags_OFFSET]
+	str r2, [r1, #__tTCS_flags_OFFSET]
 
	bx lr
@@ -59,11 +59,11 @@ GEN_OFFSET_SYM(tNANO, flags);
 GEN_OFFSET_SYM(tNANO, idle);
 #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
 
-/* ARM-specific tCCS structure member offsets */
+/* ARM-specific struct tcs structure member offsets */
 
-GEN_OFFSET_SYM(tCCS, basepri);
-#ifdef CONFIG_CONTEXT_CUSTOM_DATA
-GEN_OFFSET_SYM(tCCS, custom_data);
+GEN_OFFSET_SYM(tTCS, basepri);
+#ifdef CONFIG_THREAD_CUSTOM_DATA
+GEN_OFFSET_SYM(tTCS, custom_data);
 #endif
 
 /* ARM-specific ESF structure member offsets */
@@ -97,8 +97,8 @@ GEN_OFFSET_SYM(tPreempt, psp);
 
 GEN_ABSOLUTE_SYM(__tPreempt_SIZEOF, sizeof(tPreempt));
 
-/* size of the tCCS structure sans save area for floating point regs */
+/* size of the struct tcs structure sans save area for floating point regs */
 
-GEN_ABSOLUTE_SYM(__tCCS_NOFLOAT_SIZEOF, sizeof(tCCS));
+GEN_ABSOLUTE_SYM(__tTCS_NOFLOAT_SIZEOF, sizeof(tTCS));
 
 GEN_ABS_SYM_END
@@ -55,18 +55,17 @@ GDATA(_nanokernel)
 *
 * @brief PendSV exception handler, handling context switches
 *
- * The PendSV exception is the only context in the system that can perform
- * context switching. When an execution context finds out it has to switch
- * contexts, it pends the PendSV exception.
+ * The PendSV exception is the only execution context in the system that can
+ * perform context switching. When an execution context finds out it has to
+ * switch contexts, it pends the PendSV exception.
 *
 * When PendSV is pended, the decision that a context switch must happen has
 * already been taken. In other words, when __pendsv() runs, we *know* we have
 * to swap *something*.
 *
- * The scheduling algorithm is simple: schedule the head of the runnable FIBER
- * context list, which is represented by _nanokernel.fiber. If there are no
- * runnable FIBER contexts, then schedule the TASK context represented by
- * _nanokernel.task. The _nanokernel.task field will never be NULL.
+ * The scheduling algorithm is simple: schedule the head of the runnable fibers
+ * list (_nanokernel.fiber). If there are no runnable fibers, then schedule the
+ * task (_nanokernel.task). The _nanokernel.task field will never be NULL.
 */
 
 SECTION_FUNC(TEXT, __pendsv)
@@ -80,14 +79,14 @@ SECTION_FUNC(TEXT, __pendsv)
	pop {lr}
 #endif
 
-	/* load _Nanokernel into r1 and current tCCS into r2 */
+	/* load _Nanokernel into r1 and current tTCS into r2 */
	ldr r1, =_nanokernel
	ldr r2, [r1, #__tNANO_current_OFFSET]
 
-	/* addr of callee-saved regs in CCS in r0 */
-	add r0, r2, #__tCCS_preempReg_OFFSET
+	/* addr of callee-saved regs in TCS in r0 */
+	add r0, r2, #__tTCS_preempReg_OFFSET
 
-	/* save callee-saved + psp in CCS */
+	/* save callee-saved + psp in TCS */
	mrs ip, PSP
	stmia r0, {v1-v8, ip}
 
@@ -105,7 +104,7 @@ SECTION_FUNC(TEXT, __pendsv)
	movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
	msr BASEPRI, r0
 
-	/* find out incoming context (fiber or task) */
+	/* find out incoming thread (fiber or task) */
 
	/* is there a fiber ready ? */
	ldr r2, [r1, #__tNANO_fiber_OFFSET]
@@ -116,12 +115,12 @@ SECTION_FUNC(TEXT, __pendsv)
	 * else, the task is the thread we're switching in
	 */
	itte ne
-	ldrne.w r0, [r2, #__tCCS_link_OFFSET] /* then */
+	ldrne.w r0, [r2, #__tTCS_link_OFFSET] /* then */
	strne.w r0, [r1, #__tNANO_fiber_OFFSET] /* then */
	ldreq.w r2, [r1, #__tNANO_task_OFFSET] /* else */
 
	/* r2 contains the new thread */
-	ldr r0, [r2, #__tCCS_flags_OFFSET]
+	ldr r0, [r2, #__tTCS_flags_OFFSET]
	str r0, [r1, #__tNANO_flags_OFFSET]
	str r2, [r1, #__tNANO_current_OFFSET]
 
@@ -138,13 +137,13 @@ SECTION_FUNC(TEXT, __pendsv)
	str r3, [ip, #0]
 
	/* restore BASEPRI for the incoming thread */
-	ldr r0, [r2, #__tCCS_basepri_OFFSET]
+	ldr r0, [r2, #__tTCS_basepri_OFFSET]
	mov ip, #0
-	str ip, [r2, #__tCCS_basepri_OFFSET]
+	str ip, [r2, #__tTCS_basepri_OFFSET]
	msr BASEPRI, r0
 
-	/* load callee-saved + psp from CCS */
-	add r0, r2, #__tCCS_preempReg_OFFSET
+	/* load callee-saved + psp from TCS */
+	add r0, r2, #__tTCS_preempReg_OFFSET
	ldmia r0, {v1-v8, ip}
	msr PSP, ip
 
@@ -205,9 +204,9 @@ SECTION_FUNC(TEXT, __svc)
 * __pendsv all come from handling an interrupt, which means we know the
 * interrupts were not locked: in that case the BASEPRI value is 0.
 *
- * Given that _Swap() is called to effect a cooperative context context switch,
- * only the caller-saved integer registers need to be saved in the tCCS of the
- * outgoing context. This is all performed by the hardware, which stores it in
+ * Given that _Swap() is called to effect a cooperative context switch,
+ * only the caller-saved integer registers need to be saved in the TCS of the
+ * outgoing thread. This is all performed by the hardware, which stores it in
 * its exception stack frame, created when handling the svc exception.
 *
 * @return may contain a return value setup by a call to fiberRtnValueSet()
@@ -222,7 +221,7 @@ SECTION_FUNC(TEXT, _Swap)
 
	ldr r1, =_nanokernel
	ldr r2, [r1, #__tNANO_current_OFFSET]
-	str r0, [r2, #__tCCS_basepri_OFFSET]
+	str r0, [r2, #__tTCS_basepri_OFFSET]
 
	svc #0
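The fiber-first scheduling rule described in the __pendsv comment above is
easier to see in C. The sketch below is illustrative only: the helper
next_thread_to_run() does not exist in the tree, but it uses the struct s_NANO
and struct tcs fields shown elsewhere in this patch.

/* Illustrative only: the fiber-first scheduling decision that __pendsv
 * implements in assembly, expressed in C against struct s_NANO.
 */
static struct tcs *next_thread_to_run(struct s_NANO *nano)
{
	struct tcs *incoming = nano->fiber; /* head of runnable fibers */

	if (incoming != NULL) {
		/* dequeue the fiber from the runnable list */
		nano->fiber = incoming->link;
	} else {
		/* no runnable fiber: schedule the task (never NULL) */
		incoming = nano->task;
	}
	nano->current = incoming;
	return incoming;
}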
@@ -67,7 +67,7 @@ static inline void nonEssentialTaskAbort(void)
 * This routine implements the corrective action to be taken when the system
 * detects a fatal error.
 *
- * This sample implementation attempts to abort the current context and allow
+ * This sample implementation attempts to abort the current thread and allow
 * the system to continue executing, which may permit the system to continue
 * functioning with degraded capabilities.
 *
@@ -85,12 +85,12 @@ void _SysFatalErrorHandler(
	const NANO_ESF * pEsf /* pointer to exception stack frame */
	)
 {
-	nano_context_type_t curCtx = context_type_get();
+	nano_context_type_t curCtx = sys_execution_context_type_get();
 
	ARG_UNUSED(reason);
	ARG_UNUSED(pEsf);
 
-	if ((curCtx == NANO_CTX_ISR) || _context_essential_check(NULL)) {
+	if ((curCtx == NANO_CTX_ISR) || _is_thread_essential(NULL)) {
		PRINTK("Fatal fault in %s ! Spinning...\n",
		       NANO_CTX_ISR == curCtx
			       ? "ISR"
@@ -25,7 +25,7 @@ CONFIG_XIP=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 # CONFIG_NANO_TIMERS is not set
@@ -96,20 +96,20 @@ typedef struct preempt tPreempt;
 
 #endif /* _ASMLANGUAGE */
 
-/* Bitmask definitions for the tCCS->flags bit field */
+/* Bitmask definitions for the struct tcs.flags bit field */
 
 #define FIBER 0x000
-#define TASK 0x001 /* 1 = task context, 0 = fiber context */
-#define INT_ACTIVE 0x002 /* 1 = context is executing interrupt handler */
-#define EXC_ACTIVE 0x004 /* 1 = context is executing exception handler */
-#define USE_FP 0x010 /* 1 = context uses floating point unit */
+#define TASK 0x001 /* 1 = task, 0 = fiber */
+#define INT_ACTIVE 0x002 /* 1 = execution context is interrupt handler */
+#define EXC_ACTIVE 0x004 /* 1 = execution context is exception handler */
+#define USE_FP 0x010 /* 1 = thread uses floating point unit */
 #define PREEMPTIBLE \
-	0x020 /* 1 = preemptible context \
+	0x020 /* 1 = preemptible thread \
	       * NOTE: the value must be < 0x100 to be able to \
	       * use a small thumb instr with immediate \
	       * when loading PREEMPTIBLE in a GPR \
	       */
-#define ESSENTIAL 0x200 /* 1 = system context that must not abort */
+#define ESSENTIAL 0x200 /* 1 = system thread that must not abort */
 #define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
 
 /* stacks */
@@ -126,18 +126,18 @@ typedef struct preempt tPreempt;
 #endif
 
 #ifndef _ASMLANGUAGE
-struct ccs {
-	struct ccs *link; /* singly-linked list in _nanokernel.fibers */
+struct tcs {
+	struct tcs *link; /* singly-linked list in _nanokernel.fibers */
	uint32_t flags;
	uint32_t basepri;
	int prio;
-#ifdef CONFIG_CONTEXT_CUSTOM_DATA
+#ifdef CONFIG_THREAD_CUSTOM_DATA
	void *custom_data; /* available for custom use */
 #endif
	struct coop coopReg;
	struct preempt preempReg;
-#if defined(CONFIG_CONTEXT_MONITOR)
-	struct ccs *next_context; /* next item in list of ALL fiber+tasks */
+#if defined(CONFIG_THREAD_MONITOR)
+	struct tcs *next_thread; /* next item in list of ALL fiber+tasks */
 #endif
 #ifdef CONFIG_NANO_TIMEOUTS
	struct _nano_timeout nano_timeout;
@@ -145,17 +145,17 @@ struct ccs {
 };
 
 struct s_NANO {
-	tCCS *fiber; /* singly linked list of runnable fiber contexts */
-	tCCS *task; /* pointer to runnable task context */
-	tCCS *current; /* currently scheduled context (fiber or task) */
-	int flags; /* tCCS->flags of 'current' context */
+	struct tcs *fiber; /* singly linked list of runnable fibers */
+	struct tcs *task; /* pointer to runnable task */
+	struct tcs *current; /* currently scheduled thread (fiber or task) */
+	int flags; /* struct tcs->flags of 'current' thread */
 
-#if defined(CONFIG_CONTEXT_MONITOR)
-	tCCS *contexts; /* singly linked list of ALL fiber+tasks */
+#if defined(CONFIG_THREAD_MONITOR)
+	struct tcs *threads; /* singly linked list of ALL fiber+tasks */
 #endif
 
 #ifdef CONFIG_FP_SHARING
-	tCCS *current_fp; /* context (fiber or task) that owns the FP regs */
+	struct tcs *current_fp; /* thread (fiber or task) that owns the FP regs */
 #endif /* CONFIG_FP_SHARING */
 
 #ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
@@ -190,7 +190,7 @@ static ALWAYS_INLINE void nanoArchInit(void)
 *
 * The register used to store the return value from a function call invocation
 * to <value>. It is assumed that the specified <fiber> is pending, and thus
- * the fiber's context is stored in its tCCS structure.
+ * the fiber's context is stored in its struct tcs structure.
 *
 * @return N/A
 *
@@ -198,7 +198,7 @@ static ALWAYS_INLINE void nanoArchInit(void)
 */
 
 static ALWAYS_INLINE void fiberRtnValueSet(
-	tCCS *fiber, /* pointer to fiber */
+	struct tcs *fiber, /* pointer to fiber */
	unsigned int value /* value to set as return value */
	)
 {
@@ -29,7 +29,7 @@ CONFIG_ENHANCED_SECURITY=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 # CONFIG_NANO_TIMERS is not set

@@ -29,7 +29,7 @@ CONFIG_ENHANCED_SECURITY=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 # CONFIG_NANO_TIMERS is not set

@@ -29,7 +29,7 @@ CONFIG_ENHANCED_SECURITY=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 # CONFIG_NANO_TIMERS is not set

@@ -29,7 +29,7 @@ CONFIG_ENHANCED_SECURITY=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 CONFIG_NANO_TIMERS=y

@@ -29,7 +29,7 @@ CONFIG_ENHANCED_SECURITY=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 CONFIG_NANO_TIMERS=y

@@ -29,7 +29,7 @@ CONFIG_ENHANCED_SECURITY=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 CONFIG_NANO_TIMERS=y
@ -1,4 +1,4 @@
-/* context.c - nanokernel context support primitives */
+/* thread.c - nanokernel thread support primitives */

 /*
  * Copyright (c) 2010-2015 Wind River Systems, Inc.

@ -56,54 +56,55 @@ tNANO _nanokernel = {0};
 /* forward declaration */

 #ifdef CONFIG_GDB_INFO
-void _ContextEntryWrapper(_ContextEntry, _ContextArg, _ContextArg, _ContextArg);
+void _thread_entry_wrapper(_thread_entry_t, _thread_arg_t,
+			   _thread_arg_t, _thread_arg_t);
 #endif /* CONFIG_GDB_INFO */

 /**
  *
- * @brief Initialize a new execution context
+ * @brief Initialize a new execution thread
  *
- * This function is utilized to initialize all execution contexts (both fiber
+ * This function is utilized to initialize all execution threads (both fiber
  * and task). The 'priority' parameter will be set to -1 for the creation of
- * task context.
+ * task.
  *
- * This function is called by _NewContext() to initialize task contexts.
+ * This function is called by _new_thread() to initialize tasks.
  *
  * @return N/A
  *
  * \NOMANUAL
  */

-static void _NewContextInternal(
-	char *pStackMem, /* pointer to context stack memory */
+static void _new_thread_internal(
+	char *pStackMem, /* pointer to thread stack memory */
 	unsigned stackSize, /* size of stack in bytes */
-	int priority, /* context priority */
-	unsigned options /* context options: USE_FP, USE_SSE */
+	int priority, /* thread priority */
+	unsigned options /* thread options: USE_FP, USE_SSE */
 	)
 {
 	unsigned long *pInitialCtx;
-	tCCS *ccs = (tCCS *) pStackMem; /* pointer to the new task's ccs */
+	struct tcs *tcs = (struct tcs *)pStackMem; /* ptr to the new task's tcs */

 #ifndef CONFIG_FP_SHARING
 	ARG_UNUSED(options);
 #endif /* !CONFIG_FP_SHARING */

-	ccs->link = (tCCS *)NULL; /* context not inserted into list yet */
-	ccs->prio = priority;
+	tcs->link = (struct tcs *)NULL; /* thread not inserted into list yet */
+	tcs->prio = priority;
 #if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO))
-	ccs->excNestCount = 0;
+	tcs->excNestCount = 0;
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */

 	if (priority == -1)
-		ccs->flags = PREEMPTIBLE | TASK;
+		tcs->flags = PREEMPTIBLE | TASK;
 	else
-		ccs->flags = FIBER;
+		tcs->flags = FIBER;

-#ifdef CONFIG_CONTEXT_CUSTOM_DATA
+#ifdef CONFIG_THREAD_CUSTOM_DATA
 	/* Initialize custom data field (value is opaque to kernel) */

-	ccs->custom_data = NULL;
+	tcs->custom_data = NULL;
 #endif

@ -118,33 +119,33 @@ static void _NewContextInternal(
 	pInitialCtx = (unsigned long *)STACK_ROUND_DOWN(pStackMem + stackSize);

 	/*
-	 * We subtract 11 here to account for the context entry routine
+	 * We subtract 11 here to account for the thread entry routine
 	 * parameters
 	 * (4 of them), eflags, eip, and the edi/esi/ebx/ebp/eax registers.
 	 */
 	pInitialCtx -= 11;

-	ccs->coopReg.esp = (unsigned long)pInitialCtx;
-	PRINTK("\nInitial context ESP = 0x%x\n", ccs->coopReg.esp);
+	tcs->coopReg.esp = (unsigned long)pInitialCtx;
+	PRINTK("\nInitial context ESP = 0x%x\n", tcs->coopReg.esp);

 #ifdef CONFIG_FP_SHARING
 	/*
-	 * Indicate if the context is permitted to use floating point instructions.
+	 * Indicate if the thread is permitted to use floating point instructions.
 	 *
-	 * The first time the new context is scheduled by _Swap() it is guaranteed
+	 * The first time the new thread is scheduled by _Swap() it is guaranteed
 	 * to inherit an FPU that is in a "sane" state (if the most recent user of
 	 * the FPU was cooperatively swapped out) or a completely "clean" state
-	 * (if the most recent user of the FPU was pre-empted, or if the new context
+	 * (if the most recent user of the FPU was pre-empted, or if the new thread
 	 * is the first user of the FPU).
 	 *
-	 * The USE_FP flag bit is set in the tCCS structure if a context is
+	 * The USE_FP flag bit is set in the struct tcs structure if a thread is
 	 * authorized to use _any_ non-integer capability, whether it's the basic
 	 * x87 FPU/MMX capability, SSE instructions, or a combination of both. The
-	 * USE_SSE flag bit is set only if a context can use SSE instructions.
+	 * USE_SSE flag bit is set only if a thread can use SSE instructions.
 	 *
 	 * Note: Callers need not follow the aforementioned protocol when passing
-	 * in context options. It is legal for the caller to specify _only_ the
-	 * USE_SSE option bit if a context will be utilizing SSE instructions (and
+	 * in thread options. It is legal for the caller to specify _only_ the
+	 * USE_SSE option bit if a thread will be utilizing SSE instructions (and
 	 * possibly x87 FPU/MMX instructions).
 	 */

@ -155,7 +156,7 @@ static void _NewContextInternal(
 * correctly. The issue is that SysGen will utilize group 0x10 user-defined
 * groups, and thus tasks placed in the user-defined group will have the
 * SSE_GROUP (but not the FPU_GROUP) bit set. This results in both the USE_FP
-* and USE_SSE bits being set in the tCCS. For systems configured only with
+* and USE_SSE bits being set in the struct tcs. For systems configured only with
 * FLOAT, the setting of the USE_SSE is harmless, but the setting of USE_FP is
 * wasteful. Thus to ensure that that systems configured only with FLOAT
 * behave as expected, the USE_SSE option bit is ignored.

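The "subtract 11" arithmetic in the hunk above is easy to misread, so here is the same reservation rendered as a plain C sketch (it reuses the names from the diff and is not compilable on its own): the 11 slots are the 4 thread entry-routine parameters, EIP, EFLAGS, and the edi/esi/ebx/ebp/eax registers that the first swap-in will pop.

	/* sketch, assuming the same STACK_ROUND_DOWN and tcs as above */
	unsigned long *frame =
		(unsigned long *)STACK_ROUND_DOWN(pStackMem + stackSize);
	frame -= 11; /* 4 args + EIP + EFLAGS + edi/esi/ebx/ebp/eax */
	tcs->coopReg.esp = (unsigned long)frame;
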
@ -178,52 +179,50 @@ static void _NewContextInternal(
 #endif

 	if (options != 0) {
-		ccs->flags |= (options | USE_FP);
+		tcs->flags |= (options | USE_FP);
 	}
 #endif /* CONFIG_FP_SHARING */

-	PRINTK("\ntCCS * = 0x%x", ccs);
+	PRINTK("\nstruct tcs * = 0x%x", tcs);

-#if defined(CONFIG_CONTEXT_MONITOR)
+#if defined(CONFIG_THREAD_MONITOR)
 	{
 		unsigned int imask;

 		/*
-		 * Add the newly initialized context to head of the list of
-		 * contexts.
-		 * This singly linked list of contexts maintains ALL the
-		 * contexts in the
+		 * Add the newly initialized thread to head of the list of threads.
+		 * This singly linked list of threads maintains ALL the threads in the
 		 * system: both tasks and fibers regardless of whether they are
 		 * runnable.
 		 */

 		imask = irq_lock();
-		ccs->next_context = _nanokernel.contexts;
-		_nanokernel.contexts = ccs;
+		tcs->next_thread = _nanokernel.threads;
+		_nanokernel.threads = tcs;
 		irq_unlock(imask);
 	}
-#endif /* CONFIG_CONTEXT_MONITOR */
+#endif /* CONFIG_THREAD_MONITOR */

-	_nano_timeout_ccs_init(ccs);
+	_nano_timeout_tcs_init(tcs);
 }

 #ifdef CONFIG_GDB_INFO
 /**
 *
- * @brief Adjust stack before invoking _context_entry
+ * @brief Adjust stack before invoking _thread_entry
 *
- * This function adjusts the initial stack frame created by _NewContext()
+ * This function adjusts the initial stack frame created by _new_thread()
 * such that the GDB stack frame unwinders recognize it as the outermost frame
- * in the context's stack. The function then jumps to _context_entry().
+ * in the thread's stack. The function then jumps to _thread_entry().
 *
 * GDB normally stops unwinding a stack when it detects that it has
 * reached a function called main(). Kernel tasks, however, do not have
 * a main() function, and there does not appear to be a simple way of stopping
 * the unwinding of the stack.
 *
- * Given the initial context created by _NewContext(), GDB expects to find a
- * return address on the stack immediately above the context entry routine
- * _context_entry, in the location occupied by the initial EFLAGS.
+ * Given the initial thread created by _new_thread(), GDB expects to find a
+ * return address on the stack immediately above the thread entry routine
+ * _thread_entry, in the location occupied by the initial EFLAGS.
 * GDB attempts to examine the memory at this return address, which typically
 * results in an invalid access to page 0 of memory.
 *

@ -243,7 +242,7 @@ static void _NewContextInternal(
 * |__________________|
 * | initial EFLAGS | <---- ESP when invoked by _Swap()
 * |__________________| (Zeroed by this routine)
-* | entryRtn | <----- Context Entry Routine invoked by _Swap()
+* | entryRtn | <----- Thread Entry Routine invoked by _Swap()
 * |__________________| (This routine if GDB_INFO)
 * | <edi> | \
 * |__________________| |

@ -258,106 +257,104 @@ static void _NewContextInternal(
 *
 *
 * The initial EFLAGS cannot be overwritten until after _Swap() has swapped in
-* the new context for the first time. This routine is called by _Swap() the
-* first time that the new context is swapped in, and it jumps to
-* _context_entry after it has done its work.
+* the new thread for the first time. This routine is called by _Swap() the
+* first time that the new thread is swapped in, and it jumps to
+* _thread_entry after it has done its work.
 *
 * @return this routine does NOT return.
 *
 * \NOMANUAL
 */

-__asm__("\t.globl _context_entry\n"
+__asm__("\t.globl _thread_entry\n"
	"\t.section .text\n"
-	"_ContextEntryWrapper:\n" /* should place this func .S file and use
+	"_thread_entry_wrapper:\n" /* should place this func .S file and use
				     SECTION_FUNC */
	"\tmovl $0, (%esp)\n" /* zero initialEFLAGS location */
-	"\tjmp _context_entry\n");
+	"\tjmp _thread_entry\n");
 #endif /* CONFIG_GDB_INFO */

 /**
 *
- * @brief Create a new kernel execution context
+ * @brief Create a new kernel execution thread
 *
- * This function is utilized to create execution contexts for both fiber
- * contexts and kernel task contexts.
+ * This function is utilized to create execution threads for both fiber
+ * threads and kernel tasks.
 *
- * The "context control block" (CCS) is carved from the "end" of the specified
- * context stack memory.
+ * The "thread control block" (TCS) is carved from the "end" of the specified
+ * thread stack memory.
 *
- * @return opaque pointer to initialized CCS structure
+ * @return opaque pointer to initialized TCS structure
 *
 * \NOMANUAL
 */

-void _NewContext(
+void _new_thread(
	char *pStackMem, /* pointer to aligned stack memory */
	unsigned stackSize, /* size of stack in bytes */
-	_ContextEntry pEntry, /* context entry point function */
-	void *parameter1, /* first parameter to context entry point function */
-	void *parameter2, /* second parameter to context entry point function */
-	void *parameter3, /* third parameter to context entry point function */
-	int priority, /* context priority */
-	unsigned options /* context options: USE_FP, USE_SSE */
+	_thread_entry_t pEntry, /* thread entry point function */
+	void *parameter1, /* first parameter to thread entry point function */
+	void *parameter2, /* second parameter to thread entry point function */
+	void *parameter3, /* third parameter to thread entry point function */
+	int priority, /* thread priority */
+	unsigned options /* thread options: USE_FP, USE_SSE */
	)
 {
-	unsigned long *pInitialContext;
+	unsigned long *pInitialThread;

 #ifdef CONFIG_INIT_STACKS
	memset(pStackMem, 0xaa, stackSize);
 #endif

-	/* carve the context entry struct from the "base" of the stack */
+	/* carve the thread entry struct from the "base" of the stack */

-	pInitialContext =
+	pInitialThread =
		(unsigned long *)STACK_ROUND_DOWN(pStackMem + stackSize);

	/*
	 * Create an initial context on the stack expected by the _Swap()
	 * primitive.
-	 * Given that both task and fiber contexts execute at privilege 0, the
-	 * setup for both contexts are equivalent.
+	 * Given that both task and fibers execute at privilege 0, the
+	 * setup for both threads are equivalent.
	 */

-	/* push arguments required by _context_entry() */
+	/* push arguments required by _thread_entry() */

-	*--pInitialContext = (unsigned long)parameter3;
-	*--pInitialContext = (unsigned long)parameter2;
-	*--pInitialContext = (unsigned long)parameter1;
-	*--pInitialContext = (unsigned long)pEntry;
+	*--pInitialThread = (unsigned long)parameter3;
+	*--pInitialThread = (unsigned long)parameter2;
+	*--pInitialThread = (unsigned long)parameter1;
+	*--pInitialThread = (unsigned long)pEntry;

	/* push initial EFLAGS; only modify IF and IOPL bits */

-	*--pInitialContext = (EflagsGet() & ~EFLAGS_MASK) | EFLAGS_INITIAL;
+	*--pInitialThread = (EflagsGet() & ~EFLAGS_MASK) | EFLAGS_INITIAL;

 #ifdef CONFIG_GDB_INFO

	/*
-	 * Arrange for the _ContextEntryWrapper() function to be called
-	 * to adjust the stack before _context_entry() is invoked.
+	 * Arrange for the _thread_entry_wrapper() function to be called
+	 * to adjust the stack before _thread_entry() is invoked.
	 */

-	*--pInitialContext = (unsigned long)_ContextEntryWrapper;
+	*--pInitialThread = (unsigned long)_thread_entry_wrapper;

 #else /* CONFIG_GDB_INFO */

-	*--pInitialContext = (unsigned long)_context_entry;
+	*--pInitialThread = (unsigned long)_thread_entry;

 #endif /* CONFIG_GDB_INFO */

	/*
	 * note: stack area for edi, esi, ebx, ebp, and eax registers can be
	 * left
-	 * uninitialized, since _context_entry() doesn't care about the values
+	 * uninitialized, since _thread_entry() doesn't care about the values
	 * of these registers when it begins execution
	 */

	/*
-	 * For kernel tasks and fibers the context the context control struct
-	 * (CCS)
-	 * is located at the "low end" of memory set aside for the context's
-	 * stack
+	 * For kernel tasks and fibers the thread the thread control struct (TCS)
+	 * is located at the "low end" of memory set aside for the thread's stack.
	 */

-	_NewContextInternal(pStackMem, stackSize, priority, options);
+	_new_thread_internal(pStackMem, stackSize, priority, options);
 }

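For orientation, a hedged usage sketch of the creation path this file implements: a fiber started through the public nanokernel API ends up in _new_thread(), which carves the struct tcs from the low end of the supplied stack. The task_fiber_start() signature shown here is an assumption based on the nanokernel API of this era, not something confirmed by this diff.

	static char my_stack[1024];

	static void my_entry(int arg1, int arg2)
	{
		/* fiber body runs with the frame primed by _new_thread() */
	}

	void start_my_fiber(void)
	{
		/* stack, size, entry, arg1, arg2, priority, options */
		task_fiber_start(my_stack, sizeof(my_stack),
				 my_entry, 0, 0, 5, 0);
	}
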
@ -1,7 +1,7 @@
 /* cpuhalt.S - CPU power management code for IA-32 */

 /*
- * Copyright (c) 2011-2014 Wind River Systems, Inc.
+ * Copyright (c) 2011-2015 Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:

@ -98,9 +98,9 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
 *
 * @brief Atomically re-enable interrupts and enter low power mode
 *
- * This function is utilized by the nanokernel object "wait" APIs for task
- * contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
- * and nano_task_fifo_get_wait().
+ * This function is utilized by the nanokernel object "wait" APIs for tasks,
+ * e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(),
+ * nano_task_stack_pop_wait(), and nano_task_fifo_get_wait().
 *
 * INTERNAL
 * The requirements for nano_cpu_atomic_idle() are as follows:

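A sketch of the pattern the wait APIs rely on, per the doc comment above: the caller checks its wake condition with interrupts locked, and if it must sleep, hands the lock key to nano_cpu_atomic_idle() so that re-enabling interrupts and halting happen as one atomic step (no wakeup can slip in between the check and the halt). The data_available() condition here is hypothetical, not from this diff.

	int key = irq_lock();

	if (!data_available()) {
		/* unlock + halt performed atomically by the kernel */
		nano_cpu_atomic_idle(key);
	} else {
		irq_unlock(key);
	}
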
@ -1,7 +1,7 @@
 /* crt0.S - crt0 module for the IA-32 boards */

 /*
- * Copyright (c) 2010-2014 Wind River Systems, Inc.
+ * Copyright (c) 2010-2015 Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:

@ -102,7 +102,7 @@ __start:
 #endif
	/*
	 * Ensure interrupts are disabled. Interrupts are enabled when
-	 * the first kernel thread context switch occurs.
+	 * the first context switch occurs.
	 */

	cli

@ -247,7 +247,7 @@ SECTION_FUNC(TEXT_START, __start)

	/*
	 * Ensure interrupts are disabled. Interrupts are enabled when
-	 * the first kernel thread context switch occurs.
+	 * the first context switch occurs.
	 */

	cli

@ -68,7 +68,7 @@ an error code is present on the stack or not.

 /@ _ExcExit() will adjust the stack to discard the error code @/

-0x0f jmp _ExcExit /@ restore context context @/
+0x0f jmp _ExcExit /@ restore thread context @/
 Machine code: 0xe9, 0x00, 0x00, 0x00, 0x00

 NOTE: Be sure to update the arch specific definition of the _EXC_STUB_SIZE

@ -95,7 +95,7 @@ void _NanoCpuExcConnectAtDpl(unsigned int vector,
 * an interrupt asserted as a direct result of program execution as opposed
 * to a hardware device asserting an interrupt.
 *
- * When the exception specified by <vector> is asserted, the current context
+ * When the exception specified by <vector> is asserted, the current thread
 * is saved on the current stack, i.e. a switch to some other stack is not
 * performed, followed by executing <routine> which has the following signature:
 *

@ -135,7 +135,7 @@ void nanoCpuExcConnect(unsigned int vector, /* interrupt vector: 0 to 255 on
 * an interrupt asserted as a direct result of program execution as opposed
 * to a hardware device asserting an interrupt.
 *
- * When the exception specified by <vector> is asserted, the current context
+ * When the exception specified by <vector> is asserted, the current thread
 * is saved on the current stack, i.e. a switch to some other stack is not
 * performed, followed by executing <routine> which has the following signature:
 *

@ -214,7 +214,7 @@ void _NanoCpuExcConnectAtDpl(
	/*
	 * generate code that invokes _ExcExit(); note that a jump is used,
	 * since _ExcExit() takes care of popping the error code and returning
-	 * back to the context that triggered the exception
+	 * back to the execution context that triggered the exception
	 */

	STUB_PTR[offsetAdjust] = IA32_JMP_OPCODE;

@ -1,7 +1,7 @@
 /* excstub.S - exception management support for IA-32 architecture */

 /*
- * Copyright (c) 2011-2014 Wind River Systems, Inc.
+ * Copyright (c) 2011-2015 Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:

@ -63,7 +63,7 @@ and exiting a C exception handler.
 *
 * This function is called from the exception stub created by nanoCpuExcConnect()
 * to inform the kernel of an exception. This routine currently does
- * _not_ increment a context/interrupt specific exception count. Also,
+ * _not_ increment a thread/interrupt specific exception count. Also,
 * execution of the exception handler occurs on the current stack, i.e.
 * _ExcEnt() does not switch to another stack. The volatile integer
 * registers are saved on the stack, and control is returned back to the

@ -126,7 +126,7 @@ SECTION_FUNC(TEXT, _ExcEnt)

	/*
	 * Push the remaining volatile registers on the existing stack.
-	 * Note that eax has already been saved on the context stack.
+	 * Note that eax has already been saved on the execution context stack.
	 */

	pushl %ecx

@ -161,34 +161,34 @@ SECTION_FUNC(TEXT, _ExcEnt)

	movl _nanokernel + __tNANO_current_OFFSET, %ecx

-	incl __tCCS_excNestCount_OFFSET(%ecx) /* inc exception nest count */
+	incl __tTCS_excNestCount_OFFSET(%ecx) /* inc exception nest count */

 #ifdef CONFIG_GDB_INFO

	/*
	 * Save the pointer to the stack frame (NANO_ESF *) in
-	 * the current context if this is the outermost exception.
+	 * the current execution context if this is the outermost exception.
	 * The ESF pointer is used by debug tools to locate the volatile
-	 * registers and the stack of the preempted context.
+	 * registers and the stack of the preempted thread.
	 */

-	testl $EXC_ACTIVE, __tCCS_flags_OFFSET (%ecx)
+	testl $EXC_ACTIVE, __tTCS_flags_OFFSET (%ecx)
	jne alreadyInException
-	movl %esp, __tCCS_esfPtr_OFFSET(%ecx)
+	movl %esp, __tTCS_esfPtr_OFFSET(%ecx)

 BRANCH_LABEL(alreadyInException)

 #endif /* CONFIG_GDB_INFO */

	/*
-	 * Set the EXC_ACTIVE bit in the tCCS of the current context.
-	 * This enables _Swap() to preserve the context's FP registers
+	 * Set the EXC_ACTIVE bit in the TCS of the current thread.
+	 * This enables _Swap() to preserve the thread's FP registers
	 * (where needed) if the exception handler causes a context switch.
	 * It also indicates to debug tools that an exception is being
	 * handled in the event of a context switch.
	 */

-	orl $EXC_ACTIVE, __tCCS_flags_OFFSET(%ecx)
+	orl $EXC_ACTIVE, __tTCS_flags_OFFSET(%ecx)

 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */

@ -222,7 +222,7 @@ BRANCH_LABEL(allDone)
 * This function is called from the exception stub created by nanoCpuExcConnect()
 * to inform the kernel that the processing of an exception has
 * completed. This routine restores the volatile integer registers and
- * then control is returned back to the interrupted context or ISR.
+ * then control is returned back to the interrupted thread or ISR.
 *
 * @return N/A
 *

@ -257,18 +257,18 @@ SECTION_FUNC(TEXT, _ExcExit)
	 * Determine whether exiting from a nested interrupt.
	 */

-	decl __tCCS_excNestCount_OFFSET(%ecx) /* dec exception nest count */
+	decl __tTCS_excNestCount_OFFSET(%ecx) /* dec exception nest count */

-	cmpl $0, __tCCS_excNestCount_OFFSET(%ecx)
+	cmpl $0, __tTCS_excNestCount_OFFSET(%ecx)
	jne nestedException

	/*
-	 * Clear the EXC_ACTIVE bit in the tCCS of the current context
+	 * Clear the EXC_ACTIVE bit in the tTCS of the current execution context
	 * if we are not in a nested exception (ie, when we exit the outermost
	 * exception).
	 */

-	andl $~EXC_ACTIVE, __tCCS_flags_OFFSET (%ecx)
+	andl $~EXC_ACTIVE, __tTCS_flags_OFFSET (%ecx)

 BRANCH_LABEL(nestedException)

@ -118,9 +118,9 @@ FUNC_NORETURN void _NanoFatalErrorHandler(
		break;
	}

-	printk("Current context ID = 0x%x\n"
+	printk("Current thread ID = 0x%x\n"
	       "Faulting instruction address = 0x%x\n",
-	       context_self_get(),
+	       sys_thread_self_get(),
	       pEsf->eip);
 #endif /* CONFIG_PRINTK */

@ -37,7 +37,7 @@ floating point resources, by allowing the system to save FPU state information
 in a task or fiber's stack region when a pre-emptive context switch occurs.

 The floating point resource sharing mechanism is designed for minimal
-intrusiveness. Floating point context saving is only performed for tasks and
+intrusiveness. Floating point thread saving is only performed for tasks and
 fibers that explicitly enable FP resource sharing, to avoid impacting the stack
 size requirements of all other tasks and fibers. For those tasks and fibers
 that do require FP resource sharing, a "lazy save/restore" mechanism is employed

@ -48,10 +48,10 @@ that they will be altered, or when there is no need to preserve their contents.
 The following APIs are provided to allow floating point resource sharing to be
 enabled or disabled at run-time:

-void fiber_float_enable (nano_context_id_t ctxId, unsigned int options)
-void task_float_enable (nano_context_id_t ctxId, unsigned int options)
-void fiber_float_disable (nano_context_id_t ctxId)
-void task_float_disable (nano_context_id_t ctxId)
+void fiber_float_enable (nano_thread_id_t thread_id, unsigned int options)
+void task_float_enable (nano_thread_id_t thread_id, unsigned int options)
+void fiber_float_disable (nano_thread_id_t thread_id)
+void task_float_disable (nano_thread_id_t thread_id)

 The 'options' parameter is used to specify what non-integer capabilities are
 being used. The same options accepted by fiber_fiber_start() are used in the

@ -117,15 +117,15 @@ extern uint32_t _sse_mxcsr_default_value; /* SSE control/status register default
 * @brief Save non-integer context information
 *
 * This routine saves the system's "live" non-integer context into the
- * specified CCS. If the specified task or fiber supports SSE then
- * x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved.
+ * specified TCS. If the specified task or fiber supports SSE then
+ * x87/MMX/SSEx thread info is saved, otherwise only x87/MMX thread is saved.
 *
 * @return N/A
 */

-static void _FpCtxSave(tCCS *ccs)
+static void _FpCtxSave(struct tcs *tcs)
 {
-	_do_fp_ctx_save(ccs->flags & USE_SSE, &ccs->preempFloatReg);
+	_do_fp_ctx_save(tcs->flags & USE_SSE, &tcs->preempFloatReg);
 }

 /**

@ -137,9 +137,9 @@ static void _FpCtxSave(tCCS *ccs)
 * @return N/A
 */

-static inline void _FpCtxInit(tCCS *ccs)
+static inline void _FpCtxInit(struct tcs *tcs)
 {
-	_do_fp_ctx_init(ccs->flags & USE_SSE);
+	_do_fp_ctx_init(tcs->flags & USE_SSE);
 }

 /**

@ -154,7 +154,7 @@ static inline void _FpCtxInit(tCCS *ccs)
 * a) USE_FP indicates x87 FPU and MMX registers only
 * b) USE_SSE indicates x87 FPU and MMX and SSEx registers
 *
- * Invoking this routine creates a floating point context for the task/fiber
+ * Invoking this routine creates a floating point thread for the task/fiber
 * that corresponds to an FPU that has been reset. The system will thereafter
 * protect the task/fiber's FP context so that it is not altered during
 * a pre-emptive context switch.

@ -181,12 +181,12 @@ static inline void _FpCtxInit(tCCS *ccs)
 * tasks and fibers.
 */

-void _FpEnable(tCCS *ccs,
+void _FpEnable(struct tcs *tcs,
	       unsigned int options /* USE_FP or USE_SSE */
	       )
 {
	unsigned int imask;
-	tCCS *fp_owner;
+	struct tcs *fp_owner;

	/* Lock interrupts to prevent a pre-emptive context switch from occuring
	 */

@ -195,7 +195,7 @@ void _FpEnable(tCCS *ccs,

	/* Indicate task/fiber requires non-integer context saving */

-	ccs->flags |= options | USE_FP; /* USE_FP is treated as a "dirty bit" */
+	tcs->flags |= options | USE_FP; /* USE_FP is treated as a "dirty bit" */

 #ifdef CONFIG_AUTOMATIC_FP_ENABLING
	/*

@ -223,21 +223,20 @@ void _FpEnable(tCCS *ccs,

	/* Now create a virgin FP context */

-	_FpCtxInit(ccs);
+	_FpCtxInit(tcs);

	/* Associate the new FP context with the specified task/fiber */

-	if (ccs == _nanokernel.current) {
+	if (tcs == _nanokernel.current) {
		/*
		 * When enabling FP support for self, just claim ownership of
		 *the FPU
		 * and leave CR0[TS] unset.
		 *
-		 * (Note: the FP context is "live" in hardware, not saved in
-		 *CCS.)
+		 * (Note: the FP context is "live" in hardware, not saved in TCS.)
		 */

-		_nanokernel.current_fp = ccs;
+		_nanokernel.current_fp = tcs;
	} else {
		/*
		 * When enabling FP support for someone else, assign ownership

@ -247,13 +246,13 @@ void _FpEnable(tCCS *ccs,
		if ((_nanokernel.current->flags & USE_FP) != USE_FP) {
			/*
			 * We are not FP-capable, so mark FPU as owned by the
-			 * context
+			 * thread
			 * we've just enabled FP support for, then disable our
			 * own
			 * FP access by setting CR0[TS] to its original state.
			 */

-			_nanokernel.current_fp = ccs;
+			_nanokernel.current_fp = tcs;
 #ifdef CONFIG_AUTOMATIC_FP_ENABLING
			_FpAccessDisable();
 #endif /* CONFIG_AUTOMATIC_FP_ENABLING */

@ -261,7 +260,7 @@ void _FpEnable(tCCS *ccs,
			/*
			 * We are FP-capable (and thus had FPU ownership on
			 *entry), so save
-			 * the new FP context in their CCS, leave FPU ownership
+			 * the new FP context in their TCS, leave FPU ownership
			 *with self,
			 * and leave CR0[TS] unset.
			 *

@ -280,7 +279,7 @@ void _FpEnable(tCCS *ccs,
			 *exception.)
			 */

-			_FpCtxSave(ccs);
+			_FpCtxSave(tcs);
		}
	}

@ -345,7 +344,7 @@ FUNC_ALIAS(_FpEnable, task_float_enable, void);
 * tasks and fibers.
 */

-void _FpDisable(tCCS *ccs)
+void _FpDisable(struct tcs *tcs)
 {
	unsigned int imask;

@ -360,17 +359,17 @@ void _FpDisable(tCCS *ccs)
	 * of the options specified at the time support was enabled.
	 */

-	ccs->flags &= ~(USE_FP | USE_SSE);
+	tcs->flags &= ~(USE_FP | USE_SSE);

-	if (ccs == _nanokernel.current) {
+	if (tcs == _nanokernel.current) {
 #ifdef CONFIG_AUTOMATIC_FP_ENABLING
		_FpAccessDisable();
 #endif /* CONFIG_AUTOMATIC_FP_ENABLING */

-		_nanokernel.current_fp = (tCCS *)0;
+		_nanokernel.current_fp = (struct tcs *)0;
	} else {
-		if (_nanokernel.current_fp == ccs)
-			_nanokernel.current_fp = (tCCS *)0;
+		if (_nanokernel.current_fp == tcs)
+			_nanokernel.current_fp = (struct tcs *)0;
	}

	irq_unlock(imask);

@ -437,7 +436,7 @@ void _FpNotAvailableExcHandler(NANO_ESF * pEsf /* not used */
	ARG_UNUSED(pEsf);

	/*
-	 * Assume the exception did not occur in the context of an ISR.
+	 * Assume the exception did not occur in the thread of an ISR.
	 * (In other words, CPU cycles will not be consumed to perform
	 * error checking to ensure the exception was not generated in an ISR.)
	 */

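A hedged usage sketch of the renamed FP-sharing APIs documented in this file (FUNC_ALIAS binds task_float_enable/task_float_disable to the routines above; the surrounding code is illustrative only): a task enables x87/MMX saving for itself, does its floating point work, then disables saving again. USE_SSE could be OR'ed into the options for SSE-capable code.

	nano_thread_id_t self = sys_thread_self_get();

	task_float_enable(self, USE_FP); /* FP state now preserved across preemption */
	/* ... floating point computation ... */
	task_float_disable(self);
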
@ -78,7 +78,7 @@ a "beginning of interrupt" (BOI) callout and an "end of interrupt" (EOI) callout
 0x23 addl $(4 * numParams), %esp /@ pop parameters @/
 Machine code: 0x83, 0xc4, (4 * numParams)

-0x26 jmp _IntExit /@ restore context or reschedule @/
+0x26 jmp _IntExit /@ restore thread or reschedule @/
 Machine code: 0xe9, 0x00, 0x00, 0x00, 0x00

 NOTE: Be sure to update the arch specific definition of the _INT_STUB_SIZE macro

@ -252,7 +252,7 @@ void _IntVecSet(
 *
 * When the device asserts an interrupt on the specified <irq>, a switch to
 * the interrupt stack is performed (if not already executing on the interrupt
- * stack), followed by saving the integer (i.e. non-floating point) context of
+ * stack), followed by saving the integer (i.e. non-floating point) thread of
 * the currently executing task, fiber, or ISR. The ISR specified by <routine>
 * will then be invoked with the single <parameter>. When the ISR returns, a
 * context switch may occur.

@ -447,8 +447,8 @@ int irq_connect(
	offsetAdjust += 3;

	/*
-	 * generate code that invokes _IntExit(); note that a jump is used,
-	 * since _IntExit() takes care of returning back to the context that
+	 * generate code that invokes _IntExit(); note that a jump is used, since
+	 * _IntExit() takes care of returning back to the execution context that
	 * experienced the interrupt (i.e. branch tail optimization)
	 */

@ -183,11 +183,11 @@ SECTION_FUNC(TEXT, _IntEnt)

	/* switch to base of the interrupt stack */

-	movl %esp, %edx /* save current context stack pointer */
+	movl %esp, %edx /* save current thread's stack pointer */
	movl __tNANO_common_isp_OFFSET(%ecx), %esp /* load new sp value */


-	/* save context stack pointer onto base of interrupt stack */
+	/* save thread's stack pointer onto base of interrupt stack */

	pushl %edx /* Save stack pointer */

@ -243,11 +243,12 @@ BRANCH_LABEL(_HandleIdle)
 * to inform the kernel that the processing of an interrupt has
 * completed. This routine decrements _nanokernel.nested (to support interrupt
 * nesting), restores the volatile integer registers, and then switches
- * back to the interrupted context's stack, if this isn't a nested interrupt.
+ * back to the interrupted execution context's stack, if this isn't a nested
+ * interrupt.
 *
- * Finally, control is returned back to the interrupted fiber context or ISR.
+ * Finally, control is returned back to the interrupted fiber or ISR.
 * A context switch _may_ occur if the interrupted context was a task context,
- * in which case one or more other fiber and task contexts will execute before
+ * in which case one or more other fibers and tasks will execute before
 * this routine resumes and control gets returned to the interrupted task.
 *
 * @return N/A

@ -275,37 +276,37 @@ SECTION_FUNC(TEXT, _IntExit)

	/*
	 * Determine whether the execution of the ISR requires a context
-	 * switch. If the interrupted context is PREEMPTIBLE and
+	 * switch. If the interrupted thread is PREEMPTIBLE (a task) and
	 * _nanokernel.fiber is non-NULL, a _Swap() needs to occur.
	 */

	movl __tNANO_current_OFFSET (%ecx), %eax
-	testl $PREEMPTIBLE, __tCCS_flags_OFFSET(%eax)
+	testl $PREEMPTIBLE, __tTCS_flags_OFFSET(%eax)
	je noReschedule
	cmpl $0, __tNANO_fiber_OFFSET (%ecx)
	je noReschedule

	/*
-	 * Set the INT_ACTIVE bit in the tCCS to allow the upcoming call to
+	 * Set the INT_ACTIVE bit in the tTCS to allow the upcoming call to
	 * _Swap() to determine whether non-floating registers need to be
	 * preserved using the lazy save/restore algorithm, or to indicate to
	 * debug tools that a preemptive context switch has occurred.
	 *
-	 * Setting the NO_METRICS bit tells _Swap() that the per-context
+	 * Setting the NO_METRICS bit tells _Swap() that the per-execution context
	 * [totalRunTime] calculation has already been performed and that
	 * there is no need to do it again.
	 */

 #if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
-	orl $INT_ACTIVE, __tCCS_flags_OFFSET(%eax)
+	orl $INT_ACTIVE, __tTCS_flags_OFFSET(%eax)
 #endif

	/*
	 * A context reschedule is required: keep the volatile registers of
-	 * the interrupted context on the context's stack. Utilize
+	 * the interrupted thread on the context's stack. Utilize
	 * the existing _Swap() primitive to save the remaining
	 * thread's registers (including floating point) and perform
-	 * a switch to the new context.
+	 * a switch to the new thread.
	 */

	popl %esp /* switch back to kernel stack */

@ -314,10 +315,10 @@ SECTION_FUNC(TEXT, _IntExit)
	call _Swap

	/*
-	 * The interrupted context thread has now been scheduled,
+	 * The interrupted thread has now been scheduled,
	 * as the result of a _later_ invocation of _Swap().
	 *
-	 * Now need to restore the interrupted context's environment before
+	 * Now need to restore the interrupted thread's environment before
	 * returning control to it at the point where it was interrupted ...
	 */

@ -326,12 +327,12 @@ SECTION_FUNC(TEXT, _IntExit)
     defined(CONFIG_GDB_INFO) )
	/*
	 * _Swap() has restored the floating point registers, if needed.
-	 * Clear the INT_ACTIVE bit of the interrupted context's tCCS
+	 * Clear the INT_ACTIVE bit of the interrupted thread's TCS
	 * since it has served its purpose.
	 */

	movl _nanokernel + __tNANO_current_OFFSET, %eax
-	andl $~INT_ACTIVE, __tCCS_flags_OFFSET (%eax)
+	andl $~INT_ACTIVE, __tTCS_flags_OFFSET (%eax)
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */


@ -340,7 +341,7 @@ SECTION_FUNC(TEXT, _IntExit)



-	/* Restore volatile registers and return to the interrupted context */
+	/* Restore volatile registers and return to the interrupted thread */
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
	call _int_latency_stop
 #endif

@ -397,7 +398,8 @@ BRANCH_LABEL(nestedInterrupt)
 * A spurious interrupt is considered a fatal condition, thus this routine
 * merely sets up the 'reason' and 'pEsf' parameters to the routine
 * _SysFatalHwErrorHandler(). In other words, there is no provision to return
- * to the interrupted context and thus the volatile registers are not saved.
+ * to the interrupted execution context and thus the volatile registers are not
+ * saved.
 *
 * @return Never returns
 *

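The reschedule test that _IntExit performs, rendered as a C sketch for readability (this mirrors the assembly above and is not an actual kernel function): only a PREEMPTIBLE thread, i.e. a task, is ever swapped out on interrupt exit, and only when at least one fiber is ready to run.

	/* sketch of the _IntExit decision; eflags_key is assumed held */
	if ((_nanokernel.current->flags & PREEMPTIBLE) &&
	    (_nanokernel.fiber != NULL)) {
		_Swap(eflags_key); /* fibers/tasks run before this returns */
	}
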
@ -65,27 +65,27 @@ GEN_OFFSET_SYM(tNANO, common_isp);
 GEN_OFFSET_SYM(tNANO, idle);
 #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */

-/* Intel-specific tCCS structure member offsets */
+/* Intel-specific struct tcs structure member offsets */

 #ifdef CONFIG_GDB_INFO
-GEN_OFFSET_SYM(tCCS, esfPtr);
+GEN_OFFSET_SYM(tTCS, esfPtr);
 #endif /* CONFIG_GDB_INFO */
 #if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO))
-GEN_OFFSET_SYM(tCCS, excNestCount);
+GEN_OFFSET_SYM(tTCS, excNestCount);
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
-#ifdef CONFIG_CONTEXT_CUSTOM_DATA
-GEN_OFFSET_SYM(tCCS, custom_data); /* available for custom use */
+#ifdef CONFIG_THREAD_CUSTOM_DATA
+GEN_OFFSET_SYM(tTCS, custom_data); /* available for custom use */
 #endif
-GEN_OFFSET_SYM(tCCS, coopFloatReg); /* start of coop FP register set */
-GEN_OFFSET_SYM(tCCS, preempFloatReg); /* start of prempt FP register set */
+GEN_OFFSET_SYM(tTCS, coopFloatReg); /* start of coop FP register set */
+GEN_OFFSET_SYM(tTCS, preempFloatReg); /* start of prempt FP register set */

-/* size of the tCCS structure sans save area for floating point regs */
+/* size of the struct tcs structure sans save area for floating point regs */

-GEN_ABSOLUTE_SYM(__tCCS_NOFLOAT_SIZEOF,
-		 sizeof(tCCS) - sizeof(tCoopFloatReg) -
+GEN_ABSOLUTE_SYM(__tTCS_NOFLOAT_SIZEOF,
+		 sizeof(tTCS) - sizeof(tCoopFloatReg) -
		 sizeof(tPreempFloatReg));

-/* tCoopReg structure member offsets: tCCS->coopReg is of type tCoopReg */
+/* tCoopReg structure member offsets: tTCS->coopReg is of type tCoopReg */

 GEN_OFFSET_SYM(tCoopReg, esp);

@ -42,7 +42,7 @@ Intel-specific parts of start_task(). Only FP functionality currently.
 /*
 * The following IA-32-specific task group is used for tasks that use SSE
 * instructions. It is *not* formally reserved by SysGen for this purpose.
- * See comments in context.c regarding the use of SSE_GROUP, and comments
+ * See comments in thread.c regarding the use of SSE_GROUP, and comments
 * in task.h regarding task groups reserved by SysGen.
 *
 * This identifier corresponds to the first user-defined task group.

@ -60,28 +60,28 @@ Intel-specific parts of start_task(). Only FP functionality currently.

 void _StartTaskArch(
	struct k_task *X, /* ptr to task control block */
-	unsigned int *pOpt /* context options container */
+	unsigned int *pOpt /* thread options container */
	)
 {
	/*
	 * The IA-32 nanokernel implementation uses the USE_FP bit in the
-	 * tCCS->flags structure as a "dirty bit". The USE_FP flag bit will be
-	 * set whenever a context uses any non-integer capability, whether it's
+	 * struct tcs->flags structure as a "dirty bit". The USE_FP flag bit will be
+	 * set whenever a thread uses any non-integer capability, whether it's
	 * just the x87 FPU capability, SSE instructions, or a combination of
-	 * both. The USE_SSE flag bit will only be set if a context uses SSE
+	 * both. The USE_SSE flag bit will only be set if a thread uses SSE
	 * instructions.
	 *
	 * However, callers of fiber_fiber_start(), task_fiber_start(), or even
-	 * _NewContext() don't need to follow the protocol used by the IA-32
-	 * nanokernel w.r.t. managing the tCCS->flags field. If a context
+	 * _new_thread() don't need to follow the protocol used by the IA-32
+	 * nanokernel w.r.t. managing the struct tcs->flags field. If a thread
	 * will be utilizing just the x87 FPU capability, then the USE_FP
-	 * option bit is specified. If a context will be utilizing SSE
+	 * option bit is specified. If a thread will be utilizing SSE
	 * instructions (and possibly x87 FPU capability), then only the
	 * USE_SSE option bit needs to be specified.
	 *
	 * Likewise, the placement of tasks into "groups" doesn't need to follow
	 * the protocol used by the IA-32 nanokernel w.r.t. managing the
-	 * tCCS->flags field. If a task will utilize just the x87 FPU
+	 * struct tcs->flags field. If a task will utilize just the x87 FPU
	 *capability,
	 * then the task only needs to be placed in the FPU_GROUP group.
	 * If a task utilizes SSE instructions (and possibly x87 FPU

@ -1,7 +1,7 @@
 /* swap.S - nanokernel swapper code for IA-32 */

 /*
- * Copyright (c) 2010-2014 Wind River Systems, Inc.
+ * Copyright (c) 2010-2015 Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:

@ -59,17 +59,17 @@ save frame on the stack.
 * @brief Initiate a cooperative context switch
 *
 * The _Swap() routine is invoked by various nanokernel services to effect
- * a cooperative context context switch. Prior to invoking _Swap(), the
+ * a cooperative context switch. Prior to invoking _Swap(), the
 * caller disables interrupts (via irq_lock) and the return 'key'
 * is passed as a parameter to _Swap(). The 'key' actually represents
 * the EFLAGS register prior to disabling interrupts via a 'cli' instruction.
 *
- * Given that _Swap() is called to effect a cooperative context context switch,
- * only the non-volatile integer registers need to be saved in the tCCS of the
- * outgoing context. The restoration of the integer registers of the incoming
- * context depends on whether that context was preemptively context switched
- * out. The INT_ACTIVE and EXC_ACTIVE bits in the tCCS->flags field will signify
- * that the context was preemptively context switched out, and thus both the
+ * Given that _Swap() is called to effect a cooperative context switch,
+ * only the non-volatile integer registers need to be saved in the TCS of the
+ * outgoing thread. The restoration of the integer registers of the incoming
+ * thread depends on whether that thread was preemptively context switched
+ * out. The INT_ACTIVE and EXC_ACTIVE bits in the tTCS->flags field will signify
+ * that the thread was preemptively context switched out, and thus both the
 * volatile and non-volatile integer registers need to be restored.
 *
 * The non-volatile registers need to be scrubbed to ensure they contain no

@ -82,22 +82,21 @@ save frame on the stack.
 * for potential security impacts.
 *
 * Floating point registers are handled using a lazy save/restore
- * mechanism since it's expected relatively few contexts will be created
+ * mechanism since it's expected relatively few threads will be created
 * with the USE_FP or USE_SSE option bits. The nanokernel data structure
- * maintains a 'current_fp' field to keep track of the context that "owns"
+ * maintains a 'current_fp' field to keep track of the thread that "owns"
 * the floating point registers. Floating point registers consist of
 * ST0->ST7 (x87 FPU and MMX registers) and XMM0 -> XMM7.
 *
 * All floating point registers are considered 'volatile' thus they will
- * only be saved/restored when a preemptive context context switch occurs.
+ * only be saved/restored when a preemptive context switch occurs.
 *
 * Floating point registers are currently NOT scrubbed, and are subject to
 * potential security leaks.
 *
- * The scheduling algorithm is simple: schedule the head of the runnable
- * FIBER context list, which is represented by _nanokernel.fiber. If there are
- * no runnable FIBER contexts, then schedule the TASK context represented
- * by _nanokernel.task. The _nanokernel.task field will never be NULL.
+ * The scheduling algorithm is simple: schedule the head of the runnable fiber
+ * list (_nanokernel.fiber). If there are no runnable fibers, then schedule
+ * the task (_nanokernel.task). The _nanokernel.task field will never be NULL.
 *
 * @return may contain a return value setup by a call to fiberRtnValueSet()
 *

@ -112,9 +111,9 @@ SECTION_FUNC(TEXT, _Swap)

	/*
	 * Push all non-volatile registers onto the stack; do not copy
-	 * any of these registers into the tCCS. Only the 'esp' register
+	 * any of these registers into the tTCS. Only the 'esp' register
	 * after all the pushes have been performed) will be stored in the
-	 * tCCS.
+	 * tTCS.
	 */

	pushl %edi

@ -131,10 +130,10 @@ SECTION_FUNC(TEXT, _Swap)
	pushl %ebx


-	/* save esp into tCCS structure */
+	/* save esp into tTCS structure */

	movl __tNANO_current_OFFSET (%eax), %ecx
-	movl %esp, __tCCS_coopReg_OFFSET + __tCoopReg_esp_OFFSET (%ecx)
+	movl %esp, __tTCS_coopReg_OFFSET + __tCoopReg_esp_OFFSET (%ecx)

 #ifdef CONFIG_PROFILER_CONTEXT_SWITCH
	/* save %eax since it used as the return value for _Swap */

@ -146,7 +145,7 @@ SECTION_FUNC(TEXT, _Swap)
 #endif

	/*
-	 * Determine what FIBER or TASK context needs to be swapped in.
+	 * Determine what thread needs to be swapped in.
	 * Note that the %eax still contains &_nanokernel.
	 */

@ -154,17 +153,16 @@ SECTION_FUNC(TEXT, _Swap)
	testl %ecx, %ecx
	jz swapTask /* Jump if no ready fibers */

-	/* remove the head 'tCCS *' from the runnable context list */
+	/* remove the head 'TCS *' from the runnable fiber list */

-	movl __tCCS_link_OFFSET (%ecx), %ebx
+	movl __tTCS_link_OFFSET (%ecx), %ebx
	movl %ebx, __tNANO_fiber_OFFSET (%eax)
	jmp restoreContext


	/*
-	 * There are no FIBER context in the run queue, thus swap in the
-	 * TASK context specified via _nanokernel.task. The 'task' field
-	 * will _never_ be NULL.
+	 * There are no fiber in the run queue, thus swap in the task
+	 * (_nanokernel.task). The 'task' field will _never_ be NULL.
	 */

 BRANCH_LABEL(swapTask)

@ -174,7 +172,7 @@ BRANCH_LABEL(swapTask)


	/*
-	 * At this point, the %ecx register contains the 'tCCS *' of
+	 * At this point, the %ecx register contains the 'tTCS *' of
	 * the TASK or FIBER to be swapped in, and %eax still
	 * contains &_nanokernel.
	 */

@ -184,15 +182,15 @@ BRANCH_LABEL(restoreContext)
 #ifdef CONFIG_FP_SHARING
 #ifdef CONFIG_AUTOMATIC_FP_ENABLING
	/*
-	 * Clear the CR0[TS] bit (in the event the current context
+	 * Clear the CR0[TS] bit (in the event the current thread
	 * doesn't have floating point enabled) to prevent the "device not
	 * available" exception when executing the subsequent fxsave/fnsave
	 * and/or fxrstor/frstor instructions.
	 *
	 * Indeed, it's possible that none of the aforementioned instructions
-	 * need to be executed, for example, the incoming context doesn't
+	 * need to be executed, for example, the incoming thread doesn't
	 * utilize floating point operations. However, the code responsible
-	 * for setting the CR0[TS] bit appropriately for the incoming context
+	 * for setting the CR0[TS] bit appropriately for the incoming thread
	 * (just after the 'restoreContext_NoFloatSwap' label) will leverage
	 * the fact that the following 'clts' was performed already.
	 */

@ -202,18 +200,18 @@ BRANCH_LABEL(restoreContext)


	/*
-	 * Determine whether the incoming context utilizes non-integer
-	 * capabilities _and_ whether the context was context switched
+	 * Determine whether the incoming thread utilizes non-integer
+	 * capabilities _and_ whether the thread was context switched
	 * out preemptively.
	 */

-	testl $USE_FP, __tCCS_flags_OFFSET (%ecx)
+	testl $USE_FP, __tTCS_flags_OFFSET (%ecx)
	je restoreContext_NoFloatSwap


	/*
-	 * The incoming context uses non-integer capabilities (x87 FPU and/or
-	 * XMM regs): Was it the last context to use non-integer capabilities?
+	 * The incoming thread uses non-integer capabilities (x87 FPU and/or
+	 * XMM regs): Was it the last thread to use non-integer capabilities?
	 * If so, there there is no need to restore the non-integer context.
	 */

@ -223,10 +221,10 @@ BRANCH_LABEL(restoreContext)


	/*
-	 * The incoming context uses non-integer capabilities (x87 FPU and/or
-	 * XMM regs) and it was _not_ the last context to use the non-integer
+	 * The incoming thread uses non-integer capabilities (x87 FPU and/or
+	 * XMM regs) and it was _not_ the last thread to use the non-integer
	 * capabilities: Check whether the current FP context actually needs
-	 * to be saved before swapping in the context of the incoming context
+	 * to be saved before swapping in the context of the incoming thread
	 */

	testl %ebx, %ebx

@ -234,8 +232,8 @@ BRANCH_LABEL(restoreContext)


	/*
-	 * The incoming context uses non-integer capabilities (x87 FPU and/or
-	 * XMM regs) and it was _not_ the last context to use the non-integer
+	 * The incoming thread uses non-integer capabilities (x87 FPU and/or
+	 * XMM regs) and it was _not_ the last thread to use the non-integer
	 * capabilities _and_ the current FP context needs to be saved.
	 *
	 * Given that the ST[0] -> ST[7] and XMM0 -> XMM7 registers are all

@ -243,21 +241,21 @@ BRANCH_LABEL(restoreContext)
	 * was preemptively context switched.
	 */

-	testl $INT_OR_EXC_MASK, __tCCS_flags_OFFSET (%ebx)
+	testl $INT_OR_EXC_MASK, __tTCS_flags_OFFSET (%ebx)
	je restoreContext_NoFloatSave


 #ifdef CONFIG_SSE
-	testl $USE_SSE, __tCCS_flags_OFFSET (%ebx)
+	testl $USE_SSE, __tTCS_flags_OFFSET (%ebx)
	je x87FloatSave

	/*
	 * 'fxsave' does NOT perform an implicit 'fninit', therefore issue an
-	 * 'fninit' to ensure a "clean" FPU state for the incoming context
+	 * 'fninit' to ensure a "clean" FPU state for the incoming thread
	 * (for the case when the fxrstor is not executed).
	 */

-	fxsave __tCCS_preempFloatReg_OFFSET (%ebx)
+	fxsave __tTCS_preempFloatReg_OFFSET (%ebx)
	fninit
	jmp floatSaveDone

@ -266,7 +264,7 @@ BRANCH_LABEL(x87FloatSave)

	/* 'fnsave' performs an implicit 'fninit' after saving state! */

-	fnsave __tCCS_preempFloatReg_OFFSET (%ebx)
+	fnsave __tTCS_preempFloatReg_OFFSET (%ebx)

	/* fall through to 'floatSaveDone' */

@ -274,61 +272,61 @@ BRANCH_LABEL(floatSaveDone)
 BRANCH_LABEL(restoreContext_NoFloatSave)

 /*********************************************************
- * Restore floating point context of the incoming context.
+ * Restore floating point context of the incoming thread.
 *********************************************************/

	/*
	 * Again, given that the ST[0] -> ST[7] and XMM0 -> XMM7 registers are
	 * all 'volatile', only restore the registers if the incoming
-	 * context was previously preemptively context switched out.
+	 * thread was previously preemptively context switched out.
	 */

-	testl $INT_OR_EXC_MASK, __tCCS_flags_OFFSET (%ecx)
+	testl $INT_OR_EXC_MASK, __tTCS_flags_OFFSET (%ecx)
	je restoreContext_NoFloatRestore

 #ifdef CONFIG_SSE
-	testl $USE_SSE, __tCCS_flags_OFFSET (%ecx)
+	testl $USE_SSE, __tTCS_flags_OFFSET (%ecx)
	je x87FloatRestore

-	fxrstor __tCCS_preempFloatReg_OFFSET (%ecx)
+	fxrstor __tTCS_preempFloatReg_OFFSET (%ecx)
	jmp floatRestoreDone

 BRANCH_LABEL(x87FloatRestore)

 #endif /* CONFIG_SSE */

-	frstor __tCCS_preempFloatReg_OFFSET (%ecx)
+	frstor __tTCS_preempFloatReg_OFFSET (%ecx)

	/* fall through to 'floatRestoreDone' */

 BRANCH_LABEL(floatRestoreDone)
 BRANCH_LABEL(restoreContext_NoFloatRestore)

-	/* record that the incoming context "owns" the non-integer registers */
+	/* record that the incoming thread "owns" the non-integer registers */

	movl %ecx, __tNANO_current_fp_OFFSET (%eax)


	/*
	 * Branch point when none of the non-integer registers need to be
-	 * swapped either due to a) the incoming context does not
-	 * USE_FP | USE_SSE, or b) the incoming context is the same as
-	 * the last context that utilized the non-integer registers.
+	 * swapped either due to a) the incoming thread does not
+	 * USE_FP | USE_SSE, or b) the incoming thread is the same as
+	 * the last thread that utilized the non-integer registers.
	 */

 BRANCH_LABEL(restoreContext_NoFloatSwap)

 #ifdef CONFIG_AUTOMATIC_FP_ENABLING
	/*
-	 * Leave CR0[TS] clear if incoming context utilizes "floating point"
+	 * Leave CR0[TS] clear if incoming thread utilizes "floating point"
	 * instructions
	 */

-	testl $USE_FP, __tCCS_flags_OFFSET (%ecx)
+	testl $USE_FP, __tTCS_flags_OFFSET (%ecx)
	jne CROHandlingDone

	/*
-	 * The incoming context does NOT currently utilize "floating point"
+	 * The incoming thread does NOT currently utilize "floating point"
	 * instructions, so set CR0[TS] to ensure the "device not available"
	 * exception occurs on the first attempt to access a x87 FPU, MMX,
	 * or XMM register.

@ -346,13 +344,13 @@ BRANCH_LABEL(CROHandlingDone)



-	/* update _nanokernel.current to reflect incoming context */
+	/* update _nanokernel.current to reflect incoming thread */

	movl %ecx, __tNANO_current_OFFSET (%eax)

-	/* recover task/fiber stack pointer from tCCS */
+	/* recover task/fiber stack pointer from tTCS */

-	movl __tCCS_coopReg_OFFSET + __tCoopReg_esp_OFFSET (%ecx), %esp
+	movl __tTCS_coopReg_OFFSET + __tCoopReg_esp_OFFSET (%ecx), %esp


	/* load return value from a possible fiberRtnValueSet() */

@ -370,8 +368,8 @@ BRANCH_LABEL(CROHandlingDone)
 * For a non-preemptive context switch, it is checked that the volatile
 * integer registers have the following values:
 *
- * 1. ECX - points to the task's own CCS structure.
- * 2. EDX - contains the flags field of the task's own CCS structure.
+ * 1. ECX - points to the task's own TCS structure.
+ * 2. EDX - contains the flags field of the task's own TCS structure.
 * 3. EAX - may contain one of the two values:
 * (a) the return value for _Swap() that was set up by a
 * call to fiberRtnValueSet()

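The scheduling rule documented above reduces to a few lines of C; this sketch mirrors the fiber-dequeue and task-fallback logic of the assembly (it is an illustration, not kernel source):

	struct tcs *next = _nanokernel.fiber;

	if (next != NULL) {
		_nanokernel.fiber = next->link; /* pop head of runnable fiber list */
	} else {
		next = _nanokernel.task; /* the 'task' field is never NULL */
	}
	_nanokernel.current = next;
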
@ -54,7 +54,7 @@ supported platforms.
 * This routine implements the corrective action to be taken when the system
 * detects a fatal error.
 *
- * This sample implementation attempts to abort the current context and allow
+ * This sample implementation attempts to abort the current thread and allow
 * the system to continue executing, which may permit the system to continue
 * functioning with degraded capabilities.
 *

@ -72,12 +72,12 @@ FUNC_NORETURN void _SysFatalErrorHandler(
	const NANO_ESF * pEsf /* pointer to exception stack frame */
	)
 {
-	nano_context_type_t curCtx = context_type_get();
+	nano_context_type_t curCtx = sys_execution_context_type_get();

	ARG_UNUSED(reason);
	ARG_UNUSED(pEsf);

-	if ((curCtx != NANO_CTX_ISR) && !_context_essential_check(NULL)) {
+	if ((curCtx != NANO_CTX_ISR) && !_is_thread_essential(NULL)) {
 #ifdef CONFIG_MICROKERNEL
		if (curCtx == NANO_CTX_TASK) {
			extern FUNC_NORETURN void _TaskAbort(void);

@ -33,7 +33,7 @@ CONFIG_ENHANCED_SECURITY=y
 # CONFIG_BOOT_BANNER is not set
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_ISR_STACK_SIZE=2048
-# CONFIG_CONTEXT_CUSTOM_DATA is not set
+# CONFIG_THREAD_CUSTOM_DATA is not set
 # CONFIG_NANO_TIMEOUTS is not set
 # CONFIG_NANO_TIMERS is not set

@ -75,7 +75,7 @@ static inline unsigned int EflagsGet(void)
 * @brief Disallow use of floating point capabilities
 *
 * This routine sets CR0[TS] to 1, which disallows the use of FP instructions
- * by the currently executing context.
+ * by the currently executing thread.
 *
 * @return N/A
 */

@ -100,8 +100,8 @@ static inline void _FpAccessDisable(void)
 *
 * This routine saves the system's "live" non-integer context into the
 * specified area. If the specified task or fiber supports SSE then
- * x87/MMX/SSEx context info is saved, otherwise only x87/MMX context is saved.
- * Function is invoked by _FpCtxSave(tCCS *ccs)
+ * x87/MMX/SSEx thread info is saved, otherwise only x87/MMX thread is saved.
+ * Function is invoked by _FpCtxSave(struct tcs *tcs)
 *
 * @return N/A
 */

@ -131,7 +131,7 @@ static inline void _do_fp_ctx_save(int flags, void *preemp_float_reg)
 * @brief Initialize non-integer context information
 *
 * This routine initializes the system's "live" non-integer context.
- * Function is invoked by _FpCtxInit(tCCS *ccs)
+ * Function is invoked by _FpCtxInit(struct tcs *tcs)
 *
 * @return N/A
 */

@ -144,7 +144,7 @@ static inline void _do_fp_ctx_init(int flags)

 #ifdef CONFIG_SSE
	if (flags) {
-		/* initialize SSE (since context uses it) */
+		/* initialize SSE (since thread uses it) */
		__asm__ volatile("ldmxcsr _sse_mxcsr_default_value\n\t");

	}

@@ -68,11 +68,11 @@ offsets.o module.
#define STACK_ALIGN_SIZE 4

/*
 * Bitmask definitions for the tCCS->flags bit field
 * Bitmask definitions for the struct tcs->flags bit field
 *
 * The USE_FP flag bit will be set whenever a context uses any non-integer
 * The USE_FP flag bit will be set whenever a thread uses any non-integer
 * capability, whether it's just the x87 FPU capability, SSE instructions, or
 * a combination of both. The USE_SSE flag bit will only be set if a context
 * a combination of both. The USE_SSE flag bit will only be set if a thread
 * uses SSE instructions.
 *
 * Note: Any change to the definitions USE_FP and USE_SSE must also be made to

@@ -80,13 +80,13 @@ offsets.o module.
 */

#define FIBER 0
#define TASK 0x1 /* 1 = task context, 0 = fiber context */
#define INT_ACTIVE 0x2 /* 1 = context is executing interrupt handler */
#define EXC_ACTIVE 0x4 /* 1 = context is executing exception handler */
#define USE_FP 0x10 /* 1 = context uses floating point unit */
#define USE_SSE 0x20 /* 1 = context uses SSEx instructions */
#define PREEMPTIBLE 0x100 /* 1 = preemptible context */
#define ESSENTIAL 0x200 /* 1 = system context that must not abort */
#define TASK 0x1 /* 1 = task, 0 = fiber */
#define INT_ACTIVE 0x2 /* 1 = executing context is interrupt handler */
#define EXC_ACTIVE 0x4 /* 1 = executing context is exception handler */
#define USE_FP 0x10 /* 1 = thread uses floating point unit */
#define USE_SSE 0x20 /* 1 = thread uses SSEx instructions */
#define PREEMPTIBLE 0x100 /* 1 = preemptible thread */
#define ESSENTIAL 0x200 /* 1 = system thread that must not abort */
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
#define NO_METRICS_BIT_OFFSET 0xa /* Bit position of NO_METRICS */

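For orientation, a minimal sketch of how these renamed flag bits combine;
the helper below is hypothetical and assumes access to a struct tcs pointer:

    /* USE_SSE is only ever set together with USE_FP (SSE is a non-integer
     * capability), so code that must save SSEx state tests both bits.
     * thread_uses_sse() is a made-up helper for illustration only.
     */
    static inline int thread_uses_sse(struct tcs *tcs)
    {
        return (tcs->flags & (USE_FP | USE_SSE)) == (USE_FP | USE_SSE);
    }
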
@@ -443,7 +443,7 @@ typedef struct s_coopReg {
 * The following registers are considered non-volatile, i.e.
 * callee-save,
 * but their values are pushed onto the stack rather than stored in the
 * tCCS
 * TCS
 * structure:
 *
 * unsigned long ebp;

@@ -472,7 +472,7 @@ typedef struct s_preempReg {
 * restore the values of these registers in order to support interrupt
 * nesting. The stubs do _not_ copy the saved values from the stack
 * into
 * the tCCS.
 * the TCS.
 *
 * unsigned long eax;
 * unsigned long ecx;

@@ -616,32 +616,32 @@ typedef struct s_coopFloatReg {

typedef struct s_preempFloatReg {
union {
tFpRegSet fpRegs; /* contexts with USE_FP utilize this format */
tFpRegSetEx fpRegsEx; /* contexts with USE_SSE utilize this
tFpRegSet fpRegs; /* threads with USE_FP utilize this format */
tFpRegSetEx fpRegsEx; /* threads with USE_SSE utilize this
format */
} floatRegsUnion;
} tPreempFloatReg;

/*
 * The context control structure definition. It contains the
 * various fields to manage a _single_ context. The CCS will be aligned
 * The thread control structure definition. It contains the
 * various fields to manage a _single_ thread. The TCS will be aligned
 * to the appropriate architecture specific boundary via the
 * _NewContext() call.
 * _new_thread() call.
 */

struct ccs {
struct tcs {
/*
 * Link to next context in singly-linked context list (such as
 * Link to next thread in singly-linked thread list (such as
 * prioritized
 * list of runnable fibers, or list of fibers waiting on a nanokernel
 * FIFO).
 */

struct ccs *link;
struct tcs *link;

/*
 * See the flag definitions above for valid bit settings. This
 * field must remain near the start of the tCCS structure, specifically
 * field must remain near the start of struct tcs, specifically
 * before any #ifdef'ed fields since the host tools currently use a
 * fixed
 * offset to read the 'flags' field.

@@ -651,21 +651,21 @@ struct ccs {

/*
 * Storage space for integer registers. These must also remain near
 * the start of the tCCS structure for the same reason mentioned for
 * the start of struct tcs for the same reason mentioned for
 * 'flags'.
 */

tCoopReg coopReg; /* non-volatile integer register storage */
tPreempReg preempReg; /* volatile integer register storage */

#if defined(CONFIG_CONTEXT_MONITOR)
struct ccs *next_context; /* next item in list of ALL fiber+tasks */
#if defined(CONFIG_THREAD_MONITOR)
struct tcs *next_thread; /* next item in list of ALL fiber+tasks */
#endif
#ifdef CONFIG_GDB_INFO
void *esfPtr; /* pointer to exception stack frame saved by */
/* outermost exception wrapper */
#endif /* CONFIG_GDB_INFO */
int prio; /* context priority used to sort linked list */
int prio; /* thread priority used to sort linked list */
#if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO))
/*
 * Nested exception count to maintain setting of EXC_ACTIVE flag across

@@ -676,7 +676,7 @@ struct ccs {
unsigned excNestCount; /* nested exception count */
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */

#ifdef CONFIG_CONTEXT_CUSTOM_DATA
#ifdef CONFIG_THREAD_CUSTOM_DATA
void *custom_data; /* available for custom use */
#endif

@@ -686,15 +686,15 @@ struct ccs {

/*
 * The location of all floating point related structures/fields MUST be
 * located at the end of the tCCS structure. This way only the
 * located at the end of struct tcs. This way only the
 * fibers/tasks
 * that actually utilize non-integer capabilities need to account for
 * the increased memory required for storing FP state when sizing
 * stacks.
 *
 * Given that stacks "grow down" on IA-32, and the tCCS structure is
 * Given that stacks "grow down" on IA-32, and the TCS is
 * located
 * at the start of a context's "workspace" memory, the stacks of
 * at the start of a thread's "workspace" memory, the stacks of
 * fibers/tasks
 * that do not utilize floating point instructions can effectively
 * consume

@@ -708,15 +708,15 @@ struct ccs {

/*
 * The nanokernel structure definition. It contains various fields to
 * manage _all_ the contexts in the nanokernel (system level).
 * manage _all_ the threads in the nanokernel (system level).
 */

typedef struct s_NANO {
tCCS *fiber; /* singly linked list of runnable fiber contexts */
tCCS *task; /* pointer to runnable task context */
tCCS *current; /* currently scheduled context (fiber or task) */
#if defined(CONFIG_CONTEXT_MONITOR)
tCCS *contexts; /* singly linked list of ALL fiber+tasks */
struct tcs *fiber; /* singly linked list of runnable fibers */
struct tcs *task; /* pointer to runnable task */
struct tcs *current; /* currently scheduled thread (fiber or task) */
#if defined(CONFIG_THREAD_MONITOR)
struct tcs *threads; /* singly linked list of ALL fiber+tasks */
#endif
unsigned nested; /* nested interrupt count */
char *common_isp; /* interrupt stack pointer base */

@@ -731,13 +731,13 @@ typedef struct s_NANO {
 * A 'current_sse' field does not exist in addition to the 'current_fp'
 * field since it's not possible to divide the IA-32 non-integer
 * registers
 * into 2 distinct blocks owned by differing contexts. In other words,
 * into 2 distinct blocks owned by differing threads. In other words,
 * given that the 'fxsave/fxrstor' instructions save/restore both the
 * X87 FPU and XMM registers, it's not possible for a context to only
 * X87 FPU and XMM registers, it's not possible for a thread to only
 * "own" the XMM registers.
 */

tCCS *current_fp; /* context (fiber or task) that owns the FP regs */
struct tcs *current_fp; /* thread (fiber or task) that owns the FP regs */
#endif /* CONFIG_FP_SHARING */
#ifdef CONFIG_NANO_TIMEOUTS
sys_dlist_t timeout_q;

@@ -815,7 +815,7 @@ static inline void nanoArchInit(void)
 *
 * The register used to store the return value from a function call invocation is
 * set to <value>. It is assumed that the specified <fiber> is pending, and
 * thus the fiber's context is stored in its tCCS structure.
 * thus the fiber's context is stored in its TCS.
 *
 * @return N/A
 *

@@ -823,7 +823,7 @@ static inline void nanoArchInit(void)
 */

static inline void fiberRtnValueSet(
tCCS *fiber, /* pointer to fiber */
struct tcs *fiber, /* pointer to fiber */
unsigned int value /* value to set as return value */
)
{

@@ -28,9 +28,6 @@ the Namespace`_ for details.
+-------------------+---------------------------------------------------------+
| atomic\_          | Denotes an atomic operation (e.g. atomic_inc).          |
+-------------------+---------------------------------------------------------+
| context\_         | Denotes an operation invoked by a fiber or a task (e.g  |
|                   | context_type_get).                                      |
+-------------------+---------------------------------------------------------+
| fiber\_           | Denotes an operation invoked by a fiber; typically a    |
|                   | microkernel operation (e.g. fiber_event_send).          |
+-------------------+---------------------------------------------------------+

@@ -1,17 +1,17 @@
.. _context_services:

Context Services
################
Execution Context Services
##########################

Concepts
********

Each kernel context has an associated *context type*, which indicates whether
Each kernel execution context has an associated *type*, which indicates whether
the context is a task, a fiber, or the kernel's interrupt handling context.
Task and fiber contexts also have an associated *context identifier* value,
which is used to uniquely identify these contexts.
Task and fiber contexts also have an associated *thread identifier* value,
which is used to uniquely identify these threads.

Each task and fiber context may also support a 32-bit *custom data* value.
Each task and fiber may also support a 32-bit *thread custom data* value.
This value is accessible only by the task or fiber itself, and can be used
by the application for any purpose. The default custom data value for a
task or fiber is zero.

@@ -24,8 +24,8 @@ task or fiber is zero.
Purpose
*******

Use the kernel context services when writing code that needs to operate
differently when executed by different contexts.
Use the kernel execution context services when writing code that needs to
operate differently when executed by different contexts.

Usage

@@ -34,15 +34,15 @@ Usage
Configuring Custom Data Support
===============================

Use the :option:`CONTEXT_CUSTOM_DATA` configuration option
to enable support for context custom data. By default, custom data
Use the :option:`THREAD_CUSTOM_DATA` configuration option
to enable support for thread custom data. By default, custom data
support is disabled.

Example: Performing Context-Specific Processing
===============================================
This code shows how a routine can use a context's custom data value
to limit the number of times a context may call the routine.
Example: Performing Execution Context-Specific Processing
=========================================================
This code shows how a routine can use a thread's custom data value
to limit the number of times a thread may call the routine.
Counting is not performed when the routine is called by an ISR, which does not
have a custom data value.

@@ -58,12 +58,12 @@ have a custom data value.
    {
        uint32_t call_count;

        if (context_type_get() != NANO_CTX_ISR) {
            call_count = (uint32_t)context_custom_data_get();
        if (sys_execution_context_type_get() != NANO_CTX_ISR) {
            call_count = (uint32_t)sys_thread_custom_data_get();
            if (call_count == CALL_LIMIT)
                return -1;
            call_count++;
            context_custom_data_set((void *)call_count);
            sys_thread_custom_data_set((void *)call_count);
        }

        /* do rest of routine's processing */

@@ -74,21 +74,21 @@ have a custom data value.
APIs
****

The following kernel context APIs are provided by :file:`microkernel.h`
and by :file:`nanokernel.h`:
The following kernel execution context APIs are provided by
:file:`microkernel.h` and by :file:`nanokernel.h`:

+-------------------------------------+---------------------------------------+
+--------------------------------------------+---------------------------------------+
| Call                                | Description                           |
+=====================================+=======================================+
| :c:func:`context_self_get()`        | Gets context identifier of currently  |
+============================================+=======================================+
| :c:func:`sys_thread_self_get()`            | Gets thread identifier of currently   |
|                                     | executing task or fiber.              |
+-------------------------------------+---------------------------------------+
| :c:func:`context_type_get()`        | Gets type of currently executing      |
+--------------------------------------------+---------------------------------------+
| :c:func:`sys_execution_context_type_get()` | Gets type of currently executing      |
|                                     | context (i.e. task, fiber, or ISR).   |
+-------------------------------------+---------------------------------------+
| :c:func:`context_custom_data_set()` | Writes custom data for currently      |
+--------------------------------------------+---------------------------------------+
| :c:func:`sys_thread_custom_data_set()`     | Writes custom data for currently      |
|                                     | executing task or fiber.              |
+-------------------------------------+---------------------------------------+
| :c:func:`context_custom_data_get()` | Reads custom data for currently       |
+--------------------------------------------+---------------------------------------+
| :c:func:`sys_thread_custom_data_get()`     | Reads custom data for currently       |
|                                     | executing task or fiber.              |
+-------------------------------------+---------------------------------------+
+--------------------------------------------+---------------------------------------+

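To make the renamed entries above concrete, a minimal sketch of execution
context-specific dispatch; NANO_CTX_FIBER is an assumption, inferred by
analogy with the NANO_CTX_ISR and NANO_CTX_TASK values used elsewhere in
this patch:

    #include <nanokernel.h>

    void note_caller(void)
    {
        switch (sys_execution_context_type_get()) {
        case NANO_CTX_ISR:
            /* ISRs are not threads: no thread id or custom data here */
            break;
        case NANO_CTX_TASK:
            /* a task: sys_thread_self_get() identifies it uniquely */
            break;
        default:
            /* a fiber (assumed constant: NANO_CTX_FIBER) */
            break;
        }
    }
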
@@ -77,12 +77,12 @@ extern void _irq_exit(void);
 * Invoking a kernel routine with interrupts locked may result in
 * interrupts being re-enabled for an unspecified period of time. If the
 * called routine blocks, interrupts will be re-enabled while another
 * context executes, or while the system is idle.
 * thread executes, or while the system is idle.
 *
 * The "interrupt disable state" is an attribute of a context. Thus, if a
 * The "interrupt disable state" is an attribute of a thread. Thus, if a
 * fiber or task disables interrupts and subsequently invokes a kernel
 * routine that causes the calling context to block, the interrupt
 * disable state will be restored when the context is later rescheduled
 * routine that causes the calling thread to block, the interrupt
 * disable state will be restored when the thread is later rescheduled
 * for execution.
 *
 * @return An architecture-dependent lock-out key representing the

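The lock-out key described above pairs with irq_unlock(); a minimal
critical-section sketch (the shared counter is illustrative):

    unsigned int key;

    key = irq_lock();    /* returns the architecture-dependent key */
    shared_counter++;    /* protected from interrupts and preemption */
    irq_unlock(key);     /* restores the previous interrupt state */
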
@@ -129,12 +129,12 @@ static ALWAYS_INLINE unsigned int find_lsb_set(uint32_t op)
 * Invoking a kernel routine with interrupts locked may result in
 * interrupts being re-enabled for an unspecified period of time. If the
 * called routine blocks, interrupts will be re-enabled while another
 * context executes, or while the system is idle.
 * thread executes, or while the system is idle.
 *
 * The "interrupt disable state" is an attribute of a context. Thus, if a
 * The "interrupt disable state" is an attribute of a thread. Thus, if a
 * fiber or task disables interrupts and subsequently invokes a kernel
 * routine that causes the calling context to block, the interrupt
 * disable state will be restored when the context is later rescheduled
 * routine that causes the calling thread to block, the interrupt
 * disable state will be restored when the thread is later rescheduled
 * for execution.
 *
 * @return An architecture-dependent lock-out key representing the

@@ -88,8 +88,8 @@
#endif

/*
 * The CCS must be aligned to the same boundary as that used by the floating
 * point register set. This applies even for contexts that don't initially
 * The TCS must be aligned to the same boundary as that used by the floating
 * point register set. This applies even for threads that don't initially
 * use floating point, since it is possible to enable floating point support
 * later on.
 */

@@ -313,12 +313,12 @@ void _int_latency_stop(void);
 * Invoking a kernel routine with interrupts locked may result in
 * interrupts being re-enabled for an unspecified period of time. If the
 * called routine blocks, interrupts will be re-enabled while another
 * context executes, or while the system is idle.
 * thread executes, or while the system is idle.
 *
 * The "interrupt disable state" is an attribute of a context. Thus, if a
 * The "interrupt disable state" is an attribute of a thread. Thus, if a
 * fiber or task disables interrupts and subsequently invokes a kernel
 * routine that causes the calling context to block, the interrupt
 * disable state will be restored when the context is later rescheduled
 * routine that causes the calling thread to block, the interrupt
 * disable state will be restored when the thread is later rescheduled
 * for execution.
 *
 * @return An architecture-dependent lock-out key representing the

@@ -379,10 +379,10 @@ typedef void (*NANO_EOI_GET_FUNC) (void *);
#ifdef CONFIG_FP_SHARING
/* Definitions for the 'options' parameter to the fiber_fiber_start() API */

/** context uses floating point unit */
/** thread uses floating point unit */
#define USE_FP 0x10
#ifdef CONFIG_SSE
/** context uses SSEx instructions */
/** thread uses SSEx instructions */
#define USE_SSE 0x20
#endif /* CONFIG_SSE */
#endif /* CONFIG_FP_SHARING */

@@ -406,14 +406,16 @@ extern void irq_disable(unsigned int irq);
#ifdef CONFIG_FP_SHARING
/**
 * @brief Enable floating point hardware resources sharing
 * Dynamically enable/disable the capability of a context to share floating
 * Dynamically enable/disable the capability of a thread to share floating
 * point hardware resources. The same "floating point" options accepted by
 * fiber_fiber_start() are accepted by these APIs (i.e. USE_FP and USE_SSE).
 */
extern void fiber_float_enable(nano_context_id_t ctx, unsigned int options);
extern void task_float_enable(nano_context_id_t ctx, unsigned int options);
extern void fiber_float_disable(nano_context_id_t ctx);
extern void task_float_disable(nano_context_id_t ctx);
extern void fiber_float_enable(nano_thread_id_t thread_id,
                               unsigned int options);
extern void task_float_enable(nano_thread_id_t thread_id,
                              unsigned int options);
extern void fiber_float_disable(nano_thread_id_t thread_id);
extern void task_float_disable(nano_thread_id_t thread_id);
#endif /* CONFIG_FP_SHARING */

#include <stddef.h> /* for size_t */

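A minimal sketch of the renamed FP-sharing calls; the thread id is assumed
to have been saved earlier (e.g. via sys_thread_self_get() in the target
thread), and 'worker' is a made-up name:

    nano_thread_id_t worker;    /* saved when the fiber/task was created */

    /* let that thread use x87/MMX, plus SSEx where CONFIG_SSE=y */
    task_float_enable(worker, USE_FP | USE_SSE);

    /* later, reclaim the FP hardware */
    task_float_disable(worker);
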
@@ -38,7 +38,7 @@

#if defined(CONFIG_BLUETOOTH_DEBUG)
#define BT_DBG(fmt, ...) printf("bt: %s (%p): " fmt, __func__, \
context_self_get(), ##__VA_ARGS__)
sys_thread_self_get(), ##__VA_ARGS__)
#define BT_ERR(fmt, ...) printf("bt: %s: " fmt, __func__, ##__VA_ARGS__)
#define BT_WARN(fmt, ...) printf("bt: %s: " fmt, __func__, ##__VA_ARGS__)
#define BT_INFO(fmt, ...) printf("bt: " fmt, ##__VA_ARGS__)

@@ -106,7 +106,7 @@ void sys_event_logger_put(struct event_logger *logger, uint16_t event_id,
 * size the function returns -EMSGSIZE. Otherwise return the number of 32-bit
 * words copied. The function retrieves messages in FIFO order. If there is no
 * message in the buffer the function returns immediately. It can only be called
 * from a fiber context.
 * from a fiber.
 *
 * @param logger Pointer to the event logger used.
 * @param buffer Pointer to the buffer where the message will be copied.

@@ -128,7 +128,7 @@ int sys_event_logger_get(struct event_logger *logger, uint32_t *buffer,
 * size the function returns -EMSGSIZE. Otherwise return the number of 32-bit
 * words copied. The function retrieves messages in FIFO order. The caller pends
 * if there is no message available in the buffer. It can only be called from a
 * fiber context.
 * fiber.
 *
 * @param logger Pointer to the event logger used.
 * @param buffer Pointer to the buffer where the message will be copied.

@@ -150,7 +150,7 @@ int sys_event_logger_get_wait(struct event_logger *logger, uint32_t *buffer,
 * size the function returns -EMSGSIZE. Otherwise return the number of dwords
 * copied. The function retrieves messages in FIFO order. The caller pends if
 * there is no message available in the buffer until a new message is added or
 * the timeout expires. It can only be called from a fiber context.
 * the timeout expires. It can only be called from a fiber.
 *
 * @param logger Pointer to the event logger used.
 * @param buffer Pointer to the buffer where the message will be copied.

@@ -153,7 +153,7 @@ void sys_profiler_put_timed(uint16_t event_id);
 * @brief Register the fiber that calls the function as collector
 *
 * @details Initialize internal profiling data. This avoids registering the
 * context switch of the collector fiber when CONFIG_PROFILE_CONTEXT_SWITCH
 * context switch of the collector fiber when CONFIG_PROFILE_THREAD_SWITCH
 * is enabled.
 *
 * @return No return value.

@@ -72,11 +72,11 @@ struct _nano_timeout {
int32_t delta_ticks_from_prev;
};

struct ccs;
struct tcs;

/* architecture-independent nanokernel public APIs */

typedef struct ccs *nano_context_id_t;
typedef struct tcs *nano_thread_id_t;

typedef void (*nano_fiber_entry_t)(int i1, int i2);

@@ -91,31 +91,31 @@ typedef int nano_context_type_t;
#define TICKS_NONE 0

/*
 * context APIs
 * execution context APIs
 */
extern nano_context_id_t context_self_get(void);
extern nano_context_type_t context_type_get(void);
extern int _context_essential_check(nano_context_id_t pCtx);
extern nano_thread_id_t sys_thread_self_get(void);
extern nano_context_type_t sys_execution_context_type_get(void);
extern int _is_thread_essential(nano_thread_id_t pCtx);

/*
 * fiber APIs
 */
/* scheduling context independent method (when context is not known) */
/* execution context-independent method (when context is not known) */
void fiber_start(char *stack, unsigned stack_size, nano_fiber_entry_t entry,
                 int arg1, int arg2, unsigned prio, unsigned options);

/* methods for fibers */

/**
 * @brief Initialize and start a fiber context
 * @brief Initialize and start a fiber
 *
 * This routine initializes and starts a fiber context; it can be called from
 * either a fiber or a task context. When this routine is called from a
 * This routine initializes and starts a fiber; it can be called from
 * either a fiber or a task. When this routine is called from a
 * task, the newly created fiber will start executing immediately.
 *
 * INTERNAL
 * Given that this routine is _not_ ISR-callable, the following code is used
 * to differentiate between a task and fiber context:
 * to differentiate between a task and fiber:
 *
 * if ((_nanokernel.current->flags & TASK) == TASK)
 *

@@ -137,14 +137,14 @@ extern void fiber_fiber_start(char *pStack, unsigned int stackSize,
unsigned options);

/**
 * @brief Yield the current context
 * @brief Yield the current fiber
 *
 * Invocation of this routine results in the current context yielding to
 * another context of the same or higher priority. If there doesn't exist
 * any other contexts of the same or higher priority that are runnable, this
 * Invocation of this routine results in the current fiber yielding to
 * another fiber of the same or higher priority. If there doesn't exist
 * any other fibers of the same or higher priority that are runnable, this
 * routine will return immediately.
 *
 * This routine can only be called from a fiber context.
 * This routine can only be called from a fiber.
 *
 * @return N/A
 */

@@ -160,7 +160,7 @@ extern void fiber_yield(void);
 * - the fiber has implicitly aborted itself (by returning from its entry point),
 * - the fiber has encountered a fatal exception.
 *
 * This routine can only be called from a fiber context.
 * This routine can only be called from a fiber.
 *
 * @return This function never returns
 */

@@ -220,7 +220,7 @@ extern void fiber_fiber_delayed_start_cancel(void *handle);
/* methods for tasks */

/**
 * @brief Initialize and start a fiber in a task context
 * @brief Initialize and start a fiber from a task
 *
 * @sa fiber_fiber_start
 */

@@ -230,7 +230,7 @@ extern void task_fiber_start(char *pStack, unsigned int stackSize,
#ifdef CONFIG_NANO_TIMEOUTS

/**
 * @brief Start a fiber in a task context, but delay its execution
 * @brief Start a fiber from a task, but delay its execution
 *
 * @sa fiber_fiber_delayed_start
 */

@@ -240,7 +240,7 @@ extern void *task_fiber_delayed_start(char *stack,
int param2, unsigned int priority,
unsigned int options, int32_t timeout_in_ticks);
/**
 * @brief Cancel a delayed fiber start in task context
 * @brief Cancel a delayed fiber start from a task
 *
 * @sa fiber_fiber_delayed_start
 */

@@ -264,7 +264,7 @@ struct nano_fifo {
 * This function initializes a nanokernel multiple-waiter fifo (fifo) object
 * structure.
 *
 * It may be called from either a fiber or task context.
 * It may be called from either a fiber or task.
 *
 * The wait queue and data queue occupy the same space since there cannot
 * be both queued data and pending fibers in the FIFO. Care must be taken

@@ -282,16 +282,15 @@ struct nano_fifo {
 * @return N/A
 */
extern void nano_fifo_init(struct nano_fifo *chan);
/* scheduling context independent methods (when context is not known) */
/* execution context-independent methods (when context is not known) */

/**
 *
 * @brief Add an element to the end of a fifo
 *
 * This is a convenience wrapper for the context-specific APIs. This is
 * helpful whenever the exact scheduling context is not known, but should
 * be avoided when the context is known up-front (to avoid unnecessary
 * overhead).
 * This is a convenience wrapper for the execution context-specific APIs. This
 * is helpful whenever the exact execution context is not known, but should be
 * avoided when the context is known up-front (to avoid unnecessary overhead).
 *
 * @param nano_fifo FIFO on which to interact.
 * @param data Data to send.

@@ -321,10 +320,9 @@ extern void *nano_fifo_get(struct nano_fifo *chan);
 *
 * @brief Get the head element of a fifo, poll/pend if empty
 *
 * This is a convenience wrapper for the context-specific APIs. This is
 * helpful whenever the exact scheduling context is not known, but should
 * be avoided when the context is known up-front (to avoid unnecessary
 * overhead).
 * This is a convenience wrapper for the execution context-specific APIs. This
 * is helpful whenever the exact execution context is not known, but should be
 * avoided when the context is known up-front (to avoid unnecessary overhead).
 *
 * @warning It's only valid to call this API from a fiber or a task.
 *

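A minimal usage sketch of the FIFO calls documented in this header,
assuming a fiber producer and a task consumer (names illustrative; per the
contract described below, the first word of an element is reserved for the
kernel's link pointer):

    struct nano_fifo my_fifo;

    struct item {
        void *link;    /* reserved for the kernel's linked list */
        int payload;
    };

    nano_fifo_init(&my_fifo);                 /* once, from a fiber or task */
    nano_fiber_fifo_put(&my_fifo, &an_item);  /* producer fiber */
    struct item *it = nano_task_fifo_get_wait(&my_fifo); /* consumer task */
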
@@ -340,8 +338,8 @@ extern void *nano_fifo_get_wait(struct nano_fifo *chan);
 *
 * @brief Add an element to the end of a FIFO from an ISR context.
 *
 * This is an alias for the context-specific API. This is
 * helpful whenever the exact scheduling context is known. Its use
 * This is an alias for the execution context-specific API. This is
 * helpful whenever the exact execution context is known. Its use
 * avoids unnecessary overhead.
 *
 * @param nano_fifo FIFO on which to interact.

@@ -370,10 +368,10 @@ extern void *nano_isr_fifo_get(struct nano_fifo *chan);

/**
 *
 * @brief Add an element to the end of a FIFO from a fiber context.
 * @brief Add an element to the end of a FIFO from a fiber.
 *
 * This is an alias for the context-specific API. This is
 * helpful whenever the exact scheduling context is known. Its use
 * This is an alias for the execution context-specific API. This is
 * helpful whenever the exact execution context is known. Its use
 * avoids unnecessary overhead.
 *
 * @param nano_fifo FIFO on which to interact.

@@ -384,10 +382,10 @@ extern void *nano_isr_fifo_get(struct nano_fifo *chan);
extern void nano_fiber_fifo_put(struct nano_fifo *chan, void *data);

/**
 * @brief Get an element from the head of a FIFO from a fiber context.
 * @brief Get an element from the head of a FIFO from a fiber.
 *
 * Remove the head element from the specified nanokernel multiple-waiter fifo
 * linked list fifo. It may be called from a fiber context.
 * linked list fifo. It may be called from a fiber.
 *
 * The first word in the element contains invalid data because that memory
 * location was used to store a pointer to the next element in the linked list.

@@ -403,7 +401,7 @@ extern void *nano_fiber_fifo_get(struct nano_fifo *chan);
 * @brief Get the head element of a fifo, wait if empty
 *
 * Remove the head element from the specified system-level multiple-waiter
 * fifo; it can only be called from a fiber context.
 * fifo; it can only be called from a fiber.
 *
 * If no elements are available, the calling fiber will pend until an element
 * is put onto the fifo.

@@ -416,7 +414,7 @@ extern void *nano_fiber_fifo_get(struct nano_fifo *chan);
 * @return Pointer to head element in the list
 *
 * @note There exists a separate nano_task_fifo_get_wait() implementation
 * since a task context cannot pend on a nanokernel object. Instead tasks will
 * since a task cannot pend on a nanokernel object. Instead tasks will
 * poll the fifo object.
 */
extern void *nano_fiber_fifo_get_wait(struct nano_fifo *chan);

@@ -426,7 +424,7 @@ extern void *nano_fiber_fifo_get_wait(struct nano_fifo *chan);
 * @brief get the head element of a fifo, pend with a timeout if empty
 *
 * Remove the head element from the specified nanokernel fifo; it can only be
 * called from a fiber context.
 * called from a fiber.
 *
 * If no elements are available, the calling fiber will pend until an element
 * is put onto the fifo, or the timeout expires, whichever comes first.

@@ -453,7 +451,7 @@ extern void *nano_fiber_fifo_get_wait_timeout(struct nano_fifo *chan,
 * @brief Add an element to the end of a fifo
 *
 * This routine adds an element to the end of a fifo object; it can be called
 * from only a task context. A fiber pending on the fifo object will be made
 * from only a task. A fiber pending on the fifo object will be made
 * ready, and will preempt the running task immediately.
 *
 * If a fiber is waiting on the fifo, the address of the element is returned to

@@ -473,7 +471,7 @@ extern void *nano_task_fifo_get(struct nano_fifo *chan);
 * @brief Get the head element of a fifo, poll if empty
 *
 * Remove the head element from the specified system-level multiple-waiter
 * fifo; it can only be called from a task context.
 * fifo; it can only be called from a task.
 *
 * If no elements are available, the calling task will poll until
 * an element is put onto the fifo.

@@ -494,7 +492,7 @@ extern void *nano_task_fifo_get_wait(struct nano_fifo *chan);
 * @brief get the head element of a fifo, poll with a timeout if empty
 *
 * Remove the head element from the specified nanokernel fifo; it can only be
 * called from a task context.
 * called from a task.
 *
 * If no elements are available, the calling task will poll until an element
 * is put onto the fifo, or the timeout expires, whichever comes first.

@@ -527,7 +525,7 @@ struct nano_lifo {
 * This function initializes a nanokernel system-level linked list lifo
 * object structure.
 *
 * It may be called from either a fiber or task context.
 * It may be called from either a fiber or task.
 *
 * @param chan LIFO to initialize.
 *

@@ -573,7 +571,7 @@ extern void *nano_isr_lifo_get(struct nano_lifo *chan);
 * @brief Prepend an element to a LIFO without a context switch.
 *
 * This routine adds an element to the head of a LIFO object; it may be
 * called from a fibercontext. A fiber pending on the LIFO
 * called from a fiber. A fiber pending on the LIFO
 * object will be made ready, but will NOT be scheduled to execute.
 *
 * @param chan LIFO from which to put.

@@ -587,7 +585,7 @@ extern void nano_fiber_lifo_put(struct nano_lifo *chan, void *data);
 * @brief Remove the first element from a linked list LIFO
 *
 * Remove the first element from the specified nanokernel linked list LIFO;
 * it may be called from a fiber context.
 * it may be called from a fiber.
 *
 * If no elements are available, NULL is returned. The first word in the
 * element contains invalid data because that memory location was used to store

@@ -603,7 +601,7 @@ extern void *nano_fiber_lifo_get(struct nano_lifo *chan);
 * @brief Get the first element from a LIFO, wait if empty.
 *
 * Remove the first element from the specified system-level linked list LIFO;
 * it can only be called from a fiber context.
 * it can only be called from a fiber.
 *
 * If no elements are available, the calling fiber will pend until an element
 * is put onto the list.

@@ -623,7 +621,7 @@ extern void *nano_fiber_lifo_get_wait(struct nano_lifo *chan);
 * @brief get the first element from a LIFO, wait with a timeout if empty
 *
 * Remove the first element from the specified system-level linked list lifo;
 * it can only be called from a fiber context.
 * it can only be called from a fiber.
 *
 * If no elements are available, the calling fiber will pend until an element
 * is put onto the list, or the timeout expires, whichever comes first.

@@ -647,7 +645,7 @@ extern void *nano_fiber_lifo_get_wait_timeout(struct nano_lifo *chan,
 * @brief Add an element to the head of a linked list LIFO
 *
 * This routine adds an element to the head of a LIFO object; it can be
 * called only from a task context. A fiber pending on the LIFO
 * called only from a task. A fiber pending on the LIFO
 * object will be made ready and will preempt the running task immediately.
 *
 * This API can only be called by a task.

@@ -663,7 +661,7 @@ extern void nano_task_lifo_put(struct nano_lifo *chan, void *data);
 * @brief Remove the first element from a linked list LIFO
 *
 * Remove the first element from the specified nanokernel linked list LIFO;
 * it may be called from a task context.
 * it may be called from a task.
 *
 * If no elements are available, NULL is returned. The first word in the
 * element contains invalid data because that memory location was used to store

@@ -679,7 +677,7 @@ extern void *nano_task_lifo_get(struct nano_lifo *chan);
 * @brief Get the first element from a LIFO, poll if empty.
 *
 * Remove the first element from the specified nanokernel linked list LIFO; it
 * can only be called from a task context.
 * can only be called from a task.
 *
 * If no elements are available, the calling task will poll until an element is
 * put onto the list.

@@ -701,7 +699,7 @@ extern void *nano_task_lifo_get_wait(struct nano_lifo *chan);
 * @brief get the first element from a lifo, poll if empty.
 *
 * Remove the first element from the specified nanokernel linked list lifo; it
 * can only be called from a task context.
 * can only be called from a task.
 *
 * If no elements are available, the calling task will poll until an element is
 * put onto the list, or the timeout expires, whichever comes first.

@@ -733,7 +731,7 @@ struct nano_sem {
 * This function initializes a nanokernel semaphore object structure. After
 * initialization, the semaphore count will be 0.
 *
 * It may be called from either a fiber or task context.
 * It may be called from either a fiber or task.
 *
 * @param chan Pointer to a nano_sem structure.
 *

@@ -741,16 +739,15 @@ struct nano_sem {
 */
extern void nano_sem_init(struct nano_sem *chan);

/* scheduling context independent methods (when context is not known) */
/* execution context-independent methods (when context is not known) */

/**
 *
 * @brief Give a nanokernel semaphore
 *
 * This is a convenience wrapper for the context-specific APIs. This is
 * helpful whenever the exact scheduling context is not known, but should
 * be avoided when the context is known up-front (to avoid unnecessary
 * overhead).
 * This is a convenience wrapper for the execution context-specific APIs. This
 * is helpful whenever the exact execution context is not known, but should be
 * avoided when the context is known up-front (to avoid unnecessary overhead).
 *
 * @param chan Pointer to a nano_sem structure.
 *

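A minimal signaling sketch using the semaphore calls documented here,
assuming an ISR producer and a fiber consumer (names illustrative):

    struct nano_sem data_ready;

    nano_sem_init(&data_ready);            /* count starts at 0 */

    /* in the ISR: wake the consumer without blocking */
    nano_isr_sem_give(&data_ready);

    /* in the fiber: pend until the ISR gives the semaphore */
    nano_fiber_sem_take_wait(&data_ready);
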
@@ -762,10 +759,9 @@ extern void nano_sem_give(struct nano_sem *chan);
 *
 * @brief Take a nanokernel semaphore, poll/pend if not available
 *
 * This is a convenience wrapper for the context-specific APIs. This is
 * helpful whenever the exact scheduling context is not known, but should
 * be avoided when the context is known up-front (to avoid unnecessary
 * overhead).
 * This is a convenience wrapper for the execution context-specific APIs. This
 * is helpful whenever the exact execution context is not known, but should be
 * avoided when the context is known up-front (to avoid unnecessary overhead).
 *
 * It's only valid to call this API from a fiber or a task.
 *

@@ -815,7 +811,7 @@ extern int nano_isr_sem_take(struct nano_sem *chan);
 * @brief Give a nanokernel semaphore (no context switch)
 *
 * This routine performs a "give" operation on a nanokernel semaphore object;
 * it may be called from a fiber context. A fiber pending on
 * it may be called from a fiber. A fiber pending on
 * the semaphore object will be made ready, but will NOT be scheduled to
 * execute.
 *

@@ -829,8 +825,7 @@ extern void nano_fiber_sem_give(struct nano_sem *chan);
 *
 * @brief Take a nanokernel semaphore, fail if unavailable
 *
 * Attempt to take a nanokernel semaphore; it may be called from a fiber
 * context.
 * Attempt to take a nanokernel semaphore; it may be called from a fiber.
 *
 * If the semaphore is not available, this function returns immediately, i.e.
 * a wait (pend) operation will NOT be performed.

@@ -845,10 +840,10 @@ extern int nano_fiber_sem_take(struct nano_sem *chan);
 *
 * @brief Test a nanokernel semaphore, wait if unavailable
 *
 * Take a nanokernel semaphore; it can only be called from a fiber context.
 * Take a nanokernel semaphore; it can only be called from a fiber.
 *
 * If the nanokernel semaphore is not available, i.e. the event counter
 * is 0, the calling fiber context will wait (pend) until the semaphore is
 * is 0, the calling fiber will wait (pend) until the semaphore is
 * given (via nano_fiber_sem_give/nano_task_sem_give/nano_isr_sem_give).
 *
 * @param chan Pointer to a nano_sem structure.

@@ -861,10 +856,10 @@ extern void nano_fiber_sem_take_wait(struct nano_sem *chan);
/**
 * @brief test a nanokernel semaphore, wait with a timeout if unavailable
 *
 * Take a nanokernel semaphore; it can only be called from a fiber context.
 * Take a nanokernel semaphore; it can only be called from a fiber.
 *
 * If the nanokernel semaphore is not available, i.e. the event counter
 * is 0, the calling fiber context will wait (pend) until the semaphore is
 * is 0, the calling fiber will wait (pend) until the semaphore is
 * given (via nano_fiber_sem_give/nano_task_sem_give/nano_isr_sem_give). A
 * timeout can be specified.
 *

@@ -886,7 +881,7 @@ extern int nano_fiber_sem_take_wait_timeout(struct nano_sem *chan,
 * @brief Give a nanokernel semaphore
 *
 * This routine performs a "give" operation on a nanokernel semaphore object;
 * it can only be called from a task context. A fiber pending on the
 * it can only be called from a task. A fiber pending on the
 * semaphore object will be made ready, and will preempt the running task
 * immediately.
 *

@@ -900,8 +895,7 @@ extern void nano_task_sem_give(struct nano_sem *chan);
 *
 * @brief Take a nanokernel semaphore, fail if unavailable
 *
 * Attempt to take a nanokernel semaphore; it may be called from a task
 * context.
 * Attempt to take a nanokernel semaphore; it can only be called from a task.
 *
 * If the semaphore is not available, this function returns immediately, i.e.
 * a wait (pend) operation will NOT be performed.

@@ -916,7 +910,7 @@ extern int nano_task_sem_take(struct nano_sem *chan);
 *
 * @brief Take a nanokernel semaphore, poll if unavailable
 *
 * Take a nanokernel semaphore; it can only be called from a task context.
 * Take a nanokernel semaphore; it can only be called from a task.
 *
 * If the nanokernel semaphore is not available, i.e. the event counter
 * is 0, the calling task will poll until the semaphore is given

@@ -932,7 +926,7 @@ extern void nano_task_sem_take_wait(struct nano_sem *chan);
/**
 * @brief test a nanokernel semaphore, poll with a timeout if unavailable
 *
 * Take a nanokernel semaphore; it can only be called from a task context.
 * Take a nanokernel semaphore; it can only be called from a task.
 *
 * If the nanokernel semaphore is not available, i.e. the event counter is 0,
 * the calling task will poll until the semaphore is given (via

@@ -951,7 +945,7 @@ extern int nano_task_sem_take_wait_timeout(struct nano_sem *chan,
/* stack APIs */

struct nano_stack {
nano_context_id_t fiber;
nano_thread_id_t fiber;
uint32_t *base;
uint32_t *next;
};

@@ -969,11 +963,11 @@ extern void nano_task_stack_push(struct nano_stack *chan, uint32_t data);
extern int nano_task_stack_pop(struct nano_stack *chan, uint32_t *data);
extern uint32_t nano_task_stack_pop_wait(struct nano_stack *chan);

/* context custom data APIs */
#ifdef CONFIG_CONTEXT_CUSTOM_DATA
extern void context_custom_data_set(void *value);
extern void *context_custom_data_get(void);
#endif /* CONFIG_CONTEXT_CUSTOM_DATA */
/* thread custom data APIs */
#ifdef CONFIG_THREAD_CUSTOM_DATA
extern void sys_thread_custom_data_set(void *value);
extern void *sys_thread_custom_data_get(void);
#endif /* CONFIG_THREAD_CUSTOM_DATA */

/* nanokernel timers */

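For application code, the header changes above reduce to a mechanical
rename; a before/after sketch (call_count is illustrative):

    /* before this change */
    nano_context_id_t id = context_self_get();
    context_custom_data_set((void *)call_count);

    /* after this change */
    nano_thread_id_t id = sys_thread_self_get();
    sys_thread_custom_data_set((void *)call_count);
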
@@ -354,7 +354,7 @@ void _sys_power_save_idle(int32_t ticks)
 *
 * This routine is invoked when the kernel leaves the idle state.
 * Routine can be modified to wake up other devices.
 * The routine is invoked from interrupt context, with interrupts disabled.
 * The routine is invoked from interrupt thread, with interrupts disabled.
 *
 * @return N/A
 *

@@ -156,7 +156,7 @@ FUNC_NORETURN void _k_server(int unused1, int unused2)
#endif

_k_current_task = pNextTask;
_nanokernel.task = (tCCS *)pNextTask->workspace;
_nanokernel.task = (struct tcs *)pNextTask->workspace;

#ifdef CONFIG_TASK_MONITOR
if (_k_monitor_mask & MON_TSWAP) {

@@ -206,30 +206,30 @@ static void start_task(struct k_task *X, /* ptr to task control block */
void (*func)(void) /* entry point for task */
)
{
unsigned int contextOptions;
unsigned int task_options;

/* Note: the field X->worksize now represents the task size in bytes */

contextOptions = 0;
_START_TASK_ARCH(X, &contextOptions);
task_options = 0;
_START_TASK_ARCH(X, &task_options);

/*
 * The 'func' argument to _NewContext() represents the entry point of
 * The 'func' argument to _new_thread() represents the entry point of
 * the
 * kernel task. The 'parameter1', 'parameter2', & 'parameter3'
 * arguments
 * are not applicable to such tasks. A 'priority' of -1 indicates that
 * the context is a task, rather than a fiber.
 * the thread is a task, rather than a fiber.
 */

_NewContext((char *)X->workspace, /* pStackMem */
_new_thread((char *)X->workspace, /* pStackMem */
            X->worksize, /* stackSize */
            (_ContextEntry)func, /* pEntry */
            (_thread_entry_t)func, /* pEntry */
            (void *)0, /* parameter1 */
            (void *)0, /* parameter2 */
            (void *)0, /* parameter3 */
            -1, /* priority */
            contextOptions /* options */
            task_options /* options */
            );

X->fabort = NULL;

@@ -249,9 +249,9 @@ static void start_task(struct k_task *X, /* ptr to task control block */
static void abort_task(struct k_task *X)
{

/* Do normal context exit cleanup */
/* Do normal thread exit cleanup */

_context_exit((tCCS *)X->workspace);
_thread_exit((struct tcs *)X->workspace);

/* Set TF_TERM and TF_STOP state flags */

@@ -69,13 +69,13 @@ config ISR_STACK_SIZE
      This option specifies the size of the stack used by interrupt
      service routines (ISRs), and during nanokernel initialization.

config CONTEXT_CUSTOM_DATA
config THREAD_CUSTOM_DATA
    bool
    prompt "Task and fiber custom data"
    default n
    help
      This option allows each task and fiber to store 32 bits of custom data,
      which can be accessed using the context_custom_data_xxx() APIs.
      which can be accessed using the sys_thread_custom_data_xxx() APIs.

config NANO_TIMEOUTS
    bool

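Applications that enabled the old option pick up the rename with a
one-line change in their configuration fragment, matching the defconfig
syntax shown earlier in this patch:

    CONFIG_THREAD_CUSTOM_DATA=y
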
@@ -108,7 +108,7 @@ void sys_event_logger_put(struct event_logger *logger, uint16_t event_id,
 * @details Add an event message to the ring buffer and signal the sync
 * semaphore using the internal function _sem_give_non_preemptible to inform
 * that there are event messages available, avoiding the preemptible
 * behaviour when the function is called from a task context. This function
 * behaviour when the function is called from a task. This function
 * should only be used for special cases where sys_event_logger_put
 * does not satisfy the needs.
 *

@@ -44,48 +44,48 @@ This file contains private nanokernel APIs that are not architecture-specific.
extern "C" {
#endif

/* helper type alias for context control structure */
/* helper type alias for thread control structure */

typedef struct ccs tCCS;
typedef struct tcs tTCS;

/* context entry point declarations */
/* thread entry point declarations */

typedef void *_ContextArg;
typedef void (*_ContextEntry)(_ContextArg arg1,
                              _ContextArg arg2,
                              _ContextArg arg3);
typedef void *_thread_arg_t;
typedef void (*_thread_entry_t)(_thread_arg_t arg1,
                                _thread_arg_t arg2,
                                _thread_arg_t arg3);

extern void _context_entry(_ContextEntry,
                           _ContextArg,
                           _ContextArg,
                           _ContextArg);
extern void _thread_entry(_thread_entry_t,
                          _thread_arg_t,
                          _thread_arg_t,
                          _thread_arg_t);

extern void _NewContext(char *pStack, unsigned stackSize,
                        _ContextEntry pEntry, _ContextArg arg1,
                        _ContextArg arg2, _ContextArg arg3,
extern void _new_thread(char *pStack, unsigned stackSize,
                        _thread_entry_t pEntry, _thread_arg_t arg1,
                        _thread_arg_t arg2, _thread_arg_t arg3,
                        int prio, unsigned options);

/* context switching and scheduling-related routines */

extern void _nano_fiber_schedule(tCCS *ccs);
extern void _nano_fiber_schedule(struct tcs *tcs);
extern void _nano_fiber_swap(void);

extern unsigned int _Swap(unsigned int);

/* set and clear essential fiber/task flag */

extern void _context_essential_set(void);
extern void _context_essential_clear(void);
extern void _thread_essential_set(void);
extern void _thread_essential_clear(void);

/* clean up when a context is aborted */
/* clean up when a thread is aborted */

#if defined(CONFIG_CONTEXT_MONITOR)
extern void _context_exit(tCCS *ccs);
#if defined(CONFIG_THREAD_MONITOR)
extern void _thread_exit(struct tcs *tcs);
#else
#define _context_exit(ccs) \
#define _thread_exit(tcs) \
do {/* nothing */ \
} while (0)
#endif /* CONFIG_CONTEXT_MONITOR */
#endif /* CONFIG_THREAD_MONITOR */

/* special nanokernel object APIs */

@@ -46,8 +46,8 @@ GEN_OFFSET_SYM(tNANO, fiber);
GEN_OFFSET_SYM(tNANO, task);
GEN_OFFSET_SYM(tNANO, current);

#if defined(CONFIG_CONTEXT_MONITOR)
GEN_OFFSET_SYM(tNANO, contexts);
#if defined(CONFIG_THREAD_MONITOR)
GEN_OFFSET_SYM(tNANO, threads);
#endif

#ifdef CONFIG_FP_SHARING

@@ -58,21 +58,21 @@ GEN_OFFSET_SYM(tNANO, current_fp);

GEN_ABSOLUTE_SYM(__tNANO_SIZEOF, sizeof(tNANO));

/* arch-agnostic tCCS structure member offsets */
/* arch-agnostic struct tcs structure member offsets */

GEN_OFFSET_SYM(tCCS, link);
GEN_OFFSET_SYM(tCCS, prio);
GEN_OFFSET_SYM(tCCS, flags);
GEN_OFFSET_SYM(tCCS, coopReg); /* start of coop register set */
GEN_OFFSET_SYM(tCCS, preempReg); /* start of preempt register set */
GEN_OFFSET_SYM(tTCS, link);
GEN_OFFSET_SYM(tTCS, prio);
GEN_OFFSET_SYM(tTCS, flags);
GEN_OFFSET_SYM(tTCS, coopReg); /* start of coop register set */
GEN_OFFSET_SYM(tTCS, preempReg); /* start of preempt register set */

#if defined(CONFIG_CONTEXT_MONITOR)
GEN_OFFSET_SYM(tCCS, next_context);
#if defined(CONFIG_THREAD_MONITOR)
GEN_OFFSET_SYM(tTCS, next_thread);
#endif

/* size of the entire tCCS structure */
/* size of the entire struct tcs structure */

GEN_ABSOLUTE_SYM(__tCCS_SIZEOF, sizeof(tCCS));
GEN_ABSOLUTE_SYM(__tTCS_SIZEOF, sizeof(tTCS));

#endif /* _NANO_OFFSETS__H_ */

@ -39,23 +39,23 @@

#include <misc/dlist.h>

/* initialize the nano timeouts part of CCS when enabled in the kernel */
/* initialize the nano timeouts part of TCS when enabled in the kernel */

static inline void _nano_timeout_ccs_init(struct ccs *ccs)
static inline void _nano_timeout_tcs_init(struct tcs *tcs)
{
/*
* Must be initialized here and when dequeueing a timeout so that code
* not dealing with timeouts does not have to handle this, such as when
* waiting forever on a semaphore.
*/
ccs->nano_timeout.delta_ticks_from_prev = -1;
tcs->nano_timeout.delta_ticks_from_prev = -1;

/*
* These are initialized when enqueing on the timeout queue:
*
* ccs->nano_timeout.node.next
* ccs->nano_timeout.node.prev
* ccs->nano_timeout.wait_q
* tcs->nano_timeout.node.next
* tcs->nano_timeout.node.prev
* tcs->nano_timeout.wait_q
*/
}

@ -70,13 +70,13 @@ static inline struct _nano_timeout *_nano_timeout_handle_one_timeout(
sys_dlist_t *timeout_q)
{
struct _nano_timeout *t = (void *)sys_dlist_get(timeout_q);
struct ccs *ccs = CONTAINER_OF(t, struct ccs, nano_timeout);
struct tcs *tcs = CONTAINER_OF(t, struct tcs, nano_timeout);

if (ccs->nano_timeout.wait_q) {
_nano_timeout_remove_ccs_from_wait_q(ccs);
fiberRtnValueSet(ccs, (unsigned int)0);
if (tcs->nano_timeout.wait_q) {
_nano_timeout_remove_tcs_from_wait_q(tcs);
fiberRtnValueSet(tcs, (unsigned int)0);
}
_nano_fiber_schedule(ccs);
_nano_fiber_schedule(tcs);
t->delta_ticks_from_prev = -1;

return (struct _nano_timeout *)sys_dlist_peek_head(timeout_q);

@ -95,10 +95,10 @@ static inline void _nano_timeout_handle_timeouts(void)
}

/* abort a timeout for a specific fiber */
static inline void _nano_timeout_abort(struct ccs *ccs)
static inline void _nano_timeout_abort(struct tcs *tcs)
{
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
struct _nano_timeout *t = &ccs->nano_timeout;
struct _nano_timeout *t = &tcs->nano_timeout;

if (-1 == t->delta_ticks_from_prev) {
return;

@ -140,12 +140,12 @@ static int _nano_timeout_insert_point_test(sys_dnode_t *test, void *timeout)
}

/* put a fiber on the timeout queue and record its wait queue */
static inline void _nano_timeout_add(struct ccs *ccs,
static inline void _nano_timeout_add(struct tcs *tcs,
struct _nano_queue *wait_q,
int32_t timeout)
{
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
struct _nano_timeout *t = &ccs->nano_timeout;
struct _nano_timeout *t = &tcs->nano_timeout;

t->delta_ticks_from_prev = timeout;
t->wait_q = wait_q;

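A note on the mechanism: the timeout queue that _nano_timeout_add() feeds stores each entry's delta_ticks_from_prev relative to its predecessor, with -1 marking "not queued" (hence the sentinel checks above). Below is a minimal, self-contained sketch of that delta-list idea; the names (struct dnode, delta_insert) are illustrative, not the kernel's, and the kernel's insert differs in its details.

#include <stdio.h>

/* illustrative delta-list node, not the kernel's struct _nano_timeout */
struct dnode {
	struct dnode *next;
	int delta;  /* ticks relative to the previous node; -1 = not queued */
};

/* insert n so it stores the ticks remaining after its predecessor fires */
static void delta_insert(struct dnode **head, struct dnode *n, int ticks)
{
	while (*head && (*head)->delta <= ticks) {
		ticks -= (*head)->delta;
		head = &(*head)->next;
	}
	n->delta = ticks;
	n->next = *head;
	if (*head) {
		(*head)->delta -= ticks;  /* successor is now relative to n */
	}
	*head = n;
}

int main(void)
{
	struct dnode *head = NULL;
	struct dnode n10, n3, n7;

	delta_insert(&head, &n10, 10);
	delta_insert(&head, &n3, 3);
	delta_insert(&head, &n7, 7);

	/* prints "3 4 3": absolute deadlines of 3, 7 and 10 ticks */
	for (struct dnode *n = head; n; n = n->next) {
		printf("%d ", n->delta);
	}
	printf("\n");
	return 0;
}

With this layout the tick handler only ever decrements the head's delta, which is why expiring one timeout is O(1) while insertion pays the list walk.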
@ -52,26 +52,27 @@ static inline void _nano_wait_q_init(struct _nano_queue *wait_q)
* Remove first fiber from a wait queue and put it on the ready queue, knowing
* that the wait queue is not empty.
*/
static inline tCCS *_nano_wait_q_remove_no_check(struct _nano_queue *wait_q)
static inline
struct tcs *_nano_wait_q_remove_no_check(struct _nano_queue *wait_q)
{
tCCS *ccs = wait_q->head;
struct tcs *tcs = wait_q->head;

if (wait_q->tail == wait_q->head) {
_nano_wait_q_reset(wait_q);
} else {
wait_q->head = ccs->link;
wait_q->head = tcs->link;
}
ccs->link = 0;
tcs->link = 0;

_nano_fiber_schedule(ccs);
return ccs;
_nano_fiber_schedule(tcs);
return tcs;
}

/*
* Remove first fiber from a wait queue and put it on the ready queue.
* Abort and return NULL if the wait queue is empty.
*/
static inline tCCS *_nano_wait_q_remove(struct _nano_queue *wait_q)
static inline struct tcs *_nano_wait_q_remove(struct _nano_queue *wait_q)
{
return wait_q->head ? _nano_wait_q_remove_no_check(wait_q) : NULL;
}

@ -79,37 +80,37 @@ static inline tCCS *_nano_wait_q_remove(struct _nano_queue *wait_q)
/* put current fiber on specified wait queue */
static inline void _nano_wait_q_put(struct _nano_queue *wait_q)
{
((tCCS *)wait_q->tail)->link = _nanokernel.current;
((struct tcs *)wait_q->tail)->link = _nanokernel.current;
wait_q->tail = _nanokernel.current;
}

#ifdef CONFIG_NANO_TIMEOUTS
static inline void _nano_timeout_remove_ccs_from_wait_q(struct ccs *ccs)
static inline void _nano_timeout_remove_tcs_from_wait_q(struct tcs *tcs)
{
struct _nano_queue *wait_q = ccs->nano_timeout.wait_q;
struct _nano_queue *wait_q = tcs->nano_timeout.wait_q;

if (wait_q->head == ccs) {
if (wait_q->head == tcs) {
if (wait_q->tail == wait_q->head) {
_nano_wait_q_reset(wait_q);
} else {
wait_q->head = ccs->link;
wait_q->head = tcs->link;
}
} else {
tCCS *prev = wait_q->head;
struct tcs *prev = wait_q->head;

while (prev->link != ccs) {
while (prev->link != tcs) {
prev = prev->link;
}
prev->link = ccs->link;
if (wait_q->tail == ccs) {
prev->link = tcs->link;
if (wait_q->tail == tcs) {
wait_q->tail = prev;
}
}
}
#include <timeout_q.h>
#else
#define _nano_timeout_ccs_init(ccs) do { } while ((0))
#define _nano_timeout_abort(ccs) do { } while ((0))
#define _nano_timeout_tcs_init(tcs) do { } while ((0))
#define _nano_timeout_abort(tcs) do { } while ((0))
#define _nano_get_earliest_timeouts_deadline() ((uint32_t)TICKS_UNLIMITED)
#endif

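Worth noting about _nano_timeout_remove_tcs_from_wait_q(): the wait queue is singly linked, so unlinking a waiter that is not at the head means walking from the head to find its predecessor, O(n) in the number of waiters. The standalone sketch below shows the same unlink with illustrative types; the real code resets an emptied queue through _nano_wait_q_reset() rather than NULLing the tail as done here.

/* illustrative types, not the kernel's _nano_queue / struct tcs */
struct waiter {
	struct waiter *link;
};

struct queue {
	struct waiter *head;
	struct waiter *tail;
};

/* remove w from q; assumes w is known to be queued */
static void unlink_waiter(struct queue *q, struct waiter *w)
{
	if (q->head == w) {
		q->head = w->link;
		if (q->tail == w) {
			q->tail = 0;  /* queue is now empty */
		}
		return;
	}

	struct waiter *prev = q->head;

	while (prev->link != w) {
		prev = prev->link;  /* find w's predecessor */
	}
	prev->link = w->link;
	if (q->tail == w) {
		q->tail = prev;
	}
}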
@ -1,4 +1,4 @@
/* nanokernel context support */
/* nanokernel thread support */

/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.

@ -32,7 +32,7 @@

/*
DESCRIPTION
This module provides general purpose context support, with applies to both
This module provides general purpose thread support, with applies to both
tasks or fibers.
*/

@ -44,29 +44,29 @@ tasks or fibers.

/**
*
* @brief Return the currently executing context
* @brief Return the currently executing thread
*
* This routine returns a pointer to the context control block of the currently
* executing context. It is cast to a nano_context_id_t for use publically.
* This routine returns a pointer to the thread control block of the currently
* executing thread. It is cast to a nano_thread_id_t for use publicly.
*
* @return nano_context_id_t of the currently executing context.
* @return nano_thread_id_t of the currently executing thread.
*/

nano_context_id_t context_self_get(void)
nano_thread_id_t sys_thread_self_get(void)
{
return _nanokernel.current;
}

/**
*
* @brief Return the type of the currently executing context
* @brief Return the type of the currently executing thread
*
* This routine returns the type of context currently executing.
* This routine returns the type of thread currently executing.
*
* @return nano_context_type_t of the currently executing context.
* @return nano_context_type_t of the currently executing thread.
*/

nano_context_type_t context_type_get(void)
nano_context_type_t sys_execution_context_type_get(void)
{
if (_IS_IN_ISR())
return NANO_CTX_ISR;

@ -79,133 +79,134 @@ nano_context_type_t context_type_get(void)

/**
*
* @brief Mark context as essential to system
* @brief Mark thread as essential to system
*
* This function tags the running fiber or task as essential to system
* option; exceptions raised by this context will be treated as a fatal
* option; exceptions raised by this thread will be treated as a fatal
* system error.
*
* @return N/A
*/

void _context_essential_set(void)
void _thread_essential_set(void)
{
_nanokernel.current->flags |= ESSENTIAL;
}

/**
*
* @brief Mark context as not essential to system
* @brief Mark thread as not essential to system
*
* This function tags the running fiber or task as not essential to system
* option; exceptions raised by this context may be recoverable.
* (This is the default tag for a context.)
* option; exceptions raised by this thread may be recoverable.
* (This is the default tag for a thread.)
*
* @return N/A
*/

void _context_essential_clear(void)
void _thread_essential_clear(void)
{
_nanokernel.current->flags &= ~ESSENTIAL;
}

/**
*
* @brief Is the specified context essential?
* @brief Is the specified thread essential?
*
* This routine indicates if the specified context is an essential system
* context. A NULL context pointer indicates that the current context is
* This routine indicates if the specified thread is an essential system
* thread. A NULL thread pointer indicates that the current thread is
* to be queried.
*
* @return Non-zero if specified context is essential, zero if it is not
* @return Non-zero if specified thread is essential, zero if it is not
*/

int _context_essential_check(tCCS *pCtx /* pointer to context */
int _is_thread_essential(struct tcs *pCtx /* pointer to thread */
)
{
return ((pCtx == NULL) ? _nanokernel.current : pCtx)->flags & ESSENTIAL;
}

#ifdef CONFIG_CONTEXT_CUSTOM_DATA
#ifdef CONFIG_THREAD_CUSTOM_DATA

/**
*
* @brief Set context's custom data
* @brief Set thread's custom data
*
* This routine sets the custom data value for the current task or fiber.
* Custom data is not used by the kernel itself, and is freely available
* for the context to use as it sees fit.
* for the thread to use as it sees fit.
*
* @param value New to set the thread's custom data to.
*
* @return N/A
*/

void context_custom_data_set(void *value /* new value */
)
void sys_thread_custom_data_set(void *value)
{
_nanokernel.current->custom_data = value;
}

/**
*
* @brief Get context's custom data
* @brief Get thread's custom data
*
* This function returns the custom data value for the current task or fiber.
*
* @return current handle value
*/

void *context_custom_data_get(void)
void *sys_thread_custom_data_get(void)
{
return _nanokernel.current->custom_data;
}

#endif /* CONFIG_CONTEXT_CUSTOM_DATA */
#endif /* CONFIG_THREAD_CUSTOM_DATA */

#if defined(CONFIG_CONTEXT_MONITOR)
#if defined(CONFIG_THREAD_MONITOR)
/**
*
* @brief Context exit routine
* @brief Thread exit routine
*
* This function is invoked when the specified context is aborted, either
* normally or abnormally. It is called for the termination of any context,
* This function is invoked when the specified thread is aborted, either
* normally or abnormally. It is called for the termination of any thread,
* (fibers and tasks).
*
* This routine must be invoked from a fiber to guarantee that the list
* of contexts does not change in mid-operation.
* of threads does not change in mid-operation.
*
* @return N/A
*
* \NOMANUAL
*/

void _context_exit(tCCS *pContext)
void _thread_exit(struct tcs *thread)
{
/*
* Remove context from the list of contexts. This singly linked list of
* contexts maintains ALL the contexts in the system: both tasks and
* Remove thread from the list of threads. This singly linked list of
* threads maintains ALL the threads in the system: both tasks and
* fibers regardless of whether they are runnable.
*/

if (pContext == _nanokernel.contexts) {
_nanokernel.contexts = _nanokernel.contexts->next_context;
if (thread == _nanokernel.threads) {
_nanokernel.threads = _nanokernel.threads->next_thread;
} else {
tCCS *pPrevContext;
struct tcs *prev_thread;

pPrevContext = _nanokernel.contexts;
while (pContext != pPrevContext->next_context) {
pPrevContext = pPrevContext->next_context;
prev_thread = _nanokernel.threads;
while (thread != prev_thread->next_thread) {
prev_thread = prev_thread->next_thread;
}
pPrevContext->next_context = pContext->next_context;
prev_thread->next_thread = thread->next_thread;
}
}
#endif /* CONFIG_CONTEXT_MONITOR */
#endif /* CONFIG_THREAD_MONITOR */

/**
*
* @brief Common context entry point function for kernel contexts
* @brief Common thread entry point function
*
* This function serves as the entry point for _all_ kernel contexts, i.e. both
* task and fiber contexts are instantiated such that initial execution starts
* This function serves as the entry point for _all_ threads, i.e. both
* task and fibers are instantiated such that initial execution starts
* here.
*
* This routine invokes the actual task or fiber entry point function and

@ -222,20 +223,20 @@ void _context_exit(tCCS *pContext)
* \NOMANUAL
*/

FUNC_NORETURN void _context_entry(
_ContextEntry pEntry, /* address of app entry point function */
_ContextArg parameter1, /* 1st arg to app entry point function */
_ContextArg parameter2, /* 2nd arg to app entry point function */
_ContextArg parameter3 /* 3rd arg to app entry point function */
FUNC_NORETURN void _thread_entry(
_thread_entry_t pEntry, /* address of app entry point function */
_thread_arg_t parameter1, /* 1st arg to app entry point function */
_thread_arg_t parameter2, /* 2nd arg to app entry point function */
_thread_arg_t parameter3 /* 3rd arg to app entry point function */
)
{
/* Execute the "application" entry point function */

pEntry(parameter1, parameter2, parameter3);

/* Determine if context can legally terminate itself via "return" */
/* Determine if thread can legally terminate itself via "return" */

if (_context_essential_check(NULL)) {
if (_is_thread_essential(NULL)) {
#ifdef CONFIG_NANOKERNEL
/*
* Nanokernel's background task must always be present,

@ -247,13 +248,13 @@ FUNC_NORETURN void _context_entry(
}
#endif /* CONFIG_NANOKERNEL */

/* Loss of essential context is a system fatal error */
/* Loss of essential thread is a system fatal error */

_NanoFatalErrorHandler(_NANO_ERR_INVALID_TASK_EXIT,
&_default_esf);
}

/* Gracefully terminate the currently executing context */
/* Gracefully terminate the currently executing thread */

#ifdef CONFIG_MICROKERNEL
if (((_nanokernel.current)->flags & TASK) == TASK) {

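A brief usage sketch for the renamed custom-data pair: both accessors operate on _nanokernel.current, so every fiber and task sees only its own pointer. The my_state/my_fiber names below are hypothetical, not code from this tree, and the example assumes CONFIG_THREAD_CUSTOM_DATA is enabled.

/* hypothetical per-fiber state attached via the custom data slot */
struct my_state {
	int error_count;
};

static struct my_state fiber_state;

static void my_fiber(int arg1, int arg2)
{
	sys_thread_custom_data_set(&fiber_state);

	/* ... later, possibly deep in a call chain ... */
	struct my_state *state = sys_thread_custom_data_get();

	state->error_count++;
}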
@ -47,30 +47,30 @@
*
* The list of runnable fibers is maintained via a single linked list
* in priority order. Numerically lower priorities represent higher priority
* contexts.
* fibers.
*
* Interrupts must already be locked to ensure list cannot change
* while this routine is executing!
*
* @return N/A
*/
void _nano_fiber_schedule(tCCS *ccs)
void _nano_fiber_schedule(struct tcs *tcs)
{
tCCS *pQ = (tCCS *)&_nanokernel.fiber;
struct tcs *pQ = (struct tcs *)&_nanokernel.fiber;

/*
* Search until end of list or until a fiber with numerically
* higher priority is located.
*/

while (pQ->link && (ccs->prio >= pQ->link->prio)) {
while (pQ->link && (tcs->prio >= pQ->link->prio)) {
pQ = pQ->link;
}

/* Insert fiber, following any equal priority fibers */

ccs->link = pQ->link;
pQ->link = ccs;
tcs->link = pQ->link;
pQ->link = tcs;
}


@ -88,33 +88,33 @@ void _fiber_start(char *pStack,
unsigned priority,
unsigned options)
{
tCCS *ccs;
struct tcs *tcs;
unsigned int imask;

ccs = (tCCS *) pStack;
_NewContext(pStack,
tcs = (struct tcs *) pStack;
_new_thread(pStack,
stackSize,
(_ContextEntry)pEntry,
(_thread_entry_t)pEntry,
(void *)parameter1,
(void *)parameter2,
(void *)0,
priority,
options);

/* _NewContext() has already set the flags depending on the 'options'
/* _new_thread() has already set the flags depending on the 'options'
* and 'priority' parameters passed to it */

/* lock interrupts to prevent corruption of the runnable fiber list */

imask = irq_lock();

/* make the newly crafted CCS a runnable fiber */
/* make the newly crafted TCS a runnable fiber */

_nano_fiber_schedule(ccs);
_nano_fiber_schedule(tcs);

/*
* Simply return to the caller if the current context is FIBER,
* otherwise swap into the newly created fiber context
* Simply return to the caller if the current thread is FIBER,
* otherwise swap into the newly created fiber
*/

if ((_nanokernel.current->flags & TASK) == TASK)

@ -127,12 +127,12 @@ void fiber_yield(void)
{
unsigned int imask = irq_lock();

if ((_nanokernel.fiber != (tCCS *)NULL) &&
if ((_nanokernel.fiber != (struct tcs *)NULL) &&
(_nanokernel.current->prio >= _nanokernel.fiber->prio)) {
/*
* Reinsert current context into the list of runnable contexts,
* Reinsert current thread into the list of runnable threads,
* and
* then swap to the context at the head of the fiber list.
* then swap to the thread at the head of the fiber list.
*/

_nano_fiber_schedule(_nanokernel.current);

@ -147,7 +147,7 @@ void fiber_yield(void)
*
* This routine is used when a fiber voluntarily gives up control of the CPU.
*
* This routine can only be called from a fiber context.
* This routine can only be called from a fiber.
*
* @return This function never returns
*/

@ -177,9 +177,9 @@ FUNC_NORETURN void _nano_fiber_swap(void)
#ifndef CONFIG_ARCH_HAS_NANO_FIBER_ABORT
FUNC_NORETURN void fiber_abort(void)
{
/* Do normal context exit cleanup, then give up CPU control */
/* Do normal thread exit cleanup, then give up CPU control */

_context_exit(_nanokernel.current);
_thread_exit(_nanokernel.current);
_nano_fiber_swap();
}
#endif

@ -211,18 +211,18 @@ void *fiber_delayed_start(char *stack, unsigned int stack_size_in_bytes,
unsigned int options, int32_t timeout_in_ticks)
{
unsigned int key;
struct ccs *ccs;
struct tcs *tcs;

ccs = (struct ccs *)stack;
_NewContext(stack, stack_size_in_bytes, (_ContextEntry)entry_point,
tcs = (struct tcs *)stack;
_new_thread(stack, stack_size_in_bytes, (_thread_entry_t)entry_point,
(void *)param1, (void *)param2, (void *)0, priority, options);

key = irq_lock();

_nano_timeout_add(ccs, NULL, timeout_in_ticks);
_nano_timeout_add(tcs, NULL, timeout_in_ticks);

irq_unlock(key);
return ccs;
return tcs;
}

FUNC_ALIAS(fiber_delayed_start_cancel, fiber_fiber_delayed_start_cancel, void);

@ -232,7 +232,7 @@ void fiber_delayed_start_cancel(void *handle)
{
int key = irq_lock();

_nano_timeout_abort((struct ccs *)handle);
_nano_timeout_abort((struct tcs *)handle);

irq_unlock(key);
}

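One detail of _nano_fiber_schedule() worth calling out: the >= comparison walks past runnable fibers of equal priority, so a newly scheduled fiber queues behind its peers and FIFO order among equals is preserved; a plain > would let it jump ahead of them. The insertion rule in isolation, with illustrative types rather than the kernel's:

struct node {
	struct node *link;
	int prio;  /* numerically lower value = higher priority */
};

/* insert n after the last queued node whose prio is <= n->prio */
static void prio_insert(struct node *dummy_head, struct node *n)
{
	struct node *q = dummy_head;

	while (q->link && (n->prio >= q->link->prio)) {
		q = q->link;
	}
	n->link = q->link;
	q->link = n;
}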
@ -126,9 +126,9 @@ void _fifo_put_non_preemptible(struct nano_fifo *fifo, void *data)

fifo->stat++;
if (fifo->stat <= 0) {
tCCS *ccs = _nano_wait_q_remove_no_check(&fifo->wait_q);
_nano_timeout_abort(ccs);
fiberRtnValueSet(ccs, (unsigned int)data);
struct tcs *tcs = _nano_wait_q_remove_no_check(&fifo->wait_q);
_nano_timeout_abort(tcs);
fiberRtnValueSet(tcs, (unsigned int)data);
} else {
enqueue_data(fifo, data);
}

@ -144,9 +144,9 @@ void nano_task_fifo_put( struct nano_fifo *fifo, void *data)

fifo->stat++;
if (fifo->stat <= 0) {
tCCS *ccs = _nano_wait_q_remove_no_check(&fifo->wait_q);
_nano_timeout_abort(ccs);
fiberRtnValueSet(ccs, (unsigned int)data);
struct tcs *tcs = _nano_wait_q_remove_no_check(&fifo->wait_q);
_nano_timeout_abort(tcs);
fiberRtnValueSet(tcs, (unsigned int)data);
_Swap(imask);
return;
} else {

@ -162,7 +162,7 @@ void nano_fifo_put(struct nano_fifo *fifo, void *data)
static void (*func[3])(struct nano_fifo *fifo, void *data) = {
nano_isr_fifo_put, nano_fiber_fifo_put, nano_task_fifo_put
};
func[context_type_get()](fifo, data);
func[sys_execution_context_type_get()](fifo, data);
}

FUNC_ALIAS(_fifo_get, nano_isr_fifo_get, void *);

@ -197,7 +197,7 @@ static inline void *dequeue_data(struct nano_fifo *fifo)
/**
* INTERNAL
* This function is capable of supporting invocations from fiber, task, and ISR
* contexts. However, the nano_isr_fifo_get, nano_task_fifo_get, and
* execution contexts. However, the nano_isr_fifo_get, nano_task_fifo_get, and
* nano_fiber_fifo_get aliases are created to support any required
* implementation differences in the future without introducing a source code
* migration issue.

@ -271,7 +271,7 @@ void *nano_fifo_get_wait(struct nano_fifo *fifo)
static void *(*func[3])(struct nano_fifo *fifo) = {
NULL, nano_fiber_fifo_get_wait, nano_task_fifo_get_wait
};
return func[context_type_get()](fifo);
return func[sys_execution_context_type_get()](fifo);
}

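The func[3] tables above index directly by the result of sys_execution_context_type_get(), which presumes the NANO_CTX_ISR, NANO_CTX_FIBER and NANO_CTX_TASK enumerators are 0, 1 and 2; the ISR slot is NULL in the blocking *_wait table since an ISR must never pend. A generic sketch of the dispatch pattern (all names illustrative):

enum ctx { CTX_ISR = 0, CTX_FIBER = 1, CTX_TASK = 2 };

static void isr_put(int v)   { /* non-blocking ISR path */ (void)v; }
static void fiber_put(int v) { /* fiber path */ (void)v; }
static void task_put(int v)  { /* task path */ (void)v; }

static void put(int v, enum ctx who)
{
	static void (*const tbl[3])(int) = { isr_put, fiber_put, task_put };

	tbl[who](v);  /* one indirect call instead of a branch chain */
}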
@ -81,7 +81,7 @@ uint64_t __noinit __idle_tsc; /* timestamp when CPU goes idle */
#define RAND32_INIT()
#endif

/* stack space for the background (or idle) task context */
/* stack space for the background (or idle) task */

char __noinit __stack main_task_stack[CONFIG_MAIN_STACK_SIZE];

@ -157,19 +157,23 @@ static void _main(void)
* @return N/A
*/

static void nano_init(tCCS *dummyOutContext)
static void nano_init(struct tcs *dummyOutContext)
{
/*
* Initialize the current execution context to permit a level of debugging
* Initialize the current execution thread to permit a level of debugging
* output if an exception should happen during nanokernel initialization.
* However, don't waste effort initializing the fields of the dummy context
* beyond those needed to identify it as a dummy context.
* However, don't waste effort initializing the fields of the dummy thread
* beyond those needed to identify it as a dummy thread.
*/

_nanokernel.current = dummyOutContext;

dummyOutContext->link =
(tCCS *)NULL; /* context not inserted into list */
/*
* Do not insert dummy execution context in the list of fibers, so that it
* does not get scheduled back in once context-switched out.
*/
dummyOutContext->link = (struct tcs *)NULL;

dummyOutContext->flags = FIBER | ESSENTIAL;
dummyOutContext->prio = 0;

@ -187,18 +191,18 @@ static void nano_init(tCCS *dummyOutContext)
#endif

/*
* Initialize the context control block (CCS) for the background task
* (or idle task). The entry point for this context is 'main'.
* Initialize the thread control block (TCS) for the background task
* (or idle task). The entry point for this thread is 'main'.
*/

_nanokernel.task = (tCCS *) main_task_stack;
_nanokernel.task = (struct tcs *) main_task_stack;

_NewContext(main_task_stack, /* pStackMem */
_new_thread(main_task_stack, /* pStackMem */
CONFIG_MAIN_STACK_SIZE, /* stackSize */
(_ContextEntry)_main, /* pEntry */
(_ContextArg)0, /* parameter1 */
(_ContextArg)0, /* parameter2 */
(_ContextArg)0, /* parameter3 */
(_thread_entry_t)_main, /* pEntry */
(_thread_arg_t)0, /* parameter1 */
(_thread_arg_t)0, /* parameter2 */
(_thread_arg_t)0, /* parameter3 */
-1, /* priority */
0 /* options */
);

@ -265,7 +269,7 @@ FUNC_NORETURN void _Cstart(void)
{
/* floating point operations are NOT performed during nanokernel init */

char dummyCCS[__tCCS_NOFLOAT_SIZEOF];
char dummyTCS[__tTCS_NOFLOAT_SIZEOF];

/*
* Initialize nanokernel data structures. This step includes

@ -273,7 +277,7 @@ FUNC_NORETURN void _Cstart(void)
* before the hardware initialization phase.
*/

nano_init((tCCS *)&dummyCCS);
nano_init((struct tcs *)&dummyTCS);

/* perform basic hardware initialization */


@ -301,7 +305,7 @@ FUNC_NORETURN void _Cstart(void)

PRINT_BOOT_BANNER();

/* context switch into background context (entry function is main()) */
/* context switch into background thread (entry function is main()) */

_nano_fiber_swap();

@ -76,14 +76,14 @@ FUNC_ALIAS(_lifo_put_non_preemptible, nano_fiber_lifo_put, void);
*/
void _lifo_put_non_preemptible(struct nano_lifo *lifo, void *data)
{
tCCS *ccs;
struct tcs *tcs;
unsigned int imask;

imask = irq_lock();
ccs = _nano_wait_q_remove(&lifo->wait_q);
if (ccs) {
_nano_timeout_abort(ccs);
fiberRtnValueSet(ccs, (unsigned int) data);
tcs = _nano_wait_q_remove(&lifo->wait_q);
if (tcs) {
_nano_timeout_abort(tcs);
fiberRtnValueSet(tcs, (unsigned int) data);
} else {
*(void **) data = lifo->list;
lifo->list = data;

@ -94,14 +94,14 @@ void _lifo_put_non_preemptible(struct nano_lifo *lifo, void *data)

void nano_task_lifo_put(struct nano_lifo *lifo, void *data)
{
tCCS *ccs;
struct tcs *tcs;
unsigned int imask;

imask = irq_lock();
ccs = _nano_wait_q_remove(&lifo->wait_q);
if (ccs) {
_nano_timeout_abort(ccs);
fiberRtnValueSet(ccs, (unsigned int) data);
tcs = _nano_wait_q_remove(&lifo->wait_q);
if (tcs) {
_nano_timeout_abort(tcs);
fiberRtnValueSet(tcs, (unsigned int) data);
_Swap(imask);
return;
} else {

@ -144,7 +144,7 @@ void *_lifo_get(struct nano_lifo *lifo)
/** INTERNAL
*
* There exists a separate nano_task_lifo_get_wait() implementation since a
* task context cannot pend on a nanokernel object. Instead, tasks will poll
* task cannot pend on a nanokernel object. Instead, tasks will poll
* the lifo object.
*/
void *nano_fiber_lifo_get_wait(struct nano_lifo *lifo )

@ -42,10 +42,10 @@
* nano_fiber_sem_take_wait, nano_task_sem_take_wait

* The semaphores are of the 'counting' type, i.e. each 'give' operation will
* increment the internal count by 1, if no context is pending on it. The 'init'
* increment the internal count by 1, if no fiber is pending on it. The 'init'
* call initializes the count to 0. Following multiple 'give' operations, the
* same number of 'take' operations can be performed without the calling context
* having to pend on the semaphore.
* same number of 'take' operations can be performed without the calling fiber
* having to pend on the semaphore, or the calling task having to poll.
*/

/**

@ -75,9 +75,9 @@ FUNC_ALIAS(_sem_give_non_preemptible, nano_isr_sem_give, void);
FUNC_ALIAS(_sem_give_non_preemptible, nano_fiber_sem_give, void);

#ifdef CONFIG_NANO_TIMEOUTS
#define set_sem_available(ccs) fiberRtnValueSet(ccs, 1)
#define set_sem_available(tcs) fiberRtnValueSet(tcs, 1)
#else
#define set_sem_available(ccs) do { } while ((0))
#define set_sem_available(tcs) do { } while ((0))
#endif

/**

@ -89,16 +89,16 @@ FUNC_ALIAS(_sem_give_non_preemptible, nano_fiber_sem_give, void);
*/
void _sem_give_non_preemptible(struct nano_sem *sem)
{
tCCS *ccs;
struct tcs *tcs;
unsigned int imask;

imask = irq_lock();
ccs = _nano_wait_q_remove(&sem->wait_q);
if (!ccs) {
tcs = _nano_wait_q_remove(&sem->wait_q);
if (!tcs) {
sem->nsig++;
} else {
_nano_timeout_abort(ccs);
set_sem_available(ccs);
_nano_timeout_abort(tcs);
set_sem_available(tcs);
}

irq_unlock(imask);

@ -106,14 +106,14 @@ void _sem_give_non_preemptible(struct nano_sem *sem)

void nano_task_sem_give(struct nano_sem *sem)
{
tCCS *ccs;
struct tcs *tcs;
unsigned int imask;

imask = irq_lock();
ccs = _nano_wait_q_remove(&sem->wait_q);
if (ccs) {
_nano_timeout_abort(ccs);
set_sem_available(ccs);
tcs = _nano_wait_q_remove(&sem->wait_q);
if (tcs) {
_nano_timeout_abort(tcs);
set_sem_available(tcs);
_Swap(imask);
return;
} else {

@ -128,7 +128,7 @@ void nano_sem_give(struct nano_sem *sem)
static void (*func[3])(struct nano_sem *sem) = {
nano_isr_sem_give, nano_fiber_sem_give, nano_task_sem_give
};
func[context_type_get()](sem);
func[sys_execution_context_type_get()](sem);
}

FUNC_ALIAS(_sem_take, nano_isr_sem_take, int);

@ -152,9 +152,9 @@ int _sem_take(

/**
* INTERNAL
* There exists a separate nano_task_sem_take_wait() implementation since a task
* context cannot pend on a nanokernel object. Instead, tasks will poll
* the sempahore object.
* There exists a separate nano_task_sem_take_wait() implementation since a
* task cannot pend on a nanokernel object. Instead, tasks will poll the
* sempahore object.
*/
void nano_fiber_sem_take_wait(struct nano_sem *sem)
{

@ -201,7 +201,7 @@ void nano_sem_take_wait(struct nano_sem *sem)
static void (*func[3])(struct nano_sem *sem) = {
NULL, nano_fiber_sem_take_wait, nano_task_sem_take_wait
};
func[context_type_get()](sem);
func[sys_execution_context_type_get()](sem);
}

#ifdef CONFIG_NANO_TIMEOUTS

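A small usage sketch of the counting behavior documented above: every give either wakes a pending fiber or bumps the internal count, so gives are never lost. The producer/consumer pair below is hypothetical (not from this tree) and assumes the usual nano_sem_init() initializer.

static struct nano_sem data_ready;

/* fiber side: pends until the task signals */
static void consumer_fiber(int a1, int a2)
{
	while (1) {
		nano_fiber_sem_take_wait(&data_ready);
		/* ... consume one unit of work ... */
	}
}

/* task side: each give is remembered even if nobody is pending yet */
static void producer_task(void)
{
	nano_sem_init(&data_ready);
	nano_task_sem_give(&data_ready);
}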
@ -56,7 +56,7 @@ APIs to the same function, since they have identical implementations.
*
* This function initializes a nanokernel stack object structure.
*
* It may be called from either a fiber or a task context.
* It may be called from either a fiber or a task.
*
* @return N/A
*

@ -72,7 +72,7 @@ void nano_stack_init(
)
{
stack->next = stack->base = data;
stack->fiber = (tCCS *)0;
stack->fiber = (struct tcs *)0;
}

FUNC_ALIAS(_stack_push_non_preemptible, nano_isr_stack_push, void);

@ -100,16 +100,16 @@ void _stack_push_non_preemptible(
uint32_t data /* data to push on stack */
)
{
tCCS *ccs;
struct tcs *tcs;
unsigned int imask;

imask = irq_lock();

ccs = stack->fiber;
if (ccs) {
tcs = stack->fiber;
if (tcs) {
stack->fiber = 0;
fiberRtnValueSet(ccs, data);
_nano_fiber_schedule(ccs);
fiberRtnValueSet(tcs, data);
_nano_fiber_schedule(tcs);
} else {
*(stack->next) = data;
stack->next++;

@ -123,7 +123,7 @@ void _stack_push_non_preemptible(
* @brief Push data onto a nanokernel stack
*
* This routine pushes a data item onto a stack object; it may be called only
* from a task context. A fiber pending on the stack object will be
* from a task. A fiber pending on the stack object will be
* made ready, and will preempt the running task immediately.
*
* @return N/A

@ -134,16 +134,16 @@ void nano_task_stack_push(
uint32_t data /* data to push on stack */
)
{
tCCS *ccs;
struct tcs *tcs;
unsigned int imask;

imask = irq_lock();

ccs = stack->fiber;
if (ccs) {
tcs = stack->fiber;
if (tcs) {
stack->fiber = 0;
fiberRtnValueSet(ccs, data);
_nano_fiber_schedule(ccs);
fiberRtnValueSet(tcs, data);
_nano_fiber_schedule(tcs);
_Swap(imask);
return;
} else {

@ -204,7 +204,7 @@ int _stack_pop(
* @brief Pop data from a nanokernel stack, wait if empty
*
* Pop the first data word from a nanokernel stack object; it can only be
* called from a fiber context
* called from a fiber.
*
* If data is not available the calling fiber will pend until data is pushed
* onto the stack.

@ -213,7 +213,7 @@ int _stack_pop(
*
* INTERNAL
* There exists a separate nano_task_stack_pop_wait() implementation since a
* task context cannot pend on a nanokernel object. Instead tasks will poll the
* task cannot pend on a nanokernel object. Instead tasks will poll the
* the stack object.
*/

@ -243,7 +243,7 @@ uint32_t nano_fiber_stack_pop_wait(
* @brief Pop data from a nanokernel stack, poll if empty
*
* Pop the first data word from a nanokernel stack; it can only be called
* from a task context.
* from a task.
*
* If data is not available the calling task will poll until data is pushed
* onto the stack.

@ -40,7 +40,7 @@ struct nano_timer *_nano_timer_list = NULL;
*
* This function initializes a nanokernel timer object structure.
*
* It may be called from either a fiber or task context.
* It may be called from either a fiber or task.
*
* The <userData> passed to this function must have enough space for a pointer
* in its first field, that may be overwritten when the timer expires, plus

@ -42,7 +42,7 @@
uint32_t _sys_profiler_buffer[CONFIG_PROFILER_BUFFER_SIZE];

#ifdef CONFIG_PROFILER_CONTEXT_SWITCH
void *_collector_context=NULL;
void *_collector_fiber=NULL;
#endif

/**

@ -89,22 +89,22 @@ void _sys_profiler_context_switch(void)
return;
}

if (_collector_context != _nanokernel.current) {
if (_collector_fiber != _nanokernel.current) {
data[0] = nano_tick_get_32();
data[1] = (uint32_t)_nanokernel.current;

/*
* The mechanism we use to log the profile events uses a sync semaphore
* to inform that there are available events to be collected. The
* context switch event can be triggered from a task context. When we
* signal a semaphore from a task context and a fiber is waiting for
* context switch event can be triggered from a task. When we
* signal a semaphore from a task and a fiber is waiting for
* that semaphore, a context switch is generated immediately. Due to
* the fact that we register the context switch event while the context
* switch is being processed, a new context switch can be generated
* before the kernel finishes processing the current context switch. We
* need to prevent this because the kernel is not able to handle it.
* The _sem_give_non_preemptible function does not trigger a context
* switch when we signal the semaphore from any type of context. Using
* switch when we signal the semaphore from any type of thread. Using
* _sys_event_logger_put_non_preemptible function, that internally uses
* _sem_give_non_preemptible function for signaling the sync semaphore,
* allow us registering the context switch event without triggering any

@ -117,6 +117,6 @@ void _sys_profiler_context_switch(void)

void sys_profiler_register_as_collector(void)
{
_collector_context = _nanokernel.current;
_collector_fiber = _nanokernel.current;
}
#endif /* CONFIG_PROFILER_CONTEXT_SWITCH */

@ -82,7 +82,7 @@ struct bt_buf *bt_buf_get(enum bt_buf_type type, size_t reserve_head)

buf = nano_fifo_get(avail);
if (!buf) {
if (context_type_get() == NANO_CTX_ISR) {
if (sys_execution_context_type_get() == NANO_CTX_ISR) {
BT_ERR("Failed to get free buffer\n");
return NULL;
}

@ -64,7 +64,7 @@ static BT_STACK_NOINIT(rx_prio_fiber_stack, 256);
static BT_STACK_NOINIT(cmd_tx_fiber_stack, 256);

#if defined(CONFIG_BLUETOOTH_DEBUG)
static nano_context_id_t rx_prio_fiber_id;
static nano_thread_id_t rx_prio_fiber_id;
#endif

struct bt_dev bt_dev;

@ -189,7 +189,7 @@ int bt_hci_cmd_send_sync(uint16_t opcode, struct bt_buf *buf,
* event and giving back the blocking semaphore.
*/
#if defined(CONFIG_BLUETOOTH_DEBUG)
if (context_self_get() == rx_prio_fiber_id) {
if (sys_thread_self_get() == rx_prio_fiber_id) {
BT_ERR("called from invalid context!\n");
return -EDEADLK;
}

@ -277,11 +277,11 @@ static void analyze_stack(const char *name, const char *stack, unsigned size,
{
unsigned i, stack_offset, pcnt, unused = 0;

/* The CCS is always placed on a 4-byte aligned boundary - if
/* The TCS is always placed on a 4-byte aligned boundary - if
* the stack beginning doesn't match that there will be some
* unused bytes in the beginning.
*/
stack_offset = __tCCS_SIZEOF + ((4 - ((unsigned)stack % 4)) % 4);
stack_offset = __tTCS_SIZEOF + ((4 - ((unsigned)stack % 4)) % 4);

if (stack_growth == STACK_DIRECTION_DOWN) {
for (i = stack_offset; i < size; i++) {

@ -313,7 +313,7 @@ static void analyze_stacks(struct bt_conn *conn, struct bt_conn **ref)
{
int stack_growth;

printk("sizeof(tCCS) = %u\n", __tCCS_SIZEOF);
printk("sizeof(tTCS) = %u\n", __tTCS_SIZEOF);

if (conn > *ref) {
printk("stack grows up\n");

@ -1040,7 +1040,7 @@ static void rx_prio_fiber(void)

/* So we can avoid bt_hci_cmd_send_sync deadlocks */
#if defined(CONFIG_BLUETOOTH_DEBUG)
rx_prio_fiber_id = context_self_get();
rx_prio_fiber_id = sys_thread_self_get();
#endif

while (1) {

@ -73,7 +73,7 @@ static struct nano_sem contexts_lock;

static void context_sem_give(struct nano_sem *chan)
{
switch (context_type_get()) {
switch (sys_execution_context_type_get()) {
case NANO_CTX_FIBER:
nano_fiber_sem_give(chan);
break;

@ -80,7 +80,7 @@ static unsigned calculate_unused(const char *stack, unsigned size,
unsigned i, unused = 0;

if (stack_growth == STACK_DIRECTION_DOWN) {
for (i = __tCCS_SIZEOF; i < size; i++) {
for (i = __tTCS_SIZEOF; i < size; i++) {
if ((unsigned char)stack[i] == 0xaa) {
unused++;
} else {

@ -88,7 +88,7 @@ static unsigned calculate_unused(const char *stack, unsigned size,
}
}
} else {
for (i = size - 1; i >= __tCCS_SIZEOF; i--) {
for (i = size - 1; i >= __tTCS_SIZEOF; i--) {
if ((unsigned char)stack[i] == 0xaa) {
unused++;
} else {

@ -119,10 +119,10 @@ void analyze_stacks(struct net_buf *buf, struct net_buf **ref)
unused_tx = calculate_unused(tx_fiber_stack, sizeof(tx_fiber_stack),
stack_growth);

printk("net: 802.15.4: stack grows %s, sizeof(tCCS): %u "
printk("net: 802.15.4: stack grows %s, sizeof(tTCS): %u "
"rx stack(%p/%u): unused %u/%u "
"tx stack(%p/%u): unused %u/%u\n",
dir, __tCCS_SIZEOF,
dir, __tTCS_SIZEOF,
rx_fiber_stack, sizeof(rx_fiber_stack),
unused_rx, sizeof(rx_fiber_stack),
tx_fiber_stack, sizeof(tx_fiber_stack),

@ -102,7 +102,7 @@ static unsigned calculate_unused(const char *stack, unsigned size,
unsigned i, unused = 0;

if (stack_growth == STACK_DIRECTION_DOWN) {
for (i = __tCCS_SIZEOF; i < size; i++) {
for (i = __tTCS_SIZEOF; i < size; i++) {
if ((unsigned char)stack[i] == 0xaa) {
unused++;
} else {

@ -110,7 +110,7 @@ static unsigned calculate_unused(const char *stack, unsigned size,
}
}
} else {
for (i = size - 1; i >= __tCCS_SIZEOF; i--) {
for (i = size - 1; i >= __tTCS_SIZEOF; i--) {
if ((unsigned char)stack[i] == 0xaa) {
unused++;
} else {

@ -141,10 +141,10 @@ static void analyze_stacks(struct net_buf *buf, struct net_buf **ref)
unused_tx = calculate_unused(tx_fiber_stack, sizeof(tx_fiber_stack),
stack_growth);

printk("net: ip: stack grows %s, sizeof(tCCS): %u "
printk("net: ip: stack grows %s, sizeof(tTCS): %u "
"rx stack(%p/%u): unused %u/%u "
"tx stack(%p/%u): unused %u/%u\n",
dir, __tCCS_SIZEOF,
dir, __tTCS_SIZEOF,
rx_fiber_stack, sizeof(rx_fiber_stack),
unused_rx, sizeof(rx_fiber_stack),
tx_fiber_stack, sizeof(tx_fiber_stack),

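Both calculate_unused() variants rely on the stack having been painted with the 0xaa marker byte at startup: marker bytes that survive were never touched, so counting them from the unused end yields the high-water headroom. A self-contained illustration of the technique (layout and sizes are made up for the example):

#include <stdio.h>
#include <string.h>

#define STACK_SIZE 256
#define PAINT 0xaa

static unsigned char stack[STACK_SIZE];

int main(void)
{
	/* paint at init time, before the stack is ever used */
	memset(stack, PAINT, sizeof(stack));

	/* pretend a down-growing stack consumed 100 bytes from the top */
	memset(stack + STACK_SIZE - 100, 0, 100);

	/* count surviving marker bytes from the low (unused) end */
	unsigned unused = 0;

	while (unused < STACK_SIZE && stack[unused] == PAINT) {
		unused++;
	}
	printf("unused: %u of %u bytes\n", unused, STACK_SIZE);
	return 0;
}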
@ -51,7 +51,7 @@ extern void philEntry(void);
char __stack profiler_stack[2][STSIZE];

struct context_switch_data_t {
uint32_t context_id;
uint32_t thread_id;
uint32_t last_time_executed;
uint32_t count;
};

@ -66,14 +66,14 @@ struct context_switch_data_t
struct event_logger sys_profiler;


void register_context_switch_data(uint32_t timestamp, uint32_t context_id)
void register_context_switch_data(uint32_t timestamp, uint32_t thread_id)
{
int found;
int i;

found=0;
for (i=0; (i<MAX_BUFFER_CONTEXT_DATA) && (found==0); i++) {
if (context_switch_summary_data[i].context_id == context_id) {
if (context_switch_summary_data[i].thread_id == thread_id) {
context_switch_summary_data[i].last_time_executed = timestamp;
context_switch_summary_data[i].count += 1;
found=1;

@ -82,8 +82,8 @@ void register_context_switch_data(uint32_t timestamp, uint32_t context_id)

if (!found) {
for (i=0; i<MAX_BUFFER_CONTEXT_DATA; i++) {
if (context_switch_summary_data[i].context_id == 0) {
context_switch_summary_data[i].context_id = context_id;
if (context_switch_summary_data[i].thread_id == 0) {
context_switch_summary_data[i].thread_id = thread_id;
context_switch_summary_data[i].last_time_executed = timestamp;
context_switch_summary_data[i].count = 1;
break;

@ -93,10 +93,10 @@ void register_context_switch_data(uint32_t timestamp, uint32_t context_id)
}


void print_context_data(uint32_t context_id, uint32_t count,
void print_context_data(uint32_t thread_id, uint32_t count,
uint32_t last_time_executed, int indice)
{
PRINTF("\x1b[%d;2H%u ", 15 + indice, context_id);
PRINTF("\x1b[%d;2H%u ", 15 + indice, thread_id);
PRINTF("\x1b[%d;14H%u ", 15 + indice, count);
}

@ -104,7 +104,7 @@ void print_context_data(uint32_t context_id, uint32_t count,
/**
* @brief Summary data printer fiber
*
* @details Print the summary data of the context swith events
* @details Print the summary data of the context switch events
* and the total dropped event ocurred.
*
* @return No return value.

@ -119,10 +119,10 @@ void summary_data_printer(void)

/* print context switch data */
PRINTF("\x1b[13;1HContext switch summary");
PRINTF("\x1b[14;1HContext Id Amount of context switches");
PRINTF("\x1b[14;1HThread Id Amount of context switches");
for (i=0; i<MAX_BUFFER_CONTEXT_DATA; i++) {
if (context_switch_summary_data[i].context_id != 0) {
print_context_data(context_switch_summary_data[i].context_id,
if (context_switch_summary_data[i].thread_id != 0) {
print_context_data(context_switch_summary_data[i].thread_id,
context_switch_summary_data[i].count,
context_switch_summary_data[i].last_time_executed, i);
}

@ -116,9 +116,9 @@ static inline void _LoadAllFloatRegisters(FP_REG_SET *pFromBuffer)
* This function loads ALL floating point registers from the memory buffer
* specified by <pFromToBuffer>, and then stores them back to that buffer.
*
* This routine is called by a high priority context prior to calling a primitive
* This routine is called by a high priority thread prior to calling a primitive
* that pends and triggers a co-operative context switch to a low priority
* context. Because the kernel doesn't save floating point context for
* thread. Because the kernel doesn't save floating point context for
* co-operative context switches, the x87 FPU register stack must be put back
* in an empty state before the switch occurs in case the next task to perform
* floating point operations was also co-operatively switched out and simply

@ -205,12 +205,12 @@ static inline void _StoreAllFloatRegisters(FP_REG_SET *pToBuffer)
*
* @brief Dump non-volatile FP registers to memory
*
* This routine is called by a high priority context after resuming execution
* This routine is called by a high priority thread after resuming execution
* from calling a primitive that will pend and thus result in a co-operative
* context switch to a low priority context.
* context switch to a low priority thread.
*
* Only the non-volatile floating point registers are expected to survive across
* a function call, regardless of whether the call results in the context being
* a function call, regardless of whether the call results in the thread being
* pended.
*
* @return N/A

@ -38,7 +38,7 @@ verions utilizes a task and a fiber.

The load/store test validates the nanokernel's floating point unit context
save/restore mechanism. (For the IA-32 architecture this includes the x87 FPU
(MMX) registers and the XMM registers.) This test utilizes a pair of contexts
(MMX) registers and the XMM registers.) This test utilizes a pair of threads
of different priorities that each use the floating point registers. The context
switching that occurs exercises the kernel's ability to properly preserve the
floating point registers. The test also exercises the kernel's ability to

@ -51,7 +51,7 @@ should be enhanced to ensure that the architectures' _Swap() routine doesn't
context switch more registers that it needs to (which would represent a
performance issue). For example, on the IA-32, the test should issue
a nanoCpuFpDisable() from main(), and then indicate that only x87 FPU
registers will be utilized (nanoCpuFpEnable). The fiber context should continue
registers will be utilized (nanoCpuFpEnable). The fiber should continue
to load ALL non-integer registers, but main() should validate that only the
x87 FPU registers are being saved/restored.
*/

@ -106,18 +106,18 @@ x87 FPU registers are being saved/restored.
#define TICK_COUNT_GET() task_tick_get_32()
#endif

/* space for float register load/store area used by low priority task context */
/* space for float register load/store area used by low priority task */

static FP_REG_SET floatRegSetLoad;
static FP_REG_SET floatRegSetStore;

/* space for float register load/store area used by high priority context */
/* space for float register load/store area used by high priority thread */

static FP_REG_SET floatRegisterSet;


#ifdef CONFIG_NANOKERNEL
/* stack for high priority fiber context (also use .bss for floatRegisterSet) */
/* stack for high priority fiber (also use .bss for floatRegisterSet) */

static char __stack fiberStack[1024];


@ -140,7 +140,7 @@ static volatile unsigned int load_store_high_count = 0;
/**
*
* main -
* @brief Low priority FPU load/store context
* @brief Low priority FPU load/store thread
*
* @return N/A
*/

@ -167,7 +167,7 @@ void load_store_low(void)
*/
#else /* ! CONFIG_AUTOMATIC_FP_ENABLING */
#if defined(CONFIG_FLOAT)
task_float_enable(context_self_get());
task_float_enable(sys_thread_self_get());
#endif
#endif /* CONFIG_AUTOMATIC_FP_ENABLING */


@ -204,7 +204,7 @@ void load_store_low(void)

/*
* Initialize floating point load buffer to known values;
* these values must be different than the value used in other contexts.
* these values must be different than the value used in other threads.
*/

floatRegInitByte = MAIN_FLOAT_REG_CHECK_BYTE;

@ -231,8 +231,8 @@ void load_store_low(void)
_LoadAllFloatRegisters(&floatRegSetLoad);

/*
* Waste some cycles to give the high priority load/store context
* an opportunity to run when the low priority context is using the
* Waste some cycles to give the high priority load/store thread
* an opportunity to run when the low priority thread is using the
* floating point registers.
*
* IMPORTANT: This logic requires that TICK_COUNT_GET() not perform

@ -257,7 +257,7 @@ void load_store_low(void)
/*
* Compare each byte of buffer to ensure the expected value is
* present, indicating that the floating point registers weren't
* impacted by the operation of the high priority context(s).
* impacted by the operation of the high priority thread(s).
*
* Display error message and terminate if discrepancies are detected.
*/

@ -299,7 +299,7 @@ void load_store_low(void)
*/
if ((load_store_low_count % 1000) == 0) {
#if defined(CONFIG_FLOAT)
task_float_disable(context_self_get());
task_float_disable(sys_thread_self_get());
#endif
}
#endif /* CONFIG_AUTOMATIC_FP_ENABLING */

@ -308,7 +308,7 @@ void load_store_low(void)

/**
*
* @brief High priority FPU load/store context
* @brief High priority FPU load/store thread
*
* @return N/A
*/

@ -348,7 +348,7 @@ void load_store_high(void)
* successive location in the floatRegisterSet structure.
*
* The initial byte value, and thus the contents of the entire
* floatRegisterSet structure, must be different for each context to
* floatRegisterSet structure, must be different for each thread to
* effectively test the nanokernel's ability to properly save/restore
* the floating point values during a context switch.
*/

@ -365,7 +365,7 @@ void load_store_high(void)
* the floatRegisterSet structure.
*
* The goal of the loading all floating point registers with values
* that differ from the values used in other contexts is to help
* that differ from the values used in other threads is to help
* determine whether the floating point register save/restore mechanism
* in the nanokernel's context switcher is operating correctly.
*

@ -382,11 +382,11 @@ void load_store_high(void)

/*
* Relinquish the processor for the remainder of the current system
* clock tick, so that lower priority contexts get a chance to run.
* clock tick, so that lower priority threads get a chance to run.
*
* This exercises the ability of the nanokernel to restore the FPU
* state of a low priority context _and_ the ability of the nanokernel
* to provide a "clean" FPU state to this context once the sleep ends.
* state of a low priority thread _and_ the ability of the nanokernel
* to provide a "clean" FPU state to this thread once the sleep ends.
*/

#ifdef CONFIG_NANOKERNEL

@ -34,7 +34,7 @@
DESCRIPTION
This module is used for the microkernel version of the FPU sharing test,
and supplements the basic load/store test by incorporating two additional
contexts that utilize the floating point unit.
threads that utilize the floating point unit.

Testing utilizes a pair of tasks that independently compute pi. The lower
priority task is regularly preempted by the higher priority task, thereby

@ -146,11 +146,11 @@ void calculate_pi_high(void)

/*
* Relinquish the processor for the remainder of the current system
* clock tick, so that lower priority contexts get a chance to run.
* clock tick, so that lower priority threads get a chance to run.
*
* This exercises the ability of the nanokernel to restore the FPU
* state of a low priority context _and_ the ability of the nanokernel
* to provide a "clean" FPU state to this context once the sleep ends.
* state of a low priority thread _and_ the ability of the nanokernel
* to provide a "clean" FPU state to this thread once the sleep ends.
*/

task_sleep(1);

@ -68,7 +68,7 @@ NANO_CPU_INT_REGISTER(nanoIntStub, TEST_SOFT_INT, 0);
static volatile int excHandlerExecuted;
static volatile int intHandlerExecuted;
/* Assume the spurious interrupt handler will execute and abort the task/fiber */
static volatile int spurHandlerAbortedContext = 1;
static volatile int spurHandlerAbortedThread = 1;

#ifdef CONFIG_NANOKERNEL
static char __stack fiberStack[512];

@ -195,7 +195,7 @@ static void idtSpurFiber(int a1, int a2)
_trigger_spurHandler();

/* Shouldn't get here */
spurHandlerAbortedContext = 0;
spurHandlerAbortedThread = 0;

}

@ -270,9 +270,9 @@ void main(void)
#endif
/*
* The fiber/task should not run past where the spurious interrupt is
* generated. Therefore spurHandlerAbortedContext should remain at 1.
* generated. Therefore spurHandlerAbortedThread should remain at 1.
*/
if (spurHandlerAbortedContext == 0) {
if (spurHandlerAbortedThread == 0) {
TC_ERROR("Spurious handler did not execute as expected\n");
rv = TC_FAIL;
goto doneTests;

@@ -55,7 +55,7 @@ void lifo_test_init(void)

/**
*
* @brief Lifo test context
* @brief Lifo test fiber
*
* @param par1 Ignored parameter.
* @param par2 Number of test loops.

@@ -95,7 +95,7 @@ void lifo_fiber1(int par1, int par2)

/**
*
* @brief Lifo test context
* @brief Lifo test fiber
*
* @param par1 Address of the counter.
* @param par2 Number of test cycles.

@@ -127,7 +127,7 @@ void lifo_fiber2(int par1, int par2)

/**
*
* @brief Lifo test context
* @brief Lifo test fiber
*
* @param par1 Address of the counter.
* @param par2 Number of test loops.

@@ -200,7 +200,7 @@ int lifo_test(void)

return_value += check_result(i, t);

/* fiber contexts have done their job, they can stop now safely: */
/* fibers have done their job, they can stop now safely: */
for (j = 0; j < 2; j++) {
nano_task_fifo_put(&nanoFifo_sync, (void *) element);
}

@@ -230,7 +230,7 @@ int lifo_test(void)

return_value += check_result(i, t);

/* fiber contexts have done their job, they can stop now safely: */
/* fibers have done their job, they can stop now safely: */
for (j = 0; j < 2; j++) {
nano_task_fifo_put(&nanoFifo_sync, (void *) element);
}

@@ -274,7 +274,7 @@ int lifo_test(void)

return_value += check_result(i * 2, t);

/* fiber contexts have done their job, they can stop now safely: */
/* fibers have done their job, they can stop now safely: */
for (j = 0; j < 2; j++) {
nano_task_fifo_put(&nanoFifo_sync, (void *) element);
}
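The comment being reworded describes a common nanokernel shutdown handshake: each worker fiber blocks on a dedicated sync FIFO once its measurements are taken, and the task releases one element per fiber. A sketch of the fiber side, assuming the fibers end with a blocking get (nano_fiber_fifo_get_wait() is used this way elsewhere in this commit; the exact loop body is an assumption):

void lifo_fiber1(int par1, int par2)
{
	/* ... test loops ... */

	/* park until the task signals that results were checked */
	(void) nano_fiber_fifo_get_wait(&nanoFifo_sync);
}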
@@ -56,7 +56,7 @@ void fifo_test_init(void)

/**
*
* @brief Fifo test context
* @brief Fifo test fiber
*
* @return N/A
*

@@ -88,7 +88,7 @@ void fifo_fiber1(int par1, int par2)

/**
*
* @brief Fifo test context
* @brief Fifo test fiber
*
* @return N/A
*

@@ -121,7 +121,7 @@ void fifo_fiber2(int par1, int par2)

/**
*
* @brief Fifo test context
* @brief Fifo test fiber
*
* @return N/A
*

@@ -195,7 +195,7 @@ int fifo_test(void)

return_value += check_result(i, t);

/* fiber contexts have done their job, they can stop now safely: */
/* fibers have done their job, they can stop now safely: */
for (j = 0; j < 2; j++) {
nano_task_fifo_put(&nanoFifo_sync, (void *) element);
}

@@ -225,7 +225,7 @@ int fifo_test(void)

return_value += check_result(i, t);

/* fiber contexts have done their job, they can stop now safely: */
/* fibers have done their job, they can stop now safely: */
for (j = 0; j < 2; j++) {
nano_task_fifo_put(&nanoFifo_sync, (void *) element);
}

@@ -270,7 +270,7 @@ int fifo_test(void)

return_value += check_result(i * 2, t);

/* fiber contexts have done their job, they can stop now safely: */
/* fibers have done their job, they can stop now safely: */
for (j = 0; j < 2; j++) {
nano_task_fifo_put(&nanoFifo_sync, (void *) element);
}
@@ -53,7 +53,7 @@ void sema_test_init(void)

/**
*
* @brief Semaphore test context
* @brief Semaphore test fiber
*
* @param par1 Ignored parameter.
* @param par2 Number of test loops.

@@ -78,7 +78,7 @@ void sema_fiber1(int par1, int par2)

/**
*
* @brief Semaphore test context
* @brief Semaphore test fiber
*
* @param par1 Address of the counter.
* @param par2 Number of test cycles.

@@ -102,7 +102,7 @@ void sema_fiber2(int par1, int par2)

/**
*
* @brief Semaphore test context
* @brief Semaphore test fiber
*
* @param par1 Address of the counter.
* @param par2 Number of test cycles.
@@ -56,7 +56,7 @@ void stack_test_init(void)

/**
*
* @brief Stack test context
* @brief Stack test fiber
*
* @param par1 Ignored parameter.
* @param par2 Number of test loops.

@@ -92,7 +92,7 @@ void stack_fiber1(int par1, int par2)

/**
*
* @brief Stack test context
* @brief Stack test fiber
*
* @param par1 Address of the counter.
* @param par2 Number of test cycles.

@@ -122,7 +122,7 @@ void stack_fiber2(int par1, int par2)

/**
*
* @brief Stack test context
* @brief Stack test fiber
*
* @param par1 Address of the counter.
* @param par2 Number of test cycles.
@@ -36,11 +36,11 @@ Testing nano_cpu_idle()
Testing interrupt locking and unlocking
Testing inline interrupt locking and unlocking
Testing irq_disable() and irq_enable()
Testing context_self_get() from an ISR and task
Testing context_type_get() from an ISR
Testing context_type_get() from a task
Testing sys_thread_self_get() from an ISR and task
Testing sys_execution_context_type_get() from an ISR
Testing sys_execution_context_type_get() from a task
Spawning a fiber from a task
Fiber to test context_self_get() and context_type_get
Fiber to test sys_thread_self_get() and sys_execution_context_type_get
Fiber to test fiber_yield()
Verifying exception handler installed
excHandlerExecuted: 1
@@ -12,13 +12,13 @@ fiber_yield
- Called by an equal priority fiber when there is another fiber
- Called by a lower priority fiber when there is another fiber

context_self_get
sys_thread_self_get
- Called from an ISR (interrupted a task)
- Called from an ISR (interrupted a fiber)
- Called from a task
- Called from a fiber

context_type_get
sys_execution_context_type_get
- Called from an ISR that interrupted a task
- Called from an ISR that interrupted a fiber
- Called from a task
@@ -1,4 +1,4 @@
/* context.c - test nanokernel CPU and context APIs */
/* thread.c - test nanokernel CPU and thread APIs */

/*
* Copyright (c) 2012-2015 Wind River Systems, Inc.

@@ -32,9 +32,9 @@

/*
DESCRIPTION
This module tests the following CPU and context related routines:
This module tests the following CPU and thread related routines:
fiber_fiber_start(), task_fiber_start(), fiber_yield(),
context_self_get(), context_type_get(), nano_cpu_idle(),
sys_thread_self_get(), sys_execution_context_type_get(), nano_cpu_idle(),
irq_lock(), irq_unlock(),
irq_connect(), nanoCpuExcConnect(),
irq_enable(), irq_disable(),

@@ -61,8 +61,8 @@ This module tests the following CPU and context related routines:
#define FIBER_STACKSIZE 2000
#define FIBER_PRIORITY 4

#define CTX_SELF_CMD 0
#define CTX_TYPE_CMD 1
#define THREAD_SELF_CMD 0
#define EXEC_CTX_TYPE_CMD 1

#define UNKNOWN_COMMAND -1
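The two command macros select what the ISR samples. The exchange area itself is not part of this diff; a plausible reconstruction, inferred from the isrInfo.command/.error/.data/.value accesses below (the field types and the ISR_INFO name are assumptions):

/* sketch of the ISR exchange area implied by the accesses in this file */
typedef struct {
	int command;  /* THREAD_SELF_CMD or EXEC_CTX_TYPE_CMD */
	int error;    /* non-zero if the ISR detected a problem */
	void *data;   /* thread ID captured via sys_thread_self_get() */
	int value;    /* type from sys_execution_context_type_get() */
} ISR_INFO;

static ISR_INFO isrInfo;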
@@ -129,12 +129,12 @@ void isr_handler(void *data)
ARG_UNUSED(data);

switch (isrInfo.command) {
case CTX_SELF_CMD:
isrInfo.data = (void *) context_self_get();
case THREAD_SELF_CMD:
isrInfo.data = (void *) sys_thread_self_get();
break;

case CTX_TYPE_CMD:
isrInfo.value = context_type_get();
case EXEC_CTX_TYPE_CMD:
isrInfo.value = sys_execution_context_type_get();
break;

default:
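Driving this handler is a three-step protocol, condensed here from the call sites in nanoCtxTaskTest() further down:

isrInfo.command = EXEC_CTX_TYPE_CMD; /* 1. choose what the ISR samples */
isrInfo.error = 0;
_trigger_isrHandler();               /* 2. raise the software interrupt */
if ((isrInfo.error != 0) || (isrInfo.value != NANO_CTX_ISR)) {
	return TC_FAIL;              /* 3. validate what the ISR observed */
}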
@@ -348,40 +348,41 @@ int nanoCpuDisableInterruptsTest(disable_interrupt_func disableRtn,
*
* @brief Test the various nanoCtxXXX() routines from a task
*
* This routines tests the context_self_get() and context_type_get() routines from both
* a task and an ISR (that interrupted a task). Checking those routines with
* fibers are done elsewhere.
* This routine tests the sys_thread_self_get() and
* sys_execution_context_type_get() routines from both a task and an ISR (that
* interrupted a task). Checking those routines with fibers is done
* elsewhere.
*
* @return TC_PASS on success, TC_FAIL on failure
*/

int nanoCtxTaskTest(void)
{
nano_context_id_t ctxId;
nano_thread_id_t self_thread_id;

TC_PRINT("Testing context_self_get() from an ISR and task\n");
ctxId = context_self_get();
isrInfo.command = CTX_SELF_CMD;
TC_PRINT("Testing sys_thread_self_get() from an ISR and task\n");
self_thread_id = sys_thread_self_get();
isrInfo.command = THREAD_SELF_CMD;
isrInfo.error = 0;
_trigger_isrHandler();
if ((isrInfo.error != 0) || (isrInfo.data != (void *) ctxId)) {
if ((isrInfo.error != 0) || (isrInfo.data != (void *) self_thread_id)) {
/*
* Either the ISR detected an error, or the ISR context ID does not
* match the interrupted task's context ID.
* match the interrupted task's thread ID.
*/
return TC_FAIL;
}

TC_PRINT("Testing context_type_get() from an ISR\n");
isrInfo.command = CTX_TYPE_CMD;
TC_PRINT("Testing sys_execution_context_type_get() from an ISR\n");
isrInfo.command = EXEC_CTX_TYPE_CMD;
isrInfo.error = 0;
_trigger_isrHandler();
if ((isrInfo.error != 0) || (isrInfo.value != NANO_CTX_ISR)) {
return TC_FAIL;
}

TC_PRINT("Testing context_type_get() from a task\n");
if (context_type_get() != NANO_CTX_TASK) {
TC_PRINT("Testing sys_execution_context_type_get() from a task\n");
if (sys_execution_context_type_get() != NANO_CTX_TASK) {
return TC_FAIL;
}
@@ -390,44 +391,47 @@ int nanoCtxTaskTest(void)

/**
*
* @brief Test the various nanoCtxXXX() routines from a fiber
* @brief Test the various context/thread routines from a fiber
*
* This routines tests the context_self_get() and context_type_get() routines from both
* a fiber and an ISR (that interrupted a fiber). Checking those routines with
* tasks are done elsewhere.
* This routine tests the sys_thread_self_get() and
* sys_execution_context_type_get() routines from both a fiber and an ISR (that
* interrupted a fiber). Checking those routines with tasks is done
* elsewhere.
*
* This routine may set <fiberDetectedError> to the following values:
* 1 - if fiber context ID matches that of the task
* 2 - if context ID taken during ISR does not match that of the fiber
* 3 - context_type_get() when called from an ISR is not NANO_TYPE_ISR
* 3 - context_type_get() when called from a fiber is not NANO_TYPE_FIBER
* 1 - if fiber ID matches that of the task
* 2 - if thread ID taken during ISR does not match that of the fiber
* 3 - sys_execution_context_type_get() when called from an ISR is not
* NANO_TYPE_ISR
* 4 - sys_execution_context_type_get() when called from a fiber is not
* NANO_TYPE_FIBER
*
* @return TC_PASS on success, TC_FAIL on failure
*/

int nanoCtxFiberTest(nano_context_id_t taskCtxId)
int nanoCtxFiberTest(nano_thread_id_t task_thread_id)
{
nano_context_id_t ctxId;
nano_thread_id_t self_thread_id;

ctxId = context_self_get();
if (ctxId == taskCtxId) {
self_thread_id = sys_thread_self_get();
if (self_thread_id == task_thread_id) {
fiberDetectedError = 1;
return TC_FAIL;
}

isrInfo.command = CTX_SELF_CMD;
isrInfo.command = THREAD_SELF_CMD;
isrInfo.error = 0;
_trigger_isrHandler();
if ((isrInfo.error != 0) || (isrInfo.data != (void *) ctxId)) {
if ((isrInfo.error != 0) || (isrInfo.data != (void *) self_thread_id)) {
/*
* Either the ISR detected an error, or the ISR context ID does not
* match the interrupted fiber's context ID.
* match the interrupted fiber's thread ID.
*/
fiberDetectedError = 2;
return TC_FAIL;
}

isrInfo.command = CTX_TYPE_CMD;
isrInfo.command = EXEC_CTX_TYPE_CMD;
isrInfo.error = 0;
_trigger_isrHandler();
if ((isrInfo.error != 0) || (isrInfo.value != NANO_CTX_ISR)) {

@@ -435,7 +439,7 @@ int nanoCtxFiberTest(nano_context_id_t taskCtxId)
return TC_FAIL;
}

if (context_type_get() != NANO_CTX_FIBER) {
if (sys_execution_context_type_get() != NANO_CTX_FIBER) {
fiberDetectedError = 4;
return TC_FAIL;
}
@@ -458,7 +462,7 @@ int nanoCtxFiberTest(nano_context_id_t taskCtxId)

static void fiberHelper(int arg1, int arg2)
{
nano_context_id_t ctxId;
nano_thread_id_t self_thread_id;

ARG_UNUSED(arg1);
ARG_UNUSED(arg2);

@@ -471,8 +475,8 @@ static void fiberHelper(int arg1, int arg2)
fiberEvidence++;

/* Test that helper will yield to a fiber of equal priority */
ctxId = context_self_get();
ctxId->prio++; /* Lower priority to that of fiberEntry() */
self_thread_id = sys_thread_self_get();
self_thread_id->prio++; /* Lower priority to that of fiberEntry() */
fiber_yield(); /* Yield to fiber of equal priority */

fiberEvidence++;

@@ -500,7 +504,7 @@ static void fiberHelper(int arg1, int arg2)

int fiber_yieldTest(void)
{
nano_context_id_t ctxId;
nano_thread_id_t self_thread_id;

/*
* Start a fiber of higher priority. Note that since the new fiber is

@@ -508,7 +512,7 @@ int fiber_yieldTest(void)
* fiber as it would if done from a task.
*/

ctxId = context_self_get();
self_thread_id = sys_thread_self_get();
fiberEvidence = 0;
fiber_fiber_start(fiberStack2, FIBER_STACKSIZE, fiberHelper,
0, 0, FIBER_PRIORITY - 1, 0);

@@ -543,7 +547,7 @@ int fiber_yieldTest(void)
* not result in switching to the helper.
*/

ctxId->prio--;
self_thread_id->prio--;
fiber_yield();

if (fiberEvidence != 1) {
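fiber_yield() only switches away when another runnable fiber has equal or higher priority, which is why the test mutates its own priority in place (smaller prio values mean higher priority here). Both directions being verified, condensed from the hunks above using only the calls they show:

self_thread_id = sys_thread_self_get();

self_thread_id->prio++; /* drop to the helper's priority ... */
fiber_yield();          /* ... so this yield must switch to it */

self_thread_id->prio--; /* reclaim the higher priority ... */
fiber_yield();          /* ... so this yield must be a no-op */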
@@ -568,13 +572,13 @@ int fiber_yieldTest(void)
*
* This routine is the entry point to the fiber started by the task.
*
* @param taskCtxId context ID of the spawning task
* @param task_thread_id thread ID of the spawning task
* @param arg1 unused
*
* @return N/A
*/

static void fiberEntry(int taskCtxId, int arg1)
static void fiberEntry(int task_thread_id, int arg1)
{
int rv;

@@ -583,7 +587,7 @@ static void fiberEntry(int taskCtxId, int arg1)
fiberEvidence++; /* Prove to the task that the fiber has run */
nano_fiber_sem_take_wait(&wakeFiber);

rv = nanoCtxFiberTest((nano_context_id_t) taskCtxId);
rv = nanoCtxFiberTest((nano_thread_id_t) task_thread_id);
if (rv != TC_PASS) {
return;
}
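Because fiber entry points take two int parameters, the spawning task's thread ID makes the round trip through an int cast. Both ends, condensed from this file (the task-side call appears in main() further down):

/* task side: pass the task's thread ID as the fiber's first argument */
task_fiber_start(fiberStack1, FIBER_STACKSIZE, fiberEntry,
		(int) sys_thread_self_get(), 0, FIBER_PRIORITY, 0);

/* fiber side: cast it back before use */
static void fiberEntry(int task_thread_id, int arg1)
{
	int rv;

	rv = nanoCtxFiberTest((nano_thread_id_t) task_thread_id);
}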
@@ -789,7 +793,7 @@ static int test_timeout(void)
*
* @brief Entry point to timer tests
*
* This is the entry point to the CPU and context tests.
* This is the entry point to the CPU and thread tests.
*
* @return N/A
*/

@@ -798,7 +802,7 @@ void main(void)
{
int rv; /* return value from tests */

TC_START("Test Nanokernel CPU and context routines");
TC_START("Test Nanokernel CPU and thread routines");

TC_PRINT("Initializing nanokernel objects\n");
rv = initNanoObjects();

@@ -843,7 +847,7 @@ void main(void)
TC_PRINT("Spawning a fiber from a task\n");
fiberEvidence = 0;
task_fiber_start(fiberStack1, FIBER_STACKSIZE, fiberEntry,
(int) context_self_get(), 0, FIBER_PRIORITY, 0);
(int) sys_thread_self_get(), 0, FIBER_PRIORITY, 0);

if (fiberEvidence != 1) {
rv = TC_FAIL;
@@ -851,8 +855,11 @@ void main(void)
goto doneTests;
}

/* The fiber ran, now wake it so it can test context_self_get and context_type_get */
TC_PRINT("Fiber to test context_self_get() and context_type_get\n");
/*
* The fiber ran, now wake it so it can test sys_thread_self_get and
* sys_execution_context_type_get.
*/
TC_PRINT("Fiber to test sys_thread_self_get() and sys_execution_context_type_get\n");
nano_task_sem_give(&wakeFiber);

if (fiberDetectedError != 0) {
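The wake-up above is the task half of a rendezvous over a nanokernel semaphore; the fiber half appears in fiberEntry() earlier in this file. Condensed:

/* fiber: prove it ran, then park until the task is ready */
fiberEvidence++;
nano_fiber_sem_take_wait(&wakeFiber);

/* task: check the evidence, then release the fiber */
if (fiberEvidence == 1) {
	nano_task_sem_give(&wakeFiber);
}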
@@ -256,7 +256,7 @@ void fiber1(void)
*
* @brief Test the nano_fiber_fifo_get_wait() interface
*
* This function tests the fifo put and get wait interfaces in the fiber context.
* This function tests the fifo put and get wait interfaces in a fiber.
* It gets data from nanoFifoObj2 queue and puts data to nanoFifoObj queue.
*
* @return N/A

@@ -303,7 +303,7 @@ void testFiberFifoGetW(void)
*
* @brief Test ISR FIFO routines (triggered from fiber)
*
* This function tests the fifo put and get interfaces in the isr context.
* This function tests the fifo put and get interfaces in the ISR context.
* It is invoked from a fiber.
*
* We use nanoFifoObj queue to put and get data.

@@ -340,7 +340,7 @@ void testIsrFifoFromFiber(void)
}

/* Put more item into queue */
TC_PRINT("\nISR FIFO (running in fiber context) Put Order:\n");
TC_PRINT("\nISR FIFO (running in fiber) Put Order:\n");
for (int i=0; i<NUM_FIFO_ELEMENT; i++) {
isrFifoInfo.data = pPutList1[i];
TC_PRINT(" %p,", pPutList1[i]);

@@ -357,7 +357,7 @@ void testIsrFifoFromFiber(void)
*
* @brief Test ISR FIFO routines (triggered from task)
*
* This function tests the fifo put and get interfaces in the isr context.
* This function tests the fifo put and get interfaces in the ISR context.
* It is invoked from a task.
*
* We use nanoFifoObj queue to put and get data.

@@ -522,7 +522,7 @@ void fiber3(void)
*
* @brief Test the nano_task_fifo_get_wait() interface
*
* This is in the task context. It puts data to nanoFifoObj2 queue and gets
* This is in a task. It puts data to nanoFifoObj2 queue and gets
* data from nanoFifoObj queue.
*
* @return N/A
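The wording fixes above leave the nanokernel convention untouched: each FIFO operation exists in a per-context flavor (nano_task_*, nano_fiber_*), chosen statically by the caller rather than detected at run time. A sketch of the fiber-side flow described by the first comment; nano_fiber_fifo_put() is assumed by symmetry with nano_task_fifo_put(), and the queue names come from the comments:

void testFiberFifoGetW(void)
{
	void *item;

	/* block in a fiber until data arrives, then pass it along */
	item = nano_fiber_fifo_get_wait(&nanoFifoObj2);
	nano_fiber_fifo_put(&nanoFifoObj, item);
}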
@@ -138,7 +138,7 @@ void isr_sem_give(void *data)
*
* @brief Give and take the semaphore in a fiber without blocking
*
* This test gives and takes the test semaphore in the context of a fiber
* This test gives and takes the test semaphore in a fiber
* without blocking on the semaphore.
*
* @return TC_PASS on success, TC_FAIL on failure

@@ -331,7 +331,7 @@ errorReturn:
*
* @brief Give and take the semaphore in a task without blocking
*
* This test gives and takes the test semaphore in the context of a task without
* This test gives and takes the test semaphore in a task without
* blocking on the semaphore.
*
* @return TC_PASS on success, TC_FAIL on failure
@@ -240,7 +240,7 @@ void fiber1(void)
*
* testFiberStackPopW
*
* This function tests the stack push and pop wait interfaces in the fiber context.
* This function tests the stack push and pop wait interfaces in a fiber.
* It gets data from nanoStackObj2 queue and puts data to nanoStackObj queue.
*
* @return N/A

@@ -285,7 +285,7 @@ void testFiberStackPopW(void)
*
* testIsrStackFromFiber
*
* This function tests the stack push and pop interfaces in the isr context.
* This function tests the stack push and pop interfaces in the ISR context.
* It is invoked from a fiber.
*
* We use nanoStackObj queue to push and pop data.

@@ -303,7 +303,7 @@ void testIsrStackFromFiber(void)
_trigger_nano_isr_stack_pop();
result = isrStackInfo.data;
if (result != INVALID_DATA) {
TC_PRINT("ISR STACK (running in fiber context) Pop from queue1: %d\n", result);
TC_PRINT("ISR STACK (running in fiber) Pop from queue1: %d\n", result);
if (result != myData[3]) {
retCode = TC_FAIL;
TCERR2;

@@ -322,7 +322,7 @@ void testIsrStackFromFiber(void)
}

/* Put more data into STACK */
TC_PRINT("ISR STACK (running in fiber context) Push to queue1:\n");
TC_PRINT("ISR STACK (running in fiber) Push to queue1:\n");
for (int i=0; i<NUM_STACK_ELEMENT; i++) {
isrStackInfo.data = myIsrData[i];
TC_PRINT(" %d, ", myIsrData[i]);

@@ -341,7 +341,7 @@ void testIsrStackFromFiber(void)
*
* testIsrStackFromTask
*
* This function tests the stack push and pop interfaces in the isr context.
* This function tests the stack push and pop interfaces in the ISR context.
* It is invoked from a task.
*
* We use nanoStackObj queue to push and pop data.

@@ -418,7 +418,7 @@ void fiber2(void)
*
* testTaskStackPopW
*
* This is in the task context. It puts data to nanoStackObj2 queue and gets
* This is in the task. It puts data to nanoStackObj2 queue and gets
* data from nanoStackObj queue.
*
* @return N/A
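As in the FIFO tests, the ISR stack variants are exercised by bouncing through a software-triggered interrupt and an exchange area. Condensed from the hunks above (isrStackInfo's full layout is not shown in this diff):

/* ask the ISR to pop one element into the exchange area */
_trigger_nano_isr_stack_pop();
result = isrStackInfo.data;
if (result != INVALID_DATA) {
	if (result != myData[3]) {
		retCode = TC_FAIL; /* popped value did not match push */
	}
}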