clarify use of term 'context'

The term 'context' is vague and overloaded. Where it means 'an execution
context', it is now referred to as such, in both comments and some API
names. Where the execution context can only be a fiber or a task (i.e.
not an ISR), it is referred to as a 'thread', again in comments and
everywhere in the code.

APIs that had their names changed:

  - nano_context_id_t is now nano_thread_id_t
  - context_self_get() is now sys_thread_self_get()
  - context_type_get() is now sys_execution_context_type_get()
  - context_custom_data_set/get() are now
    sys_thread_custom_data_set/get()

The 'context' prefix namespace does not have to be reserved by the
kernel anymore.

The Context Control Structure (CCS) is now the Thread Control Structure
(TCS):

  - struct ccs is now struct tcs
  - tCCS is now tTCS
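
A hedged sketch of code touching the renamed structure (the walker
function itself is hypothetical; the 'threads' list head and the
'next_thread' link only exist under CONFIG_THREAD_MONITOR, as shown in
the diff below, and a real walker would also lock interrupts around the
traversal):

    #if defined(CONFIG_THREAD_MONITOR)
    /* visit every thread (task or fiber) known to the nanokernel */
    static void for_each_thread(void (*visit)(struct tcs *thread))
    {
        struct tcs *thread;                   /* was: struct ccs / tCCS */

        for (thread = _nanokernel.threads;    /* was _nanokernel.contexts */
             thread != NULL;
             thread = thread->next_thread) {  /* was ->next_context */
            visit(thread);
        }
    }
    #endif /* CONFIG_THREAD_MONITOR */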

Change-Id: I7526a76c5b01e7c86333078e2d2e77c9feef5364
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
Authored by Benjamin Walsh on 2015-08-20 11:04:01 -04:00; committed by Anas Nashif.
Commit 0dcad8331b: 96 changed files with 1082 additions and 1086 deletions.

@@ -1,4 +1,4 @@
/* context.c - new context creation for ARM Cortex-M */
/* thread.c - new thread creation for ARM Cortex-M */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
@@ -47,48 +47,47 @@ architecture.
tNANO _nanokernel = {0};
#if defined(CONFIG_CONTEXT_MONITOR)
#define CONTEXT_MONITOR_INIT(pCcs) _context_monitor_init(pCcs)
#if defined(CONFIG_THREAD_MONITOR)
#define THREAD_MONITOR_INIT(tcs) _thread_monitor_init(tcs)
#else
#define CONTEXT_MONITOR_INIT(pCcs) \
#define THREAD_MONITOR_INIT(tcs) \
do {/* do nothing */ \
} while ((0))
#endif
#if defined(CONFIG_CONTEXT_MONITOR)
#if defined(CONFIG_THREAD_MONITOR)
/**
*
* @brief Initialize context monitoring support
* @brief Initialize thread monitoring support
*
* Currently only inserts the new context in the list of active contexts.
* Currently only inserts the new thread in the list of active threads.
*
* @return N/A
*/
static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */
static ALWAYS_INLINE void _thread_monitor_init(struct tcs *tcs /* thread */
)
{
unsigned int key;
/*
* Add the newly initialized context to head of the list of contexts.
* This singly linked list of contexts maintains ALL the contexts in the
* system: both tasks and fibers regardless of whether they are
* runnable.
* Add the newly initialized thread to head of the list of threads. This
* singly linked list of threads maintains ALL the threads in the system:
* both tasks and fibers regardless of whether they are runnable.
*/
key = irq_lock();
pCcs->next_context = _nanokernel.contexts;
_nanokernel.contexts = pCcs;
tcs->next_thread = _nanokernel.threads;
_nanokernel.threads = tcs;
irq_unlock(key);
}
#endif /* CONFIG_CONTEXT_MONITOR */
#endif /* CONFIG_THREAD_MONITOR */
/**
*
* @brief Initialize a new context (thread) from its stack space
* @brief Initialize a new thread from its stack space
*
* The control structure (CCS) is put at the lower address of the stack. An
* The control structure (TCS) is put at the lower address of the stack. An
* initial context, to be "restored" by __pendsv(), is put at the other end of
* the stack, and thus reusable by the stack when not needed anymore.
*
@@ -105,31 +104,31 @@ static ALWAYS_INLINE void _context_monitor_init(struct ccs *pCcs /* context */
* @return N/A
*/
void _NewContext(
void _new_thread(
char *pStackMem, /* aligned stack memory */
unsigned stackSize, /* stack size in bytes */
_ContextEntry pEntry, /* entry point */
_thread_entry_t pEntry, /* entry point */
void *parameter1, /* entry point first param */
void *parameter2, /* entry point second param */
void *parameter3, /* entry point third param */
int priority, /* context priority (-1 for tasks) */
int priority, /* thread priority (-1 for tasks) */
unsigned options /* misc options (future) */
)
{
char *stackEnd = pStackMem + stackSize;
struct __esf *pInitCtx;
tCCS *pCcs = (tCCS *) pStackMem;
struct tcs *tcs = (struct tcs *) pStackMem;
#ifdef CONFIG_INIT_STACKS
memset(pStackMem, 0xaa, stackSize);
#endif
/* carve the context entry struct from the "base" of the stack */
/* carve the thread entry struct from the "base" of the stack */
pInitCtx = (struct __esf *)(STACK_ROUND_DOWN(stackEnd) -
sizeof(struct __esf));
pInitCtx->pc = ((uint32_t)_context_entry) & 0xfffffffe;
pInitCtx->pc = ((uint32_t)_thread_entry) & 0xfffffffe;
pInitCtx->a1 = (uint32_t)pEntry;
pInitCtx->a2 = (uint32_t)parameter1;
pInitCtx->a3 = (uint32_t)parameter2;
@@ -137,22 +136,22 @@ void _NewContext(
pInitCtx->xpsr =
0x01000000UL; /* clear all, thumb bit is 1, even if RO */
pCcs->link = NULL;
pCcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
pCcs->prio = priority;
tcs->link = NULL;
tcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
tcs->prio = priority;
#ifdef CONFIG_CONTEXT_CUSTOM_DATA
#ifdef CONFIG_THREAD_CUSTOM_DATA
/* Initialize custom data field (value is opaque to kernel) */
pCcs->custom_data = NULL;
tcs->custom_data = NULL;
#endif
pCcs->preempReg.psp = (uint32_t)pInitCtx;
pCcs->basepri = 0;
tcs->preempReg.psp = (uint32_t)pInitCtx;
tcs->basepri = 0;
_nano_timeout_ccs_init(pCcs);
_nano_timeout_tcs_init(tcs);
/* initial values in all other registers/CCS entries are irrelevant */
/* initial values in all other registers/TCS entries are irrelevant */
CONTEXT_MONITOR_INIT(pCcs);
THREAD_MONITOR_INIT(tcs);
}

@@ -146,9 +146,9 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
*
* @brief Atomically re-enable interrupts and enter low power mode
*
* This function is utilized by the nanokernel object "wait" APIs for task
* contexts, e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(), nano_task_stack_pop_wait(),
* and nano_task_fifo_get_wait().
* This function is utilized by the nanokernel object "wait" APIs for tasks,
* e.g. nano_task_lifo_get_wait(), nano_task_sem_take_wait(),
* nano_task_stack_pop_wait(), and nano_task_fifo_get_wait().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:

@@ -104,9 +104,9 @@ FUNC_NORETURN void _NanoFatalErrorHandler(
PR_EXC("**** Unknown Fatal Error %d! ****\n", reason);
break;
}
PR_EXC("Current context ID = 0x%x\n"
PR_EXC("Current thread ID = 0x%x\n"
"Faulting instruction address = 0x%x\n",
context_self_get(),
sys_thread_self_get(),
pEsf->pc);
/*

@@ -84,7 +84,7 @@ void _FaultDump(const NANO_ESF *esf, int fault)
PR_EXC("Fault! EXC #%d, Thread: %x, instr @ %x\n",
fault,
context_self_get(),
sys_thread_self_get(),
esf->pc);
if (3 == fault) { /* hard fault */
@@ -120,7 +120,7 @@ void _FaultDump(const NANO_ESF *esf, int fault)
#if (CONFIG_FAULT_DUMP == 2)
/**
*
* @brief Dump context information
* @brief Dump thread information
*
* See _FaultDump() for example.
*
@@ -129,11 +129,11 @@ void _FaultDump(const NANO_ESF *esf, int fault)
* \NOMANUAL
*/
static void _FaultContextShow(const NANO_ESF *esf)
static void _FaultThreadShow(const NANO_ESF *esf)
{
PR_EXC(" Executing context ID (thread): 0x%x\n"
PR_EXC(" Executing thread ID (thread): 0x%x\n"
" Faulting instruction address: 0x%x\n",
context_self_get(),
sys_thread_self_get(),
esf->pc);
}
@@ -153,7 +153,7 @@ static void _MpuFault(const NANO_ESF *esf,
{
PR_EXC("***** MPU FAULT *****\n");
_FaultContextShow(esf);
_FaultThreadShow(esf);
if (_ScbMemFaultIsStacking()) {
PR_EXC(" Stacking error\n");
@@ -188,7 +188,7 @@ static void _BusFault(const NANO_ESF *esf,
{
PR_EXC("***** BUS FAULT *****\n");
_FaultContextShow(esf);
_FaultThreadShow(esf);
if (_ScbBusFaultIsStacking()) {
PR_EXC(" Stacking error\n");
@@ -228,7 +228,7 @@ static void _UsageFault(const NANO_ESF *esf)
{
PR_EXC("***** USAGE FAULT *****\n");
_FaultContextShow(esf);
_FaultThreadShow(esf);
/* bits are sticky: they stack and must be reset */
if (_ScbUsageFaultIsDivByZero()) {
@@ -325,7 +325,7 @@ static void _ReservedException(const NANO_ESF *esf,
*
* eg. (precise bus error escalated to hard fault):
*
* Executing context ID (thread): 0x200000dc
* Executing thread ID (thread): 0x200000dc
* Faulting instruction address: 0x000011d3
* ***** HARD FAULT *****
* Fault escalation (see below)

@@ -66,7 +66,7 @@ the PendSV exception.
void fiber_abort(void)
{
_context_exit(_nanokernel.current);
_thread_exit(_nanokernel.current);
if (_ScbIsInThreadMode()) {
_nano_fiber_swap();
} else {

@@ -63,7 +63,7 @@ _ASM_FILE_PROLOGUE
* state.
*
* Also, record the fact that the thread is currently interrupted so that VQEMU
* looks into the CCS and not the CPU registers to obtain the current thread's
* looks into the TCS and not the CPU registers to obtain the current thread's
* register values.
*
* NOTE:
@@ -86,10 +86,10 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
orrs r2, #EXC_ACTIVE
str r2, [r1, #__tNANO_flags_OFFSET]
ldr r1, [r1, #__tNANO_current_OFFSET]
str r2, [r1, #__tCCS_flags_OFFSET]
str r2, [r1, #__tTCS_flags_OFFSET]
/* save callee-saved + psp in CCS */
adds r1, #__tCCS_preempReg_OFFSET
/* save callee-saved + psp in TCS */
adds r1, #__tTCS_preempReg_OFFSET
mrs ip, PSP
stmia r1, {v1-v8, ip}
@@ -100,7 +100,7 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
* @brief Exception exit extra clean up when GDB_INFO is enabled
*
* Record the fact that the thread is not interrupted anymore so that VQEMU
* looks at the CPU registers and not into the CCS to obtain the current
* looks at the CPU registers and not into the TCS to obtain the current
* thread's register values. Only do this if this is not a nested exception.
*
* NOTE:
@@ -125,7 +125,7 @@ SECTION_FUNC(TEXT, _GdbStubExcExit)
bic r2, #EXC_ACTIVE
str r2, [r1, #__tNANO_flags_OFFSET]
ldr r1, [r1, #__tNANO_current_OFFSET]
str r2, [r1, #__tCCS_flags_OFFSET]
str r2, [r1, #__tTCS_flags_OFFSET]
bx lr

@@ -59,11 +59,11 @@ GEN_OFFSET_SYM(tNANO, flags);
GEN_OFFSET_SYM(tNANO, idle);
#endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
/* ARM-specific tCCS structure member offsets */
/* ARM-specific struct tcs structure member offsets */
GEN_OFFSET_SYM(tCCS, basepri);
#ifdef CONFIG_CONTEXT_CUSTOM_DATA
GEN_OFFSET_SYM(tCCS, custom_data);
GEN_OFFSET_SYM(tTCS, basepri);
#ifdef CONFIG_THREAD_CUSTOM_DATA
GEN_OFFSET_SYM(tTCS, custom_data);
#endif
/* ARM-specific ESF structure member offsets */
@@ -97,8 +97,8 @@ GEN_OFFSET_SYM(tPreempt, psp);
GEN_ABSOLUTE_SYM(__tPreempt_SIZEOF, sizeof(tPreempt));
/* size of the tCCS structure sans save area for floating point regs */
/* size of the struct tcs structure sans save area for floating point regs */
GEN_ABSOLUTE_SYM(__tCCS_NOFLOAT_SIZEOF, sizeof(tCCS));
GEN_ABSOLUTE_SYM(__tTCS_NOFLOAT_SIZEOF, sizeof(tTCS));
GEN_ABS_SYM_END

@@ -55,18 +55,17 @@ GDATA(_nanokernel)
*
* @brief PendSV exception handler, handling context switches
*
* The PendSV exception is the only context in the system that can perform
* context switching. When an execution context finds out it has to switch
* contexts, it pends the PendSV exception.
* The PendSV exception is the only execution context in the system that can
* perform context switching. When an execution context finds out it has to
* switch contexts, it pends the PendSV exception.
*
* When PendSV is pended, the decision that a context switch must happen has
* already been taken. In other words, when __pendsv() runs, we *know* we have
* to swap *something*.
*
* The scheduling algorithm is simple: schedule the head of the runnable FIBER
* context list, which is represented by _nanokernel.fiber. If there are no
* runnable FIBER contexts, then schedule the TASK context represented by
* _nanokernel.task. The _nanokernel.task field will never be NULL.
* The scheduling algorithm is simple: schedule the head of the runnable fibers
* list (_nanokernel.fiber). If there are no runnable fibers, then schedule the
* task (_nanokernel.task). The _nanokernel.task field will never be NULL.
*/
SECTION_FUNC(TEXT, __pendsv)
@@ -80,14 +79,14 @@ SECTION_FUNC(TEXT, __pendsv)
pop {lr}
#endif
/* load _Nanokernel into r1 and current tCCS into r2 */
/* load _Nanokernel into r1 and current tTCS into r2 */
ldr r1, =_nanokernel
ldr r2, [r1, #__tNANO_current_OFFSET]
/* addr of callee-saved regs in CCS in r0 */
add r0, r2, #__tCCS_preempReg_OFFSET
/* addr of callee-saved regs in TCS in r0 */
add r0, r2, #__tTCS_preempReg_OFFSET
/* save callee-saved + psp in CCS */
/* save callee-saved + psp in TCS */
mrs ip, PSP
stmia r0, {v1-v8, ip}
@@ -105,7 +104,7 @@ SECTION_FUNC(TEXT, __pendsv)
movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
msr BASEPRI, r0
/* find out incoming context (fiber or task) */
/* find out incoming thread (fiber or task) */
/* is there a fiber ready ? */
ldr r2, [r1, #__tNANO_fiber_OFFSET]
@@ -116,12 +115,12 @@ SECTION_FUNC(TEXT, __pendsv)
* else, the task is the thread we're switching in
*/
itte ne
ldrne.w r0, [r2, #__tCCS_link_OFFSET] /* then */
ldrne.w r0, [r2, #__tTCS_link_OFFSET] /* then */
strne.w r0, [r1, #__tNANO_fiber_OFFSET] /* then */
ldreq.w r2, [r1, #__tNANO_task_OFFSET] /* else */
/* r2 contains the new thread */
ldr r0, [r2, #__tCCS_flags_OFFSET]
ldr r0, [r2, #__tTCS_flags_OFFSET]
str r0, [r1, #__tNANO_flags_OFFSET]
str r2, [r1, #__tNANO_current_OFFSET]
@@ -138,13 +137,13 @@ SECTION_FUNC(TEXT, __pendsv)
str r3, [ip, #0]
/* restore BASEPRI for the incoming thread */
ldr r0, [r2, #__tCCS_basepri_OFFSET]
ldr r0, [r2, #__tTCS_basepri_OFFSET]
mov ip, #0
str ip, [r2, #__tCCS_basepri_OFFSET]
str ip, [r2, #__tTCS_basepri_OFFSET]
msr BASEPRI, r0
/* load callee-saved + psp from CCS */
add r0, r2, #__tCCS_preempReg_OFFSET
/* load callee-saved + psp from TCS */
add r0, r2, #__tTCS_preempReg_OFFSET
ldmia r0, {v1-v8, ip}
msr PSP, ip
@@ -205,9 +204,9 @@ SECTION_FUNC(TEXT, __svc)
* __pendsv all come from handling an interrupt, which means we know the
* interrupts were not locked: in that case the BASEPRI value is 0.
*
* Given that _Swap() is called to effect a cooperative context context switch,
* only the caller-saved integer registers need to be saved in the tCCS of the
* outgoing context. This is all performed by the hardware, which stores it in
* Given that _Swap() is called to effect a cooperative context switch,
* only the caller-saved integer registers need to be saved in the TCS of the
* outgoing thread. This is all performed by the hardware, which stores it in
* its exception stack frame, created when handling the svc exception.
*
* @return may contain a return value setup by a call to fiberRtnValueSet()
@@ -222,7 +221,7 @@ SECTION_FUNC(TEXT, _Swap)
ldr r1, =_nanokernel
ldr r2, [r1, #__tNANO_current_OFFSET]
str r0, [r2, #__tCCS_basepri_OFFSET]
str r0, [r2, #__tTCS_basepri_OFFSET]
svc #0

@@ -67,7 +67,7 @@ static inline void nonEssentialTaskAbort(void)
* This routine implements the corrective action to be taken when the system
* detects a fatal error.
*
* This sample implementation attempts to abort the current context and allow
* This sample implementation attempts to abort the current thread and allow
* the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities.
*
@@ -85,12 +85,12 @@ void _SysFatalErrorHandler(
const NANO_ESF * pEsf /* pointer to exception stack frame */
)
{
nano_context_type_t curCtx = context_type_get();
nano_context_type_t curCtx = sys_execution_context_type_get();
ARG_UNUSED(reason);
ARG_UNUSED(pEsf);
if ((curCtx == NANO_CTX_ISR) || _context_essential_check(NULL)) {
if ((curCtx == NANO_CTX_ISR) || _is_thread_essential(NULL)) {
PRINTK("Fatal fault in %s ! Spinning...\n",
NANO_CTX_ISR == curCtx
? "ISR"