kernel/arch: consolidate tTCS and tNANO definitions

There was a lot of duplication between architectures for the definition
of threads and the "nanokernel" guts. These have been consolidated.

Now, a common file kernel/unified/include/kernel_structs.h holds the
common definitions. Architectures provide two files to complement it:
kernel_arch_data.h and kernel_arch_func.h. The first one contains at
least the struct _thread_arch and struct _kernel_arch data structures,
as well as the struct _callee_saved and struct _caller_saved register
layouts. The second file contains anything that needs what is provided
by the common stuff in kernel_structs.h. Those two files are only meant
to be included in kernel_structs.h in very specific locations.

The thread data structure has been separated into three major parts:
common struct _thread_base and struct k_thread, and arch-specific struct
_thread_arch. The first and third ones are included in the second.

The struct s_NANO data structure has been split into two: common struct
_kernel and arch-specific struct _kernel_arch. The latter is included in
the former.

Offsets files have also changed: nano_offsets.h has been renamed
kernel_offsets.h and is still included by the arch-specific offsets.c.
Also, since the thread and kernel data structures are now made of
sub-structures, offsets have to be added to make up the full offset.
Some of these additions have been consolidated in shorter symbols,
available from kernel/unified/include/offsets_short.h, which includes an
arch-specific offsets_arch_short.h. Most of the code includes
offsets_short.h now instead of offsets.h.

Change-Id: I084645cb7e6db8db69aeaaf162963fe157045d5a
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
This commit is contained in:
Benjamin Walsh 2016-11-08 10:36:50 -05:00 committed by Anas Nashif
commit f6ca7de09c
123 changed files with 2249 additions and 1998 deletions

View file

@ -27,7 +27,7 @@
#include <toolchain.h>
#include <sections.h>
#include <arch/cpu.h>
#include <offsets.h>
#include <offsets_short.h>
#include "vector_table.h"
_ASM_FILE_PROLOGUE
@ -128,7 +128,7 @@ SECTION_FUNC(TEXT,_force_exit_one_nested_irq)
ldrne r2, =_do_software_reboot
ldr ip, =_interrupt_stack
add.w ip, #(__tESF_SIZEOF * 2) /* enough for a stack frame */
add.w ip, #(___esf_t_SIZEOF * 2) /* enough for a stack frame */
ldr r1, =0xfffffffe
and.w r2, r1
str r2, [ip, #(6 * 4)]

View file

@ -22,12 +22,12 @@
#define _ASMLANGUAGE
#include <offsets.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <sections.h>
#include <arch/cpu.h>
#ifdef CONFIG_TICKLESS_IDLE
#include <nano_private.h>
#include <kernel_structs.h>
#endif
_ASM_FILE_PROLOGUE
@ -78,8 +78,8 @@ SECTION_FUNC(TEXT, _CpuIdleInit)
*/
SECTION_FUNC(TEXT, _NanoIdleValGet)
ldr r0, =_nanokernel
ldr r0, [r0, #__tNANO_idle_OFFSET]
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_idle]
bx lr
/**
@ -96,9 +96,9 @@ SECTION_FUNC(TEXT, _NanoIdleValGet)
*/
SECTION_FUNC(TEXT, _NanoIdleValClear)
ldr r0, =_nanokernel
ldr r0, =_kernel
eors.n r1, r1
str r1, [r0, #__tNANO_idle_OFFSET]
str r1, [r0, #_kernel_offset_to_idle]
bx lr
#endif /* CONFIG_SYS_POWER_MANAGEMENT */

View file

@ -26,8 +26,8 @@
#define _ASMLANGUAGE
#include <nano_private.h>
#include <offsets.h>
#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <arch/cpu.h>
@ -35,7 +35,7 @@ _ASM_FILE_PROLOGUE
GTEXT(_ExcExit)
GTEXT(_IntExit)
GDATA(_nanokernel)
GDATA(_kernel)
GTEXT(_is_next_thread_current)
#if CONFIG_GDB_INFO
@ -88,11 +88,11 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
ldr r1, =_nanokernel
ldr r1, =_kernel
ldr r1, [r1, #__tNANO_current_OFFSET]
ldr r2, [r1, #__tTCS_prio_OFFSET]
ldr r3, [r1, #__tTCS_sched_locked_OFFSET]
ldr r1, [r1, #_kernel_offset_to_current]
ldr r2, [r1, #_thread_offset_to_prio]
ldr r3, [r1, #_thread_offset_to_sched_locked]
/* coop thread ? do not schedule */
cmp r2, #0

View file

@ -26,7 +26,7 @@
#include <inttypes.h>
#include <nanokernel.h>
#include <nano_private.h>
#include <kernel_structs.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>

View file

@ -25,7 +25,7 @@
#include <sections.h>
#include <nanokernel.h>
#include <nano_private.h>
#include <kernel_structs.h>
#include <inttypes.h>
#ifdef CONFIG_PRINTK

View file

@ -29,10 +29,10 @@
#define _ASMLANGUAGE
#include <offsets.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <sections.h>
#include <nano_private.h>
#include <kernel_structs.h>
#include <arch/cpu.h>
_ASM_FILE_PROLOGUE
@ -61,7 +61,7 @@ _ASM_FILE_PROLOGUE
SECTION_FUNC(TEXT, _GdbStubExcEntry)
ldr r1, =_nanokernel
ldr r1, =_kernel
ldr r2, [r1, #__tNANO_flags_OFFSET]
/* already in an exception, do not update the registers */
@ -123,7 +123,7 @@ SECTION_FUNC(TEXT, _GdbStubExcExit)
bxeq lr
#endif
ldr r1, =_nanokernel
ldr r1, =_kernel
ldr r2, [r1, #__tNANO_flags_OFFSET]
ldr r3, =EXC_ACTIVE

View file

@ -24,11 +24,11 @@
#define _ASMLANGUAGE
#include <offsets.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <sections.h>
#include <sw_isr_table.h>
#include <nano_private.h>
#include <kernel_structs.h>
#include <arch/cpu.h>
_ASM_FILE_PROLOGUE
@ -77,21 +77,24 @@ SECTION_FUNC(TEXT, _isr_wrapper)
cpsid i /* PRIMASK = 1 */
/* is this a wakeup from idle ? */
ldr r2, =_nanokernel
ldr r0, [r2, #__tNANO_idle_OFFSET] /* requested idle duration, in ticks */
ldr r2, =_kernel
/* requested idle duration, in ticks */
ldr r0, [r2, #_kernel_offset_to_idle]
cmp r0, #0
#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
beq _idle_state_cleared
movs.n r1, #0
str r1, [r2, #__tNANO_idle_OFFSET] /* clear kernel idle state */
/* clear kernel idle state */
str r1, [r2, #_kernel_offset_to_idle]
blx _sys_power_save_idle_exit
_idle_state_cleared:
#else
ittt ne
movne r1, #0
strne r1, [r2, #__tNANO_idle_OFFSET] /* clear kernel idle state */
/* clear kernel idle state */
strne r1, [r2, #_kernel_offset_to_idle]
blxne _sys_power_save_idle_exit
#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */

View file

@ -34,64 +34,52 @@
*/
#include <gen_offset.h>
#include <nano_private.h>
#include <nano_offsets.h>
#include <kernel_structs.h>
#include <kernel_offsets.h>
/* ARM-specific tNANO structure member offsets */
#ifdef CONFIG_SYS_POWER_MANAGEMENT
GEN_OFFSET_SYM(tNANO, idle);
#endif
/* ARM-specific struct tcs structure member offsets */
GEN_OFFSET_SYM(tTCS, basepri);
#ifdef CONFIG_THREAD_CUSTOM_DATA
GEN_OFFSET_SYM(tTCS, custom_data);
#endif
GEN_OFFSET_SYM(_thread_arch_t, basepri);
#ifdef CONFIG_FLOAT
GEN_OFFSET_SYM(tTCS, preemp_float_regs);
GEN_OFFSET_SYM(_thread_arch_t, preempt_float);
#endif
/* ARM-specific ESF structure member offsets */
GEN_OFFSET_SYM(tESF, a1);
GEN_OFFSET_SYM(tESF, a2);
GEN_OFFSET_SYM(tESF, a3);
GEN_OFFSET_SYM(tESF, a4);
GEN_OFFSET_SYM(tESF, ip);
GEN_OFFSET_SYM(tESF, lr);
GEN_OFFSET_SYM(tESF, pc);
GEN_OFFSET_SYM(tESF, xpsr);
GEN_OFFSET_SYM(_esf_t, a1);
GEN_OFFSET_SYM(_esf_t, a2);
GEN_OFFSET_SYM(_esf_t, a3);
GEN_OFFSET_SYM(_esf_t, a4);
GEN_OFFSET_SYM(_esf_t, ip);
GEN_OFFSET_SYM(_esf_t, lr);
GEN_OFFSET_SYM(_esf_t, pc);
GEN_OFFSET_SYM(_esf_t, xpsr);
#ifdef CONFIG_FLOAT
GEN_OFFSET_SYM(tESF, s);
GEN_OFFSET_SYM(tESF, fpscr);
GEN_OFFSET_SYM(_esf_t, s);
GEN_OFFSET_SYM(_esf_t, fpscr);
#endif
/* size of the entire tESF structure */
GEN_ABSOLUTE_SYM(___esf_t_SIZEOF, sizeof(_esf_t));
GEN_ABSOLUTE_SYM(__tESF_SIZEOF, sizeof(tESF));
/* ARM-specific preempt registers structure member offsets */
GEN_OFFSET_SYM(tPreempt, v1);
GEN_OFFSET_SYM(tPreempt, v2);
GEN_OFFSET_SYM(tPreempt, v3);
GEN_OFFSET_SYM(tPreempt, v4);
GEN_OFFSET_SYM(tPreempt, v5);
GEN_OFFSET_SYM(tPreempt, v6);
GEN_OFFSET_SYM(tPreempt, v7);
GEN_OFFSET_SYM(tPreempt, v8);
GEN_OFFSET_SYM(tPreempt, psp);
GEN_OFFSET_SYM(_callee_saved_t, v1);
GEN_OFFSET_SYM(_callee_saved_t, v2);
GEN_OFFSET_SYM(_callee_saved_t, v3);
GEN_OFFSET_SYM(_callee_saved_t, v4);
GEN_OFFSET_SYM(_callee_saved_t, v5);
GEN_OFFSET_SYM(_callee_saved_t, v6);
GEN_OFFSET_SYM(_callee_saved_t, v7);
GEN_OFFSET_SYM(_callee_saved_t, v8);
GEN_OFFSET_SYM(_callee_saved_t, psp);
/* size of the entire preempt registers structure */
GEN_ABSOLUTE_SYM(__tPreempt_SIZEOF, sizeof(tPreempt));
GEN_ABSOLUTE_SYM(___callee_saved_t_SIZEOF, sizeof(struct _callee_saved));
/* size of the struct tcs structure sans save area for floating point regs */
GEN_ABSOLUTE_SYM(__tTCS_NOFLOAT_SIZEOF, sizeof(tTCS));
#ifdef CONFIG_FLOAT
GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread) -
sizeof(struct _preempt_float));
#else
GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread));
#endif
GEN_ABS_SYM_END

View file

@ -24,8 +24,8 @@
#define _ASMLANGUAGE
#include <nano_private.h>
#include <offsets.h>
#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <arch/cpu.h>
@ -39,7 +39,7 @@ GTEXT(__pendsv)
GTEXT(_get_next_ready_thread)
GDATA(_k_neg_eagain)
GDATA(_nanokernel)
GDATA(_kernel)
/**
*
@ -52,10 +52,6 @@ GDATA(_nanokernel)
* When PendSV is pended, the decision that a context switch must happen has
* already been taken. In other words, when __pendsv() runs, we *know* we have
* to swap *something*.
*
* The scheduling algorithm is simple: schedule the head of the runnable fibers
* list (_nanokernel.fiber). If there are no runnable fibers, then schedule the
* task (_nanokernel.task). The _nanokernel.task field will never be NULL.
*/
SECTION_FUNC(TEXT, __pendsv)
@ -70,12 +66,12 @@ SECTION_FUNC(TEXT, __pendsv)
mov lr, r0
#endif
/* load _Nanokernel into r1 and current tTCS into r2 */
ldr r1, =_nanokernel
ldr r2, [r1, #__tNANO_current_OFFSET]
/* load _kernel into r1 and current k_thread into r2 */
ldr r1, =_kernel
ldr r2, [r1, #_kernel_offset_to_current]
/* addr of callee-saved regs in TCS in r0 */
ldr r0, =__tTCS_preempReg_OFFSET
ldr r0, =_thread_offset_to_callee_saved
add r0, r2
/* save callee-saved + psp in TCS */
@ -95,7 +91,7 @@ SECTION_FUNC(TEXT, __pendsv)
#else
stmia r0, {v1-v8, ip}
#ifdef CONFIG_FP_SHARING
add r0, r2, #__tTCS_preemp_float_regs_OFFSET
add r0, r2, #_thread_offset_to_preempt_float
vstmia r0, {s16-s31}
#endif /* CONFIG_FP_SHARING */
#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
@ -128,7 +124,7 @@ SECTION_FUNC(TEXT, __pendsv)
movs.n r2, r0
/* r2 contains the new thread */
str r2, [r1, #__tNANO_current_OFFSET]
str r2, [r1, #_kernel_offset_to_current]
/*
* Clear PendSV so that if another interrupt comes in and
@ -143,9 +139,9 @@ SECTION_FUNC(TEXT, __pendsv)
str v3, [v4, #0]
/* Restore previous interrupt disable state (irq_lock key) */
ldr r0, [r2, #__tTCS_basepri_OFFSET]
ldr r0, [r2, #_thread_offset_to_basepri]
movs.n r3, #0
str r3, [r2, #__tTCS_basepri_OFFSET]
str r3, [r2, #_thread_offset_to_basepri]
#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
/* BASEPRI not available, previous interrupt disable state
@ -159,7 +155,7 @@ SECTION_FUNC(TEXT, __pendsv)
cpsie i
_thread_irq_disabled:
ldr r4, =__tTCS_preempReg_OFFSET
ldr r4, =_thread_offset_to_callee_saved
adds r0, r2, r4
/* restore r4-r12 for new thread */
@ -180,12 +176,12 @@ _thread_irq_disabled:
msr BASEPRI, r0
#ifdef CONFIG_FP_SHARING
add r0, r2, #__tTCS_preemp_float_regs_OFFSET
add r0, r2, #_thread_offset_to_preempt_float
vldmia r0, {s16-s31}
#endif
/* load callee-saved + psp from TCS */
add r0, r2, #__tTCS_preempReg_OFFSET
add r0, r2, #_thread_offset_to_callee_saved
ldmia r0, {v1-v8, ip}
#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
@ -246,7 +242,7 @@ _context_switch:
mrs r2, PSP /* thread mode, stack frame is on PSP */
ldr r3, =_k_neg_eagain
ldr r3, [r3, #0]
str r3, [r2, #__tESF_a1_OFFSET]
str r3, [r2, #___esf_t_a1_OFFSET]
/*
* Unlock interrupts:
@ -305,9 +301,9 @@ _context_switch:
SECTION_FUNC(TEXT, _Swap)
ldr r1, =_nanokernel
ldr r2, [r1, #__tNANO_current_OFFSET]
str r0, [r2, #__tTCS_basepri_OFFSET]
ldr r1, =_kernel
ldr r2, [r1, #_kernel_offset_to_current]
str r0, [r2, #_thread_offset_to_basepri]
#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
/* No priority-based interrupt masking on M0/M0+,

View file

@ -25,7 +25,7 @@
#include <kernel.h>
#include <toolchain.h>
#include <sections.h>
#include <nano_private.h>
#include <kernel_structs.h>
#include <misc/printk.h>
/**

View file

@ -25,14 +25,12 @@
#include <nanokernel.h>
#include <arch/cpu.h>
#include <toolchain.h>
#include <nano_private.h>
#include <kernel_structs.h>
#include <wait_q.h>
#ifdef CONFIG_INIT_STACKS
#include <string.h>
#endif /* CONFIG_INIT_STACKS */
tNANO _nanokernel = {0};
#if defined(CONFIG_THREAD_MONITOR)
/*
* Add a thread to the kernel's list of active threads.
@ -42,8 +40,8 @@ static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
unsigned int key;
key = irq_lock();
tcs->next_thread = _nanokernel.threads;
_nanokernel.threads = tcs;
tcs->next_thread = _kernel.threads;
_kernel.threads = tcs;
irq_unlock(key);
}
#else
@ -115,13 +113,13 @@ void _new_thread(char *pStackMem, unsigned stackSize,
0x01000000UL; /* clear all, thumb bit is 1, even if RO */
/* k_q_node initialized upon first insertion in a list */
tcs->flags = options | K_PRESTART;
tcs->sched_locked = 0;
tcs->base.flags = options | K_PRESTART;
tcs->base.sched_locked = 0;
/* static threads overwrite it afterwards with real value */
tcs->init_data = NULL;
tcs->fn_abort = NULL;
tcs->prio = priority;
tcs->base.prio = priority;
#ifdef CONFIG_THREAD_CUSTOM_DATA
/* Initialize custom data field (value is opaque to kernel) */
@ -139,10 +137,10 @@ void _new_thread(char *pStackMem, unsigned stackSize,
ARG_UNUSED(uk_task_ptr);
tcs->preempReg.psp = (uint32_t)pInitCtx;
tcs->basepri = 0;
tcs->callee_saved.psp = (uint32_t)pInitCtx;
tcs->arch.basepri = 0;
_nano_timeout_tcs_init(tcs);
_nano_timeout_thread_init(tcs);
/* initial values in all other registers/TCS entries are irrelevant */

View file

@ -27,7 +27,7 @@
*/
#include <kernel.h>
#include <nano_private.h>
#include <kernel_structs.h>
#include <toolchain.h>
#include <sections.h>
#include <ksched.h>