diff --git a/arch/arc/core/cpu_idle.S b/arch/arc/core/cpu_idle.S index eb564526097..0efb3f23732 100644 --- a/arch/arc/core/cpu_idle.S +++ b/arch/arc/core/cpu_idle.S @@ -23,8 +23,8 @@ #define _ASMLANGUAGE -#include -#include +#include +#include #include #include #include diff --git a/arch/arc/core/fast_irq.S b/arch/arc/core/fast_irq.S index 816fd0b7326..b740e48e43b 100644 --- a/arch/arc/core/fast_irq.S +++ b/arch/arc/core/fast_irq.S @@ -25,8 +25,8 @@ #define _ASMLANGUAGE -#include -#include +#include +#include #include #include #include "swap_macros.h" @@ -125,8 +125,8 @@ SECTION_FUNC(TEXT, _firq_exit) #endif #endif - mov_s r1, _nanokernel - ld_s r2, [r1, __tNANO_current_OFFSET] + mov_s r1, _kernel + ld_s r2, [r1, _kernel_offset_to_current] #if CONFIG_NUM_IRQ_PRIO_LEVELS > 1 /* check if we're a nested interrupt: if so, let the interrupted @@ -147,11 +147,11 @@ SECTION_FUNC(TEXT, _firq_exit) .balign 4 _firq_check_for_swap: /* coop thread ? do not schedule */ - ld_s r0, [r2, __tTCS_prio_OFFSET] + ld_s r0, [r2, _thread_offset_to_prio] brlt r0, 0, _firq_no_reschedule /* scheduler locked ? do not schedule */ - ld_s r0, [r2, __tTCS_sched_locked_OFFSET] + ld_s r0, [r2, _thread_offset_to_sched_locked] brgt r0, 0, _firq_no_reschedule /* check if the current thread needs to be rescheduled */ @@ -233,17 +233,17 @@ _firq_reschedule: */ lr r0, [_ARC_V2_STATUS32_P0] - st_s r0, [sp, __tISF_status32_OFFSET] + st_s r0, [sp, ___isf_t_status32_OFFSET] - st ilink, [sp, __tISF_pc_OFFSET] /* ilink into pc */ + st ilink, [sp, ___isf_t_pc_OFFSET] /* ilink into pc */ #endif - mov_s r1, _nanokernel - ld_s r2, [r1, __tNANO_current_OFFSET] + mov_s r1, _kernel + ld_s r2, [r1, _kernel_offset_to_current] _save_callee_saved_regs - st _CAUSE_FIRQ, [r2, __tTCS_relinquish_cause_OFFSET] + st _CAUSE_FIRQ, [r2, _thread_offset_to_relinquish_cause] /* * Save needed registers to callee saved ones. It is faster than @@ -257,13 +257,13 @@ _firq_reschedule: mov_s blink, r13 mov_s r1, r14 mov_s r2, r0 - st_s r2, [r1, __tNANO_current_OFFSET] + st_s r2, [r1, _kernel_offset_to_current] #ifdef CONFIG_ARC_STACK_CHECKING /* Use stack top and down registers from restored context */ - add r3, r2, __tTCS_NOFLOAT_SIZEOF + add r3, r2, _K_THREAD_NO_FLOAT_SIZEOF sr r3, [_ARC_V2_KSTACK_TOP] - ld_s r3, [r2, __tTCS_stack_top_OFFSET] + ld_s r3, [r2, _thread_offset_to_stack_top] sr r3, [_ARC_V2_KSTACK_BASE] #endif /* @@ -272,7 +272,7 @@ _firq_reschedule: */ _load_callee_saved_regs - ld_s r3, [r2, __tTCS_relinquish_cause_OFFSET] + ld_s r3, [r2, _thread_offset_to_relinquish_cause] breq r3, _CAUSE_RIRQ, _firq_return_from_rirq nop @@ -284,8 +284,8 @@ _firq_reschedule: .balign 4 _firq_return_from_coop: - ld_s r3, [r2, __tTCS_intlock_key_OFFSET] - st 0, [r2, __tTCS_intlock_key_OFFSET] + ld_s r3, [r2, _thread_offset_to_intlock_key] + st 0, [r2, _thread_offset_to_intlock_key] /* pc into ilink */ pop_s r0 @@ -303,7 +303,7 @@ _firq_return_from_coop: or.nz r0, r0, _ARC_V2_STATUS32_IE sr r0, [_ARC_V2_STATUS32_P0] - ld_s r0, [r2, __tTCS_return_value_OFFSET] + ld_s r0, [r2, _thread_offset_to_return_value] rtie .balign 4 diff --git a/arch/arc/core/fatal.c b/arch/arc/core/fatal.c index f83da2f43db..9f08f859942 100644 --- a/arch/arc/core/fatal.c +++ b/arch/arc/core/fatal.c @@ -22,8 +22,8 @@ * ARCv2 CPUs. 
*/ -#include -#include +#include +#include #include #include diff --git a/arch/arc/core/fault.c b/arch/arc/core/fault.c index cd374976828..d42092c25ac 100644 --- a/arch/arc/core/fault.c +++ b/arch/arc/core/fault.c @@ -26,7 +26,7 @@ #include #include -#include +#include #ifdef CONFIG_PRINTK #include diff --git a/arch/arc/core/fault_s.S b/arch/arc/core/fault_s.S index 0a9becf6237..1dddca13b5e 100644 --- a/arch/arc/core/fault_s.S +++ b/arch/arc/core/fault_s.S @@ -106,9 +106,9 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_maligned) _create_irq_stack_frame lr r0,[_ARC_V2_ERSTATUS] - st_s r0, [sp, __tISF_status32_OFFSET] + st_s r0, [sp, ___isf_t_status32_OFFSET] lr r0,[_ARC_V2_ERET] - st_s r0, [sp, __tISF_pc_OFFSET] /* eret into pc */ + st_s r0, [sp, ___isf_t_pc_OFFSET] /* eret into pc */ jl _Fault @@ -143,14 +143,14 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap) _create_irq_stack_frame lr r0,[_ARC_V2_ERSTATUS] - st_s r0, [sp, __tISF_status32_OFFSET] + st_s r0, [sp, ___isf_t_status32_OFFSET] lr r0,[_ARC_V2_ERET] - st_s r0, [sp, __tISF_pc_OFFSET] /* eret into pc */ + st_s r0, [sp, ___isf_t_pc_OFFSET] /* eret into pc */ jl _irq_do_offload - mov_s r1, _nanokernel - ld_s r2, [r1, __tNANO_current_OFFSET] + mov_s r1, _kernel + ld_s r2, [r1, _kernel_offset_to_current] /* check if we're a nested interrupt: if so, let the * interrupted interrupt handle the reschedule */ @@ -168,11 +168,11 @@ _trap_return: .balign 4 _trap_check_for_swap: /* coop thread ? do not schedule */ - ld_s r0, [r2, __tTCS_prio_OFFSET] + ld_s r0, [r2, _thread_offset_to_prio] brlt r0, 0, _trap_return /* scheduler locked ? do not schedule */ - ld_s r0, [r2, __tTCS_sched_locked_OFFSET] + ld_s r0, [r2, _thread_offset_to_sched_locked] brgt r0, 0, _trap_return /* check if the current thread needs to be rescheduled */ @@ -187,7 +187,7 @@ _trap_check_for_swap: _save_callee_saved_regs - st _CAUSE_RIRQ, [r2, __tTCS_relinquish_cause_OFFSET] + st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause] /* note: Ok to use _CAUSE_RIRQ since everything is saved */ /* @@ -204,7 +204,7 @@ _trap_check_for_swap: mov_s r1, r15 mov_s r0, r14 mov_s blink, r13 - st_s r2, [r1, __tNANO_current_OFFSET] + st_s r2, [r1, _kernel_offset_to_current] /* clear AE bit to forget this was an exception */ lr r3, [_ARC_V2_STATUS32] diff --git a/arch/arc/core/isr_wrapper.S b/arch/arc/core/isr_wrapper.S index 3954cfd21c6..c3950d12b1e 100644 --- a/arch/arc/core/isr_wrapper.S +++ b/arch/arc/core/isr_wrapper.S @@ -25,11 +25,11 @@ #define _ASMLANGUAGE -#include +#include #include #include #include -#include +#include #include GTEXT(_isr_enter) @@ -87,12 +87,12 @@ Registers not taken into account in the current implementation. The context switch code adopts this standard so that it is easier to follow: - - r1 contains _nanokernel ASAP and is not overwritten over the lifespan of + - r1 contains _kernel ASAP and is not overwritten over the lifespan of the functions. - - r2 contains _nanokernel.current ASAP, and the incoming thread when we + - r2 contains _kernel.current ASAP, and the incoming thread when we transition from outgoing thread to incoming thread -Not loading _nanokernel into r0 allows loading _nanokernel without stomping on +Not loading _kernel into r0 allows loading _kernel without stomping on the parameter in r0 in _Swap(). 
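The pattern behind all of these renames is worth spelling out: the old code read every field through monolithic __tTCS_*/__tNANO_* offsets, while the new code generates offsets per sub-structure (_thread_arch_t, _callee_saved_t, and so on) and composes them into flat _thread_offset_to_*/_kernel_offset_to_* constants in offsets_short_arch.h. A minimal C sketch of that composition follows; the struct layouts are illustrative stand-ins (the real struct k_thread has many more fields), and GEN_OFFSET_SYM() actually emits absolute symbols into offsets.o rather than C macros, but the arithmetic reduces to offsetof() as shown.

#include <stddef.h>
#include <stdint.h>

struct _thread_arch {
	uint32_t intlock_key;      /* interrupt key when relinquishing control */
	int relinquish_cause;      /* one of the _CAUSE_xxxx codes */
	unsigned int return_value; /* return value from _Swap() */
};

struct _callee_saved {
	uint32_t sp; /* r28 */
};

/* stand-in for struct k_thread: common fields first, arch parts embedded */
struct k_thread_sketch {
	struct _callee_saved callee_saved;
	struct _thread_arch arch;
};

/* equivalent of ___thread_t_arch_OFFSET + ___thread_arch_t_intlock_key_OFFSET,
 * i.e. what offsets_short_arch.h builds from the generated symbols
 */
#define _thread_offset_to_intlock_key \
	(offsetof(struct k_thread_sketch, arch) + \
	 offsetof(struct _thread_arch, intlock_key))

#define _thread_offset_to_sp \
	(offsetof(struct k_thread_sketch, callee_saved) + \
	 offsetof(struct _callee_saved, sp))

The assembly then needs only one flat constant per access, e.g. "ld_s r3, [r2, _thread_offset_to_intlock_key]" with the thread pointer in r2, instead of summing __tTCS_preempReg_OFFSET and __tPreempt_sp_OFFSET at every use site as the removed lines did.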
@@ -314,11 +314,11 @@ GTEXT(_sys_k_event_logger_interrupt) clri r0 /* do not interrupt exiting tickless idle operations */ push_s r1 push_s r0 - mov_s r1, _nanokernel - ld_s r0, [r1, __tNANO_idle_OFFSET] /* requested idle duration */ + mov_s r1, _kernel + ld_s r0, [r1, _kernel_offset_to_idle] /* requested idle duration */ breq r0, 0, _skip_sys_power_save_idle_exit - st 0, [r1, __tNANO_idle_OFFSET] /* zero idle duration */ + st 0, [r1, _kernel_offset_to_idle] /* zero idle duration */ push_s blink jl _sys_power_save_idle_exit pop_s blink diff --git a/arch/arc/core/offsets/offsets.c b/arch/arc/core/offsets/offsets.c index 236aeb263b5..1228c07e3fc 100644 --- a/arch/arc/core/offsets/offsets.c +++ b/arch/arc/core/offsets/offsets.c @@ -34,74 +34,60 @@ */ #include -#include -#include +#include +#include -/* ARCv2-specific tNANO structure member offsets */ -GEN_OFFSET_SYM(tNANO, rirq_sp); -#ifdef CONFIG_SYS_POWER_MANAGEMENT -GEN_OFFSET_SYM(tNANO, idle); -#endif - -/* ARCv2-specific struct tcs structure member offsets */ -GEN_OFFSET_SYM(tTCS, intlock_key); -GEN_OFFSET_SYM(tTCS, relinquish_cause); -GEN_OFFSET_SYM(tTCS, return_value); +GEN_OFFSET_SYM(_thread_arch_t, intlock_key); +GEN_OFFSET_SYM(_thread_arch_t, relinquish_cause); +GEN_OFFSET_SYM(_thread_arch_t, return_value); #ifdef CONFIG_ARC_STACK_CHECKING -GEN_OFFSET_SYM(tTCS, stack_top); +GEN_OFFSET_SYM(_thread_arch_t, stack_top); #endif -#ifdef CONFIG_THREAD_CUSTOM_DATA -GEN_OFFSET_SYM(tTCS, custom_data); -#endif - /* ARCv2-specific IRQ stack frame structure member offsets */ -GEN_OFFSET_SYM(tISF, r0); -GEN_OFFSET_SYM(tISF, r1); -GEN_OFFSET_SYM(tISF, r2); -GEN_OFFSET_SYM(tISF, r3); -GEN_OFFSET_SYM(tISF, r4); -GEN_OFFSET_SYM(tISF, r5); -GEN_OFFSET_SYM(tISF, r6); -GEN_OFFSET_SYM(tISF, r7); -GEN_OFFSET_SYM(tISF, r8); -GEN_OFFSET_SYM(tISF, r9); -GEN_OFFSET_SYM(tISF, r10); -GEN_OFFSET_SYM(tISF, r11); -GEN_OFFSET_SYM(tISF, r12); -GEN_OFFSET_SYM(tISF, r13); -GEN_OFFSET_SYM(tISF, blink); -GEN_OFFSET_SYM(tISF, lp_end); -GEN_OFFSET_SYM(tISF, lp_start); -GEN_OFFSET_SYM(tISF, lp_count); -GEN_OFFSET_SYM(tISF, pc); -GEN_OFFSET_SYM(tISF, status32); -GEN_ABSOLUTE_SYM(__tISF_SIZEOF, sizeof(tISF)); +GEN_OFFSET_SYM(_isf_t, r0); +GEN_OFFSET_SYM(_isf_t, r1); +GEN_OFFSET_SYM(_isf_t, r2); +GEN_OFFSET_SYM(_isf_t, r3); +GEN_OFFSET_SYM(_isf_t, r4); +GEN_OFFSET_SYM(_isf_t, r5); +GEN_OFFSET_SYM(_isf_t, r6); +GEN_OFFSET_SYM(_isf_t, r7); +GEN_OFFSET_SYM(_isf_t, r8); +GEN_OFFSET_SYM(_isf_t, r9); +GEN_OFFSET_SYM(_isf_t, r10); +GEN_OFFSET_SYM(_isf_t, r11); +GEN_OFFSET_SYM(_isf_t, r12); +GEN_OFFSET_SYM(_isf_t, r13); +GEN_OFFSET_SYM(_isf_t, blink); +GEN_OFFSET_SYM(_isf_t, lp_end); +GEN_OFFSET_SYM(_isf_t, lp_start); +GEN_OFFSET_SYM(_isf_t, lp_count); +GEN_OFFSET_SYM(_isf_t, pc); +GEN_OFFSET_SYM(_isf_t, status32); +GEN_ABSOLUTE_SYM(___isf_t_SIZEOF, sizeof(_isf_t)); -/* ARCv2-specific preempt registers structure member offsets */ -GEN_OFFSET_SYM(tPreempt, sp); -GEN_ABSOLUTE_SYM(__tPreempt_SIZEOF, sizeof(tPreempt)); +GEN_OFFSET_SYM(_callee_saved_t, sp); +GEN_ABSOLUTE_SYM(___callee_saved_t_SIZEOF, sizeof(_callee_saved_t)); -/* ARCv2-specific callee-saved stack */ -GEN_OFFSET_SYM(tCalleeSaved, r13); -GEN_OFFSET_SYM(tCalleeSaved, r14); -GEN_OFFSET_SYM(tCalleeSaved, r15); -GEN_OFFSET_SYM(tCalleeSaved, r16); -GEN_OFFSET_SYM(tCalleeSaved, r17); -GEN_OFFSET_SYM(tCalleeSaved, r18); -GEN_OFFSET_SYM(tCalleeSaved, r19); -GEN_OFFSET_SYM(tCalleeSaved, r20); -GEN_OFFSET_SYM(tCalleeSaved, r21); -GEN_OFFSET_SYM(tCalleeSaved, r22); -GEN_OFFSET_SYM(tCalleeSaved, r23); -GEN_OFFSET_SYM(tCalleeSaved, 
r24); -GEN_OFFSET_SYM(tCalleeSaved, r25); -GEN_OFFSET_SYM(tCalleeSaved, r26); -GEN_OFFSET_SYM(tCalleeSaved, fp); -GEN_OFFSET_SYM(tCalleeSaved, r30); -GEN_ABSOLUTE_SYM(__tCalleeSaved_SIZEOF, sizeof(tCalleeSaved)); +GEN_OFFSET_SYM(_callee_saved_stack_t, r13); +GEN_OFFSET_SYM(_callee_saved_stack_t, r14); +GEN_OFFSET_SYM(_callee_saved_stack_t, r15); +GEN_OFFSET_SYM(_callee_saved_stack_t, r16); +GEN_OFFSET_SYM(_callee_saved_stack_t, r17); +GEN_OFFSET_SYM(_callee_saved_stack_t, r18); +GEN_OFFSET_SYM(_callee_saved_stack_t, r19); +GEN_OFFSET_SYM(_callee_saved_stack_t, r20); +GEN_OFFSET_SYM(_callee_saved_stack_t, r21); +GEN_OFFSET_SYM(_callee_saved_stack_t, r22); +GEN_OFFSET_SYM(_callee_saved_stack_t, r23); +GEN_OFFSET_SYM(_callee_saved_stack_t, r24); +GEN_OFFSET_SYM(_callee_saved_stack_t, r25); +GEN_OFFSET_SYM(_callee_saved_stack_t, r26); +GEN_OFFSET_SYM(_callee_saved_stack_t, fp); +GEN_OFFSET_SYM(_callee_saved_stack_t, r30); +GEN_ABSOLUTE_SYM(___callee_saved_stack_t_SIZEOF, sizeof(_callee_saved_stack_t)); -/* size of the struct tcs structure sans save area for floating point regs */ -GEN_ABSOLUTE_SYM(__tTCS_NOFLOAT_SIZEOF, sizeof(tTCS)); +GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread)); GEN_ABS_SYM_END diff --git a/arch/arc/core/prep_c.c b/arch/arc/core/prep_c.c index 1ee9fee3e7a..e0ff2858150 100644 --- a/arch/arc/core/prep_c.c +++ b/arch/arc/core/prep_c.c @@ -30,6 +30,7 @@ #include #include #include +#include #include diff --git a/arch/arc/core/regular_irq.S b/arch/arc/core/regular_irq.S index dee8e58d3df..21fd06c0930 100644 --- a/arch/arc/core/regular_irq.S +++ b/arch/arc/core/regular_irq.S @@ -26,8 +26,8 @@ #define _ASMLANGUAGE -#include -#include +#include +#include #include #include #include "swap_macros.h" @@ -71,17 +71,17 @@ GTEXT(_is_next_thread_current) SECTION_FUNC(TEXT, _rirq_enter) - mov r1, _nanokernel + mov r1, _kernel #ifdef CONFIG_ARC_STACK_CHECKING /* disable stack checking */ lr r2, [_ARC_V2_STATUS32] bclr r2, r2, _ARC_V2_STATUS32_SC_BIT kflag r2 #endif - ld_s r2, [r1, __tNANO_current_OFFSET] + ld_s r2, [r1, _kernel_offset_to_current] #if NUM_REGULAR_IRQ_PRIO_LEVELS == 1 - st sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET] - ld sp, [r1, __tNANO_rirq_sp_OFFSET] + st sp, [r2, _thread_offset_to_sp] + ld sp, [r1, _kernel_offset_to_irq_stack] #else #error regular irq nesting is not implemented #endif @@ -97,8 +97,8 @@ SECTION_FUNC(TEXT, _rirq_enter) SECTION_FUNC(TEXT, _rirq_exit) - mov r1, _nanokernel - ld_s r2, [r1, __tNANO_current_OFFSET] + mov r1, _kernel + ld_s r2, [r1, _kernel_offset_to_current] /* * Lock interrupts to ensure kernel queues do not change from this @@ -124,7 +124,7 @@ SECTION_FUNC(TEXT, _rirq_exit) cmp r0, r3 brgt _rirq_return_from_rirq - ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET] + ld sp, [r2, _thread_offset_to_sp] #endif /* @@ -137,15 +137,15 @@ SECTION_FUNC(TEXT, _rirq_exit) */ /* coop thread ? do not schedule */ - ld_s r0, [r2, __tTCS_prio_OFFSET] + ld_s r0, [r2, _thread_offset_to_prio] cmp_s r0, 0 blt.d _rirq_no_reschedule - ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET] + ld sp, [r2, _thread_offset_to_sp] /* scheduler locked ? 
do not schedule */ - ld_s r0, [r2, __tTCS_sched_locked_OFFSET] + ld_s r0, [r2, _thread_offset_to_sched_locked] brgt.d r0, 0, _rirq_no_reschedule - ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET] + ld sp, [r2, _thread_offset_to_sp] /* check if the current thread needs to be rescheduled */ push_s r2 @@ -156,7 +156,7 @@ SECTION_FUNC(TEXT, _rirq_exit) pop_s r1 pop_s r2 brne.d r0, 0, _rirq_no_reschedule - ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET] + ld sp, [r2, _thread_offset_to_sp] /* * Get the next scheduled thread. On _get_next_ready_thread @@ -176,11 +176,11 @@ _rirq_reschedule: /* _save_callee_saved_regs expects outgoing thread in r2 */ _save_callee_saved_regs - st _CAUSE_RIRQ, [r2, __tTCS_relinquish_cause_OFFSET] + st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause] /* incoming thread is in r0: it becomes the new 'current' */ mov r2, r0 - st_s r2, [r1, __tNANO_current_OFFSET] + st_s r2, [r1, _kernel_offset_to_current] .balign 4 _rirq_common_interrupt_swap: @@ -188,9 +188,9 @@ _rirq_common_interrupt_swap: #ifdef CONFIG_ARC_STACK_CHECKING /* Use stack top and down registers from restored context */ - add r3, r2, __tTCS_NOFLOAT_SIZEOF + add r3, r2, _K_THREAD_NO_FLOAT_SIZEOF sr r3, [_ARC_V2_KSTACK_TOP] - ld_s r3, [r2, __tTCS_stack_top_OFFSET] + ld_s r3, [r2, _thread_offset_to_stack_top] sr r3, [_ARC_V2_KSTACK_BASE] #endif /* @@ -199,7 +199,7 @@ _rirq_common_interrupt_swap: */ _load_callee_saved_regs - ld_s r3, [r2, __tTCS_relinquish_cause_OFFSET] + ld_s r3, [r2, _thread_offset_to_relinquish_cause] breq r3, _CAUSE_RIRQ, _rirq_return_from_rirq nop @@ -215,19 +215,22 @@ _rirq_return_from_coop: /* update status32.ie (explanation in firq_exit:_firq_return_from_coop) */ ld_s r0, [sp, 4] - ld_s r3, [r2, __tTCS_intlock_key_OFFSET] - st 0, [r2, __tTCS_intlock_key_OFFSET] + ld_s r3, [r2, _thread_offset_to_intlock_key] + st 0, [r2, _thread_offset_to_intlock_key] cmp r3, 0 or.ne r0, r0, _ARC_V2_STATUS32_IE st_s r0, [sp, 4] /* carve fake stack */ - sub sp, sp, (__tISF_SIZEOF - 12) /* a) status32/pc are already on the stack - * b) a real value will be pushed in r0 */ + /* + * a) status32/pc are already on the stack + * b) a real value will be pushed in r0 + */ + sub sp, sp, (___isf_t_SIZEOF - 12) /* push return value on stack */ - ld_s r0, [r2, __tTCS_return_value_OFFSET] + ld_s r0, [r2, _thread_offset_to_return_value] push_s r0 /* @@ -236,7 +239,7 @@ _rirq_return_from_coop: * IRQ prologue. r13 thus has to be set to its correct value in the IRQ * stack frame. */ - st_s r13, [sp, __tISF_r13_OFFSET] + st_s r13, [sp, ___isf_t_r13_OFFSET] /* stack now has the IRQ stack frame layout, pointing to r0 */ diff --git a/arch/arc/core/swap.S b/arch/arc/core/swap.S index dd45a54842d..90732209e49 100644 --- a/arch/arc/core/swap.S +++ b/arch/arc/core/swap.S @@ -26,8 +26,8 @@ #define _ASMLANGUAGE -#include -#include +#include +#include #include #include #include @@ -36,7 +36,7 @@ GTEXT(_Swap) GTEXT(_get_next_ready_thread) GDATA(_k_neg_eagain) -GDATA(_nanokernel) +GDATA(_kernel) /** * @@ -56,8 +56,9 @@ GDATA(_nanokernel) * popped when returning from _Swap(), but is not suitable for handling a return * from an exception. Thus, the fact that the thread is pending because of a * cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in - * the relinquish_cause of the thread's tTCS. The _IrqExit()/_FirqExit() code - * will take care of doing the right thing to restore the thread status. + * the relinquish_cause of the thread's k_thread structure. 
The + * _IrqExit()/_FirqExit() code will take care of doing the right thing to + * restore the thread status. * * When _Swap() is invoked, we know the decision to perform a context switch or * not has already been taken and a context switch must happen. @@ -74,12 +75,12 @@ SECTION_FUNC(TEXT, _Swap) /* interrupts are locked, interrupt key is in r0 */ - mov r1, _nanokernel - ld_s r2, [r1, __tNANO_current_OFFSET] + mov r1, _kernel + ld_s r2, [r1, _kernel_offset_to_current] /* save intlock key */ - st_s r0, [r2, __tTCS_intlock_key_OFFSET] - st _CAUSE_COOP, [r2, __tTCS_relinquish_cause_OFFSET] + st_s r0, [r2, _thread_offset_to_intlock_key] + st _CAUSE_COOP, [r2, _thread_offset_to_relinquish_cause] /* * Carve space for the return value. Setting it to a defafult of @@ -88,7 +89,7 @@ SECTION_FUNC(TEXT, _Swap) * fiberRtnValueSet(). */ ld r3, [_k_neg_eagain] - st_s r3, [r2, __tTCS_return_value_OFFSET] + st_s r3, [r2, _thread_offset_to_return_value] /* * Save status32 and blink on the stack before the callee-saved registers. @@ -125,17 +126,17 @@ SECTION_FUNC(TEXT, _Swap) /* entering here, r2 contains the new current thread */ #ifdef CONFIG_ARC_STACK_CHECKING /* Use stack top and down registers from restored context */ - add r3, r2, __tTCS_NOFLOAT_SIZEOF + add r3, r2, _K_THREAD_NO_FLOAT_SIZEOF sr r3, [_ARC_V2_KSTACK_TOP] - ld_s r3, [r2, __tTCS_stack_top_OFFSET] + ld_s r3, [r2, _thread_offset_to_stack_top] sr r3, [_ARC_V2_KSTACK_BASE] #endif /* XXX - can be moved to delay slot of _CAUSE_RIRQ ? */ - st_s r2, [r1, __tNANO_current_OFFSET] + st_s r2, [r1, _kernel_offset_to_current] _load_callee_saved_regs - ld_s r3, [r2, __tTCS_relinquish_cause_OFFSET] + ld_s r3, [r2, _thread_offset_to_relinquish_cause] breq r3, _CAUSE_RIRQ, _swap_return_from_rirq nop @@ -147,9 +148,9 @@ SECTION_FUNC(TEXT, _Swap) .balign 4 _swap_return_from_coop: - ld_s r1, [r2, __tTCS_intlock_key_OFFSET] - st 0, [r2, __tTCS_intlock_key_OFFSET] - ld_s r0, [r2, __tTCS_return_value_OFFSET] + ld_s r1, [r2, _thread_offset_to_intlock_key] + st 0, [r2, _thread_offset_to_intlock_key] + ld_s r0, [r2, _thread_offset_to_return_value] lr ilink, [_ARC_V2_STATUS32] bbit1 ilink, _ARC_V2_STATUS32_AE_BIT, _return_from_exc diff --git a/arch/arc/core/swap_macros.h b/arch/arc/core/swap_macros.h index 811298be984..348e0c27884 100644 --- a/arch/arc/core/swap_macros.h +++ b/arch/arc/core/swap_macros.h @@ -19,8 +19,8 @@ #ifndef _SWAP_MACROS__H_ #define _SWAP_MACROS__H_ -#include -#include +#include +#include #include #include @@ -33,53 +33,53 @@ extern "C" { /* entering this macro, current is in r2 */ .macro _save_callee_saved_regs - sub_s sp, sp, __tCalleeSaved_SIZEOF + sub_s sp, sp, ___callee_saved_stack_t_SIZEOF /* save regs on stack */ - st_s r13, [sp, __tCalleeSaved_r13_OFFSET] - st_s r14, [sp, __tCalleeSaved_r14_OFFSET] - st_s r15, [sp, __tCalleeSaved_r15_OFFSET] - st r16, [sp, __tCalleeSaved_r16_OFFSET] - st r17, [sp, __tCalleeSaved_r17_OFFSET] - st r18, [sp, __tCalleeSaved_r18_OFFSET] - st r19, [sp, __tCalleeSaved_r19_OFFSET] - st r20, [sp, __tCalleeSaved_r20_OFFSET] - st r21, [sp, __tCalleeSaved_r21_OFFSET] - st r22, [sp, __tCalleeSaved_r22_OFFSET] - st r23, [sp, __tCalleeSaved_r23_OFFSET] - st r24, [sp, __tCalleeSaved_r24_OFFSET] - st r25, [sp, __tCalleeSaved_r25_OFFSET] - st r26, [sp, __tCalleeSaved_r26_OFFSET] - st fp, [sp, __tCalleeSaved_fp_OFFSET] - st r30, [sp, __tCalleeSaved_r30_OFFSET] + st_s r13, [sp, ___callee_saved_stack_t_r13_OFFSET] + st_s r14, [sp, ___callee_saved_stack_t_r14_OFFSET] + st_s r15, [sp, ___callee_saved_stack_t_r15_OFFSET] + 
st r16, [sp, ___callee_saved_stack_t_r16_OFFSET] + st r17, [sp, ___callee_saved_stack_t_r17_OFFSET] + st r18, [sp, ___callee_saved_stack_t_r18_OFFSET] + st r19, [sp, ___callee_saved_stack_t_r19_OFFSET] + st r20, [sp, ___callee_saved_stack_t_r20_OFFSET] + st r21, [sp, ___callee_saved_stack_t_r21_OFFSET] + st r22, [sp, ___callee_saved_stack_t_r22_OFFSET] + st r23, [sp, ___callee_saved_stack_t_r23_OFFSET] + st r24, [sp, ___callee_saved_stack_t_r24_OFFSET] + st r25, [sp, ___callee_saved_stack_t_r25_OFFSET] + st r26, [sp, ___callee_saved_stack_t_r26_OFFSET] + st fp, [sp, ___callee_saved_stack_t_fp_OFFSET] + st r30, [sp, ___callee_saved_stack_t_r30_OFFSET] /* save stack pointer in struct tcs */ - st sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET] + st sp, [r2, _thread_offset_to_sp] .endm /* entering this macro, current is in r2 */ .macro _load_callee_saved_regs /* restore stack pointer from struct tcs */ - ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET] + ld sp, [r2, _thread_offset_to_sp] - ld_s r13, [sp, __tCalleeSaved_r13_OFFSET] - ld_s r14, [sp, __tCalleeSaved_r14_OFFSET] - ld_s r15, [sp, __tCalleeSaved_r15_OFFSET] - ld r16, [sp, __tCalleeSaved_r16_OFFSET] - ld r17, [sp, __tCalleeSaved_r17_OFFSET] - ld r18, [sp, __tCalleeSaved_r18_OFFSET] - ld r19, [sp, __tCalleeSaved_r19_OFFSET] - ld r20, [sp, __tCalleeSaved_r20_OFFSET] - ld r21, [sp, __tCalleeSaved_r21_OFFSET] - ld r22, [sp, __tCalleeSaved_r22_OFFSET] - ld r23, [sp, __tCalleeSaved_r23_OFFSET] - ld r24, [sp, __tCalleeSaved_r24_OFFSET] - ld r25, [sp, __tCalleeSaved_r25_OFFSET] - ld r26, [sp, __tCalleeSaved_r26_OFFSET] - ld fp, [sp, __tCalleeSaved_fp_OFFSET] - ld r30, [sp, __tCalleeSaved_r30_OFFSET] + ld_s r13, [sp, ___callee_saved_stack_t_r13_OFFSET] + ld_s r14, [sp, ___callee_saved_stack_t_r14_OFFSET] + ld_s r15, [sp, ___callee_saved_stack_t_r15_OFFSET] + ld r16, [sp, ___callee_saved_stack_t_r16_OFFSET] + ld r17, [sp, ___callee_saved_stack_t_r17_OFFSET] + ld r18, [sp, ___callee_saved_stack_t_r18_OFFSET] + ld r19, [sp, ___callee_saved_stack_t_r19_OFFSET] + ld r20, [sp, ___callee_saved_stack_t_r20_OFFSET] + ld r21, [sp, ___callee_saved_stack_t_r21_OFFSET] + ld r22, [sp, ___callee_saved_stack_t_r22_OFFSET] + ld r23, [sp, ___callee_saved_stack_t_r23_OFFSET] + ld r24, [sp, ___callee_saved_stack_t_r24_OFFSET] + ld r25, [sp, ___callee_saved_stack_t_r25_OFFSET] + ld r26, [sp, ___callee_saved_stack_t_r26_OFFSET] + ld fp, [sp, ___callee_saved_stack_t_fp_OFFSET] + ld r30, [sp, ___callee_saved_stack_t_r30_OFFSET] - add_s sp, sp, __tCalleeSaved_SIZEOF + add_s sp, sp, ___callee_saved_stack_t_SIZEOF .endm @@ -89,33 +89,33 @@ extern "C" { */ .macro _create_irq_stack_frame - sub_s sp, sp, __tISF_SIZEOF + sub_s sp, sp, ___isf_t_SIZEOF - st blink, [sp, __tISF_blink_OFFSET] + st blink, [sp, ___isf_t_blink_OFFSET] /* store these right away so we can use them if needed */ - st_s r13, [sp, __tISF_r13_OFFSET] - st_s r12, [sp, __tISF_r12_OFFSET] - st r11, [sp, __tISF_r11_OFFSET] - st r10, [sp, __tISF_r10_OFFSET] - st r9, [sp, __tISF_r9_OFFSET] - st r8, [sp, __tISF_r8_OFFSET] - st r7, [sp, __tISF_r7_OFFSET] - st r6, [sp, __tISF_r6_OFFSET] - st r5, [sp, __tISF_r5_OFFSET] - st r4, [sp, __tISF_r4_OFFSET] - st_s r3, [sp, __tISF_r3_OFFSET] - st_s r2, [sp, __tISF_r2_OFFSET] - st_s r1, [sp, __tISF_r1_OFFSET] - st_s r0, [sp, __tISF_r0_OFFSET] + st_s r13, [sp, ___isf_t_r13_OFFSET] + st_s r12, [sp, ___isf_t_r12_OFFSET] + st r11, [sp, ___isf_t_r11_OFFSET] + st r10, [sp, ___isf_t_r10_OFFSET] + st r9, [sp, ___isf_t_r9_OFFSET] + st r8, [sp, ___isf_t_r8_OFFSET] 
+ st r7, [sp, ___isf_t_r7_OFFSET] + st r6, [sp, ___isf_t_r6_OFFSET] + st r5, [sp, ___isf_t_r5_OFFSET] + st r4, [sp, ___isf_t_r4_OFFSET] + st_s r3, [sp, ___isf_t_r3_OFFSET] + st_s r2, [sp, ___isf_t_r2_OFFSET] + st_s r1, [sp, ___isf_t_r1_OFFSET] + st_s r0, [sp, ___isf_t_r0_OFFSET] mov r0, lp_count - st_s r0, [sp, __tISF_lp_count_OFFSET] + st_s r0, [sp, ___isf_t_lp_count_OFFSET] lr r1, [_ARC_V2_LP_START] lr r0, [_ARC_V2_LP_END] - st_s r1, [sp, __tISF_lp_start_OFFSET] - st_s r0, [sp, __tISF_lp_end_OFFSET] + st_s r1, [sp, ___isf_t_lp_start_OFFSET] + st_s r0, [sp, ___isf_t_lp_end_OFFSET] .endm @@ -125,29 +125,29 @@ extern "C" { */ .macro _pop_irq_stack_frame - ld blink, [sp, __tISF_blink_OFFSET] + ld blink, [sp, ___isf_t_blink_OFFSET] - ld_s r0, [sp, __tISF_lp_count_OFFSET] + ld_s r0, [sp, ___isf_t_lp_count_OFFSET] mov lp_count, r0 - ld_s r1, [sp, __tISF_lp_start_OFFSET] - ld_s r0, [sp, __tISF_lp_end_OFFSET] + ld_s r1, [sp, ___isf_t_lp_start_OFFSET] + ld_s r0, [sp, ___isf_t_lp_end_OFFSET] sr r1, [_ARC_V2_LP_START] sr r0, [_ARC_V2_LP_END] - ld_s r13, [sp, __tISF_r13_OFFSET] - ld_s r12, [sp, __tISF_r12_OFFSET] - ld r11, [sp, __tISF_r11_OFFSET] - ld r10, [sp, __tISF_r10_OFFSET] - ld r9, [sp, __tISF_r9_OFFSET] - ld r8, [sp, __tISF_r8_OFFSET] - ld r7, [sp, __tISF_r7_OFFSET] - ld r6, [sp, __tISF_r6_OFFSET] - ld r5, [sp, __tISF_r5_OFFSET] - ld r4, [sp, __tISF_r4_OFFSET] - ld_s r3, [sp, __tISF_r3_OFFSET] - ld_s r2, [sp, __tISF_r2_OFFSET] - ld_s r1, [sp, __tISF_r1_OFFSET] - ld_s r0, [sp, __tISF_r0_OFFSET] + ld_s r13, [sp, ___isf_t_r13_OFFSET] + ld_s r12, [sp, ___isf_t_r12_OFFSET] + ld r11, [sp, ___isf_t_r11_OFFSET] + ld r10, [sp, ___isf_t_r10_OFFSET] + ld r9, [sp, ___isf_t_r9_OFFSET] + ld r8, [sp, ___isf_t_r8_OFFSET] + ld r7, [sp, ___isf_t_r7_OFFSET] + ld r6, [sp, ___isf_t_r6_OFFSET] + ld r5, [sp, ___isf_t_r5_OFFSET] + ld r4, [sp, ___isf_t_r4_OFFSET] + ld_s r3, [sp, ___isf_t_r3_OFFSET] + ld_s r2, [sp, ___isf_t_r2_OFFSET] + ld_s r1, [sp, ___isf_t_r1_OFFSET] + ld_s r0, [sp, ___isf_t_r0_OFFSET] /* * All gprs have been reloaded, the only one that is still usable is @@ -158,7 +158,7 @@ extern "C" { * status32 differently depending on the execution context they are running * in (_Swap(), firq or exception). */ - add_s sp, sp, __tISF_SIZEOF + add_s sp, sp, ___isf_t_SIZEOF .endm diff --git a/arch/arc/core/sys_fatal_error_handler.c b/arch/arc/core/sys_fatal_error_handler.c index 1c1178bdb31..a53a0433561 100644 --- a/arch/arc/core/sys_fatal_error_handler.c +++ b/arch/arc/core/sys_fatal_error_handler.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include /** diff --git a/arch/arc/core/thread.c b/arch/arc/core/thread.c index fb95ddbf207..9480be9eea0 100644 --- a/arch/arc/core/thread.c +++ b/arch/arc/core/thread.c @@ -24,8 +24,8 @@ #include #include -#include -#include +#include +#include #include #ifdef CONFIG_INIT_STACKS #include @@ -40,23 +40,21 @@ struct init_stack_frame { uint32_t r0; }; -tNANO _nanokernel = {0}; - #if defined(CONFIG_THREAD_MONITOR) /* * Add a thread to the kernel's list of active threads. 
*/ -static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs) +static ALWAYS_INLINE void thread_monitor_init(struct k_thread *thread) { unsigned int key; key = irq_lock(); - tcs->next_thread = _nanokernel.threads; - _nanokernel.threads = tcs; + thread->next_thread = _kernel.threads; + _kernel.threads = thread; irq_unlock(key); } #else -#define thread_monitor_init(tcs) \ +#define thread_monitor_init(thread) \ do {/* do nothing */ \ } while ((0)) #endif /* CONFIG_THREAD_MONITOR */ @@ -64,7 +62,7 @@ static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs) /* * @brief Initialize a new thread from its stack space * - * The control structure (TCS) is put at the lower address of the stack. An + * The thread control structure is put at the lower address of the stack. An * initial context, to be "restored" by __return_from_coop(), is put at * the other end of the stack, and thus reusable by the stack when not * needed anymore. @@ -96,7 +94,7 @@ void _new_thread(char *pStackMem, unsigned stackSize, char *stackEnd = pStackMem + stackSize; struct init_stack_frame *pInitCtx; - struct tcs *tcs = (struct tcs *) pStackMem; + struct k_thread *thread = (struct k_thread *) pStackMem; #ifdef CONFIG_INIT_STACKS memset(pStackMem, 0xaa, stackSize); @@ -121,32 +119,32 @@ void _new_thread(char *pStackMem, unsigned stackSize, */ #ifdef CONFIG_ARC_STACK_CHECKING pInitCtx->status32 = _ARC_V2_STATUS32_SC | _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL); - tcs->stack_top = (uint32_t) stackEnd; + thread->arch.stack_top = (uint32_t) stackEnd; #else pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL); #endif /* k_q_node initialized upon first insertion in a list */ - tcs->flags = options | K_PRESTART; - tcs->sched_locked = 0; + thread->base.flags = options | K_PRESTART; + thread->base.sched_locked = 0; /* static threads overwrite them afterwards with real values */ - tcs->init_data = NULL; - tcs->fn_abort = NULL; - tcs->prio = priority; + thread->init_data = NULL; + thread->fn_abort = NULL; + thread->base.prio = priority; #ifdef CONFIG_THREAD_CUSTOM_DATA /* Initialize custom data field (value is opaque to kernel) */ - tcs->custom_data = NULL; + thread->custom_data = NULL; #endif #ifdef CONFIG_THREAD_MONITOR /* - * In debug mode tcs->entry give direct access to the thread entry + * In debug mode thread->entry give direct access to the thread entry * and the corresponding parameters. */ - tcs->entry = (struct __thread_entry *)(pInitCtx); + thread->entry = (struct __thread_entry *)(pInitCtx); #endif ARG_UNUSED(uk_task_ptr); @@ -157,13 +155,14 @@ void _new_thread(char *pStackMem, unsigned stackSize, * dst[31:6] dst[5] dst[4] dst[3:0] * 26'd0 1 STATUS32.IE STATUS32.E[3:0] */ - tcs->intlock_key = 0x3F; - tcs->relinquish_cause = _CAUSE_COOP; - tcs->preempReg.sp = (uint32_t)pInitCtx - __tCalleeSaved_SIZEOF; + thread->arch.intlock_key = 0x3F; + thread->arch.relinquish_cause = _CAUSE_COOP; + thread->callee_saved.sp = + (uint32_t)pInitCtx - ___callee_saved_stack_t_SIZEOF; - _nano_timeout_tcs_init(tcs); + _nano_timeout_thread_init(thread); - /* initial values in all other registers/TCS entries are irrelevant */ + /* initial values in all other regs/k_thread entries are irrelevant */ - thread_monitor_init(tcs); + thread_monitor_init(thread); } diff --git a/arch/arc/include/kernel_arch_data.h b/arch/arc/include/kernel_arch_data.h new file mode 100644 index 00000000000..673783db018 --- /dev/null +++ b/arch/arc/include/kernel_arch_data.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2014-2016 Wind River Systems, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Private kernel definitions + * + * This file contains private kernel structures definitions and various + * other definitions for the ARCv2 processor architecture. + * + * This file is also included by assembly language files which must #define + * _ASMLANGUAGE before including this header file. Note that kernel + * assembly source files obtains structure offset values via "absolute + * symbols" in the offsets.o module. + */ + +#ifndef _kernel_arch_data__h_ +#define _kernel_arch_data__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +#ifndef _ASMLANGUAGE +#include +#include +#include +#include +#include +#endif + +#ifndef _ASMLANGUAGE + +struct _caller_saved { + /* + * Saved on the stack as part of handling a regular IRQ or by the + * kernel when calling the FIRQ return code. + */ +}; + +typedef struct _caller_saved _caller_saved_t; + +struct _irq_stack_frame { + uint32_t r0; + uint32_t r1; + uint32_t r2; + uint32_t r3; + uint32_t r4; + uint32_t r5; + uint32_t r6; + uint32_t r7; + uint32_t r8; + uint32_t r9; + uint32_t r10; + uint32_t r11; + uint32_t r12; + uint32_t r13; + uint32_t blink; + uint32_t lp_end; + uint32_t lp_start; + uint32_t lp_count; +#ifdef CONFIG_CODE_DENSITY + /* + * Currently unsupported. This is where those registers are + * automatically pushed on the stack by the CPU when taking a regular + * IRQ. + */ + uint32_t ei_base; + uint32_t ldi_base; + uint32_t jli_base; +#endif + uint32_t pc; + uint32_t status32; +}; + +typedef struct _irq_stack_frame _isf_t; + +struct _callee_saved { + uint32_t sp; /* r28 */ +}; +typedef struct _callee_saved _callee_saved_t; + +/* callee-saved registers pushed on the stack, not in k_thread */ +struct _callee_saved_stack { + uint32_t r13; + uint32_t r14; + uint32_t r15; + uint32_t r16; + uint32_t r17; + uint32_t r18; + uint32_t r19; + uint32_t r20; + uint32_t r21; + uint32_t r22; + uint32_t r23; + uint32_t r24; + uint32_t r25; + uint32_t r26; + uint32_t fp; /* r27 */ + /* r28 is the stack pointer and saved separately */ + /* r29 is ILINK and does not need to be saved */ + uint32_t r30; + /* + * No need to save r31 (blink), it's either alread pushed as the pc or + * blink on an irq stack frame. 
+ */ +}; + +typedef struct _callee_saved_stack _callee_saved_stack_t; + +#endif /* _ASMLANGUAGE */ + +/* Bitmask definitions for the struct tcs->flags bit field */ + +#define K_STATIC 0x00000800 + +#define K_READY 0x00000000 /* Thread is ready to run */ +#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */ +#define K_PENDING 0x00002000 /* Thread is waiting on an object */ +#define K_PRESTART 0x00004000 /* Thread has not yet started */ +#define K_DEAD 0x00008000 /* Thread has terminated */ +#define K_SUSPENDED 0x00010000 /* Thread is suspended */ +#define K_DUMMY 0x00020000 /* Not a real thread */ +#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \ + K_DEAD | K_SUSPENDED | K_DUMMY) + +#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */ +#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */ +#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */ + +/* stacks */ + +#define STACK_ALIGN_SIZE 4 + +#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE) +#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE) + +/* + * Reason a thread has relinquished control: fibers can only be in the NONE + * or COOP state, tasks can be one in the four. + */ +#define _CAUSE_NONE 0 +#define _CAUSE_COOP 1 +#define _CAUSE_RIRQ 2 +#define _CAUSE_FIRQ 3 + +#ifndef _ASMLANGUAGE + +struct _thread_arch { + + /* interrupt key when relinquishing control */ + uint32_t intlock_key; + + /* one of the _CAUSE_xxxx definitions above */ + int relinquish_cause; + + /* return value from _Swap */ + unsigned int return_value; + +#ifdef CONFIG_ARC_STACK_CHECKING + /* top of stack for hardware stack checking */ + uint32_t stack_top; +#endif +}; + +typedef struct _thread_arch _thread_arch_t; + +struct _kernel_arch { + + char *rirq_sp; /* regular IRQ stack pointer base */ + + /* + * FIRQ stack pointer is installed once in the second bank's SP, so + * there is no need to track it in _kernel. + */ + +}; + +typedef struct _kernel_arch _kernel_arch_t; + +#endif /* _ASMLANGUAGE */ + +#ifdef __cplusplus +} +#endif + +#endif /* _kernel_arch_data__h_ */ diff --git a/arch/arc/include/kernel_arch_func.h b/arch/arc/include/kernel_arch_func.h new file mode 100644 index 00000000000..27a61eb2dd8 --- /dev/null +++ b/arch/arc/include/kernel_arch_func.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2014-2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Private kernel definitions + * + * This file contains private kernel structures definitions and various + * other definitions for the ARCv2 processor architecture. + * + * This file is also included by assembly language files which must #define + * _ASMLANGUAGE before including this header file. Note that kernel + * assembly source files obtains structure offset values via "absolute + * symbols" in the offsets.o module. 
+ */ + +#ifndef _kernel_arch_func__h_ +#define _kernel_arch_func__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#if !defined(_ASMLANGUAGE) + +#ifdef CONFIG_CPU_ARCV2 +#include +#include +#endif + +static ALWAYS_INLINE void nanoArchInit(void) +{ + _icache_setup(); + _irq_setup(); +} + +static ALWAYS_INLINE void +_set_thread_return_value(struct k_thread *thread, unsigned int value) +{ + thread->arch.return_value = value; +} + +static ALWAYS_INLINE int _is_in_isr(void) +{ + uint32_t act = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT); +#if CONFIG_IRQ_OFFLOAD + /* Check if we're in a TRAP_S exception as well */ + if (_arc_v2_aux_reg_read(_ARC_V2_STATUS32) & _ARC_V2_STATUS32_AE && + _ARC_V2_ECR_VECTOR(_arc_v2_aux_reg_read(_ARC_V2_ECR)) == EXC_EV_TRAP + ) { + return 1; + } +#endif + return ((act & 0xffff) != 0); +} + +/** + * + * @bried Indicates the interrupt number of the highest priority + * active interrupt + * + * @return IRQ number + */ +static ALWAYS_INLINE int _INTERRUPT_CAUSE(void) +{ + uint32_t irq_num = _arc_v2_aux_reg_read(_ARC_V2_ICAUSE); + + return irq_num; +} + + +extern void _thread_entry_wrapper(void); + +static inline void _IntLibInit(void) +{ + /* nothing needed, here because the kernel requires it */ +} + +#endif /* _ASMLANGUAGE */ + +#ifdef __cplusplus +} +#endif + +#endif /* _kernel_arch_func__h_ */ diff --git a/arch/arc/include/nano_private.h b/arch/arc/include/nano_private.h deleted file mode 100644 index ee4cdfe31c9..00000000000 --- a/arch/arc/include/nano_private.h +++ /dev/null @@ -1,339 +0,0 @@ -/* - * Copyright (c) 2014 Wind River Systems, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @file - * @brief Private nanokernel definitions - * - * This file contains private nanokernel structures definitions and various - * other definitions for the ARCv2 processor architecture. - * - * This file is also included by assembly language files which must #define - * _ASMLANGUAGE before including this header file. Note that nanokernel - * assembly source files obtains structure offset values via "absolute - * symbols" in the offsets.o module. - */ - -#ifndef _NANO_PRIVATE_H -#define _NANO_PRIVATE_H - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include -#include - -#ifndef _ASMLANGUAGE -#include -#include <../../../kernel/unified/include/nano_internal.h> -#include -#include -#include -#endif - -#ifndef _ASMLANGUAGE - -#ifdef CONFIG_THREAD_MONITOR -struct __thread_entry { - _thread_entry_t pEntry; - void *parameter1; - void *parameter2; - void *parameter3; -}; -#endif /*CONFIG_THREAD_MONITOR*/ - -struct coop { - /* - * Saved on the stack as part of handling a regular IRQ or by the kernel - * when calling the FIRQ return code. 
- */ -}; - -struct irq_stack_frame { - uint32_t r0; - uint32_t r1; - uint32_t r2; - uint32_t r3; - uint32_t r4; - uint32_t r5; - uint32_t r6; - uint32_t r7; - uint32_t r8; - uint32_t r9; - uint32_t r10; - uint32_t r11; - uint32_t r12; - uint32_t r13; - uint32_t blink; - uint32_t lp_end; - uint32_t lp_start; - uint32_t lp_count; -#ifdef CONFIG_CODE_DENSITY - /* - * Currently unsupported. This is where those registers are automatically - * pushed on the stack by the CPU when taking a regular IRQ. - */ - uint32_t ei_base; - uint32_t ldi_base; - uint32_t jli_base; -#endif - uint32_t pc; - uint32_t status32; -}; - -typedef struct irq_stack_frame tISF; - -struct preempt { - uint32_t sp; /* r28 */ -}; -typedef struct preempt tPreempt; - -struct callee_saved { - uint32_t r13; - uint32_t r14; - uint32_t r15; - uint32_t r16; - uint32_t r17; - uint32_t r18; - uint32_t r19; - uint32_t r20; - uint32_t r21; - uint32_t r22; - uint32_t r23; - uint32_t r24; - uint32_t r25; - uint32_t r26; - uint32_t fp; /* r27 */ - /* r28 is the stack pointer and saved separately */ - /* r29 is ILINK and does not need to be saved */ - uint32_t r30; - /* - * No need to save r31 (blink), it's either alread pushed as the pc or - * blink on an irq stack frame. - */ -}; -typedef struct callee_saved tCalleeSaved; - -#endif /* _ASMLANGUAGE */ - -/* Bitmask definitions for the struct tcs->flags bit field */ - -#define K_STATIC 0x00000800 - -#define K_READY 0x00000000 /* Thread is ready to run */ -#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */ -#define K_PENDING 0x00002000 /* Thread is waiting on an object */ -#define K_PRESTART 0x00004000 /* Thread has not yet started */ -#define K_DEAD 0x00008000 /* Thread has terminated */ -#define K_SUSPENDED 0x00010000 /* Thread is suspended */ -#define K_DUMMY 0x00020000 /* Not a real thread */ -#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \ - K_DEAD | K_SUSPENDED | K_DUMMY) - -#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */ -#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */ -#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */ - -/* stacks */ - -#define STACK_ALIGN_SIZE 4 - -#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE) -#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE) - -/* - * Reason a thread has relinquished control: fibers can only be in the NONE - * or COOP state, tasks can be one in the four. 
- */ -#define _CAUSE_NONE 0 -#define _CAUSE_COOP 1 -#define _CAUSE_RIRQ 2 -#define _CAUSE_FIRQ 3 - -#ifndef _ASMLANGUAGE - -/* 'struct tcs_base' must match the beginning of 'struct tcs' */ -struct tcs_base { - sys_dnode_t k_q_node; - uint32_t flags; - int prio; - void *swap_data; -#ifdef CONFIG_NANO_TIMEOUTS - struct _timeout timeout; -#endif -}; - -struct tcs { - sys_dnode_t k_q_node; /* node object in any kernel queue */ - uint32_t flags; - int prio; - void *swap_data; -#ifdef CONFIG_NANO_TIMEOUTS - struct _timeout timeout; -#endif - uint32_t intlock_key; /* interrupt key when relinquishing control */ - int relinquish_cause; /* one of the _CAUSE_xxxx definitions above */ - unsigned int return_value;/* return value from _Swap */ -#ifdef CONFIG_THREAD_CUSTOM_DATA - void *custom_data; /* available for custom use */ -#endif - struct coop coopReg; - struct preempt preempReg; -#ifdef CONFIG_THREAD_MONITOR - struct __thread_entry *entry; /* thread entry and parameters description */ - struct tcs *next_thread; /* next item in list of ALL fiber+tasks */ -#endif -#ifdef CONFIG_ERRNO - int errno_var; -#endif -#ifdef CONFIG_ARC_STACK_CHECKING - uint32_t stack_top; -#endif - atomic_t sched_locked; - void *init_data; - void (*fn_abort)(void); -}; - -struct ready_q { - struct k_thread *cache; - uint32_t prio_bmap[1]; - sys_dlist_t q[K_NUM_PRIORITIES]; -}; - -struct s_NANO { - struct tcs *current; /* currently scheduled thread (fiber or task) */ - -#ifdef CONFIG_THREAD_MONITOR - struct tcs *threads; /* singly linked list of ALL fiber+tasks */ -#endif - -#ifdef CONFIG_FP_SHARING - struct tcs *current_fp; /* thread (fiber or task) that owns the FP regs */ -#endif - -#ifdef CONFIG_SYS_POWER_MANAGEMENT - int32_t idle; /* Number of ticks for kernel idling */ -#endif - - char *rirq_sp; /* regular IRQ stack pointer base */ - - /* - * FIRQ stack pointer is installed once in the second bank's SP, so - * there is no need to track it in _nanokernel. - */ - -#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS) - sys_dlist_t timeout_q; -#endif - struct ready_q ready_q; -}; - -typedef struct s_NANO tNANO; -extern tNANO _nanokernel; - -#ifdef CONFIG_CPU_ARCV2 -#include -#include -#endif - -static ALWAYS_INLINE void nanoArchInit(void) -{ - _icache_setup(); - _irq_setup(); -} - -/** - * - * @brief Set the return value for the specified fiber (inline) - * - * The register used to store the return value from a function call invocation - * to . It is assumed that the specified is pending, and thus - * the fiber's thread is stored in its struct tcs structure. 
- * - * @return N/A - */ -static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber, unsigned int value) -{ - fiber->return_value = value; -} - -#define _current _nanokernel.current -#define _ready_q _nanokernel.ready_q -#define _timeout_q _nanokernel.timeout_q -#define _set_thread_return_value fiberRtnValueSet - -static ALWAYS_INLINE void -_set_thread_return_value_with_data(struct k_thread *thread, unsigned int value, - void *data) -{ - _set_thread_return_value(thread, value); - thread->swap_data = data; -} - -#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES) - -/** - * - * @brief Indicates if kernel is handling interrupt - * - * @return 1 if interrupt handler is executed, 0 otherwise - */ -static ALWAYS_INLINE int _is_in_isr(void) -{ - uint32_t act = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT); -#if CONFIG_IRQ_OFFLOAD - /* Check if we're in a TRAP_S exception as well */ - if (_arc_v2_aux_reg_read(_ARC_V2_STATUS32) & _ARC_V2_STATUS32_AE && - _ARC_V2_ECR_VECTOR(_arc_v2_aux_reg_read(_ARC_V2_ECR)) == EXC_EV_TRAP) { - return 1; - } -#endif - return ((act & 0xffff) != 0); -} - -/** - * - * @bried Indicates the interrupt number of the highest priority - * active interrupt - * - * @return IRQ number - */ -static ALWAYS_INLINE int _INTERRUPT_CAUSE(void) -{ - uint32_t irq_num = _arc_v2_aux_reg_read(_ARC_V2_ICAUSE); - - return irq_num; -} - - -extern void nanoCpuAtomicIdle(unsigned int); -extern void _thread_entry_wrapper(void); - -static inline void _IntLibInit(void) -{ - /* nothing needed, here because the kernel requires it */ -} - -#endif /* _ASMLANGUAGE */ - -#ifdef __cplusplus -} -#endif - -#endif /* _NANO_PRIVATE_H */ diff --git a/arch/arc/include/offsets_short_arch.h b/arch/arc/include/offsets_short_arch.h new file mode 100644 index 00000000000..e158b142d76 --- /dev/null +++ b/arch/arc/include/offsets_short_arch.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef _offsets_short_arch__h_ +#define _offsets_short_arch__h_ + +#include + +/* kernel */ + +/* nothing for now */ + +/* end - kernel */ + +/* threads */ + +#define _thread_offset_to_intlock_key \ + (___thread_t_arch_OFFSET + ___thread_arch_t_intlock_key_OFFSET) + +#define _thread_offset_to_relinquish_cause \ + (___thread_t_arch_OFFSET + ___thread_arch_t_relinquish_cause_OFFSET) + +#define _thread_offset_to_return_value \ + (___thread_t_arch_OFFSET + ___thread_arch_t_return_value_OFFSET) + +#define _thread_offset_to_stack_top \ + (___thread_t_arch_OFFSET + ___thread_arch_t_stack_top_OFFSET) + +#define _thread_offset_to_sp \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET) + + +/* end - threads */ + +#endif /* _offsets_short_arch__h_ */ diff --git a/arch/arc/include/start_task_arch.h b/arch/arc/include/start_task_arch.h index 5fe6b6bfda4..3cdd6db0860 100644 --- a/arch/arc/include/start_task_arch.h +++ b/arch/arc/include/start_task_arch.h @@ -30,7 +30,7 @@ #include #include -#include +#include #include #ifdef __cplusplus diff --git a/arch/arc/include/v2/irq.h b/arch/arc/include/v2/irq.h index 494b5eb697b..9f60a512165 100644 --- a/arch/arc/include/v2/irq.h +++ b/arch/arc/include/v2/irq.h @@ -60,7 +60,7 @@ static ALWAYS_INLINE void _irq_setup(void) nano_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL; _arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value); - _nanokernel.rirq_sp = _interrupt_stack + CONFIG_ISR_STACK_SIZE; + _kernel.irq_stack = _interrupt_stack + CONFIG_ISR_STACK_SIZE; _firq_stack_setup(); } diff --git a/arch/arm/core/cortex_m/reset.S b/arch/arm/core/cortex_m/reset.S index f980048059a..812d6d94382 100644 --- a/arch/arm/core/cortex_m/reset.S +++ b/arch/arm/core/cortex_m/reset.S @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include "vector_table.h" _ASM_FILE_PROLOGUE @@ -128,7 +128,7 @@ SECTION_FUNC(TEXT,_force_exit_one_nested_irq) ldrne r2, =_do_software_reboot ldr ip, =_interrupt_stack - add.w ip, #(__tESF_SIZEOF * 2) /* enough for a stack frame */ + add.w ip, #(___esf_t_SIZEOF * 2) /* enough for a stack frame */ ldr r1, =0xfffffffe and.w r2, r1 str r2, [ip, #(6 * 4)] diff --git a/arch/arm/core/cpu_idle.S b/arch/arm/core/cpu_idle.S index b0c30ba43ac..77447419aa4 100644 --- a/arch/arm/core/cpu_idle.S +++ b/arch/arm/core/cpu_idle.S @@ -22,12 +22,12 @@ #define _ASMLANGUAGE -#include +#include #include #include #include #ifdef CONFIG_TICKLESS_IDLE -#include +#include #endif _ASM_FILE_PROLOGUE @@ -78,8 +78,8 @@ SECTION_FUNC(TEXT, _CpuIdleInit) */ SECTION_FUNC(TEXT, _NanoIdleValGet) - ldr r0, =_nanokernel - ldr r0, [r0, #__tNANO_idle_OFFSET] + ldr r0, =_kernel + ldr r0, [r0, #_kernel_offset_to_idle] bx lr /** @@ -96,9 +96,9 @@ SECTION_FUNC(TEXT, _NanoIdleValGet) */ SECTION_FUNC(TEXT, _NanoIdleValClear) - ldr r0, =_nanokernel + ldr r0, =_kernel eors.n r1, r1 - str r1, [r0, #__tNANO_idle_OFFSET] + str r1, [r0, #_kernel_offset_to_idle] bx lr #endif /* CONFIG_SYS_POWER_MANAGEMENT */ diff --git a/arch/arm/core/exc_exit.S b/arch/arm/core/exc_exit.S index bba56961c0e..c8e8250b629 100644 --- a/arch/arm/core/exc_exit.S +++ b/arch/arm/core/exc_exit.S @@ -26,8 +26,8 @@ #define _ASMLANGUAGE -#include -#include +#include +#include #include #include @@ -35,7 +35,7 @@ _ASM_FILE_PROLOGUE GTEXT(_ExcExit) GTEXT(_IntExit) -GDATA(_nanokernel) +GDATA(_kernel) GTEXT(_is_next_thread_current) #if CONFIG_GDB_INFO @@ -88,11 +88,11 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit) SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit) - ldr r1, =_nanokernel 
+ ldr r1, =_kernel - ldr r1, [r1, #__tNANO_current_OFFSET] - ldr r2, [r1, #__tTCS_prio_OFFSET] - ldr r3, [r1, #__tTCS_sched_locked_OFFSET] + ldr r1, [r1, #_kernel_offset_to_current] + ldr r2, [r1, #_thread_offset_to_prio] + ldr r3, [r1, #_thread_offset_to_sched_locked] /* coop thread ? do not schedule */ cmp r2, #0 diff --git a/arch/arm/core/fatal.c b/arch/arm/core/fatal.c index 4445ad4132d..9eb48f4a3dc 100644 --- a/arch/arm/core/fatal.c +++ b/arch/arm/core/fatal.c @@ -26,7 +26,7 @@ #include #include -#include +#include #ifdef CONFIG_PRINTK #include diff --git a/arch/arm/core/fault.c b/arch/arm/core/fault.c index a332c4c6a66..9dab730296f 100644 --- a/arch/arm/core/fault.c +++ b/arch/arm/core/fault.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #ifdef CONFIG_PRINTK diff --git a/arch/arm/core/gdb_stub.S b/arch/arm/core/gdb_stub.S index fefcdda5e3f..03b887d2601 100644 --- a/arch/arm/core/gdb_stub.S +++ b/arch/arm/core/gdb_stub.S @@ -29,10 +29,10 @@ #define _ASMLANGUAGE -#include +#include #include #include -#include +#include #include _ASM_FILE_PROLOGUE @@ -61,7 +61,7 @@ _ASM_FILE_PROLOGUE SECTION_FUNC(TEXT, _GdbStubExcEntry) - ldr r1, =_nanokernel + ldr r1, =_kernel ldr r2, [r1, #__tNANO_flags_OFFSET] /* already in an exception, do not update the registers */ @@ -123,7 +123,7 @@ SECTION_FUNC(TEXT, _GdbStubExcExit) bxeq lr #endif - ldr r1, =_nanokernel + ldr r1, =_kernel ldr r2, [r1, #__tNANO_flags_OFFSET] ldr r3, =EXC_ACTIVE diff --git a/arch/arm/core/isr_wrapper.S b/arch/arm/core/isr_wrapper.S index dded16b4521..be06993d95e 100644 --- a/arch/arm/core/isr_wrapper.S +++ b/arch/arm/core/isr_wrapper.S @@ -24,11 +24,11 @@ #define _ASMLANGUAGE -#include +#include #include #include #include -#include +#include #include _ASM_FILE_PROLOGUE @@ -77,21 +77,24 @@ SECTION_FUNC(TEXT, _isr_wrapper) cpsid i /* PRIMASK = 1 */ /* is this a wakeup from idle ? 
*/ - ldr r2, =_nanokernel - ldr r0, [r2, #__tNANO_idle_OFFSET] /* requested idle duration, in ticks */ + ldr r2, =_kernel + /* requested idle duration, in ticks */ + ldr r0, [r2, #_kernel_offset_to_idle] cmp r0, #0 #if defined(CONFIG_CPU_CORTEX_M0_M0PLUS) beq _idle_state_cleared movs.n r1, #0 - str r1, [r2, #__tNANO_idle_OFFSET] /* clear kernel idle state */ + /* clear kernel idle state */ + str r1, [r2, #_kernel_offset_to_idle] blx _sys_power_save_idle_exit _idle_state_cleared: #else ittt ne movne r1, #0 - strne r1, [r2, #__tNANO_idle_OFFSET] /* clear kernel idle state */ + /* clear kernel idle state */ + strne r1, [r2, #_kernel_offset_to_idle] blxne _sys_power_save_idle_exit #endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */ diff --git a/arch/arm/core/offsets/offsets.c b/arch/arm/core/offsets/offsets.c index 788de14d6ea..69b7471f468 100644 --- a/arch/arm/core/offsets/offsets.c +++ b/arch/arm/core/offsets/offsets.c @@ -34,64 +34,52 @@ */ #include -#include -#include +#include +#include -/* ARM-specific tNANO structure member offsets */ - -#ifdef CONFIG_SYS_POWER_MANAGEMENT -GEN_OFFSET_SYM(tNANO, idle); -#endif - -/* ARM-specific struct tcs structure member offsets */ - -GEN_OFFSET_SYM(tTCS, basepri); -#ifdef CONFIG_THREAD_CUSTOM_DATA -GEN_OFFSET_SYM(tTCS, custom_data); -#endif +GEN_OFFSET_SYM(_thread_arch_t, basepri); #ifdef CONFIG_FLOAT -GEN_OFFSET_SYM(tTCS, preemp_float_regs); +GEN_OFFSET_SYM(_thread_arch_t, preempt_float); #endif -/* ARM-specific ESF structure member offsets */ - -GEN_OFFSET_SYM(tESF, a1); -GEN_OFFSET_SYM(tESF, a2); -GEN_OFFSET_SYM(tESF, a3); -GEN_OFFSET_SYM(tESF, a4); -GEN_OFFSET_SYM(tESF, ip); -GEN_OFFSET_SYM(tESF, lr); -GEN_OFFSET_SYM(tESF, pc); -GEN_OFFSET_SYM(tESF, xpsr); +GEN_OFFSET_SYM(_esf_t, a1); +GEN_OFFSET_SYM(_esf_t, a2); +GEN_OFFSET_SYM(_esf_t, a3); +GEN_OFFSET_SYM(_esf_t, a4); +GEN_OFFSET_SYM(_esf_t, ip); +GEN_OFFSET_SYM(_esf_t, lr); +GEN_OFFSET_SYM(_esf_t, pc); +GEN_OFFSET_SYM(_esf_t, xpsr); #ifdef CONFIG_FLOAT -GEN_OFFSET_SYM(tESF, s); -GEN_OFFSET_SYM(tESF, fpscr); +GEN_OFFSET_SYM(_esf_t, s); +GEN_OFFSET_SYM(_esf_t, fpscr); #endif -/* size of the entire tESF structure */ +GEN_ABSOLUTE_SYM(___esf_t_SIZEOF, sizeof(_esf_t)); -GEN_ABSOLUTE_SYM(__tESF_SIZEOF, sizeof(tESF)); - -/* ARM-specific preempt registers structure member offsets */ - -GEN_OFFSET_SYM(tPreempt, v1); -GEN_OFFSET_SYM(tPreempt, v2); -GEN_OFFSET_SYM(tPreempt, v3); -GEN_OFFSET_SYM(tPreempt, v4); -GEN_OFFSET_SYM(tPreempt, v5); -GEN_OFFSET_SYM(tPreempt, v6); -GEN_OFFSET_SYM(tPreempt, v7); -GEN_OFFSET_SYM(tPreempt, v8); -GEN_OFFSET_SYM(tPreempt, psp); +GEN_OFFSET_SYM(_callee_saved_t, v1); +GEN_OFFSET_SYM(_callee_saved_t, v2); +GEN_OFFSET_SYM(_callee_saved_t, v3); +GEN_OFFSET_SYM(_callee_saved_t, v4); +GEN_OFFSET_SYM(_callee_saved_t, v5); +GEN_OFFSET_SYM(_callee_saved_t, v6); +GEN_OFFSET_SYM(_callee_saved_t, v7); +GEN_OFFSET_SYM(_callee_saved_t, v8); +GEN_OFFSET_SYM(_callee_saved_t, psp); /* size of the entire preempt registers structure */ -GEN_ABSOLUTE_SYM(__tPreempt_SIZEOF, sizeof(tPreempt)); +GEN_ABSOLUTE_SYM(___callee_saved_t_SIZEOF, sizeof(struct _callee_saved)); /* size of the struct tcs structure sans save area for floating point regs */ -GEN_ABSOLUTE_SYM(__tTCS_NOFLOAT_SIZEOF, sizeof(tTCS)); +#ifdef CONFIG_FLOAT +GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread) - + sizeof(struct _preempt_float)); +#else +GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread)); +#endif GEN_ABS_SYM_END diff --git a/arch/arm/core/swap.S b/arch/arm/core/swap.S index 
9b14408d2af..b1f0aecfe0b 100644 --- a/arch/arm/core/swap.S +++ b/arch/arm/core/swap.S @@ -24,8 +24,8 @@ #define _ASMLANGUAGE -#include -#include +#include +#include #include #include @@ -39,7 +39,7 @@ GTEXT(__pendsv) GTEXT(_get_next_ready_thread) GDATA(_k_neg_eagain) -GDATA(_nanokernel) +GDATA(_kernel) /** * @@ -52,10 +52,6 @@ GDATA(_nanokernel) * When PendSV is pended, the decision that a context switch must happen has * already been taken. In other words, when __pendsv() runs, we *know* we have * to swap *something*. - * - * The scheduling algorithm is simple: schedule the head of the runnable fibers - * list (_nanokernel.fiber). If there are no runnable fibers, then schedule the - * task (_nanokernel.task). The _nanokernel.task field will never be NULL. */ SECTION_FUNC(TEXT, __pendsv) @@ -70,12 +66,12 @@ SECTION_FUNC(TEXT, __pendsv) mov lr, r0 #endif - /* load _Nanokernel into r1 and current tTCS into r2 */ - ldr r1, =_nanokernel - ldr r2, [r1, #__tNANO_current_OFFSET] + /* load _kernel into r1 and current k_thread into r2 */ + ldr r1, =_kernel + ldr r2, [r1, #_kernel_offset_to_current] /* addr of callee-saved regs in TCS in r0 */ - ldr r0, =__tTCS_preempReg_OFFSET + ldr r0, =_thread_offset_to_callee_saved add r0, r2 /* save callee-saved + psp in TCS */ @@ -95,7 +91,7 @@ SECTION_FUNC(TEXT, __pendsv) #else stmia r0, {v1-v8, ip} #ifdef CONFIG_FP_SHARING - add r0, r2, #__tTCS_preemp_float_regs_OFFSET + add r0, r2, #_thread_offset_to_preempt_float vstmia r0, {s16-s31} #endif /* CONFIG_FP_SHARING */ #endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */ @@ -128,7 +124,7 @@ SECTION_FUNC(TEXT, __pendsv) movs.n r2, r0 /* r2 contains the new thread */ - str r2, [r1, #__tNANO_current_OFFSET] + str r2, [r1, #_kernel_offset_to_current] /* * Clear PendSV so that if another interrupt comes in and @@ -143,9 +139,9 @@ SECTION_FUNC(TEXT, __pendsv) str v3, [v4, #0] /* Restore previous interrupt disable state (irq_lock key) */ - ldr r0, [r2, #__tTCS_basepri_OFFSET] + ldr r0, [r2, #_thread_offset_to_basepri] movs.n r3, #0 - str r3, [r2, #__tTCS_basepri_OFFSET] + str r3, [r2, #_thread_offset_to_basepri] #if defined(CONFIG_CPU_CORTEX_M0_M0PLUS) /* BASEPRI not available, previous interrupt disable state @@ -159,7 +155,7 @@ SECTION_FUNC(TEXT, __pendsv) cpsie i _thread_irq_disabled: - ldr r4, =__tTCS_preempReg_OFFSET + ldr r4, =_thread_offset_to_callee_saved adds r0, r2, r4 /* restore r4-r12 for new thread */ @@ -180,12 +176,12 @@ _thread_irq_disabled: msr BASEPRI, r0 #ifdef CONFIG_FP_SHARING - add r0, r2, #__tTCS_preemp_float_regs_OFFSET + add r0, r2, #_thread_offset_to_preempt_float vldmia r0, {s16-s31} #endif /* load callee-saved + psp from TCS */ - add r0, r2, #__tTCS_preempReg_OFFSET + add r0, r2, #_thread_offset_to_callee_saved ldmia r0, {v1-v8, ip} #endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */ @@ -246,7 +242,7 @@ _context_switch: mrs r2, PSP /* thread mode, stack frame is on PSP */ ldr r3, =_k_neg_eagain ldr r3, [r3, #0] - str r3, [r2, #__tESF_a1_OFFSET] + str r3, [r2, #___esf_t_a1_OFFSET] /* * Unlock interrupts: @@ -305,9 +301,9 @@ _context_switch: SECTION_FUNC(TEXT, _Swap) - ldr r1, =_nanokernel - ldr r2, [r1, #__tNANO_current_OFFSET] - str r0, [r2, #__tTCS_basepri_OFFSET] + ldr r1, =_kernel + ldr r2, [r1, #_kernel_offset_to_current] + str r0, [r2, #_thread_offset_to_basepri] #if defined(CONFIG_CPU_CORTEX_M0_M0PLUS) /* No priority-based interrupt masking on M0/M0+, diff --git a/arch/arm/core/sys_fatal_error_handler.c b/arch/arm/core/sys_fatal_error_handler.c index 30c78070c45..ca756c213ef 100644 --- 
a/arch/arm/core/sys_fatal_error_handler.c +++ b/arch/arm/core/sys_fatal_error_handler.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include /** diff --git a/arch/arm/core/thread.c b/arch/arm/core/thread.c index cf0eec347fc..485897bdb6f 100644 --- a/arch/arm/core/thread.c +++ b/arch/arm/core/thread.c @@ -25,14 +25,12 @@ #include #include #include -#include +#include #include #ifdef CONFIG_INIT_STACKS #include #endif /* CONFIG_INIT_STACKS */ -tNANO _nanokernel = {0}; - #if defined(CONFIG_THREAD_MONITOR) /* * Add a thread to the kernel's list of active threads. @@ -42,8 +40,8 @@ static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs) unsigned int key; key = irq_lock(); - tcs->next_thread = _nanokernel.threads; - _nanokernel.threads = tcs; + tcs->next_thread = _kernel.threads; + _kernel.threads = tcs; irq_unlock(key); } #else @@ -115,13 +113,13 @@ void _new_thread(char *pStackMem, unsigned stackSize, 0x01000000UL; /* clear all, thumb bit is 1, even if RO */ /* k_q_node initialized upon first insertion in a list */ - tcs->flags = options | K_PRESTART; - tcs->sched_locked = 0; + tcs->base.flags = options | K_PRESTART; + tcs->base.sched_locked = 0; /* static threads overwrite it afterwards with real value */ tcs->init_data = NULL; tcs->fn_abort = NULL; - tcs->prio = priority; + tcs->base.prio = priority; #ifdef CONFIG_THREAD_CUSTOM_DATA /* Initialize custom data field (value is opaque to kernel) */ @@ -139,10 +137,10 @@ void _new_thread(char *pStackMem, unsigned stackSize, ARG_UNUSED(uk_task_ptr); - tcs->preempReg.psp = (uint32_t)pInitCtx; - tcs->basepri = 0; + tcs->callee_saved.psp = (uint32_t)pInitCtx; + tcs->arch.basepri = 0; - _nano_timeout_tcs_init(tcs); + _nano_timeout_thread_init(tcs); /* initial values in all other registers/TCS entries are irrelevant */ diff --git a/arch/arm/core/thread_abort.c b/arch/arm/core/thread_abort.c index 378f3c38e7c..17c5e7c4384 100644 --- a/arch/arm/core/thread_abort.c +++ b/arch/arm/core/thread_abort.c @@ -27,7 +27,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/arm/include/cortex_m/stack.h b/arch/arm/include/cortex_m/stack.h index 945723d0d1e..f0396d58cea 100644 --- a/arch/arm/include/cortex_m/stack.h +++ b/arch/arm/include/cortex_m/stack.h @@ -24,7 +24,7 @@ #ifndef _ARM_CORTEXM_STACK__H_ #define _ARM_CORTEXM_STACK__H_ -#include +#include #include #ifdef __cplusplus diff --git a/arch/arm/include/kernel_arch_data.h b/arch/arm/include/kernel_arch_data.h new file mode 100644 index 00000000000..3fe0b868383 --- /dev/null +++ b/arch/arm/include/kernel_arch_data.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2013-2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Private kernel definitions (ARM) + * + * This file contains private kernel structures definitions and various + * other definitions for the ARM Cortex-M3 processor architecture. 
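The _new_thread() hunk above now reaches scheduling fields through thread->base and records the initial exception-frame address in thread->callee_saved.psp. A minimal sketch of that staging, with hypothetical names (esf_sketch, init_frame) standing in for struct __esf and the real _new_thread() internals:

	#include <stdint.h>

	/* mirrors the hardware-stacked frame layout (a1..xpsr) */
	struct esf_sketch {
		uint32_t a1, a2, a3, a4;
		uint32_t ip, lr, pc, xpsr;
	};

	/* assumed 8-byte stack alignment for this sketch */
	#define STACK_ROUND_DOWN(x) ((x) & ~7UL)

	static struct esf_sketch *init_frame(char *stack_mem,
					     unsigned int stack_size,
					     void (*entry)(void))
	{
		uintptr_t top = STACK_ROUND_DOWN((uintptr_t)(stack_mem + stack_size));
		struct esf_sketch *frame = (struct esf_sketch *)top - 1;

		frame->pc = (uint32_t)(uintptr_t)entry;
		frame->xpsr = 0x01000000UL; /* thumb bit set, as in the hunk */

		/* this pointer is what lands in thread->callee_saved.psp */
		return frame;
	}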
+ * + * This file is also included by assembly language files which must #define + * _ASMLANGUAGE before including this header file. Note that kernel + * assembly source files obtains structure offset values via "absolute symbols" + * in the offsets.o module. + */ + +#ifndef _kernel_arch_data__h_ +#define _kernel_arch_data__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +#ifndef _ASMLANGUAGE +#include +#include +#include +#include +#include +#endif + +#ifndef _ASMLANGUAGE + +struct _caller_saved { + /* + * Unused for Cortex-M, which automatically saves the necessary + * registers in its exception stack frame. + * + * For Cortex-A, this may be: + * + * uint32_t a1; // r0 + * uint32_t a2; // r1 + * uint32_t a3; // r2 + * uint32_t a4; // r3 + * uint32_t ip; // r12 + * uint32_t lr; // r14 + * uint32_t pc; // r15 + * uint32_t xpsr; + */ +}; + +typedef struct _caller_saved _caller_saved_t; + +struct _callee_saved { + uint32_t v1; /* r4 */ + uint32_t v2; /* r5 */ + uint32_t v3; /* r6 */ + uint32_t v4; /* r7 */ + uint32_t v5; /* r8 */ + uint32_t v6; /* r9 */ + uint32_t v7; /* r10 */ + uint32_t v8; /* r11 */ + uint32_t psp; /* r13 */ +}; + +typedef struct _callee_saved _callee_saved_t; + +typedef struct __esf _esf_t; + +#endif /* _ASMLANGUAGE */ + +/* Bitmask definitions for the struct tcs.flags bit field */ + +#define K_STATIC 0x00000800 + +#define K_READY 0x00000000 /* Thread is ready to run */ +#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */ +#define K_PENDING 0x00002000 /* Thread is waiting on an object */ +#define K_PRESTART 0x00004000 /* Thread has not yet started */ +#define K_DEAD 0x00008000 /* Thread has terminated */ +#define K_SUSPENDED 0x00010000 /* Thread is suspended */ +#define K_DUMMY 0x00020000 /* Not a real thread */ +#define K_EXECUTION_MASK \ + (K_TIMING | K_PENDING | K_PRESTART | K_DEAD | K_SUSPENDED | K_DUMMY) + +#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */ +#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */ +#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */ + +/* stacks */ + +#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE) +#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE) + +#ifdef CONFIG_CPU_CORTEX_M +#include +#include +#endif + +#ifndef _ASMLANGUAGE + +#ifdef CONFIG_FLOAT +struct _preempt_float { + float s16; + float s17; + float s18; + float s19; + float s20; + float s21; + float s22; + float s23; + float s24; + float s25; + float s26; + float s27; + float s28; + float s29; + float s30; + float s31; +}; +#endif + +struct _thread_arch { + + /* interrupt locking key */ + uint32_t basepri; + +#ifdef CONFIG_FLOAT + /* + * No cooperative floating point register set structure exists for + * the Cortex-M as it automatically saves the necessary registers + * in its exception stack frame. + */ + struct _preempt_float preempt_float; +#endif +}; + +typedef struct _thread_arch _thread_arch_t; + +struct _kernel_arch { + /* empty */ +}; + +typedef struct _kernel_arch _kernel_arch_t; + +#endif /* _ASMLANGUAGE */ + +#ifdef __cplusplus +} +#endif + +#endif /* _kernel_arch_data__h_ */ diff --git a/arch/arm/include/kernel_arch_func.h b/arch/arm/include/kernel_arch_func.h new file mode 100644 index 00000000000..f298a0cdacd --- /dev/null +++ b/arch/arm/include/kernel_arch_func.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2013-2016 Wind River Systems, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Private kernel definitions (ARM) + * + * This file contains private kernel function definitions and various + * other definitions for the ARM Cortex-M3 processor architecture. + * + * This file is also included by assembly language files which must #define + * _ASMLANGUAGE before including this header file. Note that kernel + * assembly source files obtains structure offset values via "absolute symbols" + * in the offsets.o module. + */ + +/* this file is only meant to be included by kernel_structs.h */ + +#ifndef _kernel_arch_func__h_ +#define _kernel_arch_func__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _ASMLANGUAGE +extern void _FaultInit(void); +extern void _CpuIdleInit(void); +static ALWAYS_INLINE void nanoArchInit(void) +{ + _InterruptStackSetup(); + _ExcSetup(); + _FaultInit(); + _CpuIdleInit(); +} + +/** + * + * @brief Set the return value for the specified fiber (inline) + * + * The register used to store the return value from a function call invocation + * to . It is assumed that the specified is pending, and thus + * the fiber's thread is stored in its struct tcs structure. + * + * @param fiber pointer to the fiber + * @param value is the value to set as a return value + * + * @return N/A + */ +static ALWAYS_INLINE void +_set_thread_return_value(struct k_thread *thread, unsigned int value) +{ + struct __esf *esf = (struct __esf *)thread->callee_saved.psp; + + esf->a1 = value; +} + +extern void nano_cpu_atomic_idle(unsigned int); + +#define _is_in_isr() _IsInIsr() + +extern void _IntLibInit(void); + +#endif /* _ASMLANGUAGE */ + +#ifdef __cplusplus +} +#endif + +#endif /* _kernel_arch_func__h_ */ diff --git a/arch/arm/include/nano_private.h b/arch/arm/include/nano_private.h deleted file mode 100644 index ff1856f1f07..00000000000 --- a/arch/arm/include/nano_private.h +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright (c) 2013-2014 Wind River Systems, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @file - * @brief Private nanokernel definitions (ARM) - * - * This file contains private nanokernel structures definitions and various - * other definitions for the ARM Cortex-M3 processor architecture. - * - * This file is also included by assembly language files which must #define - * _ASMLANGUAGE before including this header file. 
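Worth spelling out why the new _set_thread_return_value() in kernel_arch_func.h above simply writes esf->a1: on exception return the Cortex-M core pops the stacked frame and a1 lands back in r0, the AAPCS return-value register, so the resumed _Swap() call returns that value. A compile-only sketch with stub types (thread_sketch is hypothetical):

	#include <stdint.h>

	struct esf_sketch { uint32_t a1, a2, a3, a4, ip, lr, pc, xpsr; };

	struct thread_sketch {
		struct { uint32_t psp; } callee_saved;
	};

	static inline void set_retval(struct thread_sketch *thread,
				      unsigned int value)
	{
		/* psp points at the stacked frame; a1 is the stacked r0 slot */
		struct esf_sketch *esf =
			(struct esf_sketch *)(uintptr_t)thread->callee_saved.psp;

		esf->a1 = value;
	}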
Note that nanokernel - * assembly source files obtains structure offset values via "absolute symbols" - * in the offsets.o module. - */ - -#ifndef _NANO_PRIVATE_H -#define _NANO_PRIVATE_H - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -#ifndef _ASMLANGUAGE -#include -#include <../../../kernel/unified/include/nano_internal.h> -#include -#include -#include -#endif - -#ifndef _ASMLANGUAGE - -#ifdef CONFIG_THREAD_MONITOR -struct __thread_entry { - _thread_entry_t pEntry; - void *parameter1; - void *parameter2; - void *parameter3; -}; -#endif /*CONFIG_THREAD_MONITOR*/ - -struct coop { - /* - * Unused for Cortex-M, which automatically saves the necessary - * registers in its exception stack frame. - * - * For Cortex-A, this may be: - * - * uint32_t a1; // r0 - * uint32_t a2; // r1 - * uint32_t a3; // r2 - * uint32_t a4; // r3 - * uint32_t ip; // r12 - * uint32_t lr; // r14 - * uint32_t pc; // r15 - * uint32_t xpsr; - */ -}; - -typedef struct __esf tESF; - -struct preempt { - uint32_t v1; /* r4 */ - uint32_t v2; /* r5 */ - uint32_t v3; /* r6 */ - uint32_t v4; /* r7 */ - uint32_t v5; /* r8 */ - uint32_t v6; /* r9 */ - uint32_t v7; /* r10 */ - uint32_t v8; /* r11 */ - uint32_t psp; /* r13 */ -}; - -typedef struct preempt tPreempt; - -#endif /* _ASMLANGUAGE */ - -/* Bitmask definitions for the struct tcs.flags bit field */ - -#define K_STATIC 0x00000800 - -#define K_READY 0x00000000 /* Thread is ready to run */ -#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */ -#define K_PENDING 0x00002000 /* Thread is waiting on an object */ -#define K_PRESTART 0x00004000 /* Thread has not yet started */ -#define K_DEAD 0x00008000 /* Thread has terminated */ -#define K_SUSPENDED 0x00010000 /* Thread is suspended */ -#define K_DUMMY 0x00020000 /* Not a real thread */ -#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \ - K_DEAD | K_SUSPENDED | K_DUMMY) - -#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */ -#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */ -#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */ - -/* stacks */ - -#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE) -#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE) - -#ifdef CONFIG_CPU_CORTEX_M -#include -#include -#endif - -#ifndef _ASMLANGUAGE - -#ifdef CONFIG_FLOAT -struct preemp_float { - float s16; - float s17; - float s18; - float s19; - float s20; - float s21; - float s22; - float s23; - float s24; - float s25; - float s26; - float s27; - float s28; - float s29; - float s30; - float s31; -}; -#endif - -/* 'struct tcs_base' must match the beginning of 'struct tcs' */ -struct tcs_base { - sys_dnode_t k_q_node; - uint32_t flags; - int prio; - void *swap_data; -#ifdef CONFIG_NANO_TIMEOUTS - struct _timeout timeout; -#endif -}; - -struct tcs { - sys_dnode_t k_q_node; /* node object in any kernel queue */ - uint32_t flags; - int prio; - void *swap_data; -#ifdef CONFIG_NANO_TIMEOUTS - struct _timeout timeout; -#endif - uint32_t basepri; -#ifdef CONFIG_THREAD_CUSTOM_DATA - void *custom_data; /* available for custom use */ -#endif - struct coop coopReg; - struct preempt preempReg; -#if defined(CONFIG_THREAD_MONITOR) - struct __thread_entry *entry; /* thread entry and parameters description */ - struct tcs *next_thread; /* next item in list of ALL fiber+tasks */ -#endif -#ifdef CONFIG_ERRNO - int errno_var; -#endif - atomic_t sched_locked; - void *init_data; - void (*fn_abort)(void); -#ifdef CONFIG_FLOAT - /* - * No cooperative 
floating point register set structure exists for - * the Cortex-M as it automatically saves the necessary registers - * in its exception stack frame. - */ - struct preemp_float preemp_float_regs; -#endif -}; - -struct ready_q { - struct k_thread *cache; - uint32_t prio_bmap[1]; - sys_dlist_t q[K_NUM_PRIORITIES]; -}; - -struct s_NANO { - struct tcs *current; /* currently scheduled thread (fiber or task) */ - -#if defined(CONFIG_THREAD_MONITOR) - struct tcs *threads; /* singly linked list of ALL fiber+tasks */ -#endif - -#ifdef CONFIG_FP_SHARING - struct tcs *current_fp; /* thread (fiber or task) that owns the FP regs */ -#endif /* CONFIG_FP_SHARING */ - -#ifdef CONFIG_SYS_POWER_MANAGEMENT - int32_t idle; /* Number of ticks for kernel idling */ -#endif - -#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS) - sys_dlist_t timeout_q; -#endif - struct ready_q ready_q; -}; - -typedef struct s_NANO tNANO; -extern tNANO _nanokernel; - -#endif /* _ASMLANGUAGE */ - -#ifndef _ASMLANGUAGE -extern void _FaultInit(void); -extern void _CpuIdleInit(void); -static ALWAYS_INLINE void nanoArchInit(void) -{ - _InterruptStackSetup(); - _ExcSetup(); - _FaultInit(); - _CpuIdleInit(); -} - -/** - * - * @brief Set the return value for the specified fiber (inline) - * - * The register used to store the return value from a function call invocation - * to . It is assumed that the specified is pending, and thus - * the fiber's thread is stored in its struct tcs structure. - * - * @param fiber pointer to the fiber - * @param value is the value to set as a return value - * - * @return N/A - */ -static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber, - unsigned int value) -{ - tESF *pEsf = (void *)fiber->preempReg.psp; - - pEsf->a1 = value; -} - -#define _current _nanokernel.current -#define _ready_q _nanokernel.ready_q -#define _timeout_q _nanokernel.timeout_q -#define _set_thread_return_value fiberRtnValueSet - -static ALWAYS_INLINE void -_set_thread_return_value_with_data(struct k_thread *thread, unsigned int value, - void *data) -{ - _set_thread_return_value(thread, value); - thread->swap_data = data; -} - -#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES) - -extern void nano_cpu_atomic_idle(unsigned int); - -#define _is_in_isr() _IsInIsr() - -extern void _IntLibInit(void); - -#endif /* _ASMLANGUAGE */ - -#ifdef __cplusplus -} -#endif - -#endif /* _NANO_PRIVATE_H */ diff --git a/arch/arm/include/offsets_short_arch.h b/arch/arm/include/offsets_short_arch.h new file mode 100644 index 00000000000..a568737247a --- /dev/null +++ b/arch/arm/include/offsets_short_arch.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
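The offsets_short_arch.h header introduced here (its defines follow below) builds each _thread_offset_to_* symbol by adding two generated offsets: struct k_thread's offset to its embedded arch member, plus that member's own field offset. A standalone check of the arithmetic, with sketch types standing in for the generated symbols:

	#include <assert.h>
	#include <stddef.h>

	struct thread_arch_sketch { unsigned int basepri; };

	struct thread_sketch {
		int prio;
		struct thread_arch_sketch arch;
	};

	int main(void)
	{
		size_t composed = offsetof(struct thread_sketch, arch) +
				  offsetof(struct thread_arch_sketch, basepri);

		/* same result as addressing the nested member directly */
		assert(composed == offsetof(struct thread_sketch, arch.basepri));
		return 0;
	}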
+ */ + +#ifndef _offsets_short_arch__h_ +#define _offsets_short_arch__h_ + +#include + +/* kernel */ + +/* nothing for now */ + +/* end - kernel */ + +/* threads */ + +#define _thread_offset_to_basepri \ + (___thread_t_arch_OFFSET + ___thread_arch_t_basepri_OFFSET) + +#define _thread_offset_to_preempt_float \ + (___thread_t_arch_OFFSET + ___thread_arch_t_preempt_float_OFFSET) + +/* end - threads */ + +#endif /* _offsets_short_arch__h_ */ diff --git a/arch/arm/include/start_task_arch.h b/arch/arm/include/start_task_arch.h index 977d1918d54..f73a6138400 100644 --- a/arch/arm/include/start_task_arch.h +++ b/arch/arm/include/start_task_arch.h @@ -30,7 +30,7 @@ #include #include -#include +#include #include #ifdef __cplusplus diff --git a/arch/nios2/core/cpu_idle.c b/arch/nios2/core/cpu_idle.c index 55e62247368..5fbbdf8b297 100644 --- a/arch/nios2/core/cpu_idle.c +++ b/arch/nios2/core/cpu_idle.c @@ -15,7 +15,7 @@ */ #include -#include +#include /** * diff --git a/arch/nios2/core/crt0.S b/arch/nios2/core/crt0.S index 77be4f7af5e..d596ac59d09 100644 --- a/arch/nios2/core/crt0.S +++ b/arch/nios2/core/crt0.S @@ -16,7 +16,7 @@ #define _ASMLANGUAGE #include -#include +#include /* exports */ GTEXT(__start) diff --git a/arch/nios2/core/exception.S b/arch/nios2/core/exception.S index 8704f333320..d1a509fd751 100644 --- a/arch/nios2/core/exception.S +++ b/arch/nios2/core/exception.S @@ -16,8 +16,8 @@ #define _ASMLANGUAGE #include -#include -#include +#include +#include /* exports */ GTEXT(_exception) @@ -94,11 +94,11 @@ SECTION_FUNC(exception.entry, _exception) is_interrupt: /* If we get here, this is an interrupt */ - /* Grab a reference to _nanokernel in r10 so we can determine the + /* Grab a reference to _kernel in r10 so we can determine the * current irq stack pointer */ - movhi r10, %hi(_nanokernel) - ori r10, r10, %lo(_nanokernel) + movhi r10, %hi(_kernel) + ori r10, r10, %lo(_kernel) /* Stash a copy of thread's sp in r12 so that we can put it on the IRQ * stack @@ -106,7 +106,7 @@ is_interrupt: mov r12, sp /* Switch to interrupt stack */ - ldw sp, __tNANO_irq_sp_OFFSET(r10) + ldw sp, _kernel_offset_to_irq_stack(r10) /* Store thread stack pointer onto IRQ stack */ addi sp, sp, -4 @@ -122,21 +122,21 @@ on_irq_stack: /* Interrupt handler finished and the interrupt should be serviced * now, the appropriate bits in ipending should be cleared */ - /* Get a reference to _nanokernel again in r10 */ - movhi r10, %hi(_nanokernel) - ori r10, r10, %lo(_nanokernel) + /* Get a reference to _kernel again in r10 */ + movhi r10, %hi(_kernel) + ori r10, r10, %lo(_kernel) - ldw r11, __tNANO_current_OFFSET(r10) + ldw r11, _kernel_offset_to_current(r10) /* Determine whether the exception of the ISR requires context * switch */ /* Do not reschedule coop threads (threads that have negative prio) */ - ldw r12, __tTCS_prio_OFFSET(r11) + ldw r12, _thread_offset_to_prio(r11) blt r12, zero, no_reschedule /* Do not reschedule if scheduler is locked */ - ldw r12, __tTCS_sched_locked_OFFSET(r11) + ldw r12, _thread_offset_to_sched_locked(r11) bne r12, zero, no_reschedule /* Call into the kernel to see if a scheduling decision is necessary */ diff --git a/arch/nios2/core/fatal.c b/arch/nios2/core/fatal.c index 07cae1f6e76..27103b8a305 100644 --- a/arch/nios2/core/fatal.c +++ b/arch/nios2/core/fatal.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include diff --git a/arch/nios2/core/irq_manage.c b/arch/nios2/core/irq_manage.c index 67eee062ea3..1caa5a514e7 100644 --- a/arch/nios2/core/irq_manage.c +++ 
b/arch/nios2/core/irq_manage.c @@ -22,7 +22,7 @@ #include -#include +#include #include #include #include @@ -80,7 +80,7 @@ void _enter_irq(uint32_t ipending) { int index; - _nanokernel.nested++; + _kernel.nested++; #ifdef CONFIG_IRQ_OFFLOAD _irq_do_offload(); @@ -100,6 +100,6 @@ void _enter_irq(uint32_t ipending) ite->isr(ite->arg); } - _nanokernel.nested--; + _kernel.nested--; } diff --git a/arch/nios2/core/irq_offload.c b/arch/nios2/core/irq_offload.c index 687674a5d7f..ba2f59af9cc 100644 --- a/arch/nios2/core/irq_offload.c +++ b/arch/nios2/core/irq_offload.c @@ -15,7 +15,7 @@ */ #include -#include +#include #include volatile irq_offload_routine_t _offload_routine; diff --git a/arch/nios2/core/offsets/offsets.c b/arch/nios2/core/offsets/offsets.c index 1d2126307af..272ed183f54 100644 --- a/arch/nios2/core/offsets/offsets.c +++ b/arch/nios2/core/offsets/offsets.c @@ -35,27 +35,23 @@ #include -#include -#include - -/* Nios II specific tNANO structure member offsets */ -GEN_OFFSET_SYM(tNANO, irq_sp); -GEN_OFFSET_SYM(tNANO, nested); +#include +#include /* struct coop member offsets */ -GEN_OFFSET_SYM(t_coop, r16); -GEN_OFFSET_SYM(t_coop, r17); -GEN_OFFSET_SYM(t_coop, r18); -GEN_OFFSET_SYM(t_coop, r19); -GEN_OFFSET_SYM(t_coop, r20); -GEN_OFFSET_SYM(t_coop, r21); -GEN_OFFSET_SYM(t_coop, r22); -GEN_OFFSET_SYM(t_coop, r23); -GEN_OFFSET_SYM(t_coop, r28); -GEN_OFFSET_SYM(t_coop, ra); -GEN_OFFSET_SYM(t_coop, sp); -GEN_OFFSET_SYM(t_coop, key); -GEN_OFFSET_SYM(t_coop, retval); +GEN_OFFSET_SYM(_callee_saved_t, r16); +GEN_OFFSET_SYM(_callee_saved_t, r17); +GEN_OFFSET_SYM(_callee_saved_t, r18); +GEN_OFFSET_SYM(_callee_saved_t, r19); +GEN_OFFSET_SYM(_callee_saved_t, r20); +GEN_OFFSET_SYM(_callee_saved_t, r21); +GEN_OFFSET_SYM(_callee_saved_t, r22); +GEN_OFFSET_SYM(_callee_saved_t, r23); +GEN_OFFSET_SYM(_callee_saved_t, r28); +GEN_OFFSET_SYM(_callee_saved_t, ra); +GEN_OFFSET_SYM(_callee_saved_t, sp); +GEN_OFFSET_SYM(_callee_saved_t, key); +GEN_OFFSET_SYM(_callee_saved_t, retval); GEN_OFFSET_SYM(NANO_ESF, ra); GEN_OFFSET_SYM(NANO_ESF, r1); @@ -78,6 +74,6 @@ GEN_OFFSET_SYM(NANO_ESF, instr); GEN_ABSOLUTE_SYM(__NANO_ESF_SIZEOF, sizeof(NANO_ESF)); /* size of the struct tcs structure sans save area for floating point regs */ -GEN_ABSOLUTE_SYM(__tTCS_NOFLOAT_SIZEOF, sizeof(tTCS)); +GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread)); GEN_ABS_SYM_END diff --git a/arch/nios2/core/prep_c.c b/arch/nios2/core/prep_c.c index c5f896820a3..96099fbf807 100644 --- a/arch/nios2/core/prep_c.c +++ b/arch/nios2/core/prep_c.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include /** diff --git a/arch/nios2/core/reset.S b/arch/nios2/core/reset.S index 648972830e4..697aedd6c45 100644 --- a/arch/nios2/core/reset.S +++ b/arch/nios2/core/reset.S @@ -17,8 +17,8 @@ #define _ASMLANGUAGE #include -#include -#include +#include +#include GTEXT(__start) diff --git a/arch/nios2/core/swap.S b/arch/nios2/core/swap.S index 0cc5aa5f3c7..a6728f1e2ae 100644 --- a/arch/nios2/core/swap.S +++ b/arch/nios2/core/swap.S @@ -16,8 +16,8 @@ #define _ASMLANGUAGE #include -#include -#include +#include +#include /* exports */ GTEXT(_Swap) @@ -34,85 +34,85 @@ GTEXT(_k_neg_eagain) */ SECTION_FUNC(exception.other, _Swap) - /* Get a reference to _nanokernel in r10 */ - movhi r10, %hi(_nanokernel) - ori r10, r10, %lo(_nanokernel) + /* Get a reference to _kernel in r10 */ + movhi r10, %hi(_kernel) + ori r10, r10, %lo(_kernel) /* Get the pointer to nanokernel->current */ - ldw r11, __tNANO_current_OFFSET(r10) + ldw r11, 
_kernel_offset_to_current(r10) /* Store all the callee saved registers. We either got here via * an exception or from a cooperative invocation of _Swap() from C * domain, so all the caller-saved registers have already been * saved by the exception asm or the calling C code already. */ - stw r16, __tTCS_coopReg_OFFSET + __t_coop_r16_OFFSET(r11) - stw r17, __tTCS_coopReg_OFFSET + __t_coop_r17_OFFSET(r11) - stw r18, __tTCS_coopReg_OFFSET + __t_coop_r18_OFFSET(r11) - stw r19, __tTCS_coopReg_OFFSET + __t_coop_r19_OFFSET(r11) - stw r20, __tTCS_coopReg_OFFSET + __t_coop_r20_OFFSET(r11) - stw r21, __tTCS_coopReg_OFFSET + __t_coop_r21_OFFSET(r11) - stw r22, __tTCS_coopReg_OFFSET + __t_coop_r22_OFFSET(r11) - stw r23, __tTCS_coopReg_OFFSET + __t_coop_r23_OFFSET(r11) - stw r28, __tTCS_coopReg_OFFSET + __t_coop_r28_OFFSET(r11) - stw ra, __tTCS_coopReg_OFFSET + __t_coop_ra_OFFSET(r11) - stw sp, __tTCS_coopReg_OFFSET + __t_coop_sp_OFFSET(r11) + stw r16, _thread_offset_to_r16(r11) + stw r17, _thread_offset_to_r17(r11) + stw r18, _thread_offset_to_r18(r11) + stw r19, _thread_offset_to_r19(r11) + stw r20, _thread_offset_to_r20(r11) + stw r21, _thread_offset_to_r21(r11) + stw r22, _thread_offset_to_r22(r11) + stw r23, _thread_offset_to_r23(r11) + stw r28, _thread_offset_to_r28(r11) + stw ra, _thread_offset_to_ra(r11) + stw sp, _thread_offset_to_sp(r11) /* r4 has the 'key' argument which is the result of irq_lock() * before this was called */ - stw r4, __tTCS_coopReg_OFFSET + __t_coop_key_OFFSET(r11) + stw r4, _thread_offset_to_key(r11) /* Populate default return value */ movhi r5, %hi(_k_neg_eagain) ori r5, r5, %lo(_k_neg_eagain) ldw r4, (r5) - stw r4, __tTCS_coopReg_OFFSET + __t_coop_retval_OFFSET(r11) + stw r4, _thread_offset_to_retval(r11) #if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH call _sys_k_event_logger_context_switch /* Restore caller-saved r10. 
We could have stuck its value * onto the stack, but less instructions to just use immediates */ - movhi r10, %hi(_nanokernel) - ori r10, r10, %lo(_nanokernel) + movhi r10, %hi(_kernel) + ori r10, r10, %lo(_kernel) #endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */ - /* Assign to _nanokernel.current the return value of + /* Assign to _kernel.current the return value of * _get_next_ready_thread() */ call _get_next_ready_thread - movhi r10, %hi(_nanokernel) - ori r10, r10, %lo(_nanokernel) - stw r2, __tNANO_current_OFFSET(r10) + movhi r10, %hi(_kernel) + ori r10, r10, %lo(_kernel) + stw r2, _kernel_offset_to_current(r10) /* At this point r2 points to the next thread to be swapped in */ /* Restore callee-saved registers and switch to the incoming * thread's stack */ - ldw r16, __tTCS_coopReg_OFFSET + __t_coop_r16_OFFSET(r2) - ldw r17, __tTCS_coopReg_OFFSET + __t_coop_r17_OFFSET(r2) - ldw r18, __tTCS_coopReg_OFFSET + __t_coop_r18_OFFSET(r2) - ldw r19, __tTCS_coopReg_OFFSET + __t_coop_r19_OFFSET(r2) - ldw r20, __tTCS_coopReg_OFFSET + __t_coop_r20_OFFSET(r2) - ldw r21, __tTCS_coopReg_OFFSET + __t_coop_r21_OFFSET(r2) - ldw r22, __tTCS_coopReg_OFFSET + __t_coop_r22_OFFSET(r2) - ldw r23, __tTCS_coopReg_OFFSET + __t_coop_r23_OFFSET(r2) - ldw r28, __tTCS_coopReg_OFFSET + __t_coop_r28_OFFSET(r2) - ldw ra, __tTCS_coopReg_OFFSET + __t_coop_ra_OFFSET(r2) - ldw sp, __tTCS_coopReg_OFFSET + __t_coop_sp_OFFSET(r2) + ldw r16, _thread_offset_to_r16(r2) + ldw r17, _thread_offset_to_r17(r2) + ldw r18, _thread_offset_to_r18(r2) + ldw r19, _thread_offset_to_r19(r2) + ldw r20, _thread_offset_to_r20(r2) + ldw r21, _thread_offset_to_r21(r2) + ldw r22, _thread_offset_to_r22(r2) + ldw r23, _thread_offset_to_r23(r2) + ldw r28, _thread_offset_to_r28(r2) + ldw ra, _thread_offset_to_ra(r2) + ldw sp, _thread_offset_to_sp(r2) /* We need to irq_unlock(current->coopReg.key); * key was supplied as argument to _Swap(). Fetch it. */ - ldw r3, __tTCS_coopReg_OFFSET + __t_coop_key_OFFSET(r2) + ldw r3, _thread_offset_to_key(r2) /* Load return value into r2 (return value register). -EAGAIN * unless someone previously called fiberRtnValueSet(). Do this * before we potentially unlock interrupts. */ - ldw r2, __tTCS_coopReg_OFFSET + __t_coop_retval_OFFSET(r2) + ldw r2, _thread_offset_to_retval(r2) /* Now do irq_unlock(current->coopReg.key) */ #if (ALT_CPU_NUM_OF_SHADOW_REG_SETS > 0) || \ diff --git a/arch/nios2/core/thread.c b/arch/nios2/core/thread.c index 40a2f247697..b76797cc1ad 100644 --- a/arch/nios2/core/thread.c +++ b/arch/nios2/core/thread.c @@ -14,30 +14,27 @@ * limitations under the License. */ -#include /* public kernel API */ -#include <../../../kernel/unified/include/nano_internal.h> - -#include +#include +#include +#include #include #include -tNANO _nanokernel = {0}; - #if defined(CONFIG_THREAD_MONITOR) /* * Add a thread to the kernel's list of active threads. 
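thread_monitor_init(), shown next, prepends each new thread to a singly linked list rooted at _kernel.threads. A consumer of that list might look like the following sketch (stub types; real code would hold irq_lock() across the walk, just as the insert side does):

	#include <stddef.h>

	struct thread_sketch {
		struct thread_sketch *next_thread;
	};

	struct kernel_sketch {
		struct thread_sketch *threads;
	};

	static struct kernel_sketch kernel_sketch;

	static int count_threads(void)
	{
		int n = 0;

		for (struct thread_sketch *t = kernel_sketch.threads;
		     t != NULL; t = t->next_thread) {
			n++;
		}
		return n;
	}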
*/ -static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs) +static ALWAYS_INLINE void thread_monitor_init(struct k_thread *thread) { unsigned int key; key = irq_lock(); - tcs->next_thread = _nanokernel.threads; - _nanokernel.threads = tcs; + thread->next_thread = _kernel.threads; + _kernel.threads = thread; irq_unlock(key); } #else -#define thread_monitor_init(tcs) \ +#define thread_monitor_init(thread) \ do {/* do nothing */ \ } while ((0)) #endif /* CONFIG_THREAD_MONITOR */ @@ -70,7 +67,7 @@ void _new_thread(char *stack_memory, unsigned stack_size, { _ASSERT_VALID_PRIO(priority, thread_func); - struct tcs *tcs; + struct k_thread *thread; struct init_stack_frame *iframe; #ifdef CONFIG_INIT_STACKS @@ -86,32 +83,32 @@ void _new_thread(char *stack_memory, unsigned stack_size, iframe->arg2 = arg2; iframe->arg3 = arg3; - /* Initialize various struct tcs members */ - tcs = (struct tcs *)stack_memory; - tcs->prio = priority; + /* Initialize various struct k_thread members */ + thread = (struct k_thread *)stack_memory; + thread->base.prio = priority; /* k_q_node initialized upon first insertion in a list */ - tcs->flags = options | K_PRESTART; - tcs->sched_locked = 0; + thread->base.flags = options | K_PRESTART; + thread->base.sched_locked = 0; /* static threads overwrite it afterwards with real value */ - tcs->init_data = NULL; - tcs->fn_abort = NULL; + thread->init_data = NULL; + thread->fn_abort = NULL; #ifdef CONFIG_THREAD_CUSTOM_DATA /* Initialize custom data field (value is opaque to kernel) */ - tcs->custom_data = NULL; + thread->custom_data = NULL; #endif ARG_UNUSED(uk_task_ptr); - tcs->coopReg.sp = (uint32_t)iframe; - tcs->coopReg.ra = (uint32_t)_thread_entry_wrapper; - tcs->coopReg.key = NIOS2_STATUS_PIE_MSK; - /* Leave the rest of tcs->coopReg junk */ + thread->callee_saved.sp = (uint32_t)iframe; + thread->callee_saved.ra = (uint32_t)_thread_entry_wrapper; + thread->callee_saved.key = NIOS2_STATUS_PIE_MSK; + /* Leave the rest of thread->callee_saved junk */ #ifdef CONFIG_NANO_TIMEOUTS - _nano_timeout_tcs_init(tcs); + _nano_timeout_thread_init(thread); #endif - thread_monitor_init(tcs); + thread_monitor_init(thread); } diff --git a/arch/nios2/include/kernel_arch_data.h b/arch/nios2/include/kernel_arch_data.h new file mode 100644 index 00000000000..0a468714f88 --- /dev/null +++ b/arch/nios2/include/kernel_arch_data.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2016 Intel Corporation + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Private kernel definitions + * + * This file contains private kernel structures definitions and various + * other definitions for the Nios II processor architecture. + * + * This file is also included by assembly language files which must #define + * _ASMLANGUAGE before including this header file. Note that kernel + * assembly source files obtains structure offset values via "absolute + * symbols" in the offsets.o module. 
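The "absolute symbols" these header comments keep referring to are assembler-level constants emitted from a C translation unit, so .S files can use structure offsets without ever seeing the C definitions. A minimal sketch of the trick, assuming a GNU toolchain (the real macros live in the gen_offset headers):

	#include <stddef.h>

	struct thread_sketch {
		int prio;
		unsigned int basepri;
	};

	/* emit NAME as a global assembler constant equal to VALUE */
	#define GEN_ABSOLUTE_SYM_SKETCH(name, value) \
		__asm__(".globl " #name "\n\t.equ " #name ", %c0" : : "n"(value))

	void gen_offsets(void)
	{
		GEN_ABSOLUTE_SYM_SKETCH(__thread_sketch_basepri_OFFSET,
					offsetof(struct thread_sketch, basepri));
	}

Compiling this file to offsets.o makes __thread_sketch_basepri_OFFSET visible to any assembly file linked against it.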
+ */ + +#ifndef _kernel_arch_data__h_ +#define _kernel_arch_data__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +#ifndef _ASMLANGUAGE +#include +#include +#include +#include +#include +#endif + +/* Bitmask definitions for the struct tcs->flags bit field */ +#define K_STATIC 0x00000800 + +#define K_READY 0x00000000 /* Thread is ready to run */ +#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */ +#define K_PENDING 0x00002000 /* Thread is waiting on an object */ +#define K_PRESTART 0x00004000 /* Thread has not yet started */ +#define K_DEAD 0x00008000 /* Thread has terminated */ +#define K_SUSPENDED 0x00010000 /* Thread is suspended */ +#define K_DUMMY 0x00020000 /* Not a real thread */ +#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \ + K_DEAD | K_SUSPENDED | K_DUMMY) + +#define INT_ACTIVE 0x002 /* 1 = executing context is interrupt handler */ +#define EXC_ACTIVE 0x004 /* 1 = executing context is exception handler */ +#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */ +#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */ +#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */ + +/* stacks */ + +#define STACK_ALIGN_SIZE 4 + +#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE) +#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE) + +#ifndef _ASMLANGUAGE + +struct _caller_saved { + /* + * Nothing here, the exception code puts all the caller-saved + * registers onto the stack. + */ +}; + +typedef struct _caller_saved _caller_saved_t; + +struct _callee_saved { + /* General purpose callee-saved registers */ + uint32_t r16; + uint32_t r17; + uint32_t r18; + uint32_t r19; + uint32_t r20; + uint32_t r21; + uint32_t r22; + uint32_t r23; + + /* Normally used for the frame pointer but also a general purpose + * register if frame pointers omitted + */ + uint32_t r28; + + /* Return address */ + uint32_t ra; + + /* Stack pointer */ + uint32_t sp; + + /* IRQ status before irq_lock() and call to _Swap() */ + uint32_t key; + + /* Return value of _Swap() */ + uint32_t retval; +}; + +typedef struct _callee_saved _callee_saved_t; + +struct _thread_arch { + /* nothing for now */ +}; + +typedef struct _thread_arch _thread_arch_t; + +struct _kernel_arch { + /* nothing for now */ +}; + +typedef struct _kernel_arch _kernel_arch_t; + +extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE]; + +#endif /* _ASMLANGUAGE */ + +#ifdef __cplusplus +} +#endif + +#endif /* _kernel_arch_data__h_ */ diff --git a/arch/nios2/include/kernel_arch_func.h b/arch/nios2/include/kernel_arch_func.h new file mode 100644 index 00000000000..2f0699283cf --- /dev/null +++ b/arch/nios2/include/kernel_arch_func.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file + * @brief Private kernel definitions + * + * This file contains private kernel function/macro definitions and various + * other definitions for the Nios II processor architecture. + * + * This file is also included by assembly language files which must #define + * _ASMLANGUAGE before including this header file. Note that kernel + * assembly source files obtains structure offset values via "absolute + * symbols" in the offsets.o module. + */ + +#ifndef _kernel_arch_func__h_ +#define _kernel_arch_func__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _ASMLANGUAGE + +void nano_cpu_idle(void); +void nano_cpu_atomic_idle(unsigned int key); + +static ALWAYS_INLINE void nanoArchInit(void) +{ + _kernel.irq_stack = _interrupt_stack + CONFIG_ISR_STACK_SIZE; +} + +static ALWAYS_INLINE void +_set_thread_return_value(struct k_thread *thread, unsigned int value) +{ + thread->callee_saved.retval = value; +} + +static inline void _IntLibInit(void) +{ + /* No special initialization of the interrupt subsystem required */ +} + +FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason, + const NANO_ESF * esf); + + +#define _is_in_isr() (_kernel.nested != 0) + +#ifdef CONFIG_IRQ_OFFLOAD +void _irq_do_offload(void); +#endif + +#if ALT_CPU_ICACHE_SIZE > 0 +void _nios2_icache_flush_all(void); +#else +#define _nios2_icache_flush_all() do { } while (0) +#endif + +#if ALT_CPU_DCACHE_SIZE > 0 +void _nios2_dcache_flush_all(void); +#else +#define _nios2_dcache_flush_all() do { } while (0) +#endif + +#endif /* _ASMLANGUAGE */ + +#ifdef __cplusplus +} +#endif + +#endif /* _kernel_arch_func__h_ */ diff --git a/arch/nios2/include/nano_private.h b/arch/nios2/include/nano_private.h deleted file mode 100644 index 2a3f5607847..00000000000 --- a/arch/nios2/include/nano_private.h +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Copyright (c) 2016 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @file - * @brief Private nanokernel definitions - * - * This file contains private nanokernel structures definitions and various - * other definitions for the Nios II processor architecture. - * - * This file is also included by assembly language files which must #define - * _ASMLANGUAGE before including this header file. Note that nanokernel - * assembly source files obtains structure offset values via "absolute - * symbols" in the offsets.o module. 
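On Nios II the _Swap() return value travels through callee_saved.retval: the swap path seeds it with -EAGAIN (from _k_neg_eagain), and a waker may overwrite it through _set_thread_return_value() before the blocked thread runs again. Sketched with stub types:

	#include <errno.h>
	#include <stdint.h>

	struct callee_saved_sketch { uint32_t retval; };
	struct thread_sketch { struct callee_saved_sketch callee_saved; };

	static void seed_default(struct thread_sketch *outgoing)
	{
		/* what storing _k_neg_eagain into retval accomplishes */
		outgoing->callee_saved.retval = (uint32_t)-EAGAIN;
	}

	static void set_retval(struct thread_sketch *thread, unsigned int value)
	{
		/* mirrors _set_thread_return_value() in kernel_arch_func.h */
		thread->callee_saved.retval = value;
	}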
- */ - -#ifndef _NANO_PRIVATE_H -#define _NANO_PRIVATE_H - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -#ifndef _ASMLANGUAGE -#include /* public kernel API */ -#include <../../../kernel/unified/include/nano_internal.h> -#include -#include -#include -#endif - -/* Bitmask definitions for the struct tcs->flags bit field */ -#define K_STATIC 0x00000800 - -#define K_READY 0x00000000 /* Thread is ready to run */ -#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */ -#define K_PENDING 0x00002000 /* Thread is waiting on an object */ -#define K_PRESTART 0x00004000 /* Thread has not yet started */ -#define K_DEAD 0x00008000 /* Thread has terminated */ -#define K_SUSPENDED 0x00010000 /* Thread is suspended */ -#define K_DUMMY 0x00020000 /* Not a real thread */ -#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \ - K_DEAD | K_SUSPENDED | K_DUMMY) - -#define INT_ACTIVE 0x002 /* 1 = executing context is interrupt handler */ -#define EXC_ACTIVE 0x004 /* 1 = executing context is exception handler */ -#define K_FP_REGS 0x010 /* 1 = thread uses floating point registers */ -#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */ -#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */ - -/* stacks */ - -#define STACK_ALIGN_SIZE 4 - -#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE) -#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE) - -#ifndef _ASMLANGUAGE - -/* - * The following structure defines the set of 'non-volatile' or 'callee saved' - * integer registers. These registers must be preserved by a called C - * function. These are the only registers that need to be saved/restored when - * a cooperative context switch occurs. - */ -struct s_coop { - /* General purpose callee-saved registers */ - uint32_t r16; - uint32_t r17; - uint32_t r18; - uint32_t r19; - uint32_t r20; - uint32_t r21; - uint32_t r22; - uint32_t r23; - - /* Normally used for the frame pointer but also a general purpose - * register if frame pointers omitted - */ - uint32_t r28; - - uint32_t ra; /* Return address */ - uint32_t sp; /* Stack pointer */ - uint32_t key; /* IRQ status before irq_lock() and call to _Swap() */ - uint32_t retval; /* Return value of _Swap() */ -}; -typedef struct s_coop t_coop; - -/* - * The following structure defines the set of caller-saved integer registers. - * These registers need not be preserved by a called C function. Given that - * they are not preserved across function calls, they must be save/restored - * (along with the struct coop regs) when a preemptive context switch occurs. 
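That caller-saved/callee-saved split is what keeps a cooperative switch cheap: _Swap() is entered as an ordinary function call, so the compiler has already spilled every volatile register it cared about. The usual caller-side contract, sketched with extern stubs for the real primitives:

	extern unsigned int irq_lock(void);
	extern void irq_unlock(unsigned int key);
	extern int _Swap(unsigned int key);

	static int block_if_needed(int must_block)
	{
		unsigned int key = irq_lock();

		if (must_block) {
			/* _Swap() consumes the key; interrupt state is
			 * restored when this thread is switched back in
			 */
			return _Swap(key);
		}

		irq_unlock(key);
		return 0;
	}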
- */ -struct preempt { - /* Nothing here, the exception code puts all the caller-saved registers - * onto the stack - */ -}; - - -/* 'struct tcs_base' must match the beginning of 'struct tcs' */ -struct tcs_base { - sys_dnode_t k_q_node; - uint32_t flags; - int prio; /* thread priority used to sort linked list */ - void *swap_data; -#ifdef CONFIG_NANO_TIMEOUTS - struct _timeout timeout; -#endif -}; - - -struct tcs { - sys_dnode_t k_q_node; /* node object in any kernel queue */ - int flags; - int prio; /* thread priority used to sort linked list */ - void *swap_data; -#ifdef CONFIG_NANO_TIMEOUTS - struct _timeout timeout; -#endif - struct preempt preempReg; - t_coop coopReg; - -#ifdef CONFIG_ERRNO - int errno_var; -#endif -#if defined(CONFIG_THREAD_MONITOR) - struct __thread_entry *entry; /* thread entry and parameters description */ - struct tcs *next_thread; /* next item in list of ALL fiber+tasks */ -#endif -#ifdef CONFIG_THREAD_CUSTOM_DATA - void *custom_data; /* available for custom use */ -#endif - atomic_t sched_locked; - void *init_data; - void (*fn_abort)(void); -}; - - -struct ready_q { - struct k_thread *cache; - uint32_t prio_bmap[1]; - sys_dlist_t q[K_NUM_PRIORITIES]; -}; - - -struct s_NANO { - struct tcs *current; /* currently scheduled thread (fiber or task) */ - -#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS) - sys_dlist_t timeout_q; -#endif -#if defined(CONFIG_THREAD_MONITOR) - struct tcs *threads; /* singly linked list of ALL fiber+tasks */ -#endif - struct ready_q ready_q; - /* Nios II-specific members */ - - char *irq_sp; /* Interrupt stack pointer */ - uint32_t nested; /* IRQ/exception nest level */ -}; - -typedef struct s_NANO tNANO; -extern tNANO _nanokernel; -extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE]; - - -/* Arch-specific nanokernel APIs */ -void nano_cpu_idle(void); -void nano_cpu_atomic_idle(unsigned int key); - -static ALWAYS_INLINE void nanoArchInit(void) -{ - _nanokernel.irq_sp = _interrupt_stack + CONFIG_ISR_STACK_SIZE; -} - -static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber, - unsigned int value) -{ - fiber->coopReg.retval = value; -} - -#define _current _nanokernel.current -#define _ready_q _nanokernel.ready_q -#define _timeout_q _nanokernel.timeout_q -#define _set_thread_return_value fiberRtnValueSet - -static ALWAYS_INLINE void -_set_thread_return_value_with_data(struct k_thread *thread, unsigned int value, - void *data) -{ - _set_thread_return_value(thread, value); - thread->swap_data = data; -} - -#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES) - - -static inline void _IntLibInit(void) -{ - /* No special initialization of the interrupt subsystem required */ -} - -FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason, - const NANO_ESF *esf); - - -#define _is_in_isr() (_nanokernel.nested != 0) - -#ifdef CONFIG_IRQ_OFFLOAD -void _irq_do_offload(void); -#endif - -#if ALT_CPU_ICACHE_SIZE > 0 -void _nios2_icache_flush_all(void); -#else -#define _nios2_icache_flush_all() do { } while (0) -#endif - -#if ALT_CPU_DCACHE_SIZE > 0 -void _nios2_dcache_flush_all(void); -#else -#define _nios2_dcache_flush_all() do { } while (0) -#endif - -#endif /* _ASMLANGUAGE */ - -#endif /* _NANO_PRIVATE_H */ - diff --git a/arch/nios2/include/offsets_short_arch.h b/arch/nios2/include/offsets_short_arch.h new file mode 100644 index 00000000000..47cd7dc3fe4 --- /dev/null +++ b/arch/nios2/include/offsets_short_arch.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _offsets_short_arch__h_ +#define _offsets_short_arch__h_ + +#include + +/* kernel */ + +/* nothing for now */ + +/* end - kernel */ + +/* threads */ + +#define _thread_offset_to_r16 \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r16_OFFSET) + +#define _thread_offset_to_r17 \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r17_OFFSET) + +#define _thread_offset_to_r18 \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r18_OFFSET) + +#define _thread_offset_to_r19 \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r19_OFFSET) + +#define _thread_offset_to_r20 \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r20_OFFSET) + +#define _thread_offset_to_r21 \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r21_OFFSET) + +#define _thread_offset_to_r22 \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r22_OFFSET) + +#define _thread_offset_to_r23 \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r23_OFFSET) + +#define _thread_offset_to_r28 \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_r28_OFFSET) + +#define _thread_offset_to_ra \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_ra_OFFSET) + +#define _thread_offset_to_sp \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET) + +#define _thread_offset_to_key \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_key_OFFSET) + +#define _thread_offset_to_retval \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_retval_OFFSET) + +/* end - threads */ + +#endif /* _offsets_short_arch__h_ */ diff --git a/arch/nios2/include/start_task_arch.h b/arch/nios2/include/start_task_arch.h index fe4c215431a..403d097e886 100644 --- a/arch/nios2/include/start_task_arch.h +++ b/arch/nios2/include/start_task_arch.h @@ -30,7 +30,7 @@ #include #include -#include +#include #include #ifdef __cplusplus diff --git a/arch/x86/core/Makefile b/arch/x86/core/Makefile index ba37fef8b9e..b7d5a62d247 100644 --- a/arch/x86/core/Makefile +++ b/arch/x86/core/Makefile @@ -1,4 +1,5 @@ ccflags-y += -I$(srctree)/kernel/unified/include +asflags-y += -I$(srctree)/kernel/unified/include ifeq ($(COMPILER)$(CONFIG_X86_IAMCU),clang) # We rely on GAS for assembling, so don't use the integrated assembler diff --git a/arch/x86/core/debug/debug_frames.c b/arch/x86/core/debug/debug_frames.c index 443f26a1045..7fa22a5a659 100644 --- a/arch/x86/core/debug/debug_frames.c +++ b/arch/x86/core/debug/debug_frames.c @@ -23,9 +23,9 @@ */ #include -#include +#include NANO_ISF *sys_debug_current_isf_get(void) { - return _nanokernel.isf; + return _kernel.isf; } diff --git a/arch/x86/core/excstub.S b/arch/x86/core/excstub.S index 706af29d82f..7d4a9303fa5 100644 --- a/arch/x86/core/excstub.S +++ b/arch/x86/core/excstub.S @@ -26,11 +26,10 @@ #define _ASMLANGUAGE -#include +#include #include #include /* For MK_ISR_NAME */ -#include /* nanokernel structure offset definitions */ - +#include /* exports (internal APIs) */ @@ -122,9 +121,10 @@ SECTION_FUNC(TEXT, 
_exception_enter) #if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO) - movl _nanokernel + __tNANO_current_OFFSET, %edx + movl _kernel + _kernel_offset_to_current, %edx - incl __tTCS_excNestCount_OFFSET(%edx) /* inc exception nest count */ + /* inc exception nest count */ + incl _thread_offset_to_excNestCount(%edx) #ifdef CONFIG_GDB_INFO @@ -135,9 +135,9 @@ SECTION_FUNC(TEXT, _exception_enter) * registers and the stack of the preempted thread. */ - testl $EXC_ACTIVE, __tTCS_flags_OFFSET (%edx) + testl $EXC_ACTIVE, _thread_offset_to_flags(%edx) jne alreadyInException - movl %esp, __tTCS_esfPtr_OFFSET(%edx) + movl %esp, _thread_offset_to_esf(%edx) alreadyInException: @@ -151,7 +151,7 @@ alreadyInException: * handled in the event of a context switch. */ - orl $EXC_ACTIVE, __tTCS_flags_OFFSET(%edx) + orl $EXC_ACTIVE, _thread_offset_to_flags(%edx) #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */ @@ -185,7 +185,7 @@ allDone: #if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO) - movl _nanokernel + __tNANO_current_OFFSET, %ecx + movl _kernel + _kernel_offset_to_current, %ecx /* * Must lock interrupts to prevent outside interference. @@ -199,18 +199,18 @@ allDone: * Determine whether exiting from a nested interrupt. */ - decl __tTCS_excNestCount_OFFSET(%ecx) /* dec exception nest count */ + decl _thread_offset_to_excNestCount(%ecx) - cmpl $0, __tTCS_excNestCount_OFFSET(%ecx) + cmpl $0, _thread_offset_to_excNestCount(%ecx) jne nestedException /* - * Clear the EXC_ACTIVE bit in the tTCS of the current execution context - * if we are not in a nested exception (ie, when we exit the outermost - * exception). + * Clear the EXC_ACTIVE bit in the k_thread of the current execution + * context if we are not in a nested exception (ie, when we exit the + * outermost exception). */ - andl $~EXC_ACTIVE, __tTCS_flags_OFFSET (%ecx) + andl $~EXC_ACTIVE, _thread_offset_to_flags (%ecx) nestedException: #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */ diff --git a/arch/x86/core/fatal.c b/arch/x86/core/fatal.c index 89757a2161c..821f42ceff9 100644 --- a/arch/x86/core/fatal.c +++ b/arch/x86/core/fatal.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/x86/core/float.c b/arch/x86/core/float.c index a7cca453faa..ef6e9c10ecc 100644 --- a/arch/x86/core/float.c +++ b/arch/x86/core/float.c @@ -49,7 +49,7 @@ * to enable FP register sharing on its behalf. */ -#include +#include #include #include @@ -75,12 +75,12 @@ extern uint32_t _sse_mxcsr_default_value; static void _FpCtxSave(struct tcs *tcs) { #ifdef CONFIG_SSE - if (tcs->flags & K_SSE_REGS) { - _do_fp_and_sse_regs_save(&tcs->preempFloatReg); + if (tcs->base.flags & K_SSE_REGS) { + _do_fp_and_sse_regs_save(&tcs->arch.preempFloatReg); return; } #endif - _do_fp_regs_save(&tcs->preempFloatReg); + _do_fp_regs_save(&tcs->arch.preempFloatReg); } /** @@ -98,7 +98,7 @@ static inline void _FpCtxInit(struct tcs *tcs) { _do_fp_regs_init(); #ifdef CONFIG_SSE - if (tcs->flags & K_SSE_REGS) { + if (tcs->base.flags & K_SSE_REGS) { _do_sse_regs_init(); } #endif @@ -152,7 +152,7 @@ void k_float_enable(struct tcs *tcs, unsigned int options) /* Indicate thread requires floating point context saving */ - tcs->flags |= options; + tcs->base.flags |= options; /* * The current thread might not allow FP instructions, so clear CR0[TS] @@ -168,9 +168,9 @@ void k_float_enable(struct tcs *tcs, unsigned int options) * must be preserved). 
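The ownership test that follows is the heart of the lazy scheme: only the thread recorded in _kernel.current_fp has live FPU state, and it needs an explicit dump only if it was switched out preemptively. A stub-typed sketch of that decision (flag values borrowed from the Nios II header above and assumed identical here):

	#include <stddef.h>

	#define INT_ACTIVE 0x002
	#define EXC_ACTIVE 0x004
	#define INT_OR_EXC_MASK (INT_ACTIVE | EXC_ACTIVE)

	struct thread_sketch { unsigned int flags; };

	/* stands in for _kernel.current_fp */
	static struct thread_sketch *current_fp;

	static void fp_ctx_save(struct thread_sketch *t)
	{
		(void)t; /* actual FPU register dump elided */
	}

	static void save_owner_if_preempted(void)
	{
		struct thread_sketch *owner = current_fp;

		/* a cooperatively switched-out owner saved its own state */
		if (owner != NULL && (owner->flags & INT_OR_EXC_MASK) != 0) {
			fp_ctx_save(owner);
		}
	}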
*/ - fp_owner = _nanokernel.current_fp; + fp_owner = _kernel.current_fp; if (fp_owner) { - if (fp_owner->flags & INT_OR_EXC_MASK) { + if (fp_owner->base.flags & INT_OR_EXC_MASK) { _FpCtxSave(fp_owner); } } @@ -181,7 +181,7 @@ void k_float_enable(struct tcs *tcs, unsigned int options) /* Associate the new FP context with the specified thread */ - if (tcs == _nanokernel.current) { + if (tcs == _current) { /* * When enabling FP support for the current thread, just claim * ownership of the FPU and leave CR0[TS] unset. @@ -189,14 +189,14 @@ void k_float_enable(struct tcs *tcs, unsigned int options) * (The FP context is "live" in hardware, not saved in TCS.) */ - _nanokernel.current_fp = tcs; + _kernel.current_fp = tcs; } else { /* * When enabling FP support for someone else, assign ownership * of the FPU to them (unless we need it ourselves). */ - if ((_nanokernel.current->flags & _FP_USER_MASK) == 0) { + if ((_current->base.flags & _FP_USER_MASK) == 0) { /* * We are not FP-capable, so mark FPU as owned by the * thread we've just enabled FP support for, then @@ -204,7 +204,7 @@ void k_float_enable(struct tcs *tcs, unsigned int options) * to its original state. */ - _nanokernel.current_fp = tcs; + _kernel.current_fp = tcs; _FpAccessDisable(); } else { /* @@ -263,14 +263,14 @@ void k_float_disable(struct tcs *tcs) /* Disable all floating point capabilities for the thread */ - tcs->flags &= ~_FP_USER_MASK; + tcs->base.flags &= ~_FP_USER_MASK; - if (tcs == _nanokernel.current) { + if (tcs == _current) { _FpAccessDisable(); - _nanokernel.current_fp = (struct tcs *)0; + _kernel.current_fp = (struct tcs *)0; } else { - if (_nanokernel.current_fp == tcs) - _nanokernel.current_fp = (struct tcs *)0; + if (_kernel.current_fp == tcs) + _kernel.current_fp = (struct tcs *)0; } irq_unlock(imask); @@ -306,7 +306,7 @@ void _FpNotAvailableExcHandler(NANO_ESF *pEsf) /* Enable highest level of FP capability configured into the kernel */ - k_float_enable(_nanokernel.current, _FP_USER_MASK); + k_float_enable(_current, _FP_USER_MASK); } _EXCEPTION_CONNECT_NOCODE(_FpNotAvailableExcHandler, IV_DEVICE_NOT_AVAILABLE); diff --git a/arch/x86/core/gdt.c b/arch/x86/core/gdt.c index 14d6f684b3f..3c01cff1295 100644 --- a/arch/x86/core/gdt.c +++ b/arch/x86/core/gdt.c @@ -26,7 +26,7 @@ #include #include -#include +#include #include #include diff --git a/arch/x86/core/intstub.S b/arch/x86/core/intstub.S index fc63b340940..7004e846e9f 100644 --- a/arch/x86/core/intstub.S +++ b/arch/x86/core/intstub.S @@ -26,9 +26,9 @@ #define _ASMLANGUAGE -#include +#include #include -#include /* nanokernel structure offset definitions */ +#include #include /* _NANO_ERR_SPURIOUS_INT */ #include @@ -59,7 +59,7 @@ * * This function is called from the interrupt stub created by IRQ_CONNECT() * to inform the kernel of an interrupt. This routine increments - * _nanokernel.nested (to support interrupt nesting), switches to the + * _kernel.nested (to support interrupt nesting), switches to the * base of the interrupt stack, if not already on the interrupt stack, and then * saves the volatile integer registers onto the stack. 
Finally, control is * returned back to the interrupt stub code (which will then invoke the @@ -185,24 +185,29 @@ SECTION_FUNC(TEXT, _interrupt_enter) popl %eax #endif - /* load %ecx with &_nanokernel */ + /* load %ecx with &_kernel */ - movl $_nanokernel, %ecx + movl $_kernel, %ecx /* switch to the interrupt stack for the non-nested case */ - incl __tNANO_nested_OFFSET(%ecx) /* inc interrupt nest count */ - cmpl $1, __tNANO_nested_OFFSET(%ecx) /* use int stack if !nested */ + incl _kernel_offset_to_nested(%ecx) + + /* use interrupt stack if not nested */ + cmpl $1, _kernel_offset_to_nested(%ecx) #ifdef CONFIG_DEBUG_INFO jne nested_save_isf #else jne alreadyOnIntStack #endif - /* switch to base of the interrupt stack */ + /* + * switch to base of the interrupt stack: save esp in edi, then load + * irq_stack pointer + */ - movl %esp, %edi /* save current thread stack pointer */ - movl __tNANO_common_isp_OFFSET(%ecx), %esp /* load new sp value */ + movl %esp, %edi + movl _kernel_offset_to_irq_stack(%ecx), %esp /* save thread's stack pointer onto base of interrupt stack */ @@ -213,14 +218,14 @@ SECTION_FUNC(TEXT, _interrupt_enter) /* * The saved stack pointer happens to match the address of the * interrupt stack frame. To simplify the exit case, push a dummy ISF - * for the "old" ISF and save it to the _nanokernel.isf. + * for the "old" ISF and save it to the _kernel.isf. */ pushl %edi - movl %edi, __tNANO_isf_OFFSET(%ecx) + movl %edi, _kernel_offset_to_isf(%ecx) #endif #ifdef CONFIG_SYS_POWER_MANAGEMENT - cmpl $0, __tNANO_idle_OFFSET(%ecx) + cmpl $0, _kernel_offset_to_idle(%ecx) jne handle_idle /* fast path is !idle, in the pipeline */ #endif /* CONFIG_SYS_POWER_MANAGEMENT */ @@ -229,8 +234,8 @@ SECTION_FUNC(TEXT, _interrupt_enter) jmp alreadyOnIntStack nested_save_isf: - movl __tNANO_isf_OFFSET(%ecx), %edi /* Get old ISF */ - movl %esp, __tNANO_isf_OFFSET(%ecx) /* Save new ISF */ + movl _kernel_offset_to_isf(%ecx), %edi /* Get old ISF */ + movl %esp, _kernel_offset_to_isf(%ecx) /* Save new ISF */ pushl %edi /* Save old ISF */ #endif @@ -272,15 +277,15 @@ alreadyOnIntStack: #endif /* determine whether exiting from a nested interrupt */ - movl $_nanokernel, %ecx + movl $_kernel, %ecx #ifdef CONFIG_DEBUG_INFO - popl __tNANO_isf_OFFSET(%ecx) /* Restore old ISF */ + popl _kernel_offset_to_isf(%ecx) /* Restore old ISF */ #endif - decl __tNANO_nested_OFFSET(%ecx) /* dec interrupt nest count */ + decl _kernel_offset_to_nested(%ecx) /* dec interrupt nest count */ jne nestedInterrupt /* 'iret' if nested case */ - movl __tNANO_current_OFFSET (%ecx), %edx + movl _kernel_offset_to_current(%ecx), %edx /* * Determine whether the execution of the ISR requires a context @@ -289,11 +294,11 @@ alreadyOnIntStack: */ /* do not reschedule coop threads (negative priority) */ - cmpl $0, __tTCS_prio_OFFSET (%edx) + cmpl $0, _thread_offset_to_prio(%edx) jl noReschedule /* do not reschedule if scheduler is locked */ - cmpl $0, __tTCS_sched_locked_OFFSET (%edx) + cmpl $0, _thread_offset_to_sched_locked(%edx) jg noReschedule @@ -303,7 +308,7 @@ alreadyOnIntStack: jnz noReschedule /* - * Set the INT_ACTIVE bit in the tTCS to allow the upcoming call to + * Set the INT_ACTIVE bit in the k_thread to allow the upcoming call to * _Swap() to determine whether non-floating registers need to be * preserved using the lazy save/restore algorithm, or to indicate to * debug tools that a preemptive context switch has occurred. 
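Reduced to C, the nesting bookkeeping that _interrupt_enter performs around each ISR looks roughly like this sketch (stub kernel struct; the real code also swaps stack pointers, which only the assembly can do):

	struct kernel_sketch {
		int nested;
		char *irq_stack;
	};

	static struct kernel_sketch kernel_sketch;

	static void isr_wrapper_sketch(void (*isr)(void))
	{
		if (++kernel_sketch.nested == 1) {
			/* first level: save thread SP, move to irq_stack */
		}

		isr();

		if (--kernel_sketch.nested == 0) {
			/* outermost exit: back on the thread stack; consider
			 * rescheduling unless the thread is cooperative or
			 * holds the scheduler lock
			 */
		}
	}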
@@ -315,11 +320,11 @@ alreadyOnIntStack:
 #if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
 	/*
-	 * Reload _nanokernel.current as _is_next_thread_current()
+	 * Reload _kernel.current as _is_next_thread_current()
 	 * might have clobbered it.
 	 */
-	movl	_nanokernel + __tNANO_current_OFFSET, %edx
-	orl	$INT_ACTIVE, __tTCS_flags_OFFSET(%edx)
+	movl	_kernel + _kernel_offset_to_current, %edx
+	orl	$INT_ACTIVE, _thread_offset_to_flags(%edx)
 #endif
 	/*
@@ -366,8 +371,8 @@ alreadyOnIntStack:
 	 * since it has served its purpose.
 	 */
-	movl	_nanokernel + __tNANO_current_OFFSET, %eax
-	andl	$~INT_ACTIVE, __tTCS_flags_OFFSET (%eax)
+	movl	_kernel + _kernel_offset_to_current, %eax
+	andl	$~INT_ACTIVE, _thread_offset_to_flags(%eax)
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */

 	/* Restore volatile registers and return to the interrupted thread */
@@ -427,13 +432,13 @@ handle_idle:
 	pushl	%edx
 	/* Populate 'ticks' argument to _sys_power_save_idle_exit */
 #ifdef CONFIG_X86_IAMCU
-	movl	__tNANO_idle_OFFSET(%ecx), %eax
+	movl	_kernel_offset_to_idle(%ecx), %eax
 #else
 	/* SYS V calling convention */
-	push	__tNANO_idle_OFFSET(%ecx)
+	push	_kernel_offset_to_idle(%ecx)
 #endif
-	/* Zero out _nanokernel.idle */
-	movl	$0, __tNANO_idle_OFFSET(%ecx)
+	/* Zero out _kernel.idle */
+	movl	$0, _kernel_offset_to_idle(%ecx)

 	/*
 	 * Beware that a timer driver's _sys_power_save_idle_exit() implementation might
diff --git a/arch/x86/core/irq_manage.c b/arch/x86/core/irq_manage.c
index 3372b211828..5db238069e7 100644
--- a/arch/x86/core/irq_manage.c
+++ b/arch/x86/core/irq_manage.c
@@ -26,7 +26,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -39,7 +39,7 @@ extern void _SpuriousIntNoErrCodeHandler(void *);
 * the spurious interrupt handlers. They *must* be declared in a module other
 * than the one they are used in to get around garbage collection issues and
 * warnings issued by some compilers that they aren't used. Therefore care must
- * be taken if they are to be moved. See nano_private.h for more information.
+ * be taken if they are to be moved. See kernel_structs.h for more information.
 */
 void *_dummy_spurious_interrupt;
 void *_dummy_exception_vector_stub;
diff --git a/arch/x86/core/msr.c b/arch/x86/core/msr.c
index 832783344e1..be967fe1591 100644
--- a/arch/x86/core/msr.c
+++ b/arch/x86/core/msr.c
@@ -28,7 +28,7 @@
 * This function is used to write to an MSR.
 *
 * The definitions of the so-called "Architectural MSRs" are contained
- * in nano_private.h and have the format: IA32_XXX_MSR
+ * in kernel_structs.h and have the format: IA32_XXX_MSR
 *
 * INTERNAL
 * 1) The 'wrmsr' instruction was introduced in the Pentium processor; executing
@@ -61,7 +61,7 @@ void _MsrWrite(unsigned int msr, uint64_t msr_data)
 * This function is used to read from an MSR.
* * The definitions of the so-called "Architectural MSRs" are contained - * in nano_private.h and have the format: IA32_XXX_MSR + * in kernel_structs.h and have the format: IA32_XXX_MSR * * INTERNAL * 1) The 'rdmsr' instruction was introduced in the Pentium processor; executing diff --git a/arch/x86/core/offsets/offsets.c b/arch/x86/core/offsets/offsets.c index 64bb7cc14b9..d8cf01f2a88 100644 --- a/arch/x86/core/offsets/offsets.c +++ b/arch/x86/core/offsets/offsets.c @@ -36,48 +36,34 @@ /* list of headers that define whose structure offsets will be generated */ -#include +#include #include -#include +#include -/* Intel-specific tNANO structure member offsets */ - -GEN_OFFSET_SYM(tNANO, nested); -GEN_OFFSET_SYM(tNANO, common_isp); #ifdef CONFIG_DEBUG_INFO -GEN_OFFSET_SYM(tNANO, isf); +GEN_OFFSET_SYM(_kernel_arch_t, isf); #endif -#ifdef CONFIG_SYS_POWER_MANAGEMENT -GEN_OFFSET_SYM(tNANO, idle); -#endif - -/* Intel-specific struct tcs structure member offsets */ #ifdef CONFIG_GDB_INFO -GEN_OFFSET_SYM(tTCS, esfPtr); -#endif /* CONFIG_GDB_INFO */ -#if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)) -GEN_OFFSET_SYM(tTCS, excNestCount); -#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */ -#ifdef CONFIG_THREAD_CUSTOM_DATA -GEN_OFFSET_SYM(tTCS, custom_data); /* available for custom use */ +GEN_OFFSET_SYM(_thread_arch_t, esf); #endif -GEN_OFFSET_SYM(tTCS, coopFloatReg); /* start of coop FP register set */ -GEN_OFFSET_SYM(tTCS, preempFloatReg); /* start of prempt FP register set */ + +#if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)) +GEN_OFFSET_SYM(_thread_arch_t, excNestCount); +#endif + +GEN_OFFSET_SYM(_thread_arch_t, coopFloatReg); +GEN_OFFSET_SYM(_thread_arch_t, preempFloatReg); /* size of the struct tcs structure sans save area for floating point regs */ -GEN_ABSOLUTE_SYM(__tTCS_NOFLOAT_SIZEOF, - sizeof(tTCS) - sizeof(tCoopFloatReg) - +GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, + sizeof(struct k_thread) - sizeof(tCoopFloatReg) - sizeof(tPreempFloatReg)); -/* tCoopReg structure member offsets: tTCS->coopReg is of type tCoopReg */ - -GEN_OFFSET_SYM(tCoopReg, esp); - -/* tSwapStk structure member offsets */ +GEN_OFFSET_SYM(_callee_saved_t, esp); GEN_OFFSET_SYM(tSwapStk, eax); GEN_OFFSET_SYM(tSwapStk, ebp); diff --git a/arch/x86/core/swap.S b/arch/x86/core/swap.S index 920cb0968e6..f6e23964672 100644 --- a/arch/x86/core/swap.S +++ b/arch/x86/core/swap.S @@ -29,9 +29,9 @@ #define _ASMLANGUAGE -#include +#include #include -#include /* nanokernel structure offset definitions */ +#include /* exports (internal APIs) */ @@ -51,11 +51,11 @@ * is passed as a parameter to _Swap(). The 'key' actually represents * the EFLAGS register prior to disabling interrupts via a 'cli' instruction. * - * Given that _Swap() is called to effect a cooperative context switch, - * only the non-volatile integer registers need to be saved in the TCS of the + * Given that _Swap() is called to effect a cooperative context switch, only + * the non-volatile integer registers need to be saved in the TCS of the * outgoing thread. The restoration of the integer registers of the incoming - * thread depends on whether that thread was preemptively context switched - * out. The INT_ACTIVE and EXC_ACTIVE bits in the tTCS->flags field will signify + * thread depends on whether that thread was preemptively context switched out. 
+ * The INT_ACTIVE and EXC_ACTIVE bits in the k_thread->flags field will signify
 * that the thread was preemptively context switched out, and thus both the
 * volatile and non-volatile integer registers need to be restored.
 *
@@ -81,10 +81,6 @@
 * Floating point registers are currently NOT scrubbed, and are subject to
 * potential security leaks.
 *
- * The scheduling algorithm is simple: schedule the head of the runnable fiber
- * list (_nanokernel.fiber). If there are no runnable fibers, then schedule
- * the task (_nanokernel.task). The _nanokernel.task field will never be NULL.
- *
 * @return -EAGAIN, or a return value set by a call to fiberRtnValueSet()
 *
 * C function prototype:
@@ -103,14 +99,14 @@ SECTION_FUNC(TEXT, _Swap)
 #endif
 	/*
 	 * Push all non-volatile registers onto the stack; do not copy
-	 * any of these registers into the tTCS. Only the 'esp' register
+	 * any of these registers into the k_thread. Only the 'esp' register
 	 * (after all the pushes have been performed) will be stored in the
-	 * tTCS.
+	 * k_thread.
 	 */

 	pushl	%edi
-	movl	$_nanokernel, %edi
+	movl	$_kernel, %edi

 	pushl	%esi
 	pushl	%ebx
@@ -126,10 +122,10 @@ SECTION_FUNC(TEXT, _Swap)

 	pushl	_k_neg_eagain

-	/* save esp into tTCS structure */
+	/* save esp into k_thread structure */

-	movl	__tNANO_current_OFFSET (%edi), %ecx
-	movl	%esp, __tTCS_coopReg_OFFSET + __tCoopReg_esp_OFFSET (%ecx)
+	movl	_kernel_offset_to_current(%edi), %ecx
+	movl	%esp, _thread_offset_to_esp(%ecx)

 #ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	/* Register the context switch */
@@ -138,9 +134,8 @@ SECTION_FUNC(TEXT, _Swap)
 	call	_get_next_ready_thread

 	/*
-	 * At this point, the %eax register contains the 'tTCS *' of
-	 * the TASK or FIBER to be swapped in, and %edi still
-	 * contains &_nanokernel.
+	 * At this point, the %eax register contains the 'k_thread *' of the
+	 * thread to be swapped in, and %edi still contains &_kernel.
 	 */

 #ifdef CONFIG_FP_SHARING
@@ -166,7 +161,7 @@ SECTION_FUNC(TEXT, _Swap)
 	 * _and_ whether the thread was context switched out preemptively.
 	 */

-	testl	$_FP_USER_MASK, __tTCS_flags_OFFSET (%eax)
+	testl	$_FP_USER_MASK, _thread_offset_to_flags(%eax)
 	je	restoreContext_NoFloatSwap

@@ -176,7 +171,7 @@ SECTION_FUNC(TEXT, _Swap)
 	 * If so, there is no need to restore the floating point context.
 	 */

-	movl	__tNANO_current_fp_OFFSET (%edi), %ebx
+	movl	_kernel_offset_to_current_fp(%edi), %ebx
 	cmpl	%ebx, %eax
 	je	restoreContext_NoFloatSwap

@@ -202,12 +197,12 @@ SECTION_FUNC(TEXT, _Swap)
 	 * was preemptively context switched.
 	 */

-	testl	$INT_OR_EXC_MASK, __tTCS_flags_OFFSET (%ebx)
+	testl	$INT_OR_EXC_MASK, _thread_offset_to_flags(%ebx)
 	je	restoreContext_NoFloatSave

 #ifdef CONFIG_SSE
-	testl	$K_SSE_REGS, __tTCS_flags_OFFSET (%ebx)
+	testl	$K_SSE_REGS, _thread_offset_to_flags(%ebx)
 	je	x87FloatSave

 	/*
	 *
 	 * (for the case when the fxrstor is not executed).
 	 */

-	fxsave	__tTCS_preempFloatReg_OFFSET (%ebx)
+	fxsave	_thread_offset_to_preempFloatReg(%ebx)
 	fninit
 	jmp	floatSaveDone

@@ -225,7 +220,7 @@ x87FloatSave:

 	/* 'fnsave' performs an implicit 'fninit' after saving state! */

-	fnsave	__tTCS_preempFloatReg_OFFSET (%ebx)
+	fnsave	_thread_offset_to_preempFloatReg(%ebx)

 	/* fall through to 'floatSaveDone' */

@@ -242,21 +237,21 @@ restoreContext_NoFloatSave:
 	 * was previously preemptively context switched out.
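 *
 * Informally, the policy implemented below is (a C-like sketch, not actual
 * code; 'in' is the incoming thread held in %eax):
 *
 *   if (in->base.flags & _FP_USER_MASK) {      /@ uses x87/SSE at all? @/
 *       if (in != _kernel.current_fp) {        /@ not already FPU owner @/
 *           /@ fxsave/fnsave the current owner if it was preempted @/
 *           if (in->base.flags & INT_OR_EXC_MASK)
 *               /@ fxrstor/frstor in->arch.preempFloatReg @/;
 *       }
 *   }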
*/ - testl $INT_OR_EXC_MASK, __tTCS_flags_OFFSET (%eax) + testl $INT_OR_EXC_MASK, _thread_offset_to_flags(%eax) je restoreContext_NoFloatRestore #ifdef CONFIG_SSE - testl $K_SSE_REGS, __tTCS_flags_OFFSET (%eax) + testl $K_SSE_REGS, _thread_offset_to_flags(%eax) je x87FloatRestore - fxrstor __tTCS_preempFloatReg_OFFSET (%eax) + fxrstor _thread_offset_to_preempFloatReg(%eax) jmp floatRestoreDone x87FloatRestore: #endif /* CONFIG_SSE */ - frstor __tTCS_preempFloatReg_OFFSET (%eax) + frstor _thread_offset_to_preempFloatReg(%eax) /* fall through to 'floatRestoreDone' */ @@ -265,7 +260,7 @@ restoreContext_NoFloatRestore: /* record that the incoming thread "owns" the floating point registers */ - movl %eax, __tNANO_current_fp_OFFSET (%edi) + movl %eax, _kernel_offset_to_current_fp(%edi) /* @@ -281,7 +276,7 @@ restoreContext_NoFloatSwap: * registers */ - testl $_FP_USER_MASK, __tTCS_flags_OFFSET (%eax) + testl $_FP_USER_MASK, _thread_offset_to_flags(%eax) jne CROHandlingDone /* @@ -299,13 +294,13 @@ CROHandlingDone: #endif /* CONFIG_FP_SHARING */ - /* update _nanokernel.current to reflect incoming thread */ + /* update _kernel.current to reflect incoming thread */ - movl %eax, __tNANO_current_OFFSET (%edi) + movl %eax, _kernel_offset_to_current(%edi) - /* recover task/fiber stack pointer from tTCS */ + /* recover task/fiber stack pointer from k_thread */ - movl __tTCS_coopReg_OFFSET + __tCoopReg_esp_OFFSET (%eax), %esp + movl _thread_offset_to_esp(%eax), %esp /* load return value from a possible fiberRtnValueSet() */ diff --git a/arch/x86/core/sys_fatal_error_handler.c b/arch/x86/core/sys_fatal_error_handler.c index c41c3aa5bdc..b181aff4042 100644 --- a/arch/x86/core/sys_fatal_error_handler.c +++ b/arch/x86/core/sys_fatal_error_handler.c @@ -25,7 +25,7 @@ #include #include #include -#include /* to get access to '_current' */ +#include #include /** diff --git a/arch/x86/core/thread.c b/arch/x86/core/thread.c index 201ff9ac858..2ab1b412693 100644 --- a/arch/x86/core/thread.c +++ b/arch/x86/core/thread.c @@ -28,13 +28,9 @@ #include #include -#include +#include #include -/* the one and only nanokernel control structure */ - -tNANO _nanokernel = {0}; - /* forward declaration */ #if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \ @@ -47,17 +43,17 @@ void _thread_entry_wrapper(_thread_entry_t, void *, /* * Add a thread to the kernel's list of active threads. 
 */
-static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
+static ALWAYS_INLINE void thread_monitor_init(struct k_thread *thread)
 {
 	unsigned int key;

 	key = irq_lock();
-	tcs->next_thread = _nanokernel.threads;
-	_nanokernel.threads = tcs;
+	thread->next_thread = _kernel.threads;
+	_kernel.threads = thread;
 	irq_unlock(key);
 }
 #else
-#define thread_monitor_init(tcs) \
+#define thread_monitor_init(thread) \
 	do {/* do nothing */ \
 	} while ((0))
 #endif /* CONFIG_THREAD_MONITOR */
@@ -84,27 +80,27 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
 			unsigned options)
 {
 	unsigned long *pInitialCtx;
-	/* ptr to the new task's tcs */
-	struct tcs *tcs = (struct tcs *)pStackMem;
+	/* ptr to the new task's k_thread */
+	struct k_thread *thread = (struct k_thread *)pStackMem;

-	tcs->prio = priority;
+	thread->base.prio = priority;
 #if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO))
-	tcs->excNestCount = 0;
+	thread->arch.excNestCount = 0;
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */

 	/* k_q_node initialized upon first insertion in a list */
-	tcs->flags = options | K_PRESTART;
-	tcs->sched_locked = 0;
+	thread->base.flags = options | K_PRESTART;
+	thread->base.sched_locked = 0;

 	/* static threads overwrite it afterwards with real value */
-	tcs->init_data = NULL;
-	tcs->fn_abort = NULL;
+	thread->init_data = NULL;
+	thread->fn_abort = NULL;

 #ifdef CONFIG_THREAD_CUSTOM_DATA
 	/* Initialize custom data field (value is opaque to kernel) */
-	tcs->custom_data = NULL;
+	thread->custom_data = NULL;
 #endif

 	ARG_UNUSED(uk_task_ptr);
@@ -121,10 +117,10 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,

 #ifdef CONFIG_THREAD_MONITOR
 	/*
-	 * In debug mode tcs->entry give direct access to the thread entry
+	 * In debug mode thread->entry gives direct access to the thread entry
 	 * and the corresponding parameters.
 	 */
-	tcs->entry = (struct __thread_entry *)(pInitialCtx -
+	thread->entry = (struct __thread_entry *)(pInitialCtx -
 		sizeof(struct __thread_entry));
 #endif

@@ -137,14 +133,14 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
 	 */
 	pInitialCtx -= 11;

-	tcs->coopReg.esp = (unsigned long)pInitialCtx;
-	PRINTK("\nInitial context ESP = 0x%x\n", tcs->coopReg.esp);
+	thread->callee_saved.esp = (unsigned long)pInitialCtx;
+	PRINTK("\nInitial context ESP = 0x%x\n", thread->callee_saved.esp);

-	PRINTK("\nstruct tcs * = 0x%x", tcs);
+	PRINTK("\nstruct k_thread * = 0x%x", thread);

-	thread_monitor_init(tcs);
+	thread_monitor_init(thread);

-	_nano_timeout_tcs_init(tcs);
+	_nano_timeout_thread_init(thread);
 }

 #if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \
@@ -234,7 +230,7 @@ __asm__("\t.globl _thread_entry\n"
 * This function is utilized to create execution threads for both fiber
 * threads and kernel tasks.
 *
- * The "thread control block" (TCS) is carved from the "end" of the specified
+ * The k_thread structure is carved from the "end" of the specified
 * thread stack memory.
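 *
 * A rough picture of the resulting layout (illustrative only; not to
 * scale, and the stack grows down from the high end):
 *
 *   pStackMem                               pStackMem + stackSize
 *   +-----------------+-----------------------------------+
 *   | struct k_thread | thread stack (initial ESP is set  |
 *   |  at the low end |  just below the high end)         |
 *   +-----------------+-----------------------------------+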
 *
 * @param pStackMem the pointer to aligned stack memory
@@ -247,7 +243,7 @@ __asm__("\t.globl _thread_entry\n"
 * @param options thread options: K_ESSENTIAL, K_FP_REGS, K_SSE_REGS
 *
 *
- * @return opaque pointer to initialized TCS structure
+ * @return opaque pointer to initialized k_thread structure
 */
 void _new_thread(char *pStackMem, unsigned stackSize,
 		 void *uk_task_ptr, _thread_entry_t pEntry,
@@ -308,8 +304,8 @@ void _new_thread(char *pStackMem, unsigned stackSize,
 	 */

 	/*
-	 * For kernel tasks and fibers the thread control struct (TCS)
-	 * is located at the "low end" of memory set aside for the thread's stack.
+	 * The k_thread structure is located at the "low end" of memory set
+	 * aside for the thread's stack.
 	 */

 	_new_thread_internal(pStackMem, stackSize, uk_task_ptr, priority, options);
diff --git a/arch/x86/debug/gdb_arch.c b/arch/x86/debug/gdb_arch.c
index 1e91c11401a..4674fd2b42f 100644
--- a/arch/x86/debug/gdb_arch.c
+++ b/arch/x86/debug/gdb_arch.c
@@ -20,7 +20,7 @@
 */
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/arch/x86/include/debug/gdb_arch.h b/arch/x86/include/debug/gdb_arch.h
index 087aa4eab09..7a9629b722b 100644
--- a/arch/x86/include/debug/gdb_arch.h
+++ b/arch/x86/include/debug/gdb_arch.h
@@ -21,7 +21,7 @@ extern "C" {
 #endif
-#include
+#include
 #include
 #define GDB_ARCH_HAS_ALL_REGS
diff --git a/arch/x86/include/nano_private.h b/arch/x86/include/kernel_arch_data.h
similarity index 75%
rename from arch/x86/include/nano_private.h
rename to arch/x86/include/kernel_arch_data.h
index ab197eec748..377ff3c86be 100644
--- a/arch/x86/include/nano_private.h
+++ b/arch/x86/include/kernel_arch_data.h
@@ -16,23 +16,25 @@
 /**
 * @file
- * @brief Private nanokernel definitions (IA-32)
+ * @brief Private kernel definitions (IA-32)
 *
- * This file contains private nanokernel structures definitions and various
+ * This file contains private kernel structure definitions and various
 * other definitions for the Intel Architecture 32 bit (IA-32) processor
 * architecture.
- * The header include/nanokernel.h contains the public nanokernel interface
- * definitions, with include/arch/nanokernel/x86/arch.h supplying the
- * IA-32 specific portions of the public nanokernel interface.
+ * The header include/kernel.h contains the public kernel interface
+ * definitions, with include/arch/x86/arch.h supplying the
+ * IA-32 specific portions of the public kernel interface.
 *
 * This file is also included by assembly language files which must #define
- * _ASMLANGUAGE before including this header file. Note that nanokernel
+ * _ASMLANGUAGE before including this header file. Note that kernel
 * assembly source files obtain structure offset values via "absolute symbols"
 * in the offsets.o module.
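 *
 * For example (illustration only; this exact invocation appears in
 * arch/x86/core/offsets/offsets.c):
 *
 *   GEN_OFFSET_SYM(_thread_arch_t, excNestCount);
 *
 * which emits the absolute symbol ___thread_arch_t_excNestCount_OFFSET
 * consumed by offsets_short_arch.h.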
 */
-#ifndef _NANO_PRIVATE_H
-#define _NANO_PRIVATE_H
+/* this file is only meant to be included by kernel_structs.h */
+
+#ifndef _kernel_arch_data__h_
+#define _kernel_arch_data__h_

 #include
 #include
@@ -40,8 +42,8 @@
 #include

 #ifndef _ASMLANGUAGE
-#include
-#include <../../../kernel/unified/include/nano_internal.h>
+#include
+#include
 #include
 #include
 #endif
@@ -51,7 +53,7 @@
 #define STACK_ALIGN_SIZE 4

 /*
- * Bitmask definitions for the struct tcs->flags bit field
+ * Bitmask definitions for the struct k_thread->flags bit field
 */

 #define K_STATIC  0x00000800
@@ -416,14 +418,31 @@
 extern "C" {
 #endif

-#ifdef CONFIG_THREAD_MONITOR
-struct __thread_entry {
-	_thread_entry_t pEntry;
-	void *parameter1;
-	void *parameter2;
-	void *parameter3;
+/*
+ * The following structure defines the set of 'volatile' integer registers.
+ * These registers need not be preserved by a called C function. Given that
+ * they are not preserved across function calls, they must be saved/restored
+ * (along with the struct _caller_saved) when a preemptive context switch
+ * occurs.
+ */
+
+struct _caller_saved {
+
+	/*
+	 * The volatile registers 'eax', 'ecx' and 'edx' are not included in
+	 * the definition of 'struct _caller_saved' since the interrupt and
+	 * exception handling routines use the stack to save and restore the
+	 * values of these registers in order to support interrupt nesting.
+	 * The stubs do _not_ copy the saved values from the stack into the
+	 * TCS.
+	 *
+	 * unsigned long eax;
+	 * unsigned long ecx;
+	 * unsigned long edx;
+	 */
+};
-#endif /*CONFIG_THREAD_MONITOR*/
+
+typedef struct _caller_saved _caller_saved_t;

 /*
 * The following structure defines the set of 'non-volatile' integer registers.
@@ -432,7 +451,7 @@ struct __thread_entry {
 * switch occurs.
 */

-typedef struct s_coopReg {
+struct _callee_saved {
 	unsigned long esp;

 	/*
 	 * ...
 	 * unsigned long edi;
 	 */

-} tCoopReg;
+};

-/*
- * The following structure defines the set of 'volatile' integer registers.
- * These registers need not be preserved by a called C function. Given that
- * they are not preserved across function calls, they must be save/restored
- * (along with the s_coop_reg) when a preemptive context switch occurs.
- */
-
-typedef struct s_preempReg {
-
-	/*
-	 * The volatile registers 'eax', 'ecx' and 'edx' area not included in
-	 * the definition of 'tPreempReg' since the interrupt and exception
-	 * handling routunes use the stack to save and restore the values of
-	 * these registers in order to support interrupt nesting. The stubs
-	 * do _not_ copy the saved values from the stack into the TCS.
-	 *
-	 * unsigned long eax;
-	 * unsigned long ecx;
-	 * unsigned long edx;
-	 */
-
-} tPreempReg;
+typedef struct _callee_saved _callee_saved_t;

 /*
 * The macro CONFIG_FP_SHARING shall be set to indicate that the
 * saving/restoring of the traditional x87 floating point (and MMX) registers
- * are supported by the nanokernel's context swapping code. The macro
+ * are supported by the kernel's context swapping code. The macro
 * CONFIG_SSE shall _also_ be set if saving/restoring of the XMM
- * registers is also supported in the nanokernel's context swapping code.
+ * registers is also supported in the kernel's context swapping code.
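 *
 * For example (illustrative only), the thread options map onto these
 * Kconfig symbols as follows:
 *
 *   K_FP_REGS  -> needs CONFIG_FP_SHARING=y
 *   K_SSE_REGS -> needs CONFIG_FP_SHARING=y and CONFIG_SSE=y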
*/ #ifdef CONFIG_FP_SHARING @@ -615,17 +613,6 @@ typedef struct s_preempFloatReg { } floatRegsUnion; } tPreempFloatReg; -/* 'struct tcs_base' must match the beginning of 'struct tcs' */ -struct tcs_base { - sys_dnode_t k_q_node; - uint32_t flags; - int prio; /* thread priority used to sort linked list */ - void *swap_data; -#ifdef CONFIG_NANO_TIMEOUTS - struct _timeout timeout; -#endif -}; - /* * The thread control stucture definition. It contains the * various fields to manage a _single_ thread. The TCS will be aligned @@ -633,38 +620,13 @@ struct tcs_base { * _new_thread() call. */ -struct tcs { - /* - * Link to next thread in singly-linked thread list (such as - * prioritized list of runnable fibers, or list of fibers waiting on a - * nanokernel FIFO). - */ +struct _thread_arch { - sys_dnode_t k_q_node; /* node object in any kernel queue */ - int flags; - int prio; /* thread priority used to sort linked list */ - void *swap_data; -#ifdef CONFIG_NANO_TIMEOUTS - struct _timeout timeout; -#endif - - /* - * Storage space for integer registers. These must also remain near - * the start of struct tcs for the same reason mention for - * 'flags'. - */ - - tCoopReg coopReg; /* non-volatile integer register storage */ - tPreempReg preempReg; /* volatile integer register storage */ - -#if defined(CONFIG_THREAD_MONITOR) - struct __thread_entry *entry; /* thread entry and parameters description */ - struct tcs *next_thread; /* next item in list of ALL fiber+tasks */ -#endif #ifdef CONFIG_GDB_INFO - void *esfPtr; /* pointer to exception stack frame saved by */ - /* outermost exception wrapper */ -#endif /* CONFIG_GDB_INFO */ + /* pointer to ESF saved by outermost exception wrapper */ + void *esf; +#endif + #if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)) /* * Nested exception count to maintain setting of EXC_ACTIVE flag across @@ -674,18 +636,6 @@ struct tcs { unsigned excNestCount; /* nested exception count */ #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */ -#ifdef CONFIG_THREAD_CUSTOM_DATA - void *custom_data; /* available for custom use */ -#endif - -#ifdef CONFIG_ERRNO - int errno_var; -#endif - - atomic_t sched_locked; - void *init_data; - void (*fn_abort)(void); - /* * The location of all floating point related structures/fields MUST be * located at the end of struct tcs. This way only the @@ -704,172 +654,20 @@ struct tcs { tPreempFloatReg preempFloatReg; /* volatile float register storage */ }; +typedef struct _thread_arch _thread_arch_t; -struct ready_q { - struct k_thread *cache; - uint32_t prio_bmap[1]; - sys_dlist_t q[K_NUM_PRIORITIES]; -}; - - -/* - * The nanokernel structure definition. It contains various fields to - * manage _all_ the threads in the nanokernel (system level). - */ - -typedef struct s_NANO { - struct tcs *current; /* currently scheduled thread (fiber or task) */ -#if defined(CONFIG_THREAD_MONITOR) - struct tcs *threads; /* singly linked list of ALL fiber+tasks */ -#endif - unsigned nested; /* nested interrupt count */ - char *common_isp; /* interrupt stack pointer base */ - +struct _kernel_arch { #if defined(CONFIG_DEBUG_INFO) NANO_ISF *isf; /* ptr to interrupt stack frame */ #endif +}; -#ifdef CONFIG_SYS_POWER_MANAGEMENT - int32_t idle; /* Number of ticks for kernel idling */ -#endif - - -#ifdef CONFIG_FP_SHARING - /* - * A 'current_sse' field does not exist in addition to the 'current_fp' - * field since it's not possible to divide the IA-32 non-integer - * registers into 2 distinct blocks owned by differing threads. 
In - * other words, given that the 'fxnsave/fxrstor' instructions - * save/restore both the X87 FPU and XMM registers, it's not possible - * for a thread to only "own" the XMM registers. - */ - - struct tcs *current_fp; /* thread (fiber or task) that owns the FP regs */ -#endif /* CONFIG_FP_SHARING */ -#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS) - sys_dlist_t timeout_q; -#endif - struct ready_q ready_q; -} tNANO; - -/* stack alignment related macros: STACK_ALIGN_SIZE is defined above */ - -#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE) -#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE) - -/* variable declarations */ - -/* - * There is only a single instance of the s_NANO structure, given that there - * is only a single nanokernel in the system: _nanokernel - */ - -extern tNANO _nanokernel; - - -/* inline function definitions */ - -/** - * - * @brief Performs architecture-specific initialization - * - * This routine performs architecture-specific initialization of the nanokernel. - * Trivial stuff is done inline; more complex initialization is done via - * function calls. - * - * @return N/A - */ -static inline void nanoArchInit(void) -{ - extern void *__isr___SpuriousIntHandler; - extern void *_dummy_spurious_interrupt; - extern void *_dummy_exception_vector_stub; - extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE]; - extern void _exception_enter(void); - - _nanokernel.nested = 0; - - _nanokernel.common_isp = _interrupt_stack + CONFIG_ISR_STACK_SIZE; - /* - * Forces the inclusion of the spurious interrupt handlers. If a - * reference isn't made then intconnect.o is never pulled in by the - * linker. - */ - - _dummy_spurious_interrupt = &__isr___SpuriousIntHandler; - - /* - * Forces the inclusion of the exception vector stub code. If a - * reference isn't made then excstubs.o is never pulled in by the - * linker. - */ - - _dummy_exception_vector_stub = &_exception_enter; - - -} - -/** - * - * @brief Set the return value for the specified fiber (inline) - * - * @param fiber pointer to fiber - * @param value value to set as return value - * - * The register used to store the return value from a function call invocation - * is set to . It is assumed that the specified is pending, and - * thus the fibers context is stored in its TCS. - * - * @return N/A - */ -static inline void fiberRtnValueSet(struct tcs *fiber, unsigned int value) -{ - /* write into 'eax' slot created in _Swap() entry */ - - *(unsigned int *)(fiber->coopReg.esp) = value; -} - -#define _current _nanokernel.current -#define _ready_q _nanokernel.ready_q -#define _timeout_q _nanokernel.timeout_q -#define _set_thread_return_value fiberRtnValueSet -static ALWAYS_INLINE void -_set_thread_return_value_with_data(struct k_thread *thread, unsigned int value, - void *data) -{ - _set_thread_return_value(thread, value); - thread->swap_data = data; -} -#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES) - -/* function prototypes */ - -extern void nano_cpu_atomic_idle(unsigned int imask); - -extern void _MsrWrite(unsigned int msr, uint64_t msrData); -extern uint64_t _MsrRead(unsigned int msr); - -/* - * _IntLibInit() is called from the non-arch specific nanokernel function, - * _nano_init(). The IA-32 nanokernel does not require any special - * initialization of the interrupt subsystem. However, we still need to - * provide an _IntLibInit() of some sort to prevent build errors. 
- */
-static inline void _IntLibInit(void)
-{
-}
-
-/* the _idt_base_address symbol is generated via a linker script */
-extern unsigned char _idt_base_address[];
-
-#include	/* For size_t */
+typedef struct _kernel_arch _kernel_arch_t;

 #ifdef __cplusplus
 }
 #endif

-#define _is_in_isr() (_nanokernel.nested != 0)
-
 #endif /* _ASMLANGUAGE */

-#endif /* _NANO_PRIVATE_H */
+#endif /* _kernel_arch_data__h_ */
diff --git a/arch/x86/include/kernel_arch_func.h b/arch/x86/include/kernel_arch_func.h
new file mode 100644
index 00000000000..66fc6e92188
--- /dev/null
+++ b/arch/x86/include/kernel_arch_func.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2016 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* this file is only meant to be included by kernel_structs.h */
+
+#ifndef _kernel_arch_func__h_
+#define _kernel_arch_func__h_
+
+#ifndef _ASMLANGUAGE
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* stack alignment related macros: STACK_ALIGN_SIZE is defined above */
+
+#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
+#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
+
+/**
+ *
+ * @brief Performs architecture-specific initialization
+ *
+ * This routine performs architecture-specific initialization of the kernel.
+ * Trivial stuff is done inline; more complex initialization is done via
+ * function calls.
+ *
+ * @return N/A
+ */
+static inline void nanoArchInit(void)
+{
+	extern void *__isr___SpuriousIntHandler;
+	extern void *_dummy_spurious_interrupt;
+	extern void *_dummy_exception_vector_stub;
+	extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE];
+
+	extern void _exception_enter(void);
+
+	_kernel.nested = 0;
+
+	_kernel.irq_stack = _interrupt_stack + CONFIG_ISR_STACK_SIZE;
+	/*
+	 * Forces the inclusion of the spurious interrupt handlers. If a
+	 * reference isn't made then intconnect.o is never pulled in by the
+	 * linker.
+	 */
+
+	_dummy_spurious_interrupt = &__isr___SpuriousIntHandler;
+
+	/*
+	 * Forces the inclusion of the exception vector stub code. If a
+	 * reference isn't made then excstubs.o is never pulled in by the
+	 * linker.
+	 */
+
+	_dummy_exception_vector_stub = &_exception_enter;
+
+
+}
+
+/**
+ *
+ * @brief Set the return value for the specified thread (inline)
+ *
+ * @param thread pointer to thread
+ * @param value value to set as return value
+ *
+ * The register used to store the return value from a function call invocation
+ * is set to 'value'. It is assumed that the specified thread is pending, and
+ * thus the thread's context is stored in its k_thread structure.
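+ *
+ * A minimal usage sketch (hypothetical caller; 'thread' must already be
+ * pended, i.e. context-switched out via _Swap()):
+ *
+ *   _set_thread_return_value(thread, 0); /@ its _Swap() call returns 0 @/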
+ * + * @return N/A + */ +static ALWAYS_INLINE void +_set_thread_return_value(struct k_thread *thread, unsigned int value) +{ + /* write into 'eax' slot created in _Swap() entry */ + + *(unsigned int *)(thread->callee_saved.esp) = value; +} + +extern void nano_cpu_atomic_idle(unsigned int imask); + +extern void _MsrWrite(unsigned int msr, uint64_t msrData); +extern uint64_t _MsrRead(unsigned int msr); + +/* + * _IntLibInit() is called from the non-arch specific nanokernel function, + * _nano_init(). The IA-32 nanokernel does not require any special + * initialization of the interrupt subsystem. However, we still need to + * provide an _IntLibInit() of some sort to prevent build errors. + */ +static inline void _IntLibInit(void) +{ +} + +/* the _idt_base_address symbol is generated via a linker script */ +extern unsigned char _idt_base_address[]; + +#include /* For size_t */ + +#ifdef __cplusplus +} +#endif + +#define _is_in_isr() (_kernel.nested != 0) + +#endif /* _ASMLANGUAGE */ + +#endif /* _kernel_arch_func__h_ */ diff --git a/arch/x86/include/offsets_short_arch.h b/arch/x86/include/offsets_short_arch.h new file mode 100644 index 00000000000..e092b3c4d03 --- /dev/null +++ b/arch/x86/include/offsets_short_arch.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef _offsets_short_arch__h_ +#define _offsets_short_arch__h_ + +#include + +/* kernel */ + +#define _kernel_offset_to_isf \ + (___kernel_t_arch_OFFSET + ___kernel_arch_t_isf_OFFSET) + +/* end - kernel */ + +/* threads */ + +#define _thread_offset_to_excNestCount \ + (___thread_t_arch_OFFSET + ___thread_arch_t_excNestCount_OFFSET) + +#define _thread_offset_to_esp \ + (___thread_t_callee_saved_OFFSET + ___callee_saved_t_esp_OFFSET) + +#define _thread_offset_to_coopFloatReg \ + (___thread_t_arch_OFFSET + ___thread_arch_t_coopFloatReg_OFFSET) + +#define _thread_offset_to_preempFloatReg \ + (___thread_t_arch_OFFSET + ___thread_arch_t_preempFloatReg_OFFSET) + +/* end - threads */ + +#endif /* _offsets_short_arch__h_ */ diff --git a/arch/x86/include/start_task_arch.h b/arch/x86/include/start_task_arch.h index 632106866bf..3cae3e55866 100644 --- a/arch/x86/include/start_task_arch.h +++ b/arch/x86/include/start_task_arch.h @@ -28,7 +28,7 @@ #include #include -#include +#include #include #ifdef __cplusplus diff --git a/drivers/interrupt_controller/loapic_spurious.S b/drivers/interrupt_controller/loapic_spurious.S index 3c36ab48e7e..70ce9cbffe0 100644 --- a/drivers/interrupt_controller/loapic_spurious.S +++ b/drivers/interrupt_controller/loapic_spurious.S @@ -21,7 +21,7 @@ #define _ASMLANGUAGE -#include +#include #include GTEXT(_loapic_spurious_handler) diff --git a/drivers/timer/Makefile b/drivers/timer/Makefile index d1b440b825d..205407c572f 100644 --- a/drivers/timer/Makefile +++ b/drivers/timer/Makefile @@ -1,3 +1,5 @@ +ccflags-y += -I$(srctree)/kernel/unified/include + obj-$(CONFIG_HPET_TIMER) += hpet.o obj-$(CONFIG_LOAPIC_TIMER) += loapic_timer.o obj-$(CONFIG_ARCV2_TIMER) += arcv2_timer0.o diff --git a/drivers/timer/hpet.c b/drivers/timer/hpet.c index fc7127e8af8..bd615940787 100644 --- a/drivers/timer/hpet.c +++ b/drivers/timer/hpet.c @@ -60,7 +60,7 @@ #include #include #include -#include +#include #include diff --git a/include/kernel.h b/include/kernel.h index 9123e7c51b2..6ed513894ce 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -79,8 +79,8 @@ typedef sys_dlist_t _wait_q_t; #define _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(type) #endif -#define k_thread tcs -struct tcs; +#define tcs k_thread +struct k_thread; struct k_mutex; struct k_sem; struct k_alert; diff --git a/include/legacy.h b/include/legacy.h index 36a10bce170..838c0911bf5 100644 --- a/include/legacy.h +++ b/include/legacy.h @@ -185,7 +185,7 @@ extern __deprecated int sys_execution_context_type_get(void); * Given that this routine is _not_ ISR-callable, the following code is used * to differentiate between a task and fiber: * - * if ((_nanokernel.current->flags & TASK) == TASK) + * if ((_kernel.current->flags & TASK) == TASK) * * Given that the _fiber_start() primitive is not considered real-time * performance critical, a runtime check to differentiate between a calling diff --git a/include/linker-defs.h b/include/linker-defs.h index 5d7fe4e8b61..f9df709c787 100644 --- a/include/linker-defs.h +++ b/include/linker-defs.h @@ -54,7 +54,8 @@ * required space. 
*/ #ifdef CONFIG_DEVICE_POWER_MANAGEMENT -#define DEVICE_COUNT ((__device_init_end - __device_init_start) / __DEVICE_STR_SIZEOF) +#define DEVICE_COUNT \ + ((__device_init_end - __device_init_start) / _DEVICE_STRUCT_SIZE) #define DEV_BUSY_SZ (((DEVICE_COUNT + 31) / 32) * 4) #define DEVICE_BUSY_BITFIELD() \ FILL(0x00) ; \ diff --git a/include/misc/debug/object_tracing.h b/include/misc/debug/object_tracing.h index 935e1a6e3cd..013dd9be8f2 100644 --- a/include/misc/debug/object_tracing.h +++ b/include/misc/debug/object_tracing.h @@ -70,7 +70,7 @@ extern struct ring_buf *_trace_list_sys_ring_buf; #ifdef CONFIG_THREAD_MONITOR -#include +#include /** * @def SYS_THREAD_MONITOR_HEAD @@ -80,7 +80,7 @@ extern struct ring_buf *_trace_list_sys_ring_buf; * @details Access the head element of the thread monitor list. * */ -#define SYS_THREAD_MONITOR_HEAD ((struct tcs *)(_nanokernel.threads)) +#define SYS_THREAD_MONITOR_HEAD ((struct k_thread *)(_kernel.threads)) /** * @def SYS_THREAD_MONITOR_NEXT @@ -92,7 +92,7 @@ extern struct ring_buf *_trace_list_sys_ring_buf; * * @param obj Object to get the next element from. */ -#define SYS_THREAD_MONITOR_NEXT(obj) (((struct tcs *)obj)->next_thread) +#define SYS_THREAD_MONITOR_NEXT(obj) (((struct k_thread *)obj)->next_thread) #endif /*CONFIG_THREAD_MONITOR*/ diff --git a/include/misc/stack.h b/include/misc/stack.h index bb15788ee8f..0df9ae2c511 100644 --- a/include/misc/stack.h +++ b/include/misc/stack.h @@ -32,7 +32,7 @@ static inline void stack_analyze(const char *name, const char *stack, * the stack beginning doesn't match that there will be some * unused bytes in the beginning. */ - stack_offset = __tTCS_SIZEOF + ((4 - ((unsigned)stack % 4)) % 4); + stack_offset = K_THREAD_SIZEOF + ((4 - ((unsigned)stack % 4)) % 4); /* TODO * Currently all supported platforms have stack growth down and there is no diff --git a/include/net/ip_buf.h b/include/net/ip_buf.h index 88ebecb5228..7ea14cedc8a 100644 --- a/include/net/ip_buf.h +++ b/include/net/ip_buf.h @@ -355,7 +355,7 @@ static inline void net_analyze_stack(const char *name, * the stack beginning doesn't match that there will be some * unused bytes in the beginning. */ - stack_offset = __tTCS_SIZEOF + ((4 - ((unsigned)stack % 4)) % 4); + stack_offset = K_THREAD_SIZEOF + ((4 - ((unsigned)stack % 4)) % 4); /* TODO * Currently all supported platforms have stack growth down and there is no diff --git a/kernel/unified/alert.c b/kernel/unified/alert.c index 61b818bc399..03aaea67403 100644 --- a/kernel/unified/alert.c +++ b/kernel/unified/alert.c @@ -20,7 +20,7 @@ */ #include -#include +#include #include #include #include diff --git a/kernel/unified/compiler_stack_protect.c b/kernel/unified/compiler_stack_protect.c index 506d5056e1e..d47b7af83d0 100644 --- a/kernel/unified/compiler_stack_protect.c +++ b/kernel/unified/compiler_stack_protect.c @@ -28,7 +28,7 @@ #include /* compiler specific configurations */ -#include +#include #include #include diff --git a/kernel/unified/errno.c b/kernel/unified/errno.c index 08f27a57a39..9305faba452 100644 --- a/kernel/unified/errno.c +++ b/kernel/unified/errno.c @@ -22,7 +22,7 @@ * context switching. 
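 *
 * Illustrative use (assuming the C library maps errno onto the accessor
 * below, e.g. via '#define errno (*_get_errno())'):
 *
 *   errno = 0;
 *   ret = some_call();      /@ hypothetical call that sets errno @/
 *   if (ret < 0 && errno == EAGAIN) {
 *           /@ reads this thread's errno_var only @/
 *   }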
*/ -#include +#include /* * Define _k_neg_eagain for use in assembly files as errno.h is @@ -34,6 +34,6 @@ const int _k_neg_eagain = -EAGAIN; #ifdef CONFIG_ERRNO int *_get_errno(void) { - return &_nanokernel.current->errno_var; + return &_current->errno_var; } #endif diff --git a/kernel/unified/fifo.c b/kernel/unified/fifo.c index 8afa595b6a0..a3e8b95f4eb 100644 --- a/kernel/unified/fifo.c +++ b/kernel/unified/fifo.c @@ -22,7 +22,7 @@ #include -#include +#include #include #include #include @@ -156,5 +156,5 @@ void *k_fifo_get(struct k_fifo *fifo, int32_t timeout) _pend_current_thread(&fifo->wait_q, timeout); - return _Swap(key) ? NULL : _current->swap_data; + return _Swap(key) ? NULL : _current->base.swap_data; } diff --git a/kernel/unified/idle.c b/kernel/unified/idle.c index f388b669847..b6e36a3d5c1 100644 --- a/kernel/unified/idle.c +++ b/kernel/unified/idle.c @@ -15,7 +15,7 @@ */ #include -#include +#include #include #include #include @@ -58,7 +58,7 @@ void __attribute__((weak)) _sys_soc_resume_from_deep_sleep(void) */ static void set_kernel_idle_time_in_ticks(int32_t ticks) { - _nanokernel.idle = ticks; + _kernel.idle = ticks; } #else #define set_kernel_idle_time_in_ticks(x) do { } while (0) diff --git a/kernel/unified/include/gen_offset.h b/kernel/unified/include/gen_offset.h index c44d554716e..e195999c9c3 100644 --- a/kernel/unified/include/gen_offset.h +++ b/kernel/unified/include/gen_offset.h @@ -55,7 +55,7 @@ * /@ include struct definitions for which offsets symbols are to be * generated @/ * - * #include + * #include * GEN_ABS_SYM_BEGIN (_OffsetAbsSyms) /@ the name parameter is arbitrary @/ * /@ tNANO structure member offsets @/ * @@ -63,7 +63,7 @@ * GEN_OFFSET_SYM (tNANO, task); * GEN_OFFSET_SYM (tNANO, current); * GEN_OFFSET_SYM (tNANO, nested); - * GEN_OFFSET_SYM (tNANO, common_isp); + * GEN_OFFSET_SYM (tNANO, irq_stack); * * GEN_ABSOLUTE_SYM (__tNANO_SIZEOF, sizeof(tNANO)); * @@ -73,7 +73,7 @@ * Compiling the sample offsets.c results in the following symbols in offsets.o: * * $ nm offsets.o - * 00000010 A __tNANO_common_isp_OFFSET + * 00000010 A __tNANO_irq_stack_OFFSET * 00000008 A __tNANO_current_OFFSET * 0000000c A __tNANO_nested_OFFSET * 00000000 A __tNANO_fiber_OFFSET diff --git a/kernel/unified/include/nano_offsets.h b/kernel/unified/include/kernel_offsets.h similarity index 51% rename from kernel/unified/include/nano_offsets.h rename to kernel/unified/include/kernel_offsets.h index ed4e22c412f..ad61f8db426 100644 --- a/kernel/unified/include/nano_offsets.h +++ b/kernel/unified/include/kernel_offsets.h @@ -1,5 +1,3 @@ -/* nano_offsets.h - nanokernel structure member offset definitions */ - /* * Copyright (c) 2013-2014 Wind River Systems, Inc. 
* @@ -17,8 +15,8 @@ */ #include -#ifndef _NANO_OFFSETS__H_ -#define _NANO_OFFSETS__H_ +#ifndef _kernel_offsets__h_ +#define _kernel_offsets__h_ /* * The final link step uses the symbol _OffsetAbsSyms to force the linkage of @@ -29,38 +27,45 @@ GEN_ABS_SYM_BEGIN(_OffsetAbsSyms) /* arch-agnostic tNANO structure member offsets */ -GEN_OFFSET_SYM(tNANO, current); +GEN_OFFSET_SYM(_kernel_t, current); #if defined(CONFIG_THREAD_MONITOR) -GEN_OFFSET_SYM(tNANO, threads); +GEN_OFFSET_SYM(_kernel_t, threads); +#endif + +GEN_OFFSET_SYM(_kernel_t, nested); +GEN_OFFSET_SYM(_kernel_t, irq_stack); +#ifdef CONFIG_SYS_POWER_MANAGEMENT +GEN_OFFSET_SYM(_kernel_t, idle); #endif #ifdef CONFIG_FP_SHARING -GEN_OFFSET_SYM(tNANO, current_fp); +GEN_OFFSET_SYM(_kernel_t, current_fp); #endif -/* size of the entire tNANO structure */ +GEN_ABSOLUTE_SYM(_STRUCT_KERNEL_SIZE, sizeof(struct _kernel)); -GEN_ABSOLUTE_SYM(__tNANO_SIZEOF, sizeof(tNANO)); +GEN_OFFSET_SYM(_thread_base_t, flags); +GEN_OFFSET_SYM(_thread_base_t, prio); +GEN_OFFSET_SYM(_thread_base_t, sched_locked); +GEN_OFFSET_SYM(_thread_base_t, swap_data); -/* arch-agnostic struct tcs structure member offsets */ - -GEN_OFFSET_SYM(tTCS, prio); -GEN_OFFSET_SYM(tTCS, flags); -GEN_OFFSET_SYM(tTCS, coopReg); /* start of coop register set */ -GEN_OFFSET_SYM(tTCS, preempReg); /* start of prempt register set */ +GEN_OFFSET_SYM(_thread_t, base); +GEN_OFFSET_SYM(_thread_t, caller_saved); +GEN_OFFSET_SYM(_thread_t, callee_saved); +GEN_OFFSET_SYM(_thread_t, arch); #if defined(CONFIG_THREAD_MONITOR) -GEN_OFFSET_SYM(tTCS, next_thread); +GEN_OFFSET_SYM(_thread_t, next_thread); #endif -GEN_OFFSET_SYM(tTCS, sched_locked); +#ifdef CONFIG_THREAD_CUSTOM_DATA +GEN_OFFSET_SYM(_thread_t, custom_data); +#endif -/* size of the entire struct tcs structure */ - -GEN_ABSOLUTE_SYM(__tTCS_SIZEOF, sizeof(tTCS)); +GEN_ABSOLUTE_SYM(K_THREAD_SIZEOF, sizeof(struct k_thread)); /* size of the device structure. Used by linker scripts */ -GEN_ABSOLUTE_SYM(__DEVICE_STR_SIZEOF, sizeof(struct device)); +GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_SIZE, sizeof(struct device)); -#endif /* _NANO_OFFSETS__H_ */ +#endif /* _kernel_offsets__h_ */ diff --git a/kernel/unified/include/kernel_structs.h b/kernel/unified/include/kernel_structs.h new file mode 100644 index 00000000000..786f0dd5b60 --- /dev/null +++ b/kernel/unified/include/kernel_structs.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _kernel_structs__h_ +#define _kernel_structs__h_ + +#if !defined(_ASMLANGUAGE) +#include +#include +#include +#endif + +#include + +#if !defined(_ASMLANGUAGE) + +#ifdef CONFIG_THREAD_MONITOR +struct __thread_entry { + _thread_entry_t pEntry; + void *parameter1; + void *parameter2; + void *parameter3; +}; +#endif + +/* can be used for creating 'dummy' threads, e.g. 
for pending on objects */
+struct _thread_base {
+
+	/* this thread's entry in a ready/wait queue */
+	sys_dnode_t k_q_node;
+
+	/* execution flags */
+	int flags;
+
+	/* thread priority used to sort linked list */
+	int prio;
+
+	/* scheduler lock count */
+	atomic_t sched_locked;
+
+	/* data returned by APIs */
+	void *swap_data;
+
+#ifdef CONFIG_NANO_TIMEOUTS
+	/* this thread's entry in a timeout queue */
+	struct _timeout timeout;
+#endif
+
+};
+
+typedef struct _thread_base _thread_base_t;
+
+struct k_thread {
+
+	struct _thread_base base;
+
+	/* defined by the architecture, but all archs need these */
+	struct _caller_saved caller_saved;
+	struct _callee_saved callee_saved;
+
+	/* static thread init data */
+	void *init_data;
+
+	/* abort function */
+	void (*fn_abort)(void);
+
+#if defined(CONFIG_THREAD_MONITOR)
+	/* thread entry and parameters description */
+	struct __thread_entry *entry;
+
+	/* next item in list of all threads */
+	struct k_thread *next_thread;
+#endif
+
+#ifdef CONFIG_THREAD_CUSTOM_DATA
+	/* crude thread-local storage */
+	void *custom_data;
+#endif
+
+#ifdef CONFIG_ERRNO
+	/* per-thread errno variable */
+	int errno_var;
+#endif
+
+	/* arch-specifics: must always be at the end */
+	struct _thread_arch arch;
+};
+
+typedef struct k_thread _thread_t;
+
+struct _ready_q {
+
+	/* next thread to run if known, NULL otherwise */
+	struct k_thread *cache;
+
+	/* bitmap of priorities that contain at least one ready thread */
+	uint32_t prio_bmap[1];
+
+	/* ready queues, one per priority */
+	sys_dlist_t q[K_NUM_PRIORITIES];
+};
+
+struct _kernel {
+
+	/* nested interrupt count */
+	uint32_t nested;
+
+	/* interrupt stack pointer base */
+	char *irq_stack;
+
+	/* currently scheduled thread */
+	struct k_thread *current;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	/* queue of timeouts */
+	sys_dlist_t timeout_q;
+#endif
+
+#ifdef CONFIG_SYS_POWER_MANAGEMENT
+	int32_t idle; /* Number of ticks for kernel idling */
+#endif
+
+	/*
+	 * ready queue: can be big, keep after small fields, since some
+	 * assembly (e.g. ARC) is limited in the encoding of the offset
+	 */
+	struct _ready_q ready_q;
+
+#ifdef CONFIG_FP_SHARING
+	/*
+	 * A 'current_sse' field does not exist in addition to the 'current_fp'
+	 * field since it's not possible to divide the IA-32 non-integer
+	 * registers into 2 distinct blocks owned by differing threads. In
+	 * other words, given that the 'fxsave/fxrstor' instructions
+	 * save/restore both the X87 FPU and XMM registers, it's not possible
+	 * for a thread to only "own" the XMM registers.
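+	 *
+	 * Illustrative: a single instruction moves both register classes,
+	 * e.g. as in swap.S:
+	 *
+	 *   fxsave _thread_offset_to_preempFloatReg(%ebx)  /@ x87 AND XMM @/
+	 *
+	 * so there is nothing to hook per-class ownership tracking into.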
+	 */
+
+	/* thread (fiber or task) that owns the FP regs */
+	struct k_thread *current_fp;
+#endif
+
+#if defined(CONFIG_THREAD_MONITOR)
+	struct k_thread *threads; /* singly linked list of ALL threads */
+#endif
+
+	/* arch-specific part of _kernel */
+	struct _kernel_arch arch;
+};
+
+typedef struct _kernel _kernel_t;
+
+extern struct _kernel _kernel;
+
+#define _current _kernel.current
+#define _ready_q _kernel.ready_q
+#define _timeout_q _kernel.timeout_q
+#define _threads _kernel.threads
+
+#include
+
+static ALWAYS_INLINE void
+_set_thread_return_value_with_data(struct k_thread *thread,
+				   unsigned int value,
+				   void *data)
+{
+	_set_thread_return_value(thread, value);
+	thread->base.swap_data = data;
+}
+
+#endif /* _ASMLANGUAGE */
+
+#endif /* _kernel_structs__h_ */
diff --git a/kernel/unified/include/ksched.h b/kernel/unified/include/ksched.h
index 7162bcb2e0e..e016b02d2fc 100644
--- a/kernel/unified/include/ksched.h
+++ b/kernel/unified/include/ksched.h
@@ -17,10 +17,7 @@
 #ifndef _ksched__h_
 #define _ksched__h_

-#include
-#include
-#include
-#include
+#include

 extern k_tid_t const _main_thread;
 extern k_tid_t const _idle_thread;
@@ -111,30 +108,30 @@ static inline int _is_prio_lower(int prio1, int prio2)
 static inline int _is_t1_higher_prio_than_t2(struct k_thread *t1,
 					     struct k_thread *t2)
 {
-	return _is_prio1_higher_than_prio2(t1->prio, t2->prio);
+	return _is_prio1_higher_than_prio2(t1->base.prio, t2->base.prio);
 }

 static inline int _is_higher_prio_than_current(struct k_thread *thread)
 {
-	return _is_t1_higher_prio_than_t2(thread, _nanokernel.current);
+	return _is_t1_higher_prio_than_t2(thread, _current);
 }

 /* is thread currently cooperative ? */
 static inline int _is_coop(struct k_thread *thread)
 {
-	return thread->prio < 0;
+	return thread->base.prio < 0;
 }

 /* is thread currently preemptible ?
*/ static inline int _is_preempt(struct k_thread *thread) { - return !_is_coop(thread) && !atomic_get(&thread->sched_locked); + return !_is_coop(thread) && !atomic_get(&thread->base.sched_locked); } /* is current thread preemptible and we are not running in ISR context */ static inline int _is_current_execution_context_preemptible(void) { - return !_is_in_isr() && _is_preempt(_nanokernel.current); + return !_is_in_isr() && _is_preempt(_current); } /* find out if priority is under priority inheritance ceiling */ @@ -178,7 +175,7 @@ static inline int _get_ready_q_q_index(int prio) /* interrupts must be locked */ static inline int _get_highest_ready_prio(void) { - uint32_t ready = _nanokernel.ready_q.prio_bmap[0]; + uint32_t ready = _ready_q.prio_bmap[0]; return find_lsb_set(ready) - 1 - CONFIG_NUM_COOP_PRIORITIES; } @@ -204,7 +201,7 @@ static inline void _sched_lock(void) { __ASSERT(!_is_in_isr(), ""); - atomic_inc(&_nanokernel.current->sched_locked); + atomic_inc(&_current->base.sched_locked); K_DEBUG("scheduler locked (%p:%d)\n", _current, _current->sched_locked); @@ -220,77 +217,77 @@ static inline void _sched_unlock_no_reschedule(void) { __ASSERT(!_is_in_isr(), ""); - atomic_dec(&_nanokernel.current->sched_locked); + atomic_dec(&_current->base.sched_locked); } static inline void _set_thread_states(struct k_thread *thread, uint32_t states) { - thread->flags |= states; + thread->base.flags |= states; } static inline void _reset_thread_states(struct k_thread *thread, uint32_t states) { - thread->flags &= ~states; + thread->base.flags &= ~states; } /* mark a thread as being suspended */ static inline void _mark_thread_as_suspended(struct k_thread *thread) { - thread->flags |= K_SUSPENDED; + thread->base.flags |= K_SUSPENDED; } /* mark a thread as not being suspended */ static inline void _mark_thread_as_not_suspended(struct k_thread *thread) { - thread->flags &= ~K_SUSPENDED; + thread->base.flags &= ~K_SUSPENDED; } /* mark a thread as being in the timer queue */ static inline void _mark_thread_as_timing(struct k_thread *thread) { - thread->flags |= K_TIMING; + thread->base.flags |= K_TIMING; } /* mark a thread as not being in the timer queue */ static inline void _mark_thread_as_not_timing(struct k_thread *thread) { - thread->flags &= ~K_TIMING; + thread->base.flags &= ~K_TIMING; } /* check if a thread is on the timer queue */ static inline int _is_thread_timing(struct k_thread *thread) { - return !!(thread->flags & K_TIMING); + return !!(thread->base.flags & K_TIMING); } static inline int _has_thread_started(struct k_thread *thread) { - return !(thread->flags & K_PRESTART); + return !(thread->base.flags & K_PRESTART); } /* check if a thread is ready */ static inline int _is_thread_ready(struct k_thread *thread) { - return (thread->flags & K_EXECUTION_MASK) == K_READY; + return (thread->base.flags & K_EXECUTION_MASK) == K_READY; } /* mark a thread as pending in its TCS */ static inline void _mark_thread_as_pending(struct k_thread *thread) { - thread->flags |= K_PENDING; + thread->base.flags |= K_PENDING; } /* mark a thread as not pending in its TCS */ static inline void _mark_thread_as_not_pending(struct k_thread *thread) { - thread->flags &= ~K_PENDING; + thread->base.flags &= ~K_PENDING; } /* check if a thread is pending */ static inline int _is_thread_pending(struct k_thread *thread) { - return !!(thread->flags & K_PENDING); + return !!(thread->base.flags & K_PENDING); } /* @@ -300,17 +297,17 @@ static inline int _is_thread_pending(struct k_thread *thread) /* must be called with 
interrupts locked */ static inline void _ready_thread(struct k_thread *thread) { - __ASSERT(_is_prio_higher(thread->prio, K_LOWEST_THREAD_PRIO) || - ((thread->prio == K_LOWEST_THREAD_PRIO) && + __ASSERT(_is_prio_higher(thread->base.prio, K_LOWEST_THREAD_PRIO) || + ((thread->base.prio == K_LOWEST_THREAD_PRIO) && (thread == _idle_thread)), "thread %p prio too low (is %d, cannot be lower than %d)", - thread, thread->prio, + thread, thread->base.prio, thread == _idle_thread ? K_LOWEST_THREAD_PRIO : K_LOWEST_APPLICATION_THREAD_PRIO); - __ASSERT(!_is_prio_higher(thread->prio, K_HIGHEST_THREAD_PRIO), + __ASSERT(!_is_prio_higher(thread->base.prio, K_HIGHEST_THREAD_PRIO), "thread %p prio too high (id %d, cannot be higher than %d)", - thread, thread->prio, K_HIGHEST_THREAD_PRIO); + thread, thread->base.prio, K_HIGHEST_THREAD_PRIO); /* K_PRESTART is needed to handle the start-with-delay case */ _reset_thread_states(thread, K_TIMING|K_PRESTART); @@ -327,7 +324,7 @@ static inline void _ready_thread(struct k_thread *thread) */ static inline void _mark_thread_as_started(struct k_thread *thread) { - thread->flags &= ~K_PRESTART; + thread->base.flags &= ~K_PRESTART; } /** @@ -337,7 +334,7 @@ static inline void _mark_thread_as_started(struct k_thread *thread) */ static inline void _mark_thread_as_dead(struct k_thread *thread) { - thread->flags |= K_DEAD; + thread->base.flags |= K_DEAD; } /* @@ -349,10 +346,10 @@ static inline void _thread_priority_set(struct k_thread *thread, int prio) { if (_is_thread_ready(thread)) { _remove_thread_from_ready_q(thread); - thread->prio = prio; + thread->base.prio = prio; _add_thread_to_ready_q(thread); } else { - thread->prio = prio; + thread->base.prio = prio; } } @@ -378,9 +375,9 @@ static inline struct k_thread *_unpend_first_thread(_wait_q_t *wait_q) /* must be called with interrupts locked */ static inline void _unpend_thread(struct k_thread *thread) { - __ASSERT(thread->flags & K_PENDING, ""); + __ASSERT(thread->base.flags & K_PENDING, ""); - sys_dlist_remove(&thread->k_q_node); + sys_dlist_remove(&thread->base.k_q_node); _mark_thread_as_not_pending(thread); } diff --git a/kernel/unified/include/nano_internal.h b/kernel/unified/include/nano_internal.h index 2161216f765..344f085981b 100644 --- a/kernel/unified/include/nano_internal.h +++ b/kernel/unified/include/nano_internal.h @@ -49,7 +49,6 @@ FUNC_NORETURN void _Cstart(void); /* helper type alias for thread control structure */ -typedef struct tcs tTCS; typedef void (*_thread_entry_t)(void *, void *, void *); extern void _thread_entry(void (*)(void *, void *, void *), @@ -73,9 +72,9 @@ extern void _thread_essential_clear(void); /* clean up when a thread is aborted */ #if defined(CONFIG_THREAD_MONITOR) -extern void _thread_monitor_exit(struct tcs *tcs); +extern void _thread_monitor_exit(struct k_thread *thread); #else -#define _thread_monitor_exit(tcs) \ +#define _thread_monitor_exit(thread) \ do {/* nothing */ \ } while (0) #endif /* CONFIG_THREAD_MONITOR */ diff --git a/kernel/unified/include/offsets_short.h b/kernel/unified/include/offsets_short.h new file mode 100644 index 00000000000..dbfb1f5aaf5 --- /dev/null +++ b/kernel/unified/include/offsets_short.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
diff --git a/kernel/unified/include/offsets_short.h b/kernel/unified/include/offsets_short.h
new file mode 100644
index 00000000000..dbfb1f5aaf5
--- /dev/null
+++ b/kernel/unified/include/offsets_short.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016 Wind River Systems, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _offsets_short__h_
+#define _offsets_short__h_
+
+#include
+#include
+
+/* kernel */
+
+/* main */
+
+#define _kernel_offset_to_nested \
+	(___kernel_t_nested_OFFSET)
+
+#define _kernel_offset_to_irq_stack \
+	(___kernel_t_irq_stack_OFFSET)
+
+#define _kernel_offset_to_current \
+	(___kernel_t_current_OFFSET)
+
+#define _kernel_offset_to_idle \
+	(___kernel_t_idle_OFFSET)
+
+#define _kernel_offset_to_current_fp \
+	(___kernel_t_current_fp_OFFSET)
+
+/* end - kernel */
+
+/* threads */
+
+/* main */
+
+#define _thread_offset_to_callee_saved \
+	(___thread_t_callee_saved_OFFSET)
+
+/* base */
+
+#define _thread_offset_to_flags \
+	(___thread_t_base_OFFSET + ___thread_base_t_flags_OFFSET)
+
+#define _thread_offset_to_prio \
+	(___thread_t_base_OFFSET + ___thread_base_t_prio_OFFSET)
+
+#define _thread_offset_to_sched_locked \
+	(___thread_t_base_OFFSET + ___thread_base_t_sched_locked_OFFSET)
+
+#define _thread_offset_to_esf \
+	(___thread_t_arch_OFFSET + ___thread_arch_t_esf_OFFSET)
+
+
+/* end - threads */
+
+#endif /* _offsets_short__h_ */
diff --git a/kernel/unified/include/timeout_q.h b/kernel/unified/include/timeout_q.h
index 31642f04370..66f7a4994fe 100644
--- a/kernel/unified/include/timeout_q.h
+++ b/kernel/unified/include/timeout_q.h
@@ -67,16 +67,16 @@ static inline void _init_timeout(struct _timeout *t, _timeout_func_t func)

 static inline void _init_thread_timeout(struct k_thread *thread)
 {
-	_init_timeout(&thread->timeout, NULL);
+	_init_timeout(&thread->base.timeout, NULL);
 }

 /*
  * XXX - backwards compatibility until the arch part is updated to call
  * _init_thread_timeout()
  */
-static inline void _nano_timeout_tcs_init(struct tcs *tcs)
+static inline void _nano_timeout_thread_init(struct k_thread *thread)
 {
-	_init_thread_timeout(tcs);
+	_init_thread_timeout(thread);
 }

 /* remove a thread timing out from kernel object's wait queue */
@@ -86,7 +86,7 @@ static inline void _unpend_thread_timing_out(struct k_thread *thread,
 {
 	if (timeout_obj->wait_q) {
 		_unpend_thread(thread);
-		thread->timeout.wait_q = NULL;
+		thread->base.timeout.wait_q = NULL;
 	}
 }

@@ -132,7 +132,7 @@ static inline struct _timeout *_handle_one_timeout(

 static inline void _handle_timeouts(void)
 {
-	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
+	sys_dlist_t *timeout_q = &_timeout_q;
 	struct _timeout *next;

 	next = (struct _timeout *)sys_dlist_peek_head(timeout_q);
@@ -145,7 +145,7 @@ static inline void _handle_timeouts(void)

 static inline int _abort_timeout(struct _timeout *t)
 {
-	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
+	sys_dlist_t *timeout_q = &_timeout_q;

 	if (-1 == t->delta_ticks_from_prev) {
 		return -1;
@@ -165,7 +165,7 @@ static inline int _abort_timeout(struct _timeout *t)

 static inline int _abort_thread_timeout(struct k_thread *thread)
 {
-	return _abort_timeout(&thread->timeout);
+	return _abort_timeout(&thread->base.timeout);
 }

 /*
@@ -210,12 +210,12 @@ static inline void _add_timeout(struct k_thread *thread,
 	K_DEBUG("thread %p on wait_q %p, for timeout: %d\n",
 		thread, wait_q, timeout);

-	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
+	sys_dlist_t *timeout_q = &_timeout_q;

 	K_DEBUG("timeout_q %p before: head: %p, tail: %p\n",
-		&_nanokernel.timeout_q,
-		sys_dlist_peek_head(&_nanokernel.timeout_q),
-		_nanokernel.timeout_q.tail);
+		&_timeout_q,
+		sys_dlist_peek_head(&_timeout_q),
+		_timeout_q.tail);

 	K_DEBUG("timeout %p before: next: %p, prev: %p\n",
 		timeout_obj, timeout_obj->node.next, timeout_obj->node.prev);
@@ -228,9 +228,9 @@ static inline void _add_timeout(struct k_thread *thread,
 		&timeout_obj->delta_ticks_from_prev);

 	K_DEBUG("timeout_q %p after: head: %p, tail: %p\n",
-		&_nanokernel.timeout_q,
-		sys_dlist_peek_head(&_nanokernel.timeout_q),
-		_nanokernel.timeout_q.tail);
+		&_timeout_q,
+		sys_dlist_peek_head(&_timeout_q),
+		_timeout_q.tail);

 	K_DEBUG("timeout %p after: next: %p, prev: %p\n",
 		timeout_obj, timeout_obj->node.next, timeout_obj->node.prev);
@@ -245,7 +245,7 @@ static inline void _add_timeout(struct k_thread *thread,
 static inline void _add_thread_timeout(struct k_thread *thread,
 				       _wait_q_t *wait_q, int32_t timeout)
 {
-	_add_timeout(thread, &thread->timeout, wait_q, timeout);
+	_add_timeout(thread, &thread->base.timeout, wait_q, timeout);
 }

 /* find the closest deadline in the timeout queue */
diff --git a/kernel/unified/include/wait_q.h b/kernel/unified/include/wait_q.h
index bfa1108fe10..d499871c4c2 100644
--- a/kernel/unified/include/wait_q.h
+++ b/kernel/unified/include/wait_q.h
@@ -19,7 +19,7 @@
 #ifndef _kernel_nanokernel_include_wait_q__h_
 #define _kernel_nanokernel_include_wait_q__h_

-#include
+#include
 #include
 #include

@@ -31,7 +31,7 @@ extern "C" {
 #include
 #else
 #define _init_thread_timeout(thread) do { } while ((0))
-#define _nano_timeout_tcs_init(thread) _init_thread_timeout(thread)
+#define _nano_timeout_thread_init(thread) _init_thread_timeout(thread)
 #define _add_thread_timeout(thread, wait_q, timeout) do { } while (0)
 static inline int _abort_thread_timeout(struct k_thread *thread) { return 0; }
 #define _get_next_timeout_expiry() (K_FOREVER)
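offsets_short.h above only composes constants that gen_offset_header extracts from offsets.o: the sum of a structure offset and a member offset is what lets assembly reach a nested field in a single displacement. A hedged illustration of the identity being relied on, using stand-in layouts (the real values come from the generated header, not from these structs):

#include <stddef.h>
#include <stdio.h>

/* stand-in layouts; actual field sets and sizes are assumptions */
struct _thread_base { unsigned int flags; int prio; int sched_locked; };
struct k_thread { long callee_saved[8]; struct _thread_base base; };

/* what the generated constants denote, expressed with offsetof() */
#define ___thread_t_base_OFFSET       offsetof(struct k_thread, base)
#define ___thread_base_t_prio_OFFSET  offsetof(struct _thread_base, prio)

/* the composition performed by offsets_short.h */
#define _thread_offset_to_prio \
	(___thread_t_base_OFFSET + ___thread_base_t_prio_OFFSET)

int main(void)
{
	/* assembly such as "ld_s r0, [r2, _thread_offset_to_prio]" depends
	 * on this identity holding for the real structures
	 */
	printf("%zu == %zu\n", _thread_offset_to_prio,
	       offsetof(struct k_thread, base.prio));
	return 0;
}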
diff --git a/kernel/unified/init.c b/kernel/unified/init.c
index 29591daedb0..781d625c857 100644
--- a/kernel/unified/init.c
+++ b/kernel/unified/init.c
@@ -22,13 +22,13 @@

 #include
-#include
+#include
 #include
 #include
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -116,7 +116,7 @@ char __noinit __stack _interrupt_stack[CONFIG_ISR_STACK_SIZE];
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 #include
 #define initialize_timeouts() do { \
-		sys_dlist_init(&_nanokernel.timeout_q); \
+		sys_dlist_init(&_timeout_q); \
	} while ((0))
 #else
 #define initialize_timeouts() do { } while ((0))
@@ -219,7 +219,7 @@ static void _main(void *unused1, void *unused2, void *unused3)
 	main();

 	/* Terminate thread normally since it has no more work to do */
-	_main_thread->flags &= ~K_ESSENTIAL;
+	_main_thread->base.flags &= ~K_ESSENTIAL;
 }

 void __weak main(void)
@@ -234,7 +234,7 @@ void __weak main(void)
  * This routine initializes various nanokernel data structures, including
  * the background (or idle) task and any architecture-specific initialization.
  *
- * Note that all fields of "_nanokernel" are set to zero on entry, which may
+ * Note that all fields of "_kernel" are set to zero on entry, which may
  * be all the initialization many of them require.
  *
  * @return N/A
@@ -255,10 +255,10 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
	 * Do not insert dummy execution context in the list of fibers, so
	 * that it does not get scheduled back in once context-switched out.
	 */
-	dummy_thread->flags = K_ESSENTIAL;
-	dummy_thread->prio = K_PRIO_COOP(0);
+	dummy_thread->base.flags = K_ESSENTIAL;
+	dummy_thread->base.prio = K_PRIO_COOP(0);

-	/* _nanokernel.ready_q is all zeroes */
+	/* _kernel.ready_q is all zeroes */

	/*
@@ -274,7 +274,7 @@ static void prepare_multithreading(struct k_thread *dummy_thread)

	/* ready the init/main and idle threads */
	for (int ii = 0; ii < K_NUM_PRIORITIES; ii++) {
-		sys_dlist_init(&_nanokernel.ready_q.q[ii]);
+		sys_dlist_init(&_ready_q.q[ii]);
	}

	_new_thread(main_stack, MAIN_STACK_SIZE, NULL,
@@ -359,7 +359,7 @@ FUNC_NORETURN void _Cstart(void)
 {
	/* floating point operations are NOT performed during nanokernel init */
-	char __stack dummy_thread[__tTCS_NOFLOAT_SIZEOF];
+	char __stack dummy_thread[_K_THREAD_NO_FLOAT_SIZEOF];

	/*
	 * Initialize nanokernel data structures. This step includes
diff --git a/kernel/unified/kernel_event_logger.c b/kernel/unified/kernel_event_logger.c
index 981d2f909fa..18b3930b82e 100644
--- a/kernel/unified/kernel_event_logger.c
+++ b/kernel/unified/kernel_event_logger.c
@@ -23,7 +23,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include

@@ -81,7 +81,7 @@ void sys_k_event_logger_put_timed(uint16_t event_id)
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 void _sys_k_event_logger_context_switch(void)
 {
-	extern tNANO _nanokernel;
+	extern struct _kernel _kernel;
	uint32_t data[2];

	extern void _sys_event_logger_put_non_preemptible(
@@ -101,12 +101,12 @@ void _sys_k_event_logger_context_switch(void)
		return;
	}

-	if (_collector_coop_thread == _nanokernel.current) {
+	if (_collector_coop_thread == _kernel.current) {
		return;
	}

	data[0] = _sys_k_get_time();
-	data[1] = (uint32_t)_nanokernel.current;
+	data[1] = (uint32_t)_kernel.current;

	/*
	 * The mechanism we use to log the kernel events uses a sync semaphore
@@ -137,7 +137,7 @@ void sys_k_event_logger_register_as_collector(void)
 {
	ASSERT_CURRENT_IS_COOP_THREAD();

-	_collector_coop_thread = _nanokernel.current;
+	_collector_coop_thread = _kernel.current;
 }
 #endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
diff --git a/kernel/unified/legacy_offload.c b/kernel/unified/legacy_offload.c
index f652994679a..007c1d44c5d 100644
--- a/kernel/unified/legacy_offload.c
+++ b/kernel/unified/legacy_offload.c
@@ -22,7 +22,7 @@

 #include
-#include
+#include
 #include
 #include

@@ -46,7 +46,7 @@ static void offload_handler(struct k_work *work)
	int result = (offload->offload_func)(offload->offload_args);
	unsigned int key = irq_lock();

-	offload->thread->swap_data = (void *)result;
+	offload->thread->base.swap_data = (void *)result;
	irq_unlock(key);
 }

@@ -68,7 +68,7 @@ int task_offload_to_fiber(int (*func)(), void *argp)
	offload.thread = _current;
	k_work_submit_to_queue(&offload_work_q, &offload.work_item);

-	return (int)_current->swap_data;
+	return (int)_current->base.swap_data;
 }

 static char __stack offload_work_q_stack[CONFIG_OFFLOAD_WORKQUEUE_STACK_SIZE];
diff --git a/kernel/unified/lifo.c b/kernel/unified/lifo.c
index bd662dc5b4e..03d58972e87 100644
--- a/kernel/unified/lifo.c
+++ b/kernel/unified/lifo.c
@@ -20,7 +20,7 @@

 #include
-#include
+#include
 #include
 #include
 #include

@@ -111,5 +111,5 @@ void *k_lifo_get(struct k_lifo *lifo, int32_t timeout)

	_pend_current_thread(&lifo->wait_q, timeout);

-	return _Swap(key) ? NULL : _current->swap_data;
+	return _Swap(key) ? NULL : _current->base.swap_data;
 }
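lifo.c and legacy_offload.c show the handoff convention that most of the following files repeat: a blocking thread pends, swaps out, and finds its result in its own swap_data slot when it resumes (now spelled _current->base.swap_data). A toy single-threaded simulation of just that convention; swap_sim() is a stand-in collapsing _Swap() and the waker's store into one call, and nothing here is a kernel API:

#include <stdio.h>

struct _thread_base_sim { void *swap_data; };           /* (sim) */
static struct _thread_base_sim current_base;            /* _current->base */

/* (sim) models "block, get woken, return the waker's status code": the
 * waker writes the payload into the sleeper's swap_data before readying
 * it, and _Swap() returns the waker's status (0 == success)
 */
static int swap_sim(void *payload_from_waker)
{
	current_base.swap_data = payload_from_waker;
	return 0;
}

/* shape of k_lifo_get()'s tail after the refactor */
static void *blocking_get_sim(void)
{
	return swap_sim("item") ? NULL : current_base.swap_data;
}

int main(void)
{
	printf("got: %s\n", (char *)blocking_get_sim());
	return 0;
}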
diff --git a/kernel/unified/mailbox.c b/kernel/unified/mailbox.c
index 5fd7b14f218..7eb255ff901 100644
--- a/kernel/unified/mailbox.c
+++ b/kernel/unified/mailbox.c
@@ -19,7 +19,7 @@

 #include
-#include
+#include
 #include
 #include
 #include
@@ -33,7 +33,7 @@

 /* asynchronous message descriptor type */
 struct k_mbox_async {
-	struct tcs_base thread;		/* dummy thread object */
+	struct _thread_base thread;	/* dummy thread object */
	struct k_mbox_msg tx_msg;	/* transmit message descriptor */
 };

@@ -201,7 +201,7 @@ static void _mbox_message_dispose(struct k_mbox_msg *rx_msg)
	/* recover sender info */
	sending_thread = rx_msg->_syncing_thread;
	rx_msg->_syncing_thread = NULL;
-	tx_msg = (struct k_mbox_msg *)sending_thread->swap_data;
+	tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

	/* update data size field for sender */
	tx_msg->size = rx_msg->size;
@@ -211,7 +211,7 @@ static void _mbox_message_dispose(struct k_mbox_msg *rx_msg)
	 * asynchronous send: free asynchronous message descriptor +
	 * dummy thread pair, then give semaphore (if needed)
	 */
-	if (sending_thread->flags & K_DUMMY) {
+	if (sending_thread->base.flags & K_DUMMY) {
		struct k_sem *async_sem = tx_msg->_async_sem;

		_mbox_async_free((struct k_mbox_async *)sending_thread);
@@ -258,14 +258,14 @@ static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,

	/* finish readying sending thread (actual or dummy) for send */
	sending_thread = tx_msg->_syncing_thread;
-	sending_thread->swap_data = tx_msg;
+	sending_thread->base.swap_data = tx_msg;

	/* search mailbox's rx queue for a compatible receiver */
	key = irq_lock();

	SYS_DLIST_FOR_EACH_NODE(&mbox->rx_msg_queue, wait_q_item) {
		receiving_thread = (struct k_thread *)wait_q_item;
-		rx_msg = (struct k_mbox_msg *)receiving_thread->swap_data;
+		rx_msg = (struct k_mbox_msg *)receiving_thread->base.swap_data;

		if (_mbox_message_match(tx_msg, rx_msg) == 0) {
			/* take receiver out of rx queue */
@@ -284,7 +284,7 @@ static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
			 * note: dummy sending thread sits (unqueued)
			 * until the receiver consumes the message
			 */
-			if (sending_thread->flags & K_DUMMY) {
+			if (sending_thread->base.flags & K_DUMMY) {
				_reschedule_threads(key);
				return 0;
			}
@@ -308,7 +308,7 @@ static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/* asynchronous send: dummy thread waits on tx queue for receiver */
-	if (sending_thread->flags & K_DUMMY) {
+	if (sending_thread->base.flags & K_DUMMY) {
		_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
		irq_unlock(key);
		return 0;
@@ -340,7 +340,7 @@ void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
	 */
	_mbox_async_alloc(&async);

-	async->thread.prio = _current->prio;
+	async->thread.prio = _current->base.prio;

	async->tx_msg = *tx_msg;
	async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
@@ -448,7 +448,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,

	SYS_DLIST_FOR_EACH_NODE(&mbox->tx_msg_queue, wait_q_item) {
		sending_thread = (struct k_thread *)wait_q_item;
-		tx_msg = (struct k_mbox_msg *)sending_thread->swap_data;
+		tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

		if (_mbox_message_match(tx_msg, rx_msg) == 0) {
			/* take sender out of mailbox's tx queue */
@@ -472,7 +472,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
	/* wait until a matching sender appears or a timeout occurs */
	_pend_current_thread(&mbox->rx_msg_queue, timeout);
-	_current->swap_data = rx_msg;
+	_current->base.swap_data = rx_msg;
	result = _Swap(key);

	/* consume message data immediately, if needed */
@@ -499,7 +499,7 @@ int task_mbox_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg,
	}

	/* handle sending message of current thread priority */
-	curr_prio = _current->prio;
+	curr_prio = _current->base.prio;
	if (prio == curr_prio) {
		return _error_to_rc(k_mbox_put(mbox, tx_msg,
					       _ticks_to_ms(timeout)));
@@ -527,7 +527,7 @@ void task_mbox_block_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg,
	unsigned int key;

	/* handle sending message of current thread priority */
-	curr_prio = _current->prio;
+	curr_prio = _current->base.prio;
	if (prio == curr_prio) {
		k_mbox_async_put(mbox, tx_msg, sema);
		return;
diff --git a/kernel/unified/mem_pool.c b/kernel/unified/mem_pool.c
index 930c67d06dc..a509f35d9d6 100644
--- a/kernel/unified/mem_pool.c
+++ b/kernel/unified/mem_pool.c
@@ -19,7 +19,7 @@

 #include
-#include
+#include
 #include
 #include
 #include
@@ -434,7 +434,7 @@ static void block_waiters_check(struct k_mem_pool *pool)

	/* loop all waiters */
	while (waiter != NULL) {
-		uint32_t req_size = (uint32_t)(waiter->swap_data);
+		uint32_t req_size = (uint32_t)(waiter->base.swap_data);

		/* locate block set to try allocating from */
		offset = compute_block_set_index(pool, req_size);
@@ -443,7 +443,7 @@ static void block_waiters_check(struct k_mem_pool *pool)
		found_block = get_block_recursive(pool, offset, offset);

		next_waiter = (struct k_thread *)sys_dlist_peek_next(
-			&pool->wait_q, &waiter->k_q_node);
+			&pool->wait_q, &waiter->base.k_q_node);

		/* if success : remove task from list and reschedule */
		if (found_block != NULL) {
@@ -509,13 +509,13 @@ int k_mem_pool_alloc(struct k_mem_pool *pool, struct k_mem_block *block,
		unsigned int key = irq_lock();

		_sched_unlock_no_reschedule();
-		_current->swap_data = (void *)size;
+		_current->base.swap_data = (void *)size;
		_pend_current_thread(&pool->wait_q, timeout);
		result = _Swap(key);
		if (result == 0) {
			block->pool_id = pool;
-			block->addr_in_pool = _current->swap_data;
-			block->data = _current->swap_data;
+			block->addr_in_pool = _current->base.swap_data;
+			block->data = _current->base.swap_data;
			block->req_size = size;
		}
		return result;
	}
diff --git a/kernel/unified/mem_slab.c b/kernel/unified/mem_slab.c
index 3402303ea2b..6bff6ea8c03 100644
--- a/kernel/unified/mem_slab.c
+++ b/kernel/unified/mem_slab.c
@@ -15,7 +15,7 @@

 #include
-#include
+#include
 #include
 #include
 #include
@@ -109,7 +109,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, int32_t timeout)
		_pend_current_thread(&slab->wait_q, timeout);
		result = _Swap(key);
		if (result == 0) {
-			*mem = _current->swap_data;
+			*mem = _current->base.swap_data;
		}
		return result;
	}
diff --git a/kernel/unified/msg_q.c b/kernel/unified/msg_q.c
index 374a8faaecd..652ec6f6b0c 100644
--- a/kernel/unified/msg_q.c
+++ b/kernel/unified/msg_q.c
@@ -21,7 +21,7 @@

 #include
-#include
+#include
 #include
 #include
 #include
@@ -83,7 +83,8 @@ int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout)
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* give message to waiting thread */
-			memcpy(pending_thread->swap_data, data, q->msg_size);
+			memcpy(pending_thread->base.swap_data, data,
+			       q->msg_size);
			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
@@ -108,7 +109,7 @@ int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout)
	} else {
		/* wait for put message success, failure, or timeout */
		_pend_current_thread(&q->wait_q, timeout);
-		_current->swap_data = data;
+		_current->base.swap_data = data;
		return _Swap(key);
	}

@@ -138,7 +139,7 @@ int k_msgq_get(struct k_msgq *q, void *data, int32_t timeout)
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* add thread's message to queue */
-			memcpy(q->write_ptr, pending_thread->swap_data,
+			memcpy(q->write_ptr, pending_thread->base.swap_data,
			       q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
@@ -162,7 +163,7 @@ int k_msgq_get(struct k_msgq *q, void *data, int32_t timeout)
	} else {
		/* wait for get message success or timeout */
		_pend_current_thread(&q->wait_q, timeout);
-		_current->swap_data = data;
+		_current->base.swap_data = data;
		return _Swap(key);
	}
diff --git a/kernel/unified/mutex.c b/kernel/unified/mutex.c
index ee51c67c4e9..be0640ad34a 100644
--- a/kernel/unified/mutex.c
+++ b/kernel/unified/mutex.c
@@ -37,7 +37,7 @@

 #include
-#include
+#include
 #include
 #include
 #include
@@ -116,7 +116,7 @@ static int new_prio_for_inheritance(int target, int limit)

 static void adjust_owner_prio(struct k_mutex *mutex, int new_prio)
 {
-	if (mutex->owner->prio != new_prio) {
+	if (mutex->owner->base.prio != new_prio) {

		K_DEBUG("%p (ready (y/n): %c) prio changed to %d (was %d)\n",
			mutex->owner, _is_thread_ready(mutex->owner) ?
@@ -138,7 +138,7 @@ int k_mutex_lock(struct k_mutex *mutex, int32_t timeout)
		RECORD_STATE_CHANGE();

		mutex->owner_orig_prio = mutex->lock_count == 0 ?
-					_current->prio :
+					_current->base.prio :
					mutex->owner_orig_prio;

		mutex->lock_count++;
@@ -166,7 +166,8 @@ int k_mutex_lock(struct k_mutex *mutex, int32_t timeout)
	}
	new_prio = _get_new_prio_with_ceiling(new_prio);
 #endif
-	new_prio = new_prio_for_inheritance(_current->prio, mutex->owner->prio);
+	new_prio = new_prio_for_inheritance(_current->base.prio,
+					    mutex->owner->base.prio);

	key = irq_lock();
@@ -196,8 +197,8 @@ int k_mutex_lock(struct k_mutex *mutex, int32_t timeout)
		(struct k_thread *)sys_dlist_peek_head(&mutex->wait_q);

	new_prio = mutex->owner_orig_prio;
-	new_prio = waiter ? new_prio_for_inheritance(waiter->prio, new_prio) :
-			    new_prio;
+	new_prio = waiter ? new_prio_for_inheritance(waiter->base.prio,
+						     new_prio) : new_prio;

	K_DEBUG("adjusting prio down on mutex %p\n", mutex);
@@ -254,7 +255,7 @@ void k_mutex_unlock(struct k_mutex *mutex)
		 */
		mutex->owner = new_owner;
		mutex->lock_count++;
-		mutex->owner_orig_prio = new_owner->prio;
+		mutex->owner_orig_prio = new_owner->base.prio;
	} else {
		irq_unlock(key);
		mutex->owner = NULL;
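The mutex hunks thread base.prio through the priority-inheritance math. new_prio_for_inheritance() itself is not shown in this diff; the following is a plausible reading consistent with its call sites (boost only when the contender outranks the owner), assuming numerically lower values mean higher priority, as the negative cooperative priorities elsewhere in the diff suggest:

#include <stdio.h>

/* assumption: lower numeric value == higher logical priority */
static int is_prio_higher(int prio, int test_prio)
{
	return prio < test_prio;
}

/* hypothetical body matching the call sites in k_mutex_lock()/unlock():
 * target is the contender's priority, limit the owner's current one
 */
static int new_prio_for_inheritance(int target, int limit)
{
	return is_prio_higher(target, limit) ? target : limit;
}

int main(void)
{
	int owner_prio = 7;

	/* a waiter at priority 2 boosts the owner; one at 9 does not */
	printf("%d %d\n",
	       new_prio_for_inheritance(2, owner_prio),   /* 2 */
	       new_prio_for_inheritance(9, owner_prio));  /* 7 */
	return 0;
}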
diff --git a/kernel/unified/pipes.c b/kernel/unified/pipes.c
index 97083445ec3..b9ac73ee26f 100644
--- a/kernel/unified/pipes.c
+++ b/kernel/unified/pipes.c
@@ -21,7 +21,7 @@

 #include
-#include
+#include
 #include
 #include
 #include
@@ -40,7 +40,7 @@ struct k_pipe_desc {
 };

 struct k_pipe_async {
-	struct tcs_base thread;		/* Dummy thread object */
+	struct _thread_base thread;	/* Dummy thread object */
	struct k_pipe_desc desc;	/* Pipe message descriptor */
 };

@@ -286,7 +286,7 @@ static bool _pipe_xfer_prepare(sys_dlist_t *xfer_list,
	for (node = sys_dlist_peek_head(wait_q); node != NULL;
	     node = sys_dlist_peek_next(wait_q, node)) {
		thread = (struct k_thread *)node;
-		desc = (struct k_pipe_desc *)thread->swap_data;
+		desc = (struct k_pipe_desc *)thread->base.swap_data;

		num_bytes += desc->bytes_to_xfer;

@@ -309,7 +309,7 @@ static bool _pipe_xfer_prepare(sys_dlist_t *xfer_list,
	num_bytes = 0;
	while ((thread = (struct k_thread *)
		sys_dlist_peek_head(wait_q))) {
-		desc = (struct k_pipe_desc *)thread->swap_data;
+		desc = (struct k_pipe_desc *)thread->base.swap_data;
		num_bytes += desc->bytes_to_xfer;

		if (num_bytes > bytes_to_xfer) {
@@ -330,7 +330,7 @@ static bool _pipe_xfer_prepare(sys_dlist_t *xfer_list,
		 */
		_unpend_thread(thread);
		_abort_thread_timeout(thread);
-		sys_dlist_append(xfer_list, &thread->k_q_node);
+		sys_dlist_append(xfer_list, &thread->base.k_q_node);
	}

	*waiter = (num_bytes > bytes_to_xfer) ? thread : NULL;
@@ -377,7 +377,7 @@ static void _pipe_thread_ready(struct k_thread *thread)
	unsigned int key;

 #if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
-	if (thread->flags & K_DUMMY) {
+	if (thread->base.flags & K_DUMMY) {
		_pipe_async_finish((struct k_pipe_async *)thread);
		return;
	}
@@ -440,7 +440,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
	struct k_thread *thread = (struct k_thread *)
				  sys_dlist_get(&xfer_list);
	while (thread) {
-		desc = (struct k_pipe_desc *)thread->swap_data;
+		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
					   data + num_bytes_written,
					   bytes_to_write - num_bytes_written);
@@ -462,7 +462,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
	 * It is possible no data will be copied.
	 */
	if (reader) {
-		desc = (struct k_pipe_desc *)reader->swap_data;
+		desc = (struct k_pipe_desc *)reader->base.swap_data;
		bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
					   data + num_bytes_written,
					   bytes_to_write - num_bytes_written);
@@ -515,7 +515,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
	pipe_desc.bytes_to_xfer = bytes_to_write - num_bytes_written;

	if (timeout != K_NO_WAIT) {
-		_current->swap_data = &pipe_desc;
+		_current->base.swap_data = &pipe_desc;
		/*
		 * Lock interrupts and unlock the scheduler before
		 * manipulating the writers wait_q.
@@ -584,7 +584,7 @@ int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
	struct k_thread *thread = (struct k_thread *)
				  sys_dlist_get(&xfer_list);
	while (thread && (num_bytes_read < bytes_to_read)) {
-		desc = (struct k_pipe_desc *)thread->swap_data;
+		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = _pipe_xfer(data + num_bytes_read,
					   bytes_to_read - num_bytes_read,
					   desc->buffer, desc->bytes_to_xfer);
@@ -608,7 +608,7 @@ int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
	}

	if (writer && (num_bytes_read < bytes_to_read)) {
-		desc = (struct k_pipe_desc *)writer->swap_data;
+		desc = (struct k_pipe_desc *)writer->base.swap_data;
		bytes_copied = _pipe_xfer(data + num_bytes_read,
					   bytes_to_read - num_bytes_read,
					   desc->buffer, desc->bytes_to_xfer);
@@ -624,7 +624,7 @@ int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
	 */
	while (thread) {
-		desc = (struct k_pipe_desc *)thread->swap_data;
+		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = _pipe_buffer_put(pipe, desc->buffer,
						 desc->bytes_to_xfer);

@@ -638,7 +638,7 @@ int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
	}

	if (writer) {
-		desc = (struct k_pipe_desc *)writer->swap_data;
+		desc = (struct k_pipe_desc *)writer->base.swap_data;
		bytes_copied = _pipe_buffer_put(pipe, desc->buffer,
						 desc->bytes_to_xfer);
@@ -662,7 +662,7 @@ int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
	pipe_desc.bytes_to_xfer = bytes_to_read - num_bytes_read;

	if (timeout != K_NO_WAIT) {
-		_current->swap_data = &pipe_desc;
+		_current->base.swap_data = &pipe_desc;
		key = irq_lock();
		_sched_unlock_no_reschedule();
		_pend_current_thread(&pipe->wait_q.readers, timeout);
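Both k_mbox_async and k_pipe_async now embed a bare struct _thread_base flagged K_DUMMY in place of a full thread, so an asynchronous descriptor can sit on the same wait queues as real senders. A sketch of why the casts in the wake-up paths are safe under the first-member layout noted earlier; the _sim types and the flag value are stand-ins, not kernel definitions:

#include <stdio.h>

#define K_DUMMY 0x01u                    /* stand-in flag value */

struct _thread_base { unsigned int flags; int prio; };
struct k_thread { struct _thread_base base; /* full thread state ... */ };

struct k_pipe_async_sim {                /* (sim) mirrors struct k_pipe_async */
	struct _thread_base thread;      /* dummy thread object, first member */
	/* struct k_pipe_desc desc; ... */
};

static void pipe_thread_ready_sim(struct k_thread *thread)
{
	/* mirrors _pipe_thread_ready(): a dummy entry has no context to
	 * resume, so only its descriptor is finalized
	 */
	if (thread->base.flags & K_DUMMY) {
		printf("async descriptor %p finalized\n", (void *)thread);
		return;
	}
	printf("real thread %p readied\n", (void *)thread);
}

int main(void)
{
	struct k_pipe_async_sim async = { .thread = { .flags = K_DUMMY } };

	/* legal only because the base is the first member of both types */
	pipe_thread_ready_sim((struct k_thread *)&async);
	return 0;
}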
diff --git a/kernel/unified/sched.c b/kernel/unified/sched.c
index b25d62ae851..a877da5cc31 100644
--- a/kernel/unified/sched.c
+++ b/kernel/unified/sched.c
@@ -15,16 +15,19 @@

 #include
-#include
+#include
 #include
 #include
 #include

+/* the only struct _kernel instance */
+struct _kernel _kernel = {0};
+
 /* set the bit corresponding to prio in ready q bitmap */
 static void _set_ready_q_prio_bit(int prio)
 {
	int bmap_index = _get_ready_q_prio_bmap_index(prio);
-	uint32_t *bmap = &_nanokernel.ready_q.prio_bmap[bmap_index];
+	uint32_t *bmap = &_ready_q.prio_bmap[bmap_index];

	*bmap |= _get_ready_q_prio_bit(prio);
 }
@@ -33,7 +36,7 @@ static void _set_ready_q_prio_bit(int prio)
 static void _clear_ready_q_prio_bit(int prio)
 {
	int bmap_index = _get_ready_q_prio_bmap_index(prio);
-	uint32_t *bmap = &_nanokernel.ready_q.prio_bmap[bmap_index];
+	uint32_t *bmap = &_ready_q.prio_bmap[bmap_index];

	*bmap &= ~_get_ready_q_prio_bit(prio);
 }
@@ -50,15 +53,16 @@ static void _clear_ready_q_prio_bit(int prio)

 void _add_thread_to_ready_q(struct k_thread *thread)
 {
-	int q_index = _get_ready_q_q_index(thread->prio);
-	sys_dlist_t *q = &_nanokernel.ready_q.q[q_index];
+	int q_index = _get_ready_q_q_index(thread->base.prio);
+	sys_dlist_t *q = &_ready_q.q[q_index];

-	_set_ready_q_prio_bit(thread->prio);
-	sys_dlist_append(q, &thread->k_q_node);
+	_set_ready_q_prio_bit(thread->base.prio);
+	sys_dlist_append(q, &thread->base.k_q_node);

-	struct k_thread **cache = &_nanokernel.ready_q.cache;
+	struct k_thread **cache = &_ready_q.cache;

-	*cache = *cache && _is_prio_higher(thread->prio, (*cache)->prio) ?
+	*cache = *cache && _is_prio_higher(thread->base.prio,
+					   (*cache)->base.prio) ?
		 thread : *cache;
 }

@@ -71,15 +75,15 @@ void _add_thread_to_ready_q(struct k_thread *thread)

 void _remove_thread_from_ready_q(struct k_thread *thread)
 {
-	int q_index = _get_ready_q_q_index(thread->prio);
-	sys_dlist_t *q = &_nanokernel.ready_q.q[q_index];
+	int q_index = _get_ready_q_q_index(thread->base.prio);
+	sys_dlist_t *q = &_ready_q.q[q_index];

-	sys_dlist_remove(&thread->k_q_node);
+	sys_dlist_remove(&thread->base.k_q_node);
	if (sys_dlist_is_empty(q)) {
-		_clear_ready_q_prio_bit(thread->prio);
+		_clear_ready_q_prio_bit(thread->base.prio);
	}

-	struct k_thread **cache = &_nanokernel.ready_q.cache;
+	struct k_thread **cache = &_ready_q.cache;

	*cache = *cache == thread ? NULL : *cache;
 }
@@ -103,20 +107,20 @@ void k_sched_lock(void)
 {
	__ASSERT(!_is_in_isr(), "");

-	atomic_inc(&_nanokernel.current->sched_locked);
+	atomic_inc(&_current->base.sched_locked);

	K_DEBUG("scheduler locked (%p:%d)\n",
-		_current, _current->sched_locked);
+		_current, _current->base.sched_locked);
 }

 void k_sched_unlock(void)
 {
-	__ASSERT(_nanokernel.current->sched_locked > 0, "");
+	__ASSERT(_current->base.sched_locked > 0, "");
	__ASSERT(!_is_in_isr(), "");

	int key = irq_lock();

-	atomic_dec(&_nanokernel.current->sched_locked);
+	atomic_dec(&_current->base.sched_locked);

	K_DEBUG("scheduler unlocked (%p:%d)\n",
-		_current, _current->sched_locked);
+		_current, _current->base.sched_locked);
@@ -128,12 +132,15 @@ void k_sched_unlock(void)
 * Callback for sys_dlist_insert_at() to find the correct insert point in a
 * wait queue (priority-based).
 */
-static int _is_wait_q_insert_point(sys_dnode_t *dnode_info, void *insert_prio)
+static int _is_wait_q_insert_point(sys_dnode_t *node, void *insert_prio)
 {
	struct k_thread *waitq_node =
-		CONTAINER_OF(dnode_info, struct k_thread, k_q_node);
+		CONTAINER_OF(
+			CONTAINER_OF(node, struct _thread_base, k_q_node),
+			struct k_thread,
+			base);

-	return _is_prio_higher((int)insert_prio, waitq_node->prio);
+	return _is_prio_higher((int)insert_prio, waitq_node->base.prio);
 }

 /* convert milliseconds to ticks */
@@ -154,8 +161,9 @@ void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
 {
	sys_dlist_t *dlist = (sys_dlist_t *)wait_q;

-	sys_dlist_insert_at(dlist, &thread->k_q_node,
-			    _is_wait_q_insert_point, (void *)thread->prio);
+	sys_dlist_insert_at(dlist, &thread->base.k_q_node,
+			    _is_wait_q_insert_point,
+			    (void *)thread->base.prio);

	_mark_thread_as_pending(thread);

@@ -182,7 +190,7 @@ static struct k_thread *__get_next_ready_thread(void)
 {
	int prio = _get_highest_ready_prio();
	int q_index = _get_ready_q_q_index(prio);
-	sys_dlist_t *list = &_nanokernel.ready_q.q[q_index];
+	sys_dlist_t *list = &_ready_q.q[q_index];

	__ASSERT(!sys_dlist_is_empty(list),
		 "no thread to run (prio: %d, queue index: %u)!\n",
@@ -191,7 +199,7 @@ static struct k_thread *__get_next_ready_thread(void)
	struct k_thread *thread =
		(struct k_thread *)sys_dlist_peek_head_not_empty(list);

-	_nanokernel.ready_q.cache = thread;
+	_ready_q.cache = thread;

	return thread;
 }
@@ -200,7 +208,7 @@ static struct k_thread *__get_next_ready_thread(void)
 /* must be called with interrupts locked */
 struct k_thread *_get_next_ready_thread(void)
 {
-	struct k_thread *cache = _nanokernel.ready_q.cache;
+	struct k_thread *cache = _ready_q.cache;

	return cache ? cache : __get_next_ready_thread();
 }
@@ -217,7 +225,7 @@ int __must_switch_threads(void)
	extern void _dump_ready_q(void);
	_dump_ready_q();

-	return _is_prio_higher(_get_highest_ready_prio(), _current->prio);
+	return _is_prio_higher(_get_highest_ready_prio(), _current->base.prio);
 }

 int _is_next_thread_current(void)
@@ -227,7 +235,7 @@ int _is_next_thread_current(void)

 int k_thread_priority_get(k_tid_t thread)
 {
-	return thread->prio;
+	return thread->base.prio;
 }

 void k_thread_priority_set(k_tid_t tid, int prio)
@@ -255,17 +263,17 @@ void k_thread_priority_set(k_tid_t tid, int prio)
 */
 void _move_thread_to_end_of_prio_q(struct k_thread *thread)
 {
-	int q_index = _get_ready_q_q_index(thread->prio);
-	sys_dlist_t *q = &_nanokernel.ready_q.q[q_index];
+	int q_index = _get_ready_q_q_index(thread->base.prio);
+	sys_dlist_t *q = &_ready_q.q[q_index];

-	if (sys_dlist_is_tail(q, &thread->k_q_node)) {
+	if (sys_dlist_is_tail(q, &thread->base.k_q_node)) {
		return;
	}

-	sys_dlist_remove(&thread->k_q_node);
-	sys_dlist_append(q, &thread->k_q_node);
+	sys_dlist_remove(&thread->base.k_q_node);
+	sys_dlist_append(q, &thread->base.k_q_node);

-	struct k_thread **cache = &_nanokernel.ready_q.cache;
+	struct k_thread **cache = &_ready_q.cache;

	*cache = *cache == thread ? NULL : *cache;
 }
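The sched.c hunks keep the ready queue's three pieces in sync: per-priority dlists, a bitmap of non-empty priorities, and a one-slot cache of the next thread to run. The cache policy itself survives the rename unchanged; a toy model of just that policy, with the per-priority lists collapsed to one slot each and lower numeric prio treated as higher, as elsewhere in the diff:

#include <stdio.h>
#include <stddef.h>

#define NPRIO 8

struct thread_sim { int prio; };

static struct thread_sim *q[NPRIO];      /* at most one thread per prio here */
static struct thread_sim *cache;         /* stands in for _ready_q.cache */

static struct thread_sim *scan(void)     /* __get_next_ready_thread() analog */
{
	for (int p = 0; p < NPRIO; p++) {
		if (q[p]) {
			cache = q[p];    /* repopulate the cache */
			return q[p];
		}
	}
	return NULL;
}

static void add_thread(struct thread_sim *t)
{
	q[t->prio] = t;
	/* only overwrite a valid cache when the newcomer outranks it;
	 * a NULL cache stays NULL and is recomputed lazily in scan()
	 */
	cache = (cache && t->prio < cache->prio) ? t : cache;
}

static void remove_thread(struct thread_sim *t)
{
	q[t->prio] = NULL;
	cache = (cache == t) ? NULL : cache;  /* invalidate on exact hit */
}

int main(void)
{
	struct thread_sim a = { .prio = 5 }, b = { .prio = 2 };

	add_thread(&a);
	printf("next: %d\n", scan()->prio);   /* 5 */
	add_thread(&b);                       /* outranks cached a */
	printf("next: %d\n", scan()->prio);   /* 2 */
	remove_thread(&b);                    /* cache invalidated */
	printf("next: %d\n", scan()->prio);   /* 5 again */
	return 0;
}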
diff --git a/kernel/unified/sem.c b/kernel/unified/sem.c
index c3baaa59681..f16b25c8ed0 100644
--- a/kernel/unified/sem.c
+++ b/kernel/unified/sem.c
@@ -27,7 +27,7 @@

 #include
-#include
+#include
 #include
 #include
 #include
@@ -44,7 +44,7 @@ struct _sem_desc {
 };

 struct _sem_thread {
-	struct tcs_base dummy;
+	struct _thread_base dummy;
	struct _sem_desc desc;
 };
 #endif
@@ -120,7 +120,7 @@ int k_sem_group_take(struct k_sem *sem_array[], struct k_sem **sem,
	sys_dlist_t list;

	sys_dlist_init(&list);
-	_current->swap_data = &list;
+	_current->base.swap_data = &list;

	for (int i = 0; i < num; i++) {
		wait_objects[i].dummy.flags = K_DUMMY;
@@ -172,7 +172,7 @@ static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
	sys_dnode_t *node;
	sys_dnode_t *next;

-	if (!(thread->flags & K_DUMMY)) {
+	if (!(thread->base.flags & K_DUMMY)) {
		/*
		 * The awakened thread is a real thread and thus was not
		 * involved in a semaphore group operation.
@@ -185,7 +185,7 @@ static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
	 * in a semaphore group operation.
	 */
-	list = (sys_dlist_t *)dummy->desc.thread->swap_data;
+	list = (sys_dlist_t *)dummy->desc.thread->base.swap_data;
	node = sys_dlist_peek_head(list);
	__ASSERT(node != NULL, "");
diff --git a/kernel/unified/stack.c b/kernel/unified/stack.c
index c5729dcba3f..366d0e3ed77 100644
--- a/kernel/unified/stack.c
+++ b/kernel/unified/stack.c
@@ -19,7 +19,7 @@

 #include
-#include
+#include
 #include
 #include
 #include
@@ -116,7 +116,7 @@ int k_stack_pop(struct k_stack *stack, uint32_t *data, int32_t timeout)
	result = _Swap(key);
	if (result == 0) {
-		*data = (uint32_t)_current->swap_data;
+		*data = (uint32_t)_current->base.swap_data;
	}
	return result;
 }
diff --git a/kernel/unified/sys_clock.c b/kernel/unified/sys_clock.c
index d460293ad13..d2934da6703 100644
--- a/kernel/unified/sys_clock.c
+++ b/kernel/unified/sys_clock.c
@@ -17,7 +17,7 @@

-#include
+#include
 #include
 #include
 #include
@@ -206,7 +206,7 @@ static void handle_time_slicing(int32_t ticks)
		return;
	}

-	if (_is_prio_higher(_current->prio, _time_slice_prio_ceiling)) {
+	if (_is_prio_higher(_current->base.prio, _time_slice_prio_ceiling)) {
		return;
	}
diff --git a/kernel/unified/thread.c b/kernel/unified/thread.c
index b1e37ca4b49..9a6816568fc 100644
--- a/kernel/unified/thread.c
+++ b/kernel/unified/thread.c
@@ -26,7 +26,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -69,7 +69,7 @@ int sys_execution_context_type_get(void)
	if (k_is_in_isr())
		return NANO_CTX_ISR;

-	if (_current->prio < 0)
+	if (_current->base.prio < 0)
		return NANO_CTX_FIBER;

	return NANO_CTX_TASK;
@@ -86,7 +86,7 @@ int k_is_in_isr(void)
 */
 void _thread_essential_set(void)
 {
-	_current->flags |= K_ESSENTIAL;
+	_current->base.flags |= K_ESSENTIAL;
 }

 /*
@@ -96,7 +96,7 @@ void _thread_essential_set(void)
 */
 void _thread_essential_clear(void)
 {
-	_current->flags &= ~K_ESSENTIAL;
+	_current->base.flags &= ~K_ESSENTIAL;
 }

 /*
@@ -106,7 +106,7 @@ void _thread_essential_clear(void)
 */
 int _is_thread_essential(void)
 {
-	return _current->flags & K_ESSENTIAL;
+	return _current->base.flags & K_ESSENTIAL;
 }

 void k_busy_wait(uint32_t usec_to_wait)
@@ -151,12 +151,12 @@ void _thread_monitor_exit(struct k_thread *thread)
 {
	unsigned int key = irq_lock();

-	if (thread == _nanokernel.threads) {
-		_nanokernel.threads = _nanokernel.threads->next_thread;
+	if (thread == _kernel.threads) {
+		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

-		prev_thread = _nanokernel.threads;
+		prev_thread = _kernel.threads;
		while (thread != prev_thread->next_thread) {
			prev_thread = prev_thread->next_thread;
		}
diff --git a/kernel/unified/thread_abort.c b/kernel/unified/thread_abort.c
index deceafc2d21..5ec34f20c2e 100644
--- a/kernel/unified/thread_abort.c
+++ b/kernel/unified/thread_abort.c
@@ -21,7 +21,7 @@

 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/kernel/unified/timer.c b/kernel/unified/timer.c
index 988a6001a76..4067c779cf6 100644
--- a/kernel/unified/timer.c
+++ b/kernel/unified/timer.c
@@ -201,7 +201,6 @@ int32_t k_timer_remaining_get(struct k_timer *timer)
 {
	unsigned int key = irq_lock();
	int32_t remaining_ticks;
-	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;

	if (timer->timeout.delta_ticks_from_prev == -1) {
		remaining_ticks = 0;
@@ -211,11 +210,11 @@ int32_t k_timer_remaining_get(struct k_timer *timer)
		 * and summing up the various tick deltas involved
		 */
		struct _timeout *t =
-			(struct _timeout *)sys_dlist_peek_head(timeout_q);
+			(struct _timeout *)sys_dlist_peek_head(&_timeout_q);

		remaining_ticks = t->delta_ticks_from_prev;
		while (t != &timer->timeout) {
-			t = (struct _timeout *)sys_dlist_peek_next(timeout_q,
+			t = (struct _timeout *)sys_dlist_peek_next(&_timeout_q,
								   &t->node);
			remaining_ticks += t->delta_ticks_from_prev;
		}
diff --git a/kernel/unified/work_q.c b/kernel/unified/work_q.c
index 97e119c2910..a15535f38a0 100644
--- a/kernel/unified/work_q.c
+++ b/kernel/unified/work_q.c
@@ -21,7 +21,7 @@
 * Workqueue support functions
 */

-#include
+#include
 #include
 #include
diff --git a/misc/debug/gdb_server.c b/misc/debug/gdb_server.c
index 0278f4bfdcf..2938aff9e7d 100644
--- a/misc/debug/gdb_server.c
+++ b/misc/debug/gdb_server.c
@@ -328,7 +328,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/scripts/gen_offset_header/gen_offset_header.c b/scripts/gen_offset_header/gen_offset_header.c
index 1ed29ceef77..fc4b83e53ec 100644
--- a/scripts/gen_offset_header/gen_offset_header.c
+++ b/scripts/gen_offset_header/gen_offset_header.c
@@ -27,7 +27,7 @@
 * assuming that the module offsets.o contains the following absolute symbols:
 *
 *   $ nm offsets.o
- *   00000010 A __tNANO_common_isp_OFFSET
+ *   00000010 A __tNANO_irq_stack_OFFSET
 *   00000008 A __tNANO_current_OFFSET
 *   0000000c A __tNANO_nested_OFFSET
 *   00000000 A __tNANO_fiber_OFFSET
@@ -35,7 +35,7 @@
 *
 * ... the following C preprocessor code will be generated:
 *
- *   #define __tNANO_common_isp_OFFSET 0x10
+ *   #define __tNANO_irq_stack_OFFSET 0x10
 *   #define __tNANO_current_OFFSET 0x8
 *   #define __tNANO_nested_OFFSET 0xC
 *   #define __tNANO_fiber_OFFSET 0x0
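The gen_offset_header change only refreshes the example symbol names in the tool's comment block. The transformation it documents is mechanical: every absolute symbol in offsets.o becomes a #define. A sketch of that output step, with the ELF symbol-table walk elided and one symbol hard-coded:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct absolute_sym {                /* one 'A'-type symbol as nm shows it */
	const char *name;
	uint32_t value;
};

int main(void)
{
	struct absolute_sym sym = { "__tNANO_irq_stack_OFFSET", 0x10 };

	/* emits: #define __tNANO_irq_stack_OFFSET 0x10 */
	printf("#define %s 0x%" PRIX32 "\n", sym.name, sym.value);
	return 0;
}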
diff --git a/tests/legacy/kernel/test_context/src/context.c b/tests/legacy/kernel/test_context/src/context.c
index 1dbba066dc1..57327548db3 100644
--- a/tests/legacy/kernel/test_context/src/context.c
+++ b/tests/legacy/kernel/test_context/src/context.c
@@ -27,7 +27,7 @@

 #include
-#include
+#include
 #include
 #include

@@ -443,7 +443,7 @@ static void fiber_helper(int arg1, int arg2)
	self_thread_id = sys_thread_self_get();

	/* Lower priority to that of fiber_entry() */
-	fiber_priority_set(self_thread_id, self_thread_id->prio + 1);
+	fiber_priority_set(self_thread_id, self_thread_id->base.prio + 1);

	fiber_yield();	/* Yield to fiber of equal priority */

@@ -515,7 +515,7 @@ static int test_fiber_yield(void)
	 * not result in switching to the helper.
	 */
-	fiber_priority_set(self_thread_id, self_thread_id->prio - 1);
+	fiber_priority_set(self_thread_id, self_thread_id->base.prio - 1);
	fiber_yield();

	if (fiber_evidence != 1) {
diff --git a/tests/legacy/kernel/test_irq_offload/src/main.c b/tests/legacy/kernel/test_irq_offload/src/main.c
index ad35206a2b5..b06ed731802 100644
--- a/tests/legacy/kernel/test_irq_offload/src/main.c
+++ b/tests/legacy/kernel/test_irq_offload/src/main.c
@@ -16,7 +16,7 @@

 #include
 #include
-#include
+#include
 #include

 volatile uint32_t sentinel;
diff --git a/tests/legacy/kernel/test_obj_tracing/microkernel/src/object_monitor.c b/tests/legacy/kernel/test_obj_tracing/microkernel/src/object_monitor.c
index 88bf3593fa9..ed779d57869 100644
--- a/tests/legacy/kernel/test_obj_tracing/microkernel/src/object_monitor.c
+++ b/tests/legacy/kernel/test_obj_tracing/microkernel/src/object_monitor.c
@@ -56,22 +56,22 @@ void *force_sys_work_q_in = (void *)&k_sys_work_q;
 static inline int test_thread_monitor(void)
 {
	int obj_counter = 0;
-	struct tcs *thread_list = NULL;
+	struct k_thread *thread_list = NULL;

	/* wait a bit to allow any initialization-only threads to terminate */
	task_sleep(100);

-	thread_list = (struct tcs *)SYS_THREAD_MONITOR_HEAD;
+	thread_list = (struct k_thread *)SYS_THREAD_MONITOR_HEAD;
	while (thread_list != NULL) {
-		if (thread_list->prio == -1) {
+		if (thread_list->base.prio == -1) {
			TC_PRINT("TASK: %p FLAGS: 0x%x\n",
-				 thread_list, thread_list->flags);
+				 thread_list, thread_list->base.flags);
		} else {
			TC_PRINT("FIBER: %p FLAGS: 0x%x\n",
-				 thread_list, thread_list->flags);
+				 thread_list, thread_list->base.flags);
		}
		thread_list =
-			(struct tcs *)SYS_THREAD_MONITOR_NEXT(thread_list);
+			(struct k_thread *)SYS_THREAD_MONITOR_NEXT(thread_list);
		obj_counter++;
	}
	TC_PRINT("THREAD QUANTITY: %d\n", obj_counter);
diff --git a/tests/legacy/kernel/test_obj_tracing/nanokernel/src/object_monitor.c b/tests/legacy/kernel/test_obj_tracing/nanokernel/src/object_monitor.c
index 436831cf179..4e9ffe96fa6 100644
--- a/tests/legacy/kernel/test_obj_tracing/nanokernel/src/object_monitor.c
+++ b/tests/legacy/kernel/test_obj_tracing/nanokernel/src/object_monitor.c
@@ -56,22 +56,22 @@ void *force_sys_work_q_in = (void *)&k_sys_work_q;
 static inline int test_thread_monitor(void)
 {
	int obj_counter = 0;
-	struct tcs *thread_list = NULL;
+	struct k_thread *thread_list = NULL;

	/* wait a bit to allow any initialization-only threads to terminate */
	fiber_sleep(100);

-	thread_list = (struct tcs *)SYS_THREAD_MONITOR_HEAD;
+	thread_list = (struct k_thread *)SYS_THREAD_MONITOR_HEAD;
	while (thread_list != NULL) {
-		if (thread_list->prio == -1) {
+		if (thread_list->base.prio == -1) {
			TC_PRINT("TASK: %p FLAGS: 0x%x\n",
-				 thread_list, thread_list->flags);
+				 thread_list, thread_list->base.flags);
		} else {
			TC_PRINT("FIBER: %p FLAGS: 0x%x\n",
-				 thread_list, thread_list->flags);
+				 thread_list, thread_list->base.flags);
		}
		thread_list =
-			(struct tcs *)SYS_THREAD_MONITOR_NEXT(thread_list);
+			(struct k_thread *)SYS_THREAD_MONITOR_NEXT(thread_list);
		obj_counter++;
	}
	TC_PRINT("THREAD QUANTITY: %d\n", obj_counter);
diff --git a/tests/legacy/kernel/test_pend/src/pend.c b/tests/legacy/kernel/test_pend/src/pend.c
index 3518c145bf7..fe67048820e 100644
--- a/tests/legacy/kernel/test_pend/src/pend.c
+++ b/tests/legacy/kernel/test_pend/src/pend.c
@@ -16,7 +16,7 @@

 #include
 #include
-#include
+#include
 #include

 #define SECONDS(x)	((x) * sys_clock_ticks_per_sec)
diff --git a/tests/legacy/kernel/test_static_idt/microkernel/src/static_idt.c b/tests/legacy/kernel/test_static_idt/microkernel/src/static_idt.c
index 144bbef1536..108569b2be3 100644
--- a/tests/legacy/kernel/test_static_idt/microkernel/src/static_idt.c
+++ b/tests/legacy/kernel/test_static_idt/microkernel/src/static_idt.c
@@ -25,7 +25,7 @@ Ensures interrupt and exception stubs are installed correctly.

 #include
 #include
-#include
+#include
 #if defined(__GNUC__)
 #include
 #else
diff --git a/tests/legacy/kernel/test_static_idt/microkernel/src/test_stubs.S b/tests/legacy/kernel/test_static_idt/microkernel/src/test_stubs.S
index f6a0abdf027..b104d840c25 100644
--- a/tests/legacy/kernel/test_static_idt/microkernel/src/test_stubs.S
+++ b/tests/legacy/kernel/test_static_idt/microkernel/src/test_stubs.S
@@ -29,7 +29,7 @@ testing.

 /* IA-32 specific */

 #include
-#include
+#include
 #include
 #include
diff --git a/tests/legacy/kernel/test_static_idt/nanokernel/src/static_idt.c b/tests/legacy/kernel/test_static_idt/nanokernel/src/static_idt.c
index 6297a43cdc2..66fd8078eb4 100644
--- a/tests/legacy/kernel/test_static_idt/nanokernel/src/static_idt.c
+++ b/tests/legacy/kernel/test_static_idt/nanokernel/src/static_idt.c
@@ -25,7 +25,7 @@ Ensures interrupt and exception stubs are installed correctly.

 #include
 #include
-#include
+#include
 #if defined(__GNUC__)
 #include
 #else
diff --git a/tests/legacy/kernel/test_static_idt/nanokernel/src/test_stubs.S b/tests/legacy/kernel/test_static_idt/nanokernel/src/test_stubs.S
index f6a0abdf027..b104d840c25 100644
--- a/tests/legacy/kernel/test_static_idt/nanokernel/src/test_stubs.S
+++ b/tests/legacy/kernel/test_static_idt/nanokernel/src/test_stubs.S
@@ -29,7 +29,7 @@ testing.

 /* IA-32 specific */

 #include
-#include
+#include
 #include
 #include
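The object-tracing tests above walk the same singly linked thread list that _thread_monitor_exit() in thread.c maintains. The diff's thread.c hunk ends just before the non-head unlink completes; a sketch with the obvious final relink added, on a simplified list type and with the irq locking elided:

#include <stdio.h>
#include <stddef.h>

struct k_thread_sim { struct k_thread_sim *next_thread; const char *name; };

static struct k_thread_sim *threads;     /* stands in for _kernel.threads */

static void thread_monitor_exit_sim(struct k_thread_sim *thread)
{
	if (thread == threads) {
		threads = threads->next_thread;
	} else {
		struct k_thread_sim *prev_thread = threads;

		while (thread != prev_thread->next_thread) {
			prev_thread = prev_thread->next_thread;
		}
		/* the relink the hunk cuts off before showing */
		prev_thread->next_thread = thread->next_thread;
	}
}

int main(void)
{
	struct k_thread_sim c = { NULL, "c" }, b = { &c, "b" }, a = { &b, "a" };

	threads = &a;
	thread_monitor_exit_sim(&b);
	for (struct k_thread_sim *t = threads; t; t = t->next_thread) {
		printf("%s\n", t->name);     /* prints a, then c */
	}
	return 0;
}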