kernel/arch: consolidate tTCS and tNANO definitions
There was a lot of duplication between architectures for the definition of threads and the "nanokernel" guts. These have been consolidated. Now, a common file kernel/unified/include/kernel_structs.h holds the common definitions. Architectures provide two files to complement it: kernel_arch_data.h and kernel_arch_func.h. The first one contains at least the struct _thread_arch and struct _kernel_arch data structures, as well as the struct _callee_saved and struct _caller_saved register layouts. The second file contains anything that needs what is provided by the common stuff in kernel_structs.h. Those two files are only meant to be included in kernel_structs.h in very specific locations. The thread data structure has been separated into three major parts: common struct _thread_base and struct k_thread, and arch-specific struct _thread_arch. The first and third ones are included in the second. The struct s_NANO data structure has been split into two: common struct _kernel and arch-specific struct _kernel_arch. The latter is included in the former. Offsets files have also changed: nano_offsets.h has been renamed kernel_offsets.h and is still included by the arch-specific offsets.c. Also, since the thread and kernel data structures are now made of sub-structures, offsets have to be added to make up the full offset. Some of these additions have been consolidated in shorter symbols, available from kernel/unified/include/offsets_short.h, which includes an arch-specific offsets_arch_short.h. Most of the code now includes offsets_short.h instead of offsets.h. Change-Id: I084645cb7e6db8db69aeaaf162963fe157045d5a Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
This commit is contained in:
parent
779794cdbf
commit
f6ca7de09c
123 changed files with 2249 additions and 1998 deletions
|
@ -16,8 +16,8 @@
|
|||
|
||||
#define _ASMLANGUAGE
|
||||
#include <arch/nios2/asm.h>
|
||||
#include <nano_private.h>
|
||||
#include <offsets.h>
|
||||
#include <kernel_structs.h>
|
||||
#include <offsets_short.h>
|
||||
|
||||
/* exports */
|
||||
GTEXT(_Swap)
|
||||
|
@ -34,85 +34,85 @@ GTEXT(_k_neg_eagain)
|
|||
*/
|
||||
SECTION_FUNC(exception.other, _Swap)
|
||||
|
||||
/* Get a reference to _nanokernel in r10 */
|
||||
movhi r10, %hi(_nanokernel)
|
||||
ori r10, r10, %lo(_nanokernel)
|
||||
/* Get a reference to _kernel in r10 */
|
||||
movhi r10, %hi(_kernel)
|
||||
ori r10, r10, %lo(_kernel)
|
||||
|
||||
/* Get the pointer to nanokernel->current */
|
||||
ldw r11, __tNANO_current_OFFSET(r10)
|
||||
ldw r11, _kernel_offset_to_current(r10)
|
||||
|
||||
/* Store all the callee saved registers. We either got here via
|
||||
* an exception or from a cooperative invocation of _Swap() from C
|
||||
* domain, so all the caller-saved registers have already been
|
||||
* saved by the exception asm or the calling C code already.
|
||||
*/
|
||||
stw r16, __tTCS_coopReg_OFFSET + __t_coop_r16_OFFSET(r11)
|
||||
stw r17, __tTCS_coopReg_OFFSET + __t_coop_r17_OFFSET(r11)
|
||||
stw r18, __tTCS_coopReg_OFFSET + __t_coop_r18_OFFSET(r11)
|
||||
stw r19, __tTCS_coopReg_OFFSET + __t_coop_r19_OFFSET(r11)
|
||||
stw r20, __tTCS_coopReg_OFFSET + __t_coop_r20_OFFSET(r11)
|
||||
stw r21, __tTCS_coopReg_OFFSET + __t_coop_r21_OFFSET(r11)
|
||||
stw r22, __tTCS_coopReg_OFFSET + __t_coop_r22_OFFSET(r11)
|
||||
stw r23, __tTCS_coopReg_OFFSET + __t_coop_r23_OFFSET(r11)
|
||||
stw r28, __tTCS_coopReg_OFFSET + __t_coop_r28_OFFSET(r11)
|
||||
stw ra, __tTCS_coopReg_OFFSET + __t_coop_ra_OFFSET(r11)
|
||||
stw sp, __tTCS_coopReg_OFFSET + __t_coop_sp_OFFSET(r11)
|
||||
stw r16, _thread_offset_to_r16(r11)
|
||||
stw r17, _thread_offset_to_r17(r11)
|
||||
stw r18, _thread_offset_to_r18(r11)
|
||||
stw r19, _thread_offset_to_r19(r11)
|
||||
stw r20, _thread_offset_to_r20(r11)
|
||||
stw r21, _thread_offset_to_r21(r11)
|
||||
stw r22, _thread_offset_to_r22(r11)
|
||||
stw r23, _thread_offset_to_r23(r11)
|
||||
stw r28, _thread_offset_to_r28(r11)
|
||||
stw ra, _thread_offset_to_ra(r11)
|
||||
stw sp, _thread_offset_to_sp(r11)
|
||||
|
||||
/* r4 has the 'key' argument which is the result of irq_lock()
|
||||
* before this was called
|
||||
*/
|
||||
stw r4, __tTCS_coopReg_OFFSET + __t_coop_key_OFFSET(r11)
|
||||
stw r4, _thread_offset_to_key(r11)
|
||||
|
||||
/* Populate default return value */
|
||||
movhi r5, %hi(_k_neg_eagain)
|
||||
ori r5, r5, %lo(_k_neg_eagain)
|
||||
ldw r4, (r5)
|
||||
stw r4, __tTCS_coopReg_OFFSET + __t_coop_retval_OFFSET(r11)
|
||||
stw r4, _thread_offset_to_retval(r11)
|
||||
|
||||
#if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
|
||||
call _sys_k_event_logger_context_switch
|
||||
/* Restore caller-saved r10. We could have stuck its value
|
||||
* onto the stack, but less instructions to just use immediates
|
||||
*/
|
||||
movhi r10, %hi(_nanokernel)
|
||||
ori r10, r10, %lo(_nanokernel)
|
||||
movhi r10, %hi(_kernel)
|
||||
ori r10, r10, %lo(_kernel)
|
||||
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
|
||||
|
||||
/* Assign to _nanokernel.current the return value of
|
||||
/* Assign to _kernel.current the return value of
|
||||
* _get_next_ready_thread()
|
||||
*/
|
||||
call _get_next_ready_thread
|
||||
movhi r10, %hi(_nanokernel)
|
||||
ori r10, r10, %lo(_nanokernel)
|
||||
stw r2, __tNANO_current_OFFSET(r10)
|
||||
movhi r10, %hi(_kernel)
|
||||
ori r10, r10, %lo(_kernel)
|
||||
stw r2, _kernel_offset_to_current(r10)
|
||||
|
||||
/* At this point r2 points to the next thread to be swapped in */
|
||||
|
||||
/* Restore callee-saved registers and switch to the incoming
|
||||
* thread's stack
|
||||
*/
|
||||
ldw r16, __tTCS_coopReg_OFFSET + __t_coop_r16_OFFSET(r2)
|
||||
ldw r17, __tTCS_coopReg_OFFSET + __t_coop_r17_OFFSET(r2)
|
||||
ldw r18, __tTCS_coopReg_OFFSET + __t_coop_r18_OFFSET(r2)
|
||||
ldw r19, __tTCS_coopReg_OFFSET + __t_coop_r19_OFFSET(r2)
|
||||
ldw r20, __tTCS_coopReg_OFFSET + __t_coop_r20_OFFSET(r2)
|
||||
ldw r21, __tTCS_coopReg_OFFSET + __t_coop_r21_OFFSET(r2)
|
||||
ldw r22, __tTCS_coopReg_OFFSET + __t_coop_r22_OFFSET(r2)
|
||||
ldw r23, __tTCS_coopReg_OFFSET + __t_coop_r23_OFFSET(r2)
|
||||
ldw r28, __tTCS_coopReg_OFFSET + __t_coop_r28_OFFSET(r2)
|
||||
ldw ra, __tTCS_coopReg_OFFSET + __t_coop_ra_OFFSET(r2)
|
||||
ldw sp, __tTCS_coopReg_OFFSET + __t_coop_sp_OFFSET(r2)
|
||||
ldw r16, _thread_offset_to_r16(r2)
|
||||
ldw r17, _thread_offset_to_r17(r2)
|
||||
ldw r18, _thread_offset_to_r18(r2)
|
||||
ldw r19, _thread_offset_to_r19(r2)
|
||||
ldw r20, _thread_offset_to_r20(r2)
|
||||
ldw r21, _thread_offset_to_r21(r2)
|
||||
ldw r22, _thread_offset_to_r22(r2)
|
||||
ldw r23, _thread_offset_to_r23(r2)
|
||||
ldw r28, _thread_offset_to_r28(r2)
|
||||
ldw ra, _thread_offset_to_ra(r2)
|
||||
ldw sp, _thread_offset_to_sp(r2)
|
||||
|
||||
/* We need to irq_unlock(current->coopReg.key);
|
||||
* key was supplied as argument to _Swap(). Fetch it.
|
||||
*/
|
||||
ldw r3, __tTCS_coopReg_OFFSET + __t_coop_key_OFFSET(r2)
|
||||
ldw r3, _thread_offset_to_key(r2)
|
||||
|
||||
/* Load return value into r2 (return value register). -EAGAIN
|
||||
* unless someone previously called fiberRtnValueSet(). Do this
|
||||
* before we potentially unlock interrupts.
|
||||
*/
|
||||
ldw r2, __tTCS_coopReg_OFFSET + __t_coop_retval_OFFSET(r2)
|
||||
ldw r2, _thread_offset_to_retval(r2)
|
||||
|
||||
/* Now do irq_unlock(current->coopReg.key) */
|
||||
#if (ALT_CPU_NUM_OF_SHADOW_REG_SETS > 0) || \
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue