kernel/arch: optimize memory use of some thread fields
Some thread fields were 32-bit wide, when they are not even close to using that full range of values. They are instead changed to 8-bit fields.

- prio can fit in one byte, limiting the priorities range to -128 to 127
- recursive scheduler locking can be limited to 255; a rollover results most probably from a logic error
- flags are split into execution flags and thread states; 8 bits is enough for each of them currently, with at worst two states and four flags to spare (on x86; on other archs, there are six flags to spare)

Doing this saves 8 bytes per stack. It also sets up an incoming enhancement when checking if the current thread is preemptible on interrupt exit.

Change-Id: Ieb5321a5b99f99173b0605dd4a193c3bc7ddabf4
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
This commit is contained in:
parent
7e18ab70f9
commit
f955476559
22 changed files with 116 additions and 83 deletions
|
@ -84,8 +84,13 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
|
|||
ldr r0, =_kernel
|
||||
|
||||
ldr r1, [r0, #_kernel_offset_to_current]
|
||||
ldr r2, [r1, #_thread_offset_to_prio]
|
||||
ldr r3, [r1, #_thread_offset_to_sched_locked]
|
||||
#ifdef CONFIG_CPU_CORTEX_M0_M0PLUS
|
||||
movs r3, #_thread_offset_to_prio
|
||||
ldrsb r2, [r1, r3]
|
||||
#else
|
||||
ldrsb r2, [r1, #_thread_offset_to_prio]
|
||||
#endif
|
||||
ldrb r3, [r1, #_thread_offset_to_sched_locked]
|
||||
|
||||
/* coop thread ? do not schedule */
|
||||
cmp r2, #0
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue