kernel/arch: optimize memory use of some thread fields

Some thread fields were 32 bits wide even though they come nowhere close to
using that full range of values. They are now changed to 8-bit fields.

- prio can fit in one byte, limiting the priority range to -128 to 127

- recursive scheduler locking can be limited to a depth of 255; a rollover
  most probably indicates a logic error

- flags are split into execution flags and thread states; 8 bits is currently
  enough for each of them, with at worst two states and four flags to spare
  (on x86; other archs have six flags to spare)

Doing this saves 8 bytes per stack. It also lays the groundwork for an
upcoming enhancement to the check of whether the current thread is
preemptible on interrupt exit.
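For reference, a condensed before/after sketch of the affected
struct _thread_base fields (struct names below are illustrative only; the
actual change is the kernel_structs.h hunk further down, and the byte counts
assume a 32-bit int as on the targets involved):

    #include <stdint.h>

    /* Before: three 32-bit-wide fields, 12 bytes of payload. */
    struct thread_base_fields_before {
        uint32_t flags;                 /* execution flags and thread states mixed */
        int prio;                       /* thread priority */
        volatile uint32_t sched_locked; /* scheduler lock count */
    };

    /* After: four 8-bit fields, 4 bytes of payload, hence the 8 bytes saved. */
    struct thread_base_fields_after {
        uint8_t execution_flags;        /* execution flags */
        uint8_t thread_state;           /* thread state */
        volatile uint8_t sched_locked;  /* scheduler lock count */
        int8_t prio;                    /* thread priority, now -128..127 */
    };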

Change-Id: Ieb5321a5b99f99173b0605dd4a193c3bc7ddabf4
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
Benjamin Walsh, 2016-12-21 15:38:54 -05:00 (committed by Anas Nashif)
commit f955476559
22 changed files with 116 additions and 83 deletions


@@ -150,11 +150,11 @@ SECTION_FUNC(TEXT, _firq_exit)
 .balign 4
 _firq_check_for_swap:
 	/* coop thread ? do not schedule */
-	ld_s r0, [r2, _thread_offset_to_prio]
+	ldb.x r0, [r2, _thread_offset_to_prio]
 	brlt r0, 0, _firq_no_reschedule
 	/* scheduler locked ? do not schedule */
-	ld_s r0, [r2, _thread_offset_to_sched_locked]
+	ldb_s r0, [r2, _thread_offset_to_sched_locked]
 	brne r0, 0, _firq_no_reschedule
 	/* Check if the current thread (in r2) is the cached thread */


@@ -168,11 +168,11 @@ _trap_return:
 .balign 4
 _trap_check_for_swap:
 	/* coop thread ? do not schedule */
-	ld_s r0, [r2, _thread_offset_to_prio]
+	ldb.x r0, [r2, _thread_offset_to_prio]
 	brlt r0, 0, _trap_return
 	/* scheduler locked ? do not schedule */
-	ld_s r0, [r2, _thread_offset_to_sched_locked]
+	ldb_s r0, [r2, _thread_offset_to_sched_locked]
 	brne r0, 0, _trap_return
 	/* check if the current thread needs to be rescheduled */


@@ -138,13 +138,13 @@ SECTION_FUNC(TEXT, _rirq_exit)
 	 */
 	/* coop thread ? do not schedule */
-	ld_s r0, [r2, _thread_offset_to_prio]
+	ldb.x r0, [r2, _thread_offset_to_prio]
 	cmp_s r0, 0
 	blt.d _rirq_no_reschedule
 	ld sp, [r2, _thread_offset_to_sp]
 	/* scheduler locked ? do not schedule */
-	ld_s r0, [r2, _thread_offset_to_sched_locked]
+	ldb_s r0, [r2, _thread_offset_to_sched_locked]
 #ifdef CONFIG_ARC_STACK_CHECKING
 	breq.d r0, 0, _rirq_reschedule_check
 	ld sp, [r2, _thread_offset_to_sp]


@@ -84,8 +84,13 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
 	ldr r0, =_kernel
 	ldr r1, [r0, #_kernel_offset_to_current]
-	ldr r2, [r1, #_thread_offset_to_prio]
-	ldr r3, [r1, #_thread_offset_to_sched_locked]
+#ifdef CONFIG_CPU_CORTEX_M0_M0PLUS
+	movs r3, #_thread_offset_to_prio
+	ldrsb r2, [r1, r3]
+#else
+	ldrsb r2, [r1, #_thread_offset_to_prio]
+#endif
+	ldrb r3, [r1, #_thread_offset_to_sched_locked]
 	/* coop thread ? do not schedule */
 	cmp r2, #0


@@ -132,11 +132,11 @@ on_irq_stack:
 	 */
 	/* Do not reschedule coop threads (threads that have negative prio) */
-	ldw r12, _thread_offset_to_prio(r11)
+	ldb r12, _thread_offset_to_prio(r11)
 	blt r12, zero, no_reschedule
 	/* Do not reschedule if scheduler is locked */
-	ldw r12, _thread_offset_to_sched_locked(r11)
+	ldb r12, _thread_offset_to_sched_locked(r11)
 	bne r12, zero, no_reschedule
 	/* Call into the kernel to see if a scheduling decision is necessary */


@@ -135,7 +135,7 @@ SECTION_FUNC(TEXT, _exception_enter)
 	 * registers and the stack of the preempted thread.
 	 */
-	testl $EXC_ACTIVE, _thread_offset_to_flags(%edx)
+	testb $EXC_ACTIVE, _thread_offset_to_execution_flags(%edx)
 	jne alreadyInException
 	movl %esp, _thread_offset_to_esf(%edx)
@@ -151,7 +151,7 @@ alreadyInException:
 	 * handled in the event of a context switch.
 	 */
-	orl $EXC_ACTIVE, _thread_offset_to_flags(%edx)
+	orb $EXC_ACTIVE, _thread_offset_to_execution_flags(%edx)
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
@@ -210,7 +210,7 @@ allDone:
 	 * outermost exception).
 	 */
-	andl $~EXC_ACTIVE, _thread_offset_to_flags (%ecx)
+	andb $~EXC_ACTIVE, _thread_offset_to_execution_flags (%ecx)
 nestedException:
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */


@@ -70,7 +70,7 @@ extern uint32_t _sse_mxcsr_default_value;
 static void _FpCtxSave(struct tcs *tcs)
 {
 #ifdef CONFIG_SSE
-	if (tcs->base.flags & K_SSE_REGS) {
+	if (tcs->base.execution_flags & K_SSE_REGS) {
 		_do_fp_and_sse_regs_save(&tcs->arch.preempFloatReg);
 		return;
 	}
@@ -88,7 +88,7 @@ static inline void _FpCtxInit(struct tcs *tcs)
 {
 	_do_fp_regs_init();
 #ifdef CONFIG_SSE
-	if (tcs->base.flags & K_SSE_REGS) {
+	if (tcs->base.execution_flags & K_SSE_REGS) {
 		_do_sse_regs_init();
 	}
 #endif
@@ -114,7 +114,7 @@ void k_float_enable(struct tcs *tcs, unsigned int options)
 	/* Indicate thread requires floating point context saving */
-	tcs->base.flags |= options;
+	tcs->base.execution_flags |= (uint8_t)options;
 	/*
 	 * The current thread might not allow FP instructions, so clear CR0[TS]
@@ -132,7 +132,7 @@ void k_float_enable(struct tcs *tcs, unsigned int options)
 	fp_owner = _kernel.current_fp;
 	if (fp_owner) {
-		if (fp_owner->base.flags & INT_OR_EXC_MASK) {
+		if (fp_owner->base.execution_flags & INT_OR_EXC_MASK) {
 			_FpCtxSave(fp_owner);
 		}
 	}
@@ -158,7 +158,7 @@ void k_float_enable(struct tcs *tcs, unsigned int options)
 	 * of the FPU to them (unless we need it ourselves).
 	 */
-	if ((_current->base.flags & _FP_USER_MASK) == 0) {
+	if ((_current->base.execution_flags & _FP_USER_MASK) == 0) {
 		/*
 		 * We are not FP-capable, so mark FPU as owned by the
 		 * thread we've just enabled FP support for, then
@@ -212,7 +212,7 @@ void k_float_disable(struct tcs *tcs)
 	/* Disable all floating point capabilities for the thread */
-	tcs->base.flags &= ~_FP_USER_MASK;
+	tcs->base.execution_flags &= ~_FP_USER_MASK;
 	if (tcs == _current) {
 		_FpAccessDisable();


@@ -294,11 +294,11 @@ alreadyOnIntStack:
 	 */
 	/* do not reschedule coop threads (negative priority) */
-	cmpl $0, _thread_offset_to_prio(%edx)
+	cmpb $0, _thread_offset_to_prio(%edx)
 	jl noReschedule
 	/* do not reschedule if scheduler is locked */
-	cmpl $0, _thread_offset_to_sched_locked(%edx)
+	cmpb $0, _thread_offset_to_sched_locked(%edx)
 	jne noReschedule
@@ -314,7 +314,7 @@ alreadyOnIntStack:
 	 */
 #if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
-	orl $INT_ACTIVE, _thread_offset_to_flags(%edx)
+	orb $INT_ACTIVE, _thread_offset_to_execution_flags(%edx)
 #endif
 	/*
@@ -362,7 +362,7 @@ alreadyOnIntStack:
 	 */
 	movl _kernel + _kernel_offset_to_current, %eax
-	andl $~INT_ACTIVE, _thread_offset_to_flags (%eax)
+	andb $~INT_ACTIVE, _thread_offset_to_execution_flags(%eax)
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
 	/* Restore volatile registers and return to the interrupted thread */


@@ -54,9 +54,9 @@
  * the non-volatile integer registers need to be saved in the TCS of the
  * outgoing thread. The restoration of the integer registers of the incoming
  * thread depends on whether that thread was preemptively context switched out.
- * The INT_ACTIVE and EXC_ACTIVE bits in the k_thread->flags field will signify
- * that the thread was preemptively context switched out, and thus both the
- * volatile and non-volatile integer registers need to be restored.
+ * The INT_ACTIVE and EXC_ACTIVE bits in the k_thread->execution_flags field
+ * will signify that the thread was preemptively context switched out, and thus
+ * both the volatile and non-volatile integer registers need to be restored.
  *
 * The non-volatile registers need to be scrubbed to ensure they contain no
 * sensitive information that could compromise system security. This is to
@@ -161,7 +161,7 @@ SECTION_FUNC(TEXT, _Swap)
 	 * _and_ whether the thread was context switched out preemptively.
 	 */
-	testl $_FP_USER_MASK, _thread_offset_to_flags(%eax)
+	testb $_FP_USER_MASK, _thread_offset_to_execution_flags(%eax)
 	je restoreContext_NoFloatSwap
@@ -197,12 +197,12 @@ SECTION_FUNC(TEXT, _Swap)
 	 * was preemptively context switched.
 	 */
-	testl $INT_OR_EXC_MASK, _thread_offset_to_flags(%ebx)
+	testb $INT_OR_EXC_MASK, _thread_offset_to_execution_flags(%ebx)
 	je restoreContext_NoFloatSave
 #ifdef CONFIG_SSE
-	testl $K_SSE_REGS, _thread_offset_to_flags(%ebx)
+	testb $K_SSE_REGS, _thread_offset_to_execution_flags(%ebx)
 	je x87FloatSave
 	/*
@@ -237,11 +237,11 @@ restoreContext_NoFloatSave:
 	 * was previously preemptively context switched out.
 	 */
-	testl $INT_OR_EXC_MASK, _thread_offset_to_flags(%eax)
+	testb $INT_OR_EXC_MASK, _thread_offset_to_execution_flags(%eax)
 	je restoreContext_NoFloatRestore
 #ifdef CONFIG_SSE
-	testl $K_SSE_REGS, _thread_offset_to_flags(%eax)
+	testb $K_SSE_REGS, _thread_offset_to_execution_flags(%eax)
 	je x87FloatRestore
 	fxrstor _thread_offset_to_preempFloatReg(%eax)
@@ -276,7 +276,7 @@ restoreContext_NoFloatSwap:
 	 * registers
 	 */
-	testl $_FP_USER_MASK, _thread_offset_to_flags(%eax)
+	testb $_FP_USER_MASK, _thread_offset_to_execution_flags(%eax)
 	jne CROHandlingDone
 	/*


@@ -52,19 +52,19 @@
 #define STACK_ALIGN_SIZE 4
-/* x86 Bitmask definitions for the struct k_thread->flags bit field */
+/* x86 Bitmask definitions for struct k_thread->execution_flags */
 /* executing context is interrupt handler */
-#define INT_ACTIVE (1 << 31)
+#define INT_ACTIVE (1 << 7)
 /* executing context is exception handler */
-#define EXC_ACTIVE (1 << 30)
+#define EXC_ACTIVE (1 << 6)
 #define INT_OR_EXC_MASK (INT_ACTIVE | EXC_ACTIVE)
 #if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
 /* thread uses SSEx (and also FP) registers */
-#define K_SSE_REGS (1 << 29)
+#define K_SSE_REGS (1 << 5)
 #endif
 #if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)


@@ -37,6 +37,7 @@ config NUM_COOP_PRIORITIES
 	prompt "Number of coop priorities" if MULTITHREADING
 	default 16
 	default 1 if !MULTITHREADING
+	range 0 128
 	help
 	Number of cooperative priorities configured in the system. Gives access
 	to priorities:
@@ -66,6 +67,7 @@ config NUM_PREEMPT_PRIORITIES
 	prompt "Number of preemptible priorities" if MULTITHREADING
 	default 15
 	default 0 if !MULTITHREADING
+	range 0 128
 	help
 	Number of preemptible priorities available in the system. Gives access
 	to priorities 0 to CONFIG_NUM_PREEMPT_PRIORITIES - 1.


@@ -48,7 +48,8 @@ GEN_OFFSET_SYM(_kernel_t, current_fp);
 GEN_ABSOLUTE_SYM(_STRUCT_KERNEL_SIZE, sizeof(struct _kernel));
-GEN_OFFSET_SYM(_thread_base_t, flags);
+GEN_OFFSET_SYM(_thread_base_t, execution_flags);
+GEN_OFFSET_SYM(_thread_base_t, thread_state);
 GEN_OFFSET_SYM(_thread_base_t, prio);
 GEN_OFFSET_SYM(_thread_base_t, sched_locked);
 GEN_OFFSET_SYM(_thread_base_t, swap_data);


@@ -24,14 +24,17 @@
 #endif
 /*
- * Common bitmask definitions for the struct tcs->flags bit field.
+ * bitmask definitions for the execution_flags and state
  *
 * Must be before kerneL_arch_data.h because it might need them to be already
 * defined.
 */
-/* thread is defined statically */
-#define K_STATIC (1 << 0)
+/* states: common uses low bits, arch-specific use high bits */
+
+/* system thread that must not abort */
+#define K_ESSENTIAL (1 << 0)
 /* Thread is waiting on an object */
 #define K_PENDING (1 << 1)
@@ -48,13 +51,20 @@
 /* Not a real thread */
 #define K_DUMMY (1 << 5)
-/* system thread that must not abort */
-#define K_ESSENTIAL (1 << 6)
+/* end - states */
+
+/* execution flags: common uses low bits, arch-specific use high bits */
+
+/* thread is defined statically */
+#define K_STATIC (1 << 0)
 #if defined(CONFIG_FP_SHARING)
 /* thread uses floating point registers */
-#define K_FP_REGS (1 << 7)
+#define K_FP_REGS (1 << 1)
 #endif
+
+/* end - execution flags */
 #include <kernel_arch_data.h>
@@ -76,13 +86,16 @@ struct _thread_base {
 	sys_dnode_t k_q_node;
 	/* execution flags */
-	uint32_t flags;
+	uint8_t execution_flags;
-	/* thread priority used to sort linked list */
-	int prio;
+	/* thread state */
+	uint8_t thread_state;
 	/* scheduler lock count */
-	volatile uint32_t sched_locked;
+	volatile uint8_t sched_locked;
+	/* thread priority used to sort linked list */
+	int8_t prio;
 	/* data returned by APIs */
 	void *swap_data;


@@ -271,25 +271,25 @@ static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
 static inline void _set_thread_states(struct k_thread *thread, uint32_t states)
 {
-	thread->base.flags |= states;
+	thread->base.thread_state |= states;
 }
 static inline void _reset_thread_states(struct k_thread *thread,
 					uint32_t states)
 {
-	thread->base.flags &= ~states;
+	thread->base.thread_state &= ~states;
 }
 /* mark a thread as being suspended */
 static inline void _mark_thread_as_suspended(struct k_thread *thread)
 {
-	thread->base.flags |= K_SUSPENDED;
+	thread->base.thread_state |= K_SUSPENDED;
 }
 /* mark a thread as not being suspended */
 static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
 {
-	thread->base.flags &= ~K_SUSPENDED;
+	thread->base.thread_state &= ~K_SUSPENDED;
 }
 static ALWAYS_INLINE int _is_thread_timeout_expired(struct k_thread *thread)
@@ -313,14 +313,14 @@ static inline int _is_thread_timeout_active(struct k_thread *thread)
 static inline int _has_thread_started(struct k_thread *thread)
 {
-	return !(thread->base.flags & K_PRESTART);
+	return !(thread->base.thread_state & K_PRESTART);
 }
 static inline int _is_thread_prevented_from_running(struct k_thread *thread)
 {
-	return thread->base.flags & (K_PENDING | K_PRESTART |
+	return thread->base.thread_state & (K_PENDING | K_PRESTART |
 				     K_DEAD | K_DUMMY |
 				     K_SUSPENDED);
 }
@@ -334,19 +334,19 @@ static inline int _is_thread_ready(struct k_thread *thread)
 /* mark a thread as pending in its TCS */
 static inline void _mark_thread_as_pending(struct k_thread *thread)
 {
-	thread->base.flags |= K_PENDING;
+	thread->base.thread_state |= K_PENDING;
 }
 /* mark a thread as not pending in its TCS */
 static inline void _mark_thread_as_not_pending(struct k_thread *thread)
 {
-	thread->base.flags &= ~K_PENDING;
+	thread->base.thread_state &= ~K_PENDING;
 }
 /* check if a thread is pending */
 static inline int _is_thread_pending(struct k_thread *thread)
 {
-	return !!(thread->base.flags & K_PENDING);
+	return !!(thread->base.thread_state & K_PENDING);
 }
 /**
@@ -356,7 +356,7 @@ static inline int _is_thread_pending(struct k_thread *thread)
  */
 static inline void _mark_thread_as_started(struct k_thread *thread)
 {
-	thread->base.flags &= ~K_PRESTART;
+	thread->base.thread_state &= ~K_PRESTART;
 }
 /*
@@ -394,7 +394,7 @@ static inline void _ready_thread(struct k_thread *thread)
  */
 static inline void _mark_thread_as_dead(struct k_thread *thread)
 {
-	thread->base.flags |= K_DEAD;
+	thread->base.thread_state |= K_DEAD;
 }
 /*
@@ -463,7 +463,7 @@ static inline struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
 /* must be called with interrupts locked */
 static inline void _unpend_thread(struct k_thread *thread)
 {
-	__ASSERT(thread->base.flags & K_PENDING, "");
+	__ASSERT(thread->base.thread_state & K_PENDING, "");
 	sys_dlist_remove(&thread->base.k_q_node);
 	_mark_thread_as_not_pending(thread);


@@ -53,8 +53,11 @@
 /* base */
-#define _thread_offset_to_flags \
-	(___thread_t_base_OFFSET + ___thread_base_t_flags_OFFSET)
+#define _thread_offset_to_thread_state \
+	(___thread_t_base_OFFSET + ___thread_base_t_thread_state_OFFSET)
+
+#define _thread_offset_to_execution_flags \
+	(___thread_t_base_OFFSET + ___thread_base_t_execution_flags_OFFSET)
 #define _thread_offset_to_prio \
 	(___thread_t_base_OFFSET + ___thread_base_t_prio_OFFSET)


@@ -203,7 +203,7 @@ static void _main(void *unused1, void *unused2, void *unused3)
 	main();
 	/* Terminate thread normally since it has no more work to do */
-	_main_thread->base.flags &= ~K_ESSENTIAL;
+	_main_thread->base.thread_state &= ~K_ESSENTIAL;
 }
 void __weak main(void)
@@ -238,7 +238,7 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
 	_current = dummy_thread;
-	dummy_thread->base.flags = K_ESSENTIAL;
+	dummy_thread->base.thread_state = K_ESSENTIAL;
 #endif
 	/* _kernel.ready_q is all zeroes */


@@ -211,7 +211,7 @@ static void _mbox_message_dispose(struct k_mbox_msg *rx_msg)
 	 * asynchronous send: free asynchronous message descriptor +
 	 * dummy thread pair, then give semaphore (if needed)
 	 */
-	if (sending_thread->base.flags & K_DUMMY) {
+	if (sending_thread->base.thread_state & K_DUMMY) {
 		struct k_sem *async_sem = tx_msg->_async_sem;
 		_mbox_async_free((struct k_mbox_async *)sending_thread);
@@ -286,7 +286,7 @@ static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 	 * note: dummy sending thread sits (unqueued)
 	 * until the receiver consumes the message
 	 */
-	if (sending_thread->base.flags & K_DUMMY) {
+	if (sending_thread->base.thread_state & K_DUMMY) {
 		_reschedule_threads(key);
 		return 0;
 	}
@@ -310,7 +310,7 @@ static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
 	/* asynchronous send: dummy thread waits on tx queue for receiver */
-	if (sending_thread->base.flags & K_DUMMY) {
+	if (sending_thread->base.thread_state & K_DUMMY) {
 		_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
 		irq_unlock(key);
 		return 0;


@@ -111,7 +111,7 @@ static int init_pipes_module(struct device *dev)
 	 */
 	for (int i = 0; i < CONFIG_NUM_PIPE_ASYNC_MSGS; i++) {
-		async_msg[i].thread.flags = K_DUMMY;
+		async_msg[i].thread.thread_state = K_DUMMY;
 		async_msg[i].thread.swap_data = &async_msg[i].desc;
 		k_stack_push(&pipe_async_msgs, (uint32_t)&async_msg[i]);
 	}
@@ -377,7 +377,7 @@ static void _pipe_thread_ready(struct k_thread *thread)
 	unsigned int key;
 #if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
-	if (thread->base.flags & K_DUMMY) {
+	if (thread->base.thread_state & K_DUMMY) {
 		_pipe_async_finish((struct k_pipe_async *)thread);
 		return;
 	}


@@ -170,7 +170,7 @@ static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
 	sys_dnode_t *node;
 	sys_dnode_t *next;
-	if (!(thread->base.flags & K_DUMMY)) {
+	if (!(thread->base.thread_state & K_DUMMY)) {
 		/*
 		 * The awakened thread is a real thread and thus was not
 		 * involved in a semaphore group operation.


@@ -86,7 +86,7 @@ int k_is_in_isr(void)
  */
 void _thread_essential_set(void)
 {
-	_current->base.flags |= K_ESSENTIAL;
+	_current->base.thread_state |= K_ESSENTIAL;
 }
 /*
@@ -96,7 +96,7 @@ void _thread_essential_set(void)
  */
 void _thread_essential_clear(void)
 {
-	_current->base.flags &= ~K_ESSENTIAL;
+	_current->base.thread_state &= ~K_ESSENTIAL;
 }
 /*
@@ -106,7 +106,7 @@ void _thread_essential_clear(void)
  */
 int _is_thread_essential(void)
 {
-	return _current->base.flags & K_ESSENTIAL;
+	return _current->base.thread_state & K_ESSENTIAL;
 }
 void k_busy_wait(uint32_t usec_to_wait)
@@ -437,7 +437,8 @@ void _init_thread_base(struct _thread_base *thread_base, int priority,
 {
 	/* k_q_node is initialized upon first insertion in a list */
-	thread_base->flags = options | initial_state;
+	thread_base->execution_flags = (uint8_t)options;
+	thread_base->thread_state = (uint8_t)initial_state;
 	thread_base->prio = priority;


@@ -64,11 +64,15 @@ static inline int test_thread_monitor(void)
 	thread_list = (struct k_thread *)SYS_THREAD_MONITOR_HEAD;
 	while (thread_list != NULL) {
 		if (thread_list->base.prio == -1) {
-			TC_PRINT("TASK: %p FLAGS: 0x%x\n",
-				thread_list, thread_list->base.flags);
+			TC_PRINT("TASK: %p FLAGS: 0x%02x, STATE: 0x%02x\n",
+				thread_list,
+				thread_list->base.execution_flags,
+				thread_list->base.thread_state);
 		} else {
-			TC_PRINT("FIBER: %p FLAGS: 0x%x\n",
-				thread_list, thread_list->base.flags);
+			TC_PRINT("FIBER: %p FLAGS: 0x%02x, STATE: 0x%02x\n",
+				thread_list,
+				thread_list->base.execution_flags,
+				thread_list->base.thread_state);
 		}
 		thread_list =
 			(struct k_thread *)SYS_THREAD_MONITOR_NEXT(thread_list);


@@ -64,11 +64,15 @@ static inline int test_thread_monitor(void)
 	thread_list = (struct k_thread *)SYS_THREAD_MONITOR_HEAD;
 	while (thread_list != NULL) {
 		if (thread_list->base.prio == -1) {
-			TC_PRINT("TASK: %p FLAGS: 0x%x\n",
-				thread_list, thread_list->base.flags);
+			TC_PRINT("TASK: %p FLAGS: 0x%02x, STATE: 0x%02x\n",
+				thread_list,
+				thread_list->base.execution_flags,
+				thread_list->base.thread_state);
 		} else {
-			TC_PRINT("FIBER: %p FLAGS: 0x%x\n",
-				thread_list, thread_list->base.flags);
+			TC_PRINT("FIBER: %p FLAGS: 0x%02x, STATE: 0x%02x\n",
+				thread_list,
+				thread_list->base.execution_flags,
+				thread_list->base.thread_state);
 		}
 		thread_list =
 			(struct k_thread *)SYS_THREAD_MONITOR_NEXT(thread_list);