arch: deprecate _current
`_current` is now functionally equal to `arch_current_thread()`; remove its usage in-tree and deprecate it instead of removing it outright, as it has been with us since forever.

Signed-off-by: Yong Cong Sin <ycsin@meta.com>
Signed-off-by: Yong Cong Sin <yongcong.sin@gmail.com>
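The hunks below only show call sites moving from `_current` to arch_current_thread(); the compatibility shim itself is not part of this excerpt. A minimal sketch of what such a shim could look like (illustrative only, not the in-tree definition; the real one would additionally carry a deprecation marker such as Zephyr's __DEPRECATED_MACRO so new uses warn at build time):

/* Illustrative only: keep `_current` compiling as an alias for the accessor,
 * with a build-time deprecation warning attached in the real tree.
 */
#define _current arch_current_thread()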
parent 1a752e8a35
commit b1def7145f
107 changed files with 490 additions and 479 deletions
@@ -262,7 +262,7 @@ config ARC_CURRENT_THREAD_USE_NO_TLS
 RGF_NUM_BANKS the parameter is disabled by-default because banks syncronization
 requires significant time, and it slows down performance.
 ARCMWDT works with tls pointer in different way then GCC. Optimized access to
-TLS pointer via _current variable does not provide significant advantages
+TLS pointer via arch_current_thread() does not provide significant advantages
 in case of MetaWare.

 config GEN_ISR_TABLES
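The help text above contrasts TLS-based access to the current thread with the generic accessor. A rough sketch of the two read paths it is weighing (illustrative only; z_tls_current and the wrapper below are assumptions, not the ARC in-tree code):

#ifdef CONFIG_THREAD_LOCAL_STORAGE
extern __thread struct k_thread *z_tls_current; /* assumed TLS mirror of the current thread */

static inline struct k_thread *current_thread_fast(void) /* hypothetical helper */
{
	return z_tls_current;         /* one TLS-relative load */
}
#else
static inline struct k_thread *current_thread_fast(void)
{
	return arch_current_thread(); /* generic per-CPU lookup */
}
#endif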
@@ -55,7 +55,7 @@ static bool z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
 {
 #if defined(CONFIG_MULTITHREADING)
 uint32_t guard_end, guard_start;
-const struct k_thread *thread = _current;
+const struct k_thread *thread = arch_current_thread();

 if (!thread) {
 /* TODO: Under what circumstances could we get here ? */

@@ -49,8 +49,8 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)

 __asm__ volatile("sync");

-/* If _current was aborted in the offload routine, we shouldn't be here */
-__ASSERT_NO_MSG((_current->base.thread_state & _THREAD_DEAD) == 0);
+/* If arch_current_thread() was aborted in the offload routine, we shouldn't be here */
+__ASSERT_NO_MSG((arch_current_thread()->base.thread_state & _THREAD_DEAD) == 0);
 }

 /* need to be executed on every core in the system */

@@ -210,7 +210,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #ifdef CONFIG_MULTITHREADING
 void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
 {
-*old_thread = _current;
+*old_thread = arch_current_thread();

 return z_get_next_switch_handle(NULL);
 }
@@ -227,16 +227,16 @@ void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 void *p1, void *p2, void *p3)
 {
-setup_stack_vars(_current);
+setup_stack_vars(arch_current_thread());

 /* possible optimizaiton: no need to load mem domain anymore */
 /* need to lock cpu here ? */
-configure_mpu_thread(_current);
+configure_mpu_thread(arch_current_thread());

 z_arc_userspace_enter(user_entry, p1, p2, p3,
-(uint32_t)_current->stack_info.start,
-(_current->stack_info.size -
-_current->stack_info.delta), _current);
+(uint32_t)arch_current_thread()->stack_info.start,
+(arch_current_thread()->stack_info.size -
+arch_current_thread()->stack_info.delta), arch_current_thread());
 CODE_UNREACHABLE;
 }
 #endif

@@ -336,7 +336,7 @@ int arc_vpx_lock(k_timeout_t timeout)

 id = _current_cpu->id;
 #if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
-__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
+__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
 #endif
 k_spin_unlock(&lock, key);

@@ -355,7 +355,7 @@ void arc_vpx_unlock(void)

 key = k_spin_lock(&lock);
 #if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
-__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
+__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
 #endif
 id = _current_cpu->id;
 k_spin_unlock(&lock, key);

@@ -29,7 +29,7 @@ size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)

 void *_Preserve_flags _mwget_tls(void)
 {
-return (void *)(_current->tls);
+return (void *)(arch_current_thread()->tls);
 }

 #else
@@ -178,7 +178,7 @@ bool z_arm_fault_undef_instruction_fp(void)
 * context because it is about to be overwritten.
 */
 if (((_current_cpu->nested == 2)
-&& (_current->base.user_options & K_FP_REGS))
+&& (arch_current_thread()->base.user_options & K_FP_REGS))
 || ((_current_cpu->nested > 2)
 && (spill_esf->undefined & FPEXC_EN))) {
 /*

@@ -196,7 +196,7 @@ bool z_arm_fault_undef_instruction_fp(void)
 * means that a thread that uses the VFP does not have to,
 * but should, set K_FP_REGS on thread creation.
 */
-_current->base.user_options |= K_FP_REGS;
+arch_current_thread()->base.user_options |= K_FP_REGS;
 }

 return false;

@@ -17,8 +17,8 @@
 int arch_swap(unsigned int key)
 {
 /* store off key and return value */
-_current->arch.basepri = key;
-_current->arch.swap_return_value = -EAGAIN;
+arch_current_thread()->arch.basepri = key;
+arch_current_thread()->arch.swap_return_value = -EAGAIN;

 z_arm_cortex_r_svc();
 irq_unlock(key);

@@ -26,5 +26,5 @@ int arch_swap(unsigned int key)
 /* Context switch is performed here. Returning implies the
 * thread has been context-switched-in again.
 */
-return _current->arch.swap_return_value;
+return arch_current_thread()->arch.swap_return_value;
 }
@@ -70,7 +70,7 @@ SECTION_FUNC(TEXT, z_arm_do_swap)

 #if defined(CONFIG_FPU_SHARING)
 ldrb r0, [r2, #_thread_offset_to_user_options]
-tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
+tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
 beq out_fp_inactive

 mov ip, #FPEXC_EN

@@ -152,7 +152,7 @@ out_fp_inactive:

 #if defined(CONFIG_FPU_SHARING)
 ldrb r0, [r2, #_thread_offset_to_user_options]
-tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
+tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
 beq in_fp_inactive

 mov r3, #FPEXC_EN
@@ -198,8 +198,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 {

 /* Set up privileged stack before entering user mode */
-_current->arch.priv_stack_start =
-(uint32_t)z_priv_stack_find(_current->stack_obj);
+arch_current_thread()->arch.priv_stack_start =
+(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
 #if defined(CONFIG_MPU_STACK_GUARD)
 #if defined(CONFIG_THREAD_STACK_INFO)
 /* We're dropping to user mode which means the guard area is no

@@ -208,13 +208,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 * which accounted for memory borrowed from the thread stack.
 */
 #if FP_GUARD_EXTRA_SIZE > 0
-if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
-_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
-_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
+if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
+arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
+arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
 }
 #endif /* FP_GUARD_EXTRA_SIZE */
-_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
-_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
+arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
+arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
 #endif /* CONFIG_THREAD_STACK_INFO */

 /* Stack guard area reserved at the bottom of the thread's

@@ -222,23 +222,23 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 * buffer area accordingly.
 */
 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
-_current->arch.priv_stack_start +=
-((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
+arch_current_thread()->arch.priv_stack_start +=
+((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
 MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
 #else
-_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
+arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
 #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
 #endif /* CONFIG_MPU_STACK_GUARD */

 #if defined(CONFIG_CPU_AARCH32_CORTEX_R)
-_current->arch.priv_stack_end =
-_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
+arch_current_thread()->arch.priv_stack_end =
+arch_current_thread()->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
 #endif

 z_arm_userspace_enter(user_entry, p1, p2, p3,
-(uint32_t)_current->stack_info.start,
-_current->stack_info.size -
-_current->stack_info.delta);
+(uint32_t)arch_current_thread()->stack_info.start,
+arch_current_thread()->stack_info.size -
+arch_current_thread()->stack_info.delta);
 CODE_UNREACHABLE;
 }
@@ -304,7 +304,7 @@ EXPORT_SYMBOL(z_arm_thread_is_in_user_mode);
 uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
 {
 #if defined(CONFIG_MULTITHREADING)
-const struct k_thread *thread = _current;
+const struct k_thread *thread = arch_current_thread();

 if (thread == NULL) {
 return 0;

@@ -314,7 +314,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
 #if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
 defined(CONFIG_MPU_STACK_GUARD)
 uint32_t guard_len =
-((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
+((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
 MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
 #else
 /* If MPU_STACK_GUARD is not enabled, the guard length is

@@ -377,7 +377,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
 int arch_float_disable(struct k_thread *thread)
 {
-if (thread != _current) {
+if (thread != arch_current_thread()) {
 return -EINVAL;
 }
@@ -33,8 +33,8 @@
 int arch_swap(unsigned int key)
 {
 /* store off key and return value */
-_current->arch.basepri = key;
-_current->arch.swap_return_value = -EAGAIN;
+arch_current_thread()->arch.basepri = key;
+arch_current_thread()->arch.swap_return_value = -EAGAIN;

 /* set pending bit to make sure we will take a PendSV exception */
 SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;

@@ -45,5 +45,5 @@ int arch_swap(unsigned int key)
 /* Context switch is performed here. Returning implies the
 * thread has been context-switched-in again.
 */
-return _current->arch.swap_return_value;
+return arch_current_thread()->arch.swap_return_value;
 }
@@ -288,7 +288,7 @@ in_fp_endif:
 #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
 /* Re-program dynamic memory map */
 push {r2,lr}
-mov r0, r2 /* _current thread */
+mov r0, r2 /* arch_current_thread() thread */
 bl z_arm_configure_dynamic_mpu_regions
 pop {r2,lr}
 #endif
@@ -231,8 +231,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 {

 /* Set up privileged stack before entering user mode */
-_current->arch.priv_stack_start =
-(uint32_t)z_priv_stack_find(_current->stack_obj);
+arch_current_thread()->arch.priv_stack_start =
+(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
 #if defined(CONFIG_MPU_STACK_GUARD)
 #if defined(CONFIG_THREAD_STACK_INFO)
 /* We're dropping to user mode which means the guard area is no

@@ -241,13 +241,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 * which accounted for memory borrowed from the thread stack.
 */
 #if FP_GUARD_EXTRA_SIZE > 0
-if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
-_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
-_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
+if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
+arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
+arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
 }
 #endif /* FP_GUARD_EXTRA_SIZE */
-_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
-_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
+arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
+arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
 #endif /* CONFIG_THREAD_STACK_INFO */

 /* Stack guard area reserved at the bottom of the thread's

@@ -255,18 +255,18 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 * buffer area accordingly.
 */
 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
-_current->arch.priv_stack_start +=
-((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
+arch_current_thread()->arch.priv_stack_start +=
+((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
 MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
 #else
-_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
+arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
 #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
 #endif /* CONFIG_MPU_STACK_GUARD */

 z_arm_userspace_enter(user_entry, p1, p2, p3,
-(uint32_t)_current->stack_info.start,
-_current->stack_info.size -
-_current->stack_info.delta);
+(uint32_t)arch_current_thread()->stack_info.start,
+arch_current_thread()->stack_info.size -
+arch_current_thread()->stack_info.delta);
 CODE_UNREACHABLE;
 }
@@ -379,7 +379,7 @@ void configure_builtin_stack_guard(struct k_thread *thread)
 uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
 {
 #if defined(CONFIG_MULTITHREADING)
-const struct k_thread *thread = _current;
+const struct k_thread *thread = arch_current_thread();

 if (thread == NULL) {
 return 0;

@@ -389,7 +389,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
 #if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
 defined(CONFIG_MPU_STACK_GUARD)
 uint32_t guard_len =
-((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
+((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
 MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
 #else
 /* If MPU_STACK_GUARD is not enabled, the guard length is

@@ -452,7 +452,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
 int arch_float_disable(struct k_thread *thread)
 {
-if (thread != _current) {
+if (thread != arch_current_thread()) {
 return -EINVAL;
 }
@@ -522,7 +522,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
 {
 z_arm_prepare_switch_to_main();

-_current = main_thread;
+arch_current_thread_set(main_thread);

 #if defined(CONFIG_THREAD_LOCAL_STORAGE)
 /* On Cortex-M, TLS uses a global variable as pointer to
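Where the old code assigned to `_current` directly, as in the hunk above, the new code goes through arch_current_thread_set(). A minimal sketch of the accessor pair, assuming the generic per-CPU `current` field; the real definitions live in the arch headers and can be overridden when ARCH_HAS_CUSTOM_CURRENT_IMPL is selected:

static ALWAYS_INLINE struct k_thread *arch_current_thread(void)
{
	return _current_cpu->current;   /* per-CPU bookkeeping (assumed generic path) */
}

static ALWAYS_INLINE void arch_current_thread_set(struct k_thread *thread)
{
	_current_cpu->current = thread; /* store-side counterpart used by the scheduler */
}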
@@ -27,7 +27,7 @@ void z_impl_k_thread_abort(k_tid_t thread)
 {
 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);

-if (_current == thread) {
+if (arch_current_thread() == thread) {
 if (arch_is_in_isr()) {
 /* ARM is unlike most arches in that this is true
 * even for non-peripheral interrupts, even though
@@ -727,7 +727,7 @@ static int configure_dynamic_mpu_regions(struct k_thread *thread)
 */
 thread->arch.region_num = (uint8_t)region_num;

-if (thread == _current) {
+if (thread == arch_current_thread()) {
 ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num);
 }

@@ -795,7 +795,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)

 ret = configure_dynamic_mpu_regions(thread);
 #ifdef CONFIG_SMP
-if (ret == 0 && thread != _current) {
+if (ret == 0 && thread != arch_current_thread()) {
 /* the thread could be running on another CPU right now */
 z_arm64_mem_cfg_ipi();
 }

@@ -810,7 +810,7 @@ int arch_mem_domain_thread_remove(struct k_thread *thread)

 ret = configure_dynamic_mpu_regions(thread);
 #ifdef CONFIG_SMP
-if (ret == 0 && thread != _current) {
+if (ret == 0 && thread != arch_current_thread()) {
 /* the thread could be running on another CPU right now */
 z_arm64_mem_cfg_ipi();
 }
@@ -306,8 +306,9 @@ static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, u
 }
 }
 #ifdef CONFIG_USERSPACE
-else if ((_current->base.user_options & K_USER) != 0 && GET_ESR_EC(esr) == 0x24) {
-sp_limit = (uint64_t)_current->stack_info.start;
+else if ((arch_current_thread()->base.user_options & K_USER) != 0 &&
+GET_ESR_EC(esr) == 0x24) {
+sp_limit = (uint64_t)arch_current_thread()->stack_info.start;
 guard_start = sp_limit - Z_ARM64_STACK_GUARD_SIZE;
 sp = esf->sp;
 if (sp <= sp_limit || (guard_start <= far && far <= sp_limit)) {

@@ -434,7 +435,7 @@ void z_arm64_do_kernel_oops(struct arch_esf *esf)
 * User mode is only allowed to induce oopses and stack check
 * failures via software-triggered system fatal exceptions.
 */
-if (((_current->base.user_options & K_USER) != 0) &&
+if (((arch_current_thread()->base.user_options & K_USER) != 0) &&
 reason != K_ERR_STACK_CHK_FAIL) {
 reason = K_ERR_KERNEL_OOPS;
 }
@@ -36,7 +36,7 @@ static void DBG(char *msg, struct k_thread *th)
 strcpy(buf, "CPU# exc# ");
 buf[3] = '0' + _current_cpu->id;
 buf[8] = '0' + arch_exception_depth();
-strcat(buf, _current->name);
+strcat(buf, arch_current_thread()->name);
 strcat(buf, ": ");
 strcat(buf, msg);
 strcat(buf, " ");

@@ -125,7 +125,7 @@ static void flush_owned_fpu(struct k_thread *thread)
 * replace it, and this avoids a deadlock where
 * two CPUs want to pull each other's FPU context.
 */
-if (thread == _current) {
+if (thread == arch_current_thread()) {
 arch_flush_local_fpu();
 while (atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner) == thread) {
 barrier_dsync_fence_full();

@@ -260,15 +260,15 @@ void z_arm64_fpu_trap(struct arch_esf *esf)
 * Make sure the FPU context we need isn't live on another CPU.
 * The current CPU's FPU context is NULL at this point.
 */
-flush_owned_fpu(_current);
+flush_owned_fpu(arch_current_thread());
 #endif

 /* become new owner */
-atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);
+atomic_ptr_set(&_current_cpu->arch.fpu_owner, arch_current_thread());

 /* restore our content */
-z_arm64_fpu_restore(&_current->arch.saved_fp_context);
-DBG("restore", _current);
+z_arm64_fpu_restore(&arch_current_thread()->arch.saved_fp_context);
+DBG("restore", arch_current_thread());
 }

 /*

@@ -287,7 +287,7 @@ static void fpu_access_update(unsigned int exc_update_level)

 if (arch_exception_depth() == exc_update_level) {
 /* We're about to execute non-exception code */
-if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == _current) {
+if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == arch_current_thread()) {
 /* turn on FPU access */
 write_cpacr_el1(cpacr | CPACR_EL1_FPEN_NOTRAP);
 } else {
@@ -1309,7 +1309,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
 }

 thread->arch.ptables = domain_ptables;
-if (thread == _current) {
+if (thread == arch_current_thread()) {
 z_arm64_swap_ptables(thread);
 } else {
 #ifdef CONFIG_SMP

@@ -240,7 +240,7 @@ void mem_cfg_ipi_handler(const void *unused)
 * This is a no-op if the page table is already the right one.
 * Lock irq to prevent the interrupt during mem region switch.
 */
-z_arm64_swap_mem_domains(_current);
+z_arm64_swap_mem_domains(arch_current_thread());
 arch_irq_unlock(key);
 }
@@ -159,15 +159,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 uint64_t tmpreg;

 /* Map the thread stack */
-z_arm64_thread_mem_domains_init(_current);
+z_arm64_thread_mem_domains_init(arch_current_thread());

 /* Top of the user stack area */
-stack_el0 = Z_STACK_PTR_ALIGN(_current->stack_info.start +
-_current->stack_info.size -
-_current->stack_info.delta);
+stack_el0 = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start +
+arch_current_thread()->stack_info.size -
+arch_current_thread()->stack_info.delta);

 /* Top of the privileged non-user-accessible part of the stack */
-stack_el1 = (uintptr_t)(_current->stack_obj + ARCH_THREAD_STACK_RESERVED);
+stack_el1 = (uintptr_t)(arch_current_thread()->stack_obj + ARCH_THREAD_STACK_RESERVED);

 register void *x0 __asm__("x0") = user_entry;
 register void *x1 __asm__("x1") = p1;
@@ -23,7 +23,7 @@
 int arch_swap(unsigned int key)
 {
 /*
-* struct k_thread * _current is the currently running thread
+* struct k_thread * arch_current_thread() is the currently running thread
 * struct k_thread * _kernel.ready_q.cache contains the next thread to
 * run (cannot be NULL)
 *

@@ -34,8 +34,8 @@ int arch_swap(unsigned int key)
 #if CONFIG_INSTRUMENT_THREAD_SWITCHING
 z_thread_mark_switched_out();
 #endif
-_current->callee_saved.key = key;
-_current->callee_saved.retval = -EAGAIN;
+arch_current_thread()->callee_saved.key = key;
+arch_current_thread()->callee_saved.retval = -EAGAIN;

 /* retval may be modified with a call to
 * arch_thread_return_value_set()

@@ -47,10 +47,10 @@ int arch_swap(unsigned int key)

 posix_thread_status_t *this_thread_ptr =
 (posix_thread_status_t *)
-_current->callee_saved.thread_status;
+arch_current_thread()->callee_saved.thread_status;


-_current = _kernel.ready_q.cache;
+arch_current_thread_set(_kernel.ready_q.cache);
 #if CONFIG_INSTRUMENT_THREAD_SWITCHING
 z_thread_mark_switched_in();
 #endif

@@ -66,9 +66,9 @@ int arch_swap(unsigned int key)

 /* When we continue, _kernel->current points back to this thread */

-irq_unlock(_current->callee_saved.key);
+irq_unlock(arch_current_thread()->callee_saved.key);

-return _current->callee_saved.retval;
+return arch_current_thread()->callee_saved.retval;
 }
@@ -94,7 +94,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
 z_thread_mark_switched_out();
 #endif

-_current = _kernel.ready_q.cache;
+arch_current_thread_set(_kernel.ready_q.cache);

 #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
 z_thread_mark_switched_in();

@@ -112,7 +112,7 @@ void z_impl_k_thread_abort(k_tid_t thread)

 key = irq_lock();

-if (_current == thread) {
+if (arch_current_thread() == thread) {
 if (tstatus->aborted == 0) { /* LCOV_EXCL_BR_LINE */
 tstatus->aborted = 1;
 } else {
@@ -35,7 +35,7 @@ config RISCV_CURRENT_VIA_GP
 select ARCH_HAS_CUSTOM_CURRENT_IMPL
 help
 Store the current thread's pointer into the global pointer (GP) register.
-When is enabled, calls to `_current` & `k_sched_current_thread_query()` will
+When is enabled, calls to `arch_current_thread()` & `k_sched_current_thread_query()` will
 be reduced to a single register read.

 config RISCV_ALWAYS_SWITCH_THROUGH_ECALL
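For context, a sketch of the single-register-read fast path this option describes (illustrative only, not the in-tree implementation): with the current thread pointer kept in gp, the custom accessor enabled by ARCH_HAS_CUSTOM_CURRENT_IMPL can reduce to reading that register.

static ALWAYS_INLINE struct k_thread *arch_current_thread(void)
{
	/* assumes the kernel maintains the current thread pointer in gp */
	register struct k_thread *curr __asm__("gp");

	return curr;
}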
@@ -158,23 +158,23 @@ static bool bad_stack_pointer(struct arch_esf *esf)
 uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);

 #ifdef CONFIG_USERSPACE
-if (_current->arch.priv_stack_start != 0 &&
-sp >= _current->arch.priv_stack_start &&
-sp < _current->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) {
+if (arch_current_thread()->arch.priv_stack_start != 0 &&
+sp >= arch_current_thread()->arch.priv_stack_start &&
+sp < arch_current_thread()->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) {
 return true;
 }

-if (z_stack_is_user_capable(_current->stack_obj) &&
-sp >= _current->stack_info.start - K_THREAD_STACK_RESERVED &&
-sp < _current->stack_info.start - K_THREAD_STACK_RESERVED
+if (z_stack_is_user_capable(arch_current_thread()->stack_obj) &&
+sp >= arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED &&
+sp < arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED
 + Z_RISCV_STACK_GUARD_SIZE) {
 return true;
 }
 #endif /* CONFIG_USERSPACE */

 #if CONFIG_MULTITHREADING
-if (sp >= _current->stack_info.start - K_KERNEL_STACK_RESERVED &&
-sp < _current->stack_info.start - K_KERNEL_STACK_RESERVED
+if (sp >= arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED &&
+sp < arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED
 + Z_RISCV_STACK_GUARD_SIZE) {
 return true;
 }

@@ -191,10 +191,10 @@ static bool bad_stack_pointer(struct arch_esf *esf)

 #ifdef CONFIG_USERSPACE
 if ((esf->mstatus & MSTATUS_MPP) == 0 &&
-(esf->sp < _current->stack_info.start ||
-esf->sp > _current->stack_info.start +
-_current->stack_info.size -
-_current->stack_info.delta)) {
+(esf->sp < arch_current_thread()->stack_info.start ||
+esf->sp > arch_current_thread()->stack_info.start +
+arch_current_thread()->stack_info.size -
+arch_current_thread()->stack_info.delta)) {
 /* user stack pointer moved outside of its allowed stack */
 return true;
 }

@@ -246,9 +246,9 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)

 void z_impl_user_fault(unsigned int reason)
 {
-struct arch_esf *oops_esf = _current->syscall_frame;
+struct arch_esf *oops_esf = arch_current_thread()->syscall_frame;

-if (((_current->base.user_options & K_USER) != 0) &&
+if (((arch_current_thread()->base.user_options & K_USER) != 0) &&
 reason != K_ERR_STACK_CHK_FAIL) {
 reason = K_ERR_KERNEL_OOPS;
 }
@@ -36,8 +36,8 @@ static void DBG(char *msg, struct k_thread *th)

 strcpy(buf, "CPU# exc# ");
 buf[3] = '0' + _current_cpu->id;
-buf[8] = '0' + _current->arch.exception_depth;
-strcat(buf, _current->name);
+buf[8] = '0' + arch_current_thread()->arch.exception_depth;
+strcat(buf, arch_current_thread()->name);
 strcat(buf, ": ");
 strcat(buf, msg);
 strcat(buf, " ");

@@ -82,12 +82,12 @@ static void z_riscv_fpu_load(void)
 "must be called with FPU access disabled");

 /* become new owner */
-atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);
+atomic_ptr_set(&_current_cpu->arch.fpu_owner, arch_current_thread());

 /* restore our content */
 csr_set(mstatus, MSTATUS_FS_INIT);
-z_riscv_fpu_restore(&_current->arch.saved_fp_context);
-DBG("restore", _current);
+z_riscv_fpu_restore(&arch_current_thread()->arch.saved_fp_context);
+DBG("restore", arch_current_thread());
 }

 /*

@@ -168,7 +168,7 @@ static void flush_owned_fpu(struct k_thread *thread)
 * replace it, and this avoids a deadlock where
 * two CPUs want to pull each other's FPU context.
 */
-if (thread == _current) {
+if (thread == arch_current_thread()) {
 z_riscv_fpu_disable();
 arch_flush_local_fpu();
 do {

@@ -213,7 +213,7 @@ void z_riscv_fpu_trap(struct arch_esf *esf)
 /* save current owner's content if any */
 arch_flush_local_fpu();

-if (_current->arch.exception_depth > 0) {
+if (arch_current_thread()->arch.exception_depth > 0) {
 /*
 * We were already in exception when the FPU access trapped.
 * We give it access and prevent any further IRQ recursion

@@ -233,7 +233,7 @@ void z_riscv_fpu_trap(struct arch_esf *esf)
 * Make sure the FPU context we need isn't live on another CPU.
 * The current CPU's FPU context is NULL at this point.
 */
-flush_owned_fpu(_current);
+flush_owned_fpu(arch_current_thread());
 #endif

 /* make it accessible and clean to the returning context */

@@ -256,13 +256,13 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
 __ASSERT((csr_read(mstatus) & MSTATUS_IEN) == 0,
 "must be called with IRQs disabled");

-if (_current->arch.exception_depth == exc_update_level) {
+if (arch_current_thread()->arch.exception_depth == exc_update_level) {
 /* We're about to execute non-exception code */
-if (_current_cpu->arch.fpu_owner == _current) {
+if (_current_cpu->arch.fpu_owner == arch_current_thread()) {
 /* everything is already in place */
 return true;
 }
-if (_current->arch.fpu_recently_used) {
+if (arch_current_thread()->arch.fpu_recently_used) {
 /*
 * Before this thread was context-switched out,
 * it made active use of the FPU, but someone else

@@ -273,7 +273,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
 z_riscv_fpu_disable();
 arch_flush_local_fpu();
 #ifdef CONFIG_SMP
-flush_owned_fpu(_current);
+flush_owned_fpu(arch_current_thread());
 #endif
 z_riscv_fpu_load();
 _current_cpu->arch.fpu_state = MSTATUS_FS_CLEAN;
@@ -297,7 +297,7 @@ is_fp: /* Process the FP trap and quickly return from exception */
 mv a0, sp
 tail z_riscv_fpu_trap
 2:
-no_fp: /* increment _current->arch.exception_depth */
+no_fp: /* increment arch_current_thread()->arch.exception_depth */
 lr t0, ___cpu_t_current_OFFSET(s0)
 lb t1, _thread_offset_to_exception_depth(t0)
 add t1, t1, 1

@@ -724,7 +724,7 @@ no_reschedule:
 mv a0, sp
 call z_riscv_fpu_exit_exc

-/* decrement _current->arch.exception_depth */
+/* decrement arch_current_thread()->arch.exception_depth */
 lr t0, ___cpu_t_current_OFFSET(s0)
 lb t1, _thread_offset_to_exception_depth(t0)
 add t1, t1, -1
@@ -752,8 +752,8 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
 int ret = -1;

 /* Check if this is on the stack */
-if (IS_WITHIN(start, size,
-_current->stack_info.start, _current->stack_info.size)) {
+if (IS_WITHIN(start, size, arch_current_thread()->stack_info.start,
+arch_current_thread()->stack_info.size)) {
 return 0;
 }

@@ -768,7 +768,7 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
 }

 /* Look for a matching partition in our memory domain */
-struct k_mem_domain *domain = _current->mem_domain_info.mem_domain;
+struct k_mem_domain *domain = arch_current_thread()->mem_domain_info.mem_domain;
 int p_idx, remaining_partitions;
 k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
@@ -108,7 +108,7 @@ static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k
 /* Unwind the provided exception stack frame */
 fp = esf->s0;
 ra = esf->mepc;
-} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
+} else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) {
 /* Unwind current thread (default case when nothing is provided ) */
 fp = (uintptr_t)__builtin_frame_address(0);
 ra = (uintptr_t)walk_stackframe;

@@ -181,7 +181,7 @@ static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k
 /* Unwind the provided exception stack frame */
 sp = z_riscv_get_sp_before_exc(esf);
 ra = esf->mepc;
-} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
+} else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) {
 /* Unwind current thread (default case when nothing is provided ) */
 sp = current_stack_pointer;
 ra = (uintptr_t)walk_stackframe;

@@ -215,8 +215,10 @@ void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
 const struct k_thread *thread, const struct arch_esf *esf)
 {
 if (thread == NULL) {
-/* In case `thread` is NULL, default that to `_current` and try to unwind */
-thread = _current;
+/* In case `thread` is NULL, default that to `arch_current_thread()`
+* and try to unwind
+*/
+thread = arch_current_thread();
 }

 walk_stackframe((riscv_stacktrace_cb)callback_fn, cookie, thread, esf, in_stack_bound,
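A short usage sketch of the defaulting behaviour shown above: passing NULL for both the thread and the exception frame walks the calling thread's own stack. The callback below is illustrative and assumes the usual bool (*)(void *cookie, unsigned long addr) shape of stack_trace_callback_fn.

static bool print_frame(void *cookie, unsigned long addr)
{
	printk("  ra: %lx\n", addr);
	return true; /* keep walking */
}

void dump_own_stack(void)
{
	/* thread == NULL defaults to arch_current_thread(); esf == NULL means
	 * no exception frame, so unwinding starts from the caller.
	 */
	arch_stack_walk(print_frame, NULL, NULL, NULL);
}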
@ -280,7 +282,8 @@ void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf
|
||||||
int i = 0;
|
int i = 0;
|
||||||
|
|
||||||
LOG_ERR("call trace:");
|
LOG_ERR("call trace:");
|
||||||
walk_stackframe(print_trace_address, &i, _current, esf, in_fatal_stack_bound, csf);
|
walk_stackframe(print_trace_address, &i, arch_current_thread(), esf, in_fatal_stack_bound,
|
||||||
|
csf);
|
||||||
LOG_ERR("");
|
LOG_ERR("");
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_EXCEPTION_STACK_TRACE */
|
#endif /* CONFIG_EXCEPTION_STACK_TRACE */
|
||||||
|
|
|
@@ -132,28 +132,29 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,

 /* Set up privileged stack */
 #ifdef CONFIG_GEN_PRIV_STACKS
-_current->arch.priv_stack_start =
-(unsigned long)z_priv_stack_find(_current->stack_obj);
+arch_current_thread()->arch.priv_stack_start =
+(unsigned long)z_priv_stack_find(arch_current_thread()->stack_obj);
 /* remove the stack guard from the main stack */
-_current->stack_info.start -= K_THREAD_STACK_RESERVED;
-_current->stack_info.size += K_THREAD_STACK_RESERVED;
+arch_current_thread()->stack_info.start -= K_THREAD_STACK_RESERVED;
+arch_current_thread()->stack_info.size += K_THREAD_STACK_RESERVED;
 #else
-_current->arch.priv_stack_start = (unsigned long)_current->stack_obj;
+arch_current_thread()->arch.priv_stack_start =
+(unsigned long)arch_current_thread()->stack_obj;
 #endif /* CONFIG_GEN_PRIV_STACKS */
-top_of_priv_stack = Z_STACK_PTR_ALIGN(_current->arch.priv_stack_start +
+top_of_priv_stack = Z_STACK_PTR_ALIGN(arch_current_thread()->arch.priv_stack_start +
 K_KERNEL_STACK_RESERVED +
 CONFIG_PRIVILEGED_STACK_SIZE);

 #ifdef CONFIG_INIT_STACKS
 /* Initialize the privileged stack */
-(void)memset((void *)_current->arch.priv_stack_start, 0xaa,
+(void)memset((void *)arch_current_thread()->arch.priv_stack_start, 0xaa,
 Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE));
 #endif /* CONFIG_INIT_STACKS */

 top_of_user_stack = Z_STACK_PTR_ALIGN(
-_current->stack_info.start +
-_current->stack_info.size -
-_current->stack_info.delta);
+arch_current_thread()->stack_info.start +
+arch_current_thread()->stack_info.size -
+arch_current_thread()->stack_info.delta);

 status = csr_read(mstatus);

@@ -169,12 +170,12 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,

 #ifdef CONFIG_PMP_STACK_GUARD
 /* reconfigure as the kernel mode stack will be different */
-z_riscv_pmp_stackguard_prepare(_current);
+z_riscv_pmp_stackguard_prepare(arch_current_thread());
 #endif

 /* Set up Physical Memory Protection */
-z_riscv_pmp_usermode_prepare(_current);
-z_riscv_pmp_usermode_enable(_current);
+z_riscv_pmp_usermode_prepare(arch_current_thread());
+z_riscv_pmp_usermode_enable(arch_current_thread());

 /* preserve stack pointer for next exception entry */
 arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack;
@@ -61,7 +61,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,

 void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
 {
-*old_thread = _current;
+*old_thread = arch_current_thread();

 return z_get_next_switch_handle(*old_thread);
 }
@@ -49,7 +49,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
 {
 uintptr_t start, end;

-if (_current == NULL || arch_is_in_isr()) {
+if (arch_current_thread() == NULL || arch_is_in_isr()) {
 /* We were servicing an interrupt or in early boot environment
 * and are supposed to be on the interrupt stack */
 int cpu_id;

@@ -64,7 +64,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
 end = start + CONFIG_ISR_STACK_SIZE;
 #ifdef CONFIG_USERSPACE
 } else if ((cs & 0x3U) == 0U &&
-(_current->base.user_options & K_USER) != 0) {
+(arch_current_thread()->base.user_options & K_USER) != 0) {
 /* The low two bits of the CS register is the privilege
 * level. It will be 0 in supervisor mode and 3 in user mode
 * corresponding to ring 0 / ring 3.

@@ -72,14 +72,14 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
 * If we get here, we must have been doing a syscall, check
 * privilege elevation stack bounds
 */
-start = _current->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
-end = _current->stack_info.start;
+start = arch_current_thread()->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
+end = arch_current_thread()->stack_info.start;
 #endif /* CONFIG_USERSPACE */
 } else {
 /* Normal thread operation, check its stack buffer */
-start = _current->stack_info.start;
-end = Z_STACK_PTR_ALIGN(_current->stack_info.start +
-_current->stack_info.size);
+start = arch_current_thread()->stack_info.start;
+end = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start +
+arch_current_thread()->stack_info.size);
 }

 return (addr <= start) || (addr + size > end);

@@ -97,7 +97,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
 __pinned_func
 bool z_x86_check_guard_page(uintptr_t addr)
 {
-struct k_thread *thread = _current;
+struct k_thread *thread = arch_current_thread();
 uintptr_t start, end;

 /* Front guard size - before thread stack area */
@@ -233,7 +233,7 @@ static inline uintptr_t get_cr3(const struct arch_esf *esf)
 * switch when we took the exception via z_x86_trampoline_to_kernel
 */
 if ((esf->cs & 0x3) != 0) {
-return _current->arch.ptables;
+return arch_current_thread()->arch.ptables;
 }
 #else
 ARG_UNUSED(esf);
@@ -207,7 +207,7 @@ void z_float_enable(struct k_thread *thread, unsigned int options)

 /* Associate the new FP context with the specified thread */

-if (thread == _current) {
+if (thread == arch_current_thread()) {
 /*
 * When enabling FP support for the current thread, just claim
 * ownership of the FPU and leave CR0[TS] unset.

@@ -222,7 +222,7 @@ void z_float_enable(struct k_thread *thread, unsigned int options)
 * of the FPU to them (unless we need it ourselves).
 */

-if ((_current->base.user_options & _FP_USER_MASK) == 0) {
+if ((arch_current_thread()->base.user_options & _FP_USER_MASK) == 0) {
 /*
 * We are not FP-capable, so mark FPU as owned by the
 * thread we've just enabled FP support for, then

@@ -278,7 +278,7 @@ int z_float_disable(struct k_thread *thread)

 thread->base.user_options &= ~_FP_USER_MASK;

-if (thread == _current) {
+if (thread == arch_current_thread()) {
 z_FpAccessDisable();
 _kernel.current_fp = (struct k_thread *)0;
 } else {

@@ -314,7 +314,7 @@ void _FpNotAvailableExcHandler(struct arch_esf *pEsf)

 /* Enable highest level of FP capability configured into the kernel */

-k_float_enable(_current, _FP_USER_MASK);
+k_float_enable(arch_current_thread(), _FP_USER_MASK);
 }
 _EXCEPTION_CONNECT_NOCODE(_FpNotAvailableExcHandler,
 IV_DEVICE_NOT_AVAILABLE, 0);
@@ -132,9 +132,9 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 	/* Transition will reset stack pointer to initial, discarding
 	 * any old context since this is a one-way operation
 	 */
-	stack_end = Z_STACK_PTR_ALIGN(_current->stack_info.start +
-				      _current->stack_info.size -
-				      _current->stack_info.delta);
+	stack_end = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start +
+				      arch_current_thread()->stack_info.size -
+				      arch_current_thread()->stack_info.delta);

 #ifdef CONFIG_X86_64
 	/* x86_64 SysV ABI requires 16 byte stack alignment, which

@@ -156,15 +156,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 	 * Note that this also needs to page in the reserved
 	 * portion of the stack (which is usually the page just
 	 * before the beginning of stack in
-	 * _current->stack_info.start.
+	 * arch_current_thread()->stack_info.start.
 	 */
 	uintptr_t stack_start;
 	size_t stack_size;
 	uintptr_t stack_aligned_start;
 	size_t stack_aligned_size;

-	stack_start = POINTER_TO_UINT(_current->stack_obj);
-	stack_size = K_THREAD_STACK_LEN(_current->stack_info.size);
+	stack_start = POINTER_TO_UINT(arch_current_thread()->stack_obj);
+	stack_size = K_THREAD_STACK_LEN(arch_current_thread()->stack_info.size);

 #if defined(CONFIG_X86_STACK_PROTECTION)
 	/* With hardware stack protection, the first page of stack

@@ -182,7 +182,7 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 #endif

 	z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end,
-			      _current->stack_info.start);
+			      arch_current_thread()->stack_info.start);
 	CODE_UNREACHABLE;
 }
@@ -421,7 +421,7 @@ void z_x86_tlb_ipi(const void *arg)
 	/* We might have been moved to another memory domain, so always invoke
 	 * z_x86_thread_page_tables_get() instead of using current CR3 value.
 	 */
-	ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(_current));
+	ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(arch_current_thread()));
 #endif
 	/*
 	 * In the future, we can consider making this smarter, such as

@@ -1440,7 +1440,7 @@ static inline void bcb_fence(void)
 __pinned_func
 int arch_buffer_validate(const void *addr, size_t size, int write)
 {
-	pentry_t *ptables = z_x86_thread_page_tables_get(_current);
+	pentry_t *ptables = z_x86_thread_page_tables_get(arch_current_thread());
 	uint8_t *virt;
 	size_t aligned_size;
 	int ret = 0;

@@ -1958,7 +1958,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
 	 * IPI takes care of this if the thread is currently running on some
 	 * other CPU.
 	 */
-	if (thread == _current && thread->arch.ptables != z_x86_cr3_get()) {
+	if (thread == arch_current_thread() && thread->arch.ptables != z_x86_cr3_get()) {
 		z_x86_cr3_set(thread->arch.ptables);
 	}
 #endif /* CONFIG_X86_KPTI */

@@ -1980,8 +1980,9 @@ void z_x86_current_stack_perms(void)
 	/* Clear any previous context in the stack buffer to prevent
 	 * unintentional data leakage.
 	 */
-	(void)memset((void *)_current->stack_info.start, 0xAA,
-		     _current->stack_info.size - _current->stack_info.delta);
+	(void)memset((void *)arch_current_thread()->stack_info.start, 0xAA,
+		     arch_current_thread()->stack_info.size -
+			     arch_current_thread()->stack_info.delta);

 	/* Only now is it safe to grant access to the stack buffer since any
 	 * previous context has been erased.

@@ -1991,13 +1992,13 @@ void z_x86_current_stack_perms(void)
 	 * This will grant stack and memory domain access if it wasn't set
 	 * already (in which case this returns very quickly).
 	 */
-	z_x86_swap_update_common_page_table(_current);
+	z_x86_swap_update_common_page_table(arch_current_thread());
 #else
 	/* Memory domain access is already programmed into the page tables.
 	 * Need to enable access to this new user thread's stack buffer in
 	 * its domain-specific page tables.
 	 */
-	set_stack_perms(_current, z_x86_thread_page_tables_get(_current));
+	set_stack_perms(arch_current_thread(), z_x86_thread_page_tables_get(arch_current_thread()));
 #endif
 }
 #endif /* CONFIG_USERSPACE */
@@ -140,7 +140,7 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf)
 #ifdef CONFIG_USERSPACE
 void z_impl_xtensa_user_fault(unsigned int reason)
 {
-	if ((_current->base.user_options & K_USER) != 0) {
+	if ((arch_current_thread()->base.user_options & K_USER) != 0) {
 		if ((reason != K_ERR_KERNEL_OOPS) &&
 		    (reason != K_ERR_STACK_CHK_FAIL)) {
 			reason = K_ERR_KERNEL_OOPS;

@@ -1086,7 +1086,7 @@ static int mem_buffer_validate(const void *addr, size_t size, int write, int rin
 	int ret = 0;
 	uint8_t *virt;
 	size_t aligned_size;
-	const struct k_thread *thread = _current;
+	const struct k_thread *thread = arch_current_thread();
 	uint32_t *ptables = thread_page_tables_get(thread);

 	/* addr/size arbitrary, fix this up into an aligned region */

@@ -156,7 +156,7 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 					void *p1, void *p2, void *p3)
 {
-	struct k_thread *current = _current;
+	struct k_thread *current = arch_current_thread();
 	size_t stack_end;

 	/* Transition will reset stack pointer to initial, discarding

@@ -34,7 +34,7 @@ extern char xtensa_arch_kernel_oops_epc[];
 bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps)
 {
 	uintptr_t start, end;
-	struct k_thread *thread = _current;
+	struct k_thread *thread = arch_current_thread();
 	bool was_in_isr, invalid;

 	/* Without userspace, there is no privileged stack so the thread stack
@@ -105,7 +105,7 @@ void posix_irq_handler(void)
 	 */
 	if (may_swap
 		&& (hw_irq_ctrl_get_cur_prio() == 256)
-		&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) {
+		&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) {

 		(void)z_swap_irqlock(irq_lock);
 	}

@@ -113,7 +113,7 @@ void posix_irq_handler(void)
 	 */
 	if (may_swap
 		&& (hw_irq_ctrl_get_cur_prio() == 256)
-		&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) {
+		&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) {

 		(void)z_swap_irqlock(irq_lock);
 	}

@@ -135,7 +135,7 @@ void posix_irq_handler(void)
 	if (may_swap
 		&& (hw_irq_ctrl_get_cur_prio(cpu_n) == 256)
 		&& (CPU_will_be_awaken_from_WFE == false)
-		&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) {
+		&& (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) {

 		z_swap_irqlock(irq_lock);
 	}
@@ -276,7 +276,7 @@ Per-CPU data
 ============

 Many elements of the core kernel data need to be implemented for each
-CPU in SMP mode. For example, the ``_current`` thread pointer obviously
+CPU in SMP mode. For example, the ``arch_current_thread()`` thread pointer obviously
 needs to reflect what is running locally, there are many threads
 running concurrently. Likewise a kernel-provided interrupt stack
 needs to be created and assigned for each physical CPU, as does the
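Note: a minimal sketch (not part of this commit) of the per-CPU bookkeeping the
documentation describes, assuming each per-CPU record keeps a current-thread
field the way the kernel's struct _cpu does; all names in the sketch are
illustrative.

#define DEMO_NUM_CPUS 4

struct demo_cpu_record {
	struct k_thread *current;   /* thread running on this CPU */
	/* ... per-CPU interrupt stack, swap_ok flag, etc. ... */
};

static struct demo_cpu_record demo_cpus[DEMO_NUM_CPUS];

static inline struct k_thread *demo_current_thread(unsigned int cpu_id)
{
	/* Conceptually, arch_current_thread() resolves against the record of
	 * the CPU the caller is executing on and returns its current field.
	 */
	return demo_cpus[cpu_id].current;
}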
@@ -168,6 +168,10 @@ Modem
 Architectures
 *************

+* Common
+
+  * ``_current`` is deprecated, use :c:func:`arch_current_thread` instead.
+
 * native/POSIX

 * :kconfig:option:`CONFIG_NATIVE_APPLICATION` has been deprecated. Out-of-tree boards using this
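Note: for out-of-tree code the migration is mechanical; a before/after sketch
(function and message are illustrative, not taken from this commit):

/* Before: still compiles, but now emits a deprecation warning */
void report_self(void)
{
	printk("running thread: %p\n", _current);
}

/* After */
void report_self(void)
{
	printk("running thread: %p\n", arch_current_thread());
}

Application code that only needs the current thread ID can also keep using the
public k_current_get() API, which this deprecation does not touch.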
@@ -92,9 +92,9 @@ static inline int eswifi_request(struct eswifi_dev *eswifi, char *cmd,
 static inline void eswifi_lock(struct eswifi_dev *eswifi)
 {
 	/* Nested locking */
-	if (atomic_get(&eswifi->mutex_owner) != (atomic_t)(uintptr_t)_current) {
+	if (atomic_get(&eswifi->mutex_owner) != (atomic_t)(uintptr_t)arch_current_thread()) {
 		k_mutex_lock(&eswifi->mutex, K_FOREVER);
-		atomic_set(&eswifi->mutex_owner, (atomic_t)(uintptr_t)_current);
+		atomic_set(&eswifi->mutex_owner, (atomic_t)(uintptr_t)arch_current_thread());
 		eswifi->mutex_depth = 1;
 	} else {
 		eswifi->mutex_depth++;
@@ -1289,7 +1289,7 @@ typedef bool (*stack_trace_callback_fn)(void *cookie, unsigned long addr);
 * ============ ======= ============================================
 * thread       esf
 * ============ ======= ============================================
-* thread       NULL    Stack trace from thread (can be _current)
+* thread       NULL    Stack trace from thread (can be arch_current_thread())
 * thread       esf     Stack trace starting on esf
 * ============ ======= ============================================
 */
@@ -19,7 +19,7 @@
 static ALWAYS_INLINE struct k_thread *arch_current_thread(void)
 {
#ifdef CONFIG_SMP
-	/* In SMP, _current is a field read from _current_cpu, which
+	/* In SMP, arch_current_thread() is a field read from _current_cpu, which
 	 * can race with preemption before it is read. We must lock
 	 * local interrupts when reading it.
 	 */
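Note: the body that follows this comment is not quoted in the hunk; as a rough
sketch of the pattern the comment describes (assumed, not copied from the diff),
the read is bracketed by an interrupt lock so the CPU cannot change between
resolving _current_cpu and dereferencing its current field:

static inline struct k_thread *sketch_current_thread(void)
{
#ifdef CONFIG_SMP
	unsigned int key = arch_irq_lock();
	/* Safe: preemption/migration cannot happen while local IRQs are locked. */
	struct k_thread *ret = _current_cpu->current;

	arch_irq_unlock(key);
	return ret;
#else
	return _kernel.cpus[0].current;
#endif
}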
@@ -305,7 +305,7 @@ static inline void arch_isr_direct_footer(int swap)
 	 * 3) Next thread to run in the ready queue is not this thread
 	 */
 	if (swap != 0 && _kernel.cpus[0].nested == 0 &&
-	    _kernel.ready_q.cache != _current) {
+	    _kernel.ready_q.cache != arch_current_thread()) {
 		unsigned int flags;

 		/* Fetch EFLAGS argument to z_swap() */

@@ -62,7 +62,7 @@ static inline bool k_is_in_user_syscall(void)
 	 * calls from supervisor mode bypass everything directly to
 	 * the implementation function.
 	 */
-	return !k_is_in_isr() && (_current->syscall_frame != NULL);
+	return !k_is_in_isr() && (arch_current_thread()->syscall_frame != NULL);
 }

 /**

@@ -350,7 +350,7 @@ int k_usermode_string_copy(char *dst, const char *src, size_t maxlen);
 #define K_OOPS(expr) \
 	do { \
 		if (expr) { \
-			arch_syscall_oops(_current->syscall_frame); \
+			arch_syscall_oops(arch_current_thread()->syscall_frame); \
 		} \
 	} while (false)

@@ -171,7 +171,7 @@ struct _cpu {
 #endif

 #ifdef CONFIG_SMP
-	/* True when _current is allowed to context switch */
+	/* True when arch_current_thread() is allowed to context switch */
 	uint8_t swap_ok;
 #endif
@@ -260,12 +260,12 @@ bool z_smp_cpu_mobile(void);

 #define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
			 arch_curr_cpu(); })
-#define _current arch_current_thread()

 #else
 #define _current_cpu (&_kernel.cpus[0])
-#define _current _kernel.cpus[0].current
-#endif
+#endif /* CONFIG_SMP */
+
+#define _current arch_current_thread() __DEPRECATED_MACRO

 /* kernel wait queue record */
 #ifdef CONFIG_WAITQ_SCALABLE
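Note: with __DEPRECATED_MACRO attached, existing spellings keep building but now
warn at compile time; an illustration (not from the diff):

	struct k_thread *a = _current;               /* deprecated, emits a warning */
	struct k_thread *b = arch_current_thread();  /* preferred replacement */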
@@ -211,7 +211,7 @@ config THREAD_ABORT_NEED_CLEANUP
 	bool
 	help
 	  This option enables the bits to clean up the current thread if
-	  k_thread_abort(_current) is called, as the cleanup cannot be
+	  k_thread_abort(arch_current_thread()) is called, as the cleanup cannot be
 	  running in the current thread stack.

 config THREAD_CUSTOM_DATA
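Note: the situation the help text describes is a thread tearing itself down; a
minimal sketch (entry point and helper names are illustrative):

static void worker_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	do_work(p1);  /* hypothetical workload */

	/* Self-abort: the kernel cannot finish the cleanup on this thread's
	 * own stack, so it is deferred -- which is what this option enables.
	 */
	k_thread_abort(arch_current_thread());
}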
@@ -36,7 +36,7 @@ int *z_impl_z_errno(void)
 	/* Initialized to the lowest address in the stack so the thread can
 	 * directly read/write it
 	 */
-	return &_current->userspace_local_data->errno_var;
+	return &arch_current_thread()->userspace_local_data->errno_var;
 }

 static inline int *z_vrfy_z_errno(void)

@@ -48,7 +48,7 @@ static inline int *z_vrfy_z_errno(void)
 #else
 int *z_impl_z_errno(void)
 {
-	return &_current->errno_var;
+	return &arch_current_thread()->errno_var;
 }
 #endif /* CONFIG_USERSPACE */

@@ -90,7 +90,7 @@ void z_fatal_error(unsigned int reason, const struct arch_esf *esf)
 	 */
 	unsigned int key = arch_irq_lock();
 	struct k_thread *thread = IS_ENABLED(CONFIG_MULTITHREADING) ?
-			_current : NULL;
+			arch_current_thread() : NULL;

 	/* twister looks for the "ZEPHYR FATAL ERROR" string, don't
 	 * change it without also updating twister

@@ -24,7 +24,7 @@ void idle(void *unused1, void *unused2, void *unused3)
 	ARG_UNUSED(unused2);
 	ARG_UNUSED(unused3);

-	__ASSERT_NO_MSG(_current->base.prio >= 0);
+	__ASSERT_NO_MSG(arch_current_thread()->base.prio >= 0);

 	while (true) {
 		/* SMP systems without a working IPI can't actual

@@ -85,7 +85,7 @@ void idle(void *unused1, void *unused2, void *unused3)
 		 * explicitly yield in the idle thread otherwise
 		 * nothing else will run once it starts.
 		 */
-		if (_kernel.ready_q.cache != _current) {
+		if (_kernel.ready_q.cache != arch_current_thread()) {
 			z_swap_unlocked();
 		}
 # endif /* !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC) */

@@ -286,7 +286,7 @@ int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
 * where these steps require that the thread is no longer running.
 * If the target thread is not the current running thread, the cleanup
 * steps will be performed immediately. However, if the target thread is
-* the current running thread (e.g. k_thread_abort(_current)), it defers
+* the current running thread (e.g. k_thread_abort(arch_current_thread())), it defers
 * the cleanup steps to later when the work will be finished in another
 * context.
 *
@@ -143,9 +143,9 @@ static inline bool _is_valid_prio(int prio, void *entry_point)
 static inline void z_sched_lock(void)
 {
 	__ASSERT(!arch_is_in_isr(), "");
-	__ASSERT(_current->base.sched_locked != 1U, "");
+	__ASSERT(arch_current_thread()->base.sched_locked != 1U, "");

-	--_current->base.sched_locked;
+	--arch_current_thread()->base.sched_locked;

 	compiler_barrier();
 }
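Note: z_sched_lock() is the internal half of the public pair used below; the
counter it decrements here is the same one k_sched_unlock() increments and
asserts on later in this diff. Usage sketch (function name illustrative):

void update_shared_state(void)
{
	/* Keep the current thread from being preempted (IRQs stay enabled). */
	k_sched_lock();

	/* ... touch state shared with other threads ... */

	/* Allow preemption again; a reschedule may happen immediately. */
	k_sched_unlock();
}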
@@ -97,12 +97,12 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 	 */
 # ifndef CONFIG_ARM64
 	__ASSERT(arch_irq_unlocked(key) ||
-		 _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
+		 arch_current_thread()->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
 		 "Context switching while holding lock!");
 # endif /* CONFIG_ARM64 */
 #endif /* CONFIG_SPIN_VALIDATE */

-	old_thread = _current;
+	old_thread = arch_current_thread();

 	z_check_stack_sentinel();

@@ -147,7 +147,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 	arch_cohere_stacks(old_thread, NULL, new_thread);

 #ifdef CONFIG_SMP
-	/* Now add _current back to the run queue, once we are
+	/* Now add arch_current_thread() back to the run queue, once we are
 	 * guaranteed to reach the context switch in finite
 	 * time. See z_sched_switch_spin().
 	 */

@@ -175,7 +175,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 		irq_unlock(key);
 	}

-	return _current->swap_retval;
+	return arch_current_thread()->swap_retval;
 }

 static inline int z_swap_irqlock(unsigned int key)

@@ -197,17 +197,17 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
 		return true;
 	}

-	__ASSERT(_current != NULL, "");
+	__ASSERT(arch_current_thread() != NULL, "");

 	/* Or if we're pended/suspended/dummy (duh) */
-	if (z_is_thread_prevented_from_running(_current)) {
+	if (z_is_thread_prevented_from_running(arch_current_thread())) {
 		return true;
 	}

 	/* Otherwise we have to be running a preemptible thread or
 	 * switching to a metairq
 	 */
-	if (thread_is_preemptible(_current) || thread_is_metairq(thread)) {
+	if (thread_is_preemptible(arch_current_thread()) || thread_is_metairq(thread)) {
 		return true;
 	}

@@ -101,7 +101,7 @@ void z_sched_ipi(void)
 #endif /* CONFIG_TRACE_SCHED_IPI */

 #ifdef CONFIG_TIMESLICING
-	if (thread_is_sliceable(_current)) {
+	if (thread_is_sliceable(arch_current_thread())) {
 		z_time_slice();
 	}
 #endif /* CONFIG_TIMESLICING */
@@ -216,7 +216,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 	k_spinlock_key_t key;

 	/* save sender id so it can be used during message matching */
-	tx_msg->rx_source_thread = _current;
+	tx_msg->rx_source_thread = arch_current_thread();

 	/* finish readying sending thread (actual or dummy) for send */
 	sending_thread = tx_msg->_syncing_thread;

@@ -296,7 +296,7 @@ int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 	       k_timeout_t timeout)
 {
 	/* configure things for a synchronous send, then send the message */
-	tx_msg->_syncing_thread = _current;
+	tx_msg->_syncing_thread = arch_current_thread();

 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, put, mbox, timeout);

@@ -321,7 +321,7 @@ void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 	 */
 	mbox_async_alloc(&async);

-	async->thread.prio = _current->base.prio;
+	async->thread.prio = arch_current_thread()->base.prio;

 	async->tx_msg = *tx_msg;
 	async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;

@@ -388,7 +388,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
 	int result;

 	/* save receiver id so it can be used during message matching */
-	rx_msg->tx_target_thread = _current;
+	rx_msg->tx_target_thread = arch_current_thread();

 	/* search mailbox's tx queue for a compatible sender */
 	key = k_spin_lock(&mbox->lock);

@@ -425,7 +425,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
 	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, get, mbox, timeout);

 	/* wait until a matching sender appears or a timeout occurs */
-	_current->base.swap_data = rx_msg;
+	arch_current_thread()->base.swap_data = rx_msg;
 	result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout);

 	/* consume message data immediately, if needed */
@@ -299,7 +299,7 @@ void z_mem_domain_init_thread(struct k_thread *thread)
 	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

 	/* New threads inherit memory domain configuration from parent */
-	ret = add_thread_locked(_current->mem_domain_info.mem_domain, thread);
+	ret = add_thread_locked(arch_current_thread()->mem_domain_info.mem_domain, thread);
 	__ASSERT_NO_MSG(ret == 0);
 	ARG_UNUSED(ret);

@@ -248,7 +248,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
 		/* wait for a free block or timeout */
 		result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout);
 		if (result == 0) {
-			*mem = _current->base.swap_data;
+			*mem = arch_current_thread()->base.swap_data;
 		}

 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result);

@@ -165,7 +165,7 @@ void *z_thread_aligned_alloc(size_t align, size_t size)
 	if (k_is_in_isr()) {
 		heap = _SYSTEM_HEAP;
 	} else {
-		heap = _current->resource_pool;
+		heap = arch_current_thread()->resource_pool;
 	}

 	if (heap != NULL) {

@@ -1674,7 +1674,7 @@ static bool do_page_fault(void *addr, bool pin)
 #endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */

 	key = k_spin_lock(&z_mm_lock);
-	faulting_thread = _current;
+	faulting_thread = arch_current_thread();

 	status = arch_page_location_get(addr, &page_in_location);
 	if (status == ARCH_PAGE_LOCATION_BAD) {
@@ -169,7 +169,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout
 			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, put, msgq, timeout);

 			/* wait for put message success, failure, or timeout */
-			_current->base.swap_data = (void *) data;
+			arch_current_thread()->base.swap_data = (void *) data;

 			result = z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout);
 			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, put, msgq, timeout, result);

@@ -267,7 +267,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
 			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, get, msgq, timeout);

 			/* wait for get message success or timeout */
-			_current->base.swap_data = data;
+			arch_current_thread()->base.swap_data = data;

 			result = z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout);
 			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, get, msgq, timeout, result);
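Note: the swap_data handoff above is kernel-internal; from an application's
point of view the blocking put/get pair looks like this sketch (queue name and
message type are illustrative):

struct demo_msg {
	uint32_t id;
	int32_t value;
};

K_MSGQ_DEFINE(demo_msgq, sizeof(struct demo_msg), 8, 4);

void demo_producer(void)
{
	struct demo_msg msg = { .id = 1, .value = 42 };

	/* Blocks the calling thread for up to 100 ms if the queue is full. */
	(void)k_msgq_put(&demo_msgq, &msg, K_MSEC(100));
}

void demo_consumer(void)
{
	struct demo_msg msg;

	/* Blocks the calling thread until a message arrives. */
	if (k_msgq_get(&demo_msgq, &msg, K_FOREVER) == 0) {
		/* use msg.id / msg.value */
	}
}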
@@ -114,17 +114,17 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)

 	key = k_spin_lock(&lock);

-	if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) {
+	if (likely((mutex->lock_count == 0U) || (mutex->owner == arch_current_thread()))) {

 		mutex->owner_orig_prio = (mutex->lock_count == 0U) ?
-					_current->base.prio :
+					arch_current_thread()->base.prio :
 					mutex->owner_orig_prio;

 		mutex->lock_count++;
-		mutex->owner = _current;
+		mutex->owner = arch_current_thread();

 		LOG_DBG("%p took mutex %p, count: %d, orig prio: %d",
-			_current, mutex, mutex->lock_count,
+			arch_current_thread(), mutex, mutex->lock_count,
 			mutex->owner_orig_prio);

 		k_spin_unlock(&lock, key);

@@ -144,7 +144,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)

 	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mutex, lock, mutex, timeout);

-	new_prio = new_prio_for_inheritance(_current->base.prio,
+	new_prio = new_prio_for_inheritance(arch_current_thread()->base.prio,
 					    mutex->owner->base.prio);

 	LOG_DBG("adjusting prio up on mutex %p", mutex);

@@ -157,7 +157,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)

 	LOG_DBG("on mutex %p got_mutex value: %d", mutex, got_mutex);

-	LOG_DBG("%p got mutex %p (y/n): %c", _current, mutex,
+	LOG_DBG("%p got mutex %p (y/n): %c", arch_current_thread(), mutex,
 		got_mutex ? 'y' : 'n');

 	if (got_mutex == 0) {

@@ -167,7 +167,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)

 	/* timed out */

-	LOG_DBG("%p timeout on mutex %p", _current, mutex);
+	LOG_DBG("%p timeout on mutex %p", arch_current_thread(), mutex);

 	key = k_spin_lock(&lock);

@@ -224,7 +224,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex)
 	/*
 	 * The current thread does not own the mutex.
 	 */
-	CHECKIF(mutex->owner != _current) {
+	CHECKIF(mutex->owner != arch_current_thread()) {
 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EPERM);

 		return -EPERM;
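Note: the owner check above is why unlocking from a thread other than the one
that took the lock fails with -EPERM. A short usage sketch from the caller's
side (names illustrative):

K_MUTEX_DEFINE(demo_lock);

int demo_update(void)
{
	/* The calling thread becomes mutex->owner on success. */
	int err = k_mutex_lock(&demo_lock, K_MSEC(100));

	if (err != 0) {
		return err;  /* timed out */
	}

	/* ... critical section ... */

	/* Must be issued by the same thread that took the lock. */
	return k_mutex_unlock(&demo_lock);
}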
@@ -443,11 +443,11 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data,
 	 * invoked from within an ISR as that is not safe to do.
 	 */

-	src_desc = k_is_in_isr() ? &isr_desc : &_current->pipe_desc;
+	src_desc = k_is_in_isr() ? &isr_desc : &arch_current_thread()->pipe_desc;

 	src_desc->buffer = (unsigned char *)data;
 	src_desc->bytes_to_xfer = bytes_to_write;
-	src_desc->thread = _current;
+	src_desc->thread = arch_current_thread();
 	sys_dlist_append(&src_list, &src_desc->node);

 	*bytes_written = pipe_write(pipe, &src_list,

@@ -488,7 +488,7 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data,

 	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, put, pipe, timeout);

-	_current->base.swap_data = src_desc;
+	arch_current_thread()->base.swap_data = src_desc;

 	z_sched_wait(&pipe->lock, key, &pipe->wait_q.writers, timeout, NULL);

@@ -581,11 +581,11 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,
 	 * invoked from within an ISR as that is not safe to do.
 	 */

-	dest_desc = k_is_in_isr() ? &isr_desc : &_current->pipe_desc;
+	dest_desc = k_is_in_isr() ? &isr_desc : &arch_current_thread()->pipe_desc;

 	dest_desc->buffer = data;
 	dest_desc->bytes_to_xfer = bytes_to_read;
-	dest_desc->thread = _current;
+	dest_desc->thread = arch_current_thread();

 	src_desc = (struct _pipe_desc *)sys_dlist_get(&src_list);
 	while (src_desc != NULL) {

@@ -674,7 +674,7 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,

 	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, get, pipe, timeout);

-	_current->base.swap_data = dest_desc;
+	arch_current_thread()->base.swap_data = dest_desc;

 	z_sched_wait(&pipe->lock, key, &pipe->wait_q.readers, timeout, NULL);

@@ -290,7 +290,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events,
 {
 	int events_registered;
 	k_spinlock_key_t key;
-	struct z_poller *poller = &_current->poller;
+	struct z_poller *poller = &arch_current_thread()->poller;

 	poller->is_polling = true;
 	poller->mode = MODE_POLL;

@@ -346,9 +346,9 @@ void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
 	int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);

 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout,
-		(ret != 0) ? NULL : _current->base.swap_data);
+		(ret != 0) ? NULL : arch_current_thread()->base.swap_data);

-	return (ret != 0) ? NULL : _current->base.swap_data;
+	return (ret != 0) ? NULL : arch_current_thread()->base.swap_data;
 }

 bool k_queue_remove(struct k_queue *queue, void *data)

142  kernel/sched.c
@@ -31,7 +31,7 @@ extern struct k_thread *pending_current;
 struct k_spinlock _sched_spinlock;

 /* Storage to "complete" the context switch from an invalid/incomplete thread
- * context (ex: exiting an ISR that aborted _current)
+ * context (ex: exiting an ISR that aborted arch_current_thread())
 */
 __incoherent struct k_thread _thread_dummy;

@@ -135,12 +135,12 @@ static ALWAYS_INLINE struct k_thread *runq_best(void)
 	return _priq_run_best(curr_cpu_runq());
 }

-/* _current is never in the run queue until context switch on
+/* arch_current_thread() is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
 static inline bool should_queue_thread(struct k_thread *thread)
 {
-	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
+	return !IS_ENABLED(CONFIG_SMP) || (thread != arch_current_thread());
 }

 static ALWAYS_INLINE void queue_thread(struct k_thread *thread)

@@ -150,7 +150,7 @@ static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
 		runq_add(thread);
 	}
 #ifdef CONFIG_SMP
-	if (thread == _current) {
+	if (thread == arch_current_thread()) {
 		/* add current to end of queue means "yield" */
 		_current_cpu->swap_ok = true;
 	}

@@ -202,8 +202,8 @@ static inline void clear_halting(struct k_thread *thread)
 static ALWAYS_INLINE struct k_thread *next_up(void)
 {
 #ifdef CONFIG_SMP
-	if (is_halting(_current)) {
-		halt_thread(_current, is_aborting(_current) ?
+	if (is_halting(arch_current_thread())) {
+		halt_thread(arch_current_thread(), is_aborting(arch_current_thread()) ?
 			    _THREAD_DEAD : _THREAD_SUSPENDED);
 	}
 #endif /* CONFIG_SMP */
@@ -242,42 +242,42 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 #else
 	/* Under SMP, the "cache" mechanism for selecting the next
 	 * thread doesn't work, so we have more work to do to test
-	 * _current against the best choice from the queue. Here, the
+	 * arch_current_thread() against the best choice from the queue. Here, the
 	 * thread selected above represents "the best thread that is
 	 * not current".
 	 *
-	 * Subtle note on "queued": in SMP mode, _current does not
+	 * Subtle note on "queued": in SMP mode, arch_current_thread() does not
 	 * live in the queue, so this isn't exactly the same thing as
-	 * "ready", it means "is _current already added back to the
+	 * "ready", it means "is arch_current_thread() already added back to the
 	 * queue such that we don't want to re-add it".
 	 */
-	bool queued = z_is_thread_queued(_current);
-	bool active = !z_is_thread_prevented_from_running(_current);
+	bool queued = z_is_thread_queued(arch_current_thread());
+	bool active = !z_is_thread_prevented_from_running(arch_current_thread());

 	if (thread == NULL) {
 		thread = _current_cpu->idle_thread;
 	}

 	if (active) {
-		int32_t cmp = z_sched_prio_cmp(_current, thread);
+		int32_t cmp = z_sched_prio_cmp(arch_current_thread(), thread);

 		/* Ties only switch if state says we yielded */
 		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
-			thread = _current;
+			thread = arch_current_thread();
 		}

 		if (!should_preempt(thread, _current_cpu->swap_ok)) {
-			thread = _current;
+			thread = arch_current_thread();
 		}
 	}

-	/* Put _current back into the queue */
-	if ((thread != _current) && active &&
-	    !z_is_idle_thread_object(_current) && !queued) {
-		queue_thread(_current);
+	/* Put arch_current_thread() back into the queue */
+	if ((thread != arch_current_thread()) && active &&
+	    !z_is_idle_thread_object(arch_current_thread()) && !queued) {
+		queue_thread(arch_current_thread());
 	}

-	/* Take the new _current out of the queue */
+	/* Take the new arch_current_thread() out of the queue */
 	if (z_is_thread_queued(thread)) {
 		dequeue_thread(thread);
 	}
@@ -293,7 +293,7 @@ void move_thread_to_end_of_prio_q(struct k_thread *thread)
 		dequeue_thread(thread);
 	}
 	queue_thread(thread);
-	update_cache(thread == _current);
+	update_cache(thread == arch_current_thread());
 }

 /* Track cooperative threads preempted by metairqs so we can return to

@@ -304,10 +304,10 @@ static void update_metairq_preempt(struct k_thread *thread)
 {
 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
-	if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
-	    !thread_is_preemptible(_current)) {
+	if (thread_is_metairq(thread) && !thread_is_metairq(arch_current_thread()) &&
+	    !thread_is_preemptible(arch_current_thread())) {
 		/* Record new preemption */
-		_current_cpu->metairq_preempted = _current;
+		_current_cpu->metairq_preempted = arch_current_thread();
 	} else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) {
 		/* Returning from existing preemption */
 		_current_cpu->metairq_preempted = NULL;

@@ -327,14 +327,14 @@ static ALWAYS_INLINE void update_cache(int preempt_ok)

 	if (should_preempt(thread, preempt_ok)) {
 #ifdef CONFIG_TIMESLICING
-		if (thread != _current) {
+		if (thread != arch_current_thread()) {
 			z_reset_time_slice(thread);
 		}
 #endif /* CONFIG_TIMESLICING */
 		update_metairq_preempt(thread);
 		_kernel.ready_q.cache = thread;
 	} else {
-		_kernel.ready_q.cache = _current;
+		_kernel.ready_q.cache = arch_current_thread();
 	}

 #else
@@ -427,9 +427,9 @@ void z_sched_start(struct k_thread *thread)
 */
 static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
 {
-	if (is_halting(_current)) {
-		halt_thread(_current,
-			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
+	if (is_halting(arch_current_thread())) {
+		halt_thread(arch_current_thread(),
+			    is_aborting(arch_current_thread()) ? _THREAD_DEAD : _THREAD_SUSPENDED);
 	}
 	k_spin_unlock(&_sched_spinlock, key);
 	while (is_halting(thread)) {

@@ -443,7 +443,7 @@ static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
 /* Shared handler for k_thread_{suspend,abort}(). Called with the
 * scheduler lock held and the key passed (which it may
 * release/reacquire!) which will be released before a possible return
-* (aborting _current will not return, obviously), which may be after
+* (aborting arch_current_thread() will not return, obviously), which may be after
 * a context switch.
 */
 static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,

@@ -476,14 +476,14 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
 		if (arch_is_in_isr()) {
 			thread_halt_spin(thread, key);
 		} else {
-			add_to_waitq_locked(_current, wq);
+			add_to_waitq_locked(arch_current_thread(), wq);
 			z_swap(&_sched_spinlock, key);
 		}
 	} else {
 		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
-		if ((thread == _current) && !arch_is_in_isr()) {
+		if ((thread == arch_current_thread()) && !arch_is_in_isr()) {
 			z_swap(&_sched_spinlock, key);
-			__ASSERT(!terminate, "aborted _current back from dead");
+			__ASSERT(!terminate, "aborted arch_current_thread() back from dead");
 		} else {
 			k_spin_unlock(&_sched_spinlock, key);
 		}
@@ -559,7 +559,7 @@ static void unready_thread(struct k_thread *thread)
 	if (z_is_thread_queued(thread)) {
 		dequeue_thread(thread);
 	}
-	update_cache(thread == _current);
+	update_cache(thread == arch_current_thread());
 }

 /* _sched_spinlock must be held */

@@ -596,7 +596,7 @@ static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
 		   k_timeout_t timeout)
 {
-	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
+	__ASSERT_NO_MSG(thread == arch_current_thread() || is_thread_dummy(thread));
 	K_SPINLOCK(&_sched_spinlock) {
 		pend_locked(thread, wait_q, timeout);
 	}

@@ -657,7 +657,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 		_wait_q_t *wait_q, k_timeout_t timeout)
 {
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
-	pending_current = _current;
+	pending_current = arch_current_thread();
 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
 	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);

@@ -670,7 +670,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 	 * held.
 	 */
 	(void) k_spin_lock(&_sched_spinlock);
-	pend_locked(_current, wait_q, timeout);
+	pend_locked(arch_current_thread(), wait_q, timeout);
 	k_spin_release(lock);
 	return z_swap(&_sched_spinlock, key);
 }

@@ -768,7 +768,7 @@ static inline bool need_swap(void)

 	/* Check if the next ready thread is the same as the current thread */
 	new_thread = _kernel.ready_q.cache;
-	return new_thread != _current;
+	return new_thread != arch_current_thread();
 #endif /* CONFIG_SMP */
 }
@@ -804,15 +804,15 @@ void k_sched_lock(void)
 void k_sched_unlock(void)
 {
 	K_SPINLOCK(&_sched_spinlock) {
-		__ASSERT(_current->base.sched_locked != 0U, "");
+		__ASSERT(arch_current_thread()->base.sched_locked != 0U, "");
 		__ASSERT(!arch_is_in_isr(), "");

-		++_current->base.sched_locked;
+		++arch_current_thread()->base.sched_locked;
 		update_cache(0);
 	}

 	LOG_DBG("scheduler unlocked (%p:%d)",
-		_current, _current->base.sched_locked);
+		arch_current_thread(), arch_current_thread()->base.sched_locked);

 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);

@@ -824,10 +824,10 @@ struct k_thread *z_swap_next_thread(void)
 #ifdef CONFIG_SMP
 	struct k_thread *ret = next_up();

-	if (ret == _current) {
+	if (ret == arch_current_thread()) {
 		/* When not swapping, have to signal IPIs here. In
 		 * the context switch case it must happen later, after
-		 * _current gets requeued.
+		 * arch_current_thread() gets requeued.
 		 */
 		signal_pending_ipi();
 	}
@@ -868,7 +868,7 @@ static inline void set_current(struct k_thread *new_thread)
 * function.
 *
 * @warning
-* The _current value may have changed after this call and not refer
+* The arch_current_thread() value may have changed after this call and not refer
 * to the interrupted thread anymore. It might be necessary to make a local
 * copy before calling this function.
 *

@@ -884,7 +884,7 @@ void *z_get_next_switch_handle(void *interrupted)
 	void *ret = NULL;

 	K_SPINLOCK(&_sched_spinlock) {
-		struct k_thread *old_thread = _current, *new_thread;
+		struct k_thread *old_thread = arch_current_thread(), *new_thread;

 		if (IS_ENABLED(CONFIG_SMP)) {
 			old_thread->switch_handle = NULL;

@@ -910,7 +910,7 @@ void *z_get_next_switch_handle(void *interrupted)
 #endif /* CONFIG_TIMESLICING */

 #ifdef CONFIG_SPIN_VALIDATE
-		/* Changed _current! Update the spinlock
+		/* Changed arch_current_thread()! Update the spinlock
 		 * bookkeeping so the validation doesn't get
 		 * confused when the "wrong" thread tries to
 		 * release the lock.

@@ -945,9 +945,9 @@ void *z_get_next_switch_handle(void *interrupted)
 	return ret;
 #else
 	z_sched_usage_switch(_kernel.ready_q.cache);
-	_current->switch_handle = interrupted;
+	arch_current_thread()->switch_handle = interrupted;
 	set_current(_kernel.ready_q.cache);
-	return _current->switch_handle;
+	return arch_current_thread()->switch_handle;
 #endif /* CONFIG_SMP */
 }
 #endif /* CONFIG_USE_SWITCH */
@@ -993,7 +993,7 @@ void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
 	bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);

 	if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
-			     (_current->base.sched_locked == 0U))) {
+			     (arch_current_thread()->base.sched_locked == 0U))) {
 		z_reschedule_unlocked();
 	}
 }

@@ -1059,7 +1059,7 @@ static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
 bool k_can_yield(void)
 {
 	return !(k_is_pre_kernel() || k_is_in_isr() ||
-		 z_is_idle_thread_object(_current));
+		 z_is_idle_thread_object(arch_current_thread()));
 }

 void z_impl_k_yield(void)

@@ -1071,10 +1071,10 @@ void z_impl_k_yield(void)
 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

 	if (!IS_ENABLED(CONFIG_SMP) ||
-	    z_is_thread_queued(_current)) {
-		dequeue_thread(_current);
+	    z_is_thread_queued(arch_current_thread())) {
+		dequeue_thread(arch_current_thread());
 	}
-	queue_thread(_current);
+	queue_thread(arch_current_thread());
 	update_cache(1);
 	z_swap(&_sched_spinlock, key);
 }
@@ -1093,7 +1093,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks)

__ASSERT(!arch_is_in_isr(), "");

-	LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);
+	LOG_DBG("thread %p for %lu ticks", arch_current_thread(), (unsigned long)ticks);

/* wait of 0 ms is treated as a 'yield' */
if (ticks == 0) {

@@ -1111,15 +1111,15 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
-	pending_current = _current;
+	pending_current = arch_current_thread();
#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
-	unready_thread(_current);
+	unready_thread(arch_current_thread());
-	z_add_thread_timeout(_current, timeout);
+	z_add_thread_timeout(arch_current_thread(), timeout);
-	z_mark_thread_as_suspended(_current);
+	z_mark_thread_as_suspended(arch_current_thread());

(void)z_swap(&_sched_spinlock, key);

-	__ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");
+	__ASSERT(!z_is_thread_state_set(arch_current_thread(), _THREAD_SUSPENDED), "");

ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
if (ticks > 0) {

@@ -1140,7 +1140,7 @@ int32_t z_impl_k_sleep(k_timeout_t timeout)
/* in case of K_FOREVER, we suspend */
if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {

-	k_thread_suspend(_current);
+	k_thread_suspend(arch_current_thread());
SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);

return (int32_t) K_TICKS_FOREVER;
@@ -1285,13 +1285,13 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
(void)z_abort_thread_timeout(thread);
unpend_all(&thread->join_queue);

-	/* Edge case: aborting _current from within an
+	/* Edge case: aborting arch_current_thread() from within an
 * ISR that preempted it requires clearing the
-	 * _current pointer so the upcoming context
+	 * arch_current_thread() pointer so the upcoming context
 * switch doesn't clobber the now-freed
 * memory
 */
-	if (thread == _current && arch_is_in_isr()) {
+	if (thread == arch_current_thread() && arch_is_in_isr()) {
dummify = true;
}
}

@@ -1334,10 +1334,10 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
k_thread_abort_cleanup(thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */

-	/* Do this "set _current to dummy" step last so that
+	/* Do this "set arch_current_thread() to dummy" step last so that
-	 * subsystems above can rely on _current being
+	 * subsystems above can rely on arch_current_thread() being
 * unchanged. Disabled for posix as that arch
-	 * continues to use the _current pointer in its swap
+	 * continues to use the arch_current_thread() pointer in its swap
 * code. Note that we must leave a non-null switch
 * handle for any threads spinning in join() (this can
 * never be used, as our thread is flagged dead, but

@@ -1345,7 +1345,7 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
 */
if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
#ifdef CONFIG_USE_SWITCH
-	_current->switch_handle = _current;
+	arch_current_thread()->switch_handle = arch_current_thread();
#endif
z_dummy_thread_init(&_thread_dummy);

@@ -1403,13 +1403,13 @@ int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
ret = 0;
} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
ret = -EBUSY;
-	} else if ((thread == _current) ||
+	} else if ((thread == arch_current_thread()) ||
-		   (thread->base.pended_on == &_current->join_queue)) {
+		   (thread->base.pended_on == &arch_current_thread()->join_queue)) {
ret = -EDEADLK;
} else {
__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
-	add_to_waitq_locked(_current, &thread->join_queue);
+	add_to_waitq_locked(arch_current_thread(), &thread->join_queue);
-	add_thread_timeout(_current, timeout);
+	add_thread_timeout(arch_current_thread(), timeout);

SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
ret = z_swap(&_sched_spinlock, key);

@@ -1508,7 +1508,7 @@ int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
int ret = z_pend_curr(lock, key, wait_q, timeout);

if (data != NULL) {
-	*data = _current->base.swap_data;
+	*data = arch_current_thread()->base.swap_data;
}
return ret;
}

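Aside (illustrative sketch, not part of this commit): the -EDEADLK branch above is what a caller sees when it tries to join itself, or a thread that is already joining it. From the application side, using only the public API:

#include <errno.h>
#include <zephyr/kernel.h>

int wait_for_worker(struct k_thread *worker)
{
	/* Joining yourself would deadlock; the kernel reports -EDEADLK */
	if (worker == k_current_get()) {
		return -EDEADLK;
	}

	/* Block until the worker exits, or give up after 100 ms */
	return k_thread_join(worker, K_MSEC(100));
}
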
10 kernel/smp.c
@@ -58,23 +58,23 @@ unsigned int z_smp_global_lock(void)
{
unsigned int key = arch_irq_lock();

-	if (!_current->base.global_lock_count) {
+	if (!arch_current_thread()->base.global_lock_count) {
while (!atomic_cas(&global_lock, 0, 1)) {
arch_spin_relax();
}
}

-	_current->base.global_lock_count++;
+	arch_current_thread()->base.global_lock_count++;

return key;
}

void z_smp_global_unlock(unsigned int key)
{
-	if (_current->base.global_lock_count != 0U) {
+	if (arch_current_thread()->base.global_lock_count != 0U) {
-		_current->base.global_lock_count--;
+		arch_current_thread()->base.global_lock_count--;

-		if (!_current->base.global_lock_count) {
+		if (!arch_current_thread()->base.global_lock_count) {
(void)atomic_clear(&global_lock);
}
}

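Aside (illustrative sketch, not part of this commit, and a simplification of the SMP behavior): the per-thread global_lock_count above is what lets the legacy irq_lock() API nest. A minimal example of the nesting it supports:

#include <zephyr/kernel.h>

void nested_lock_example(void)
{
	unsigned int outer = irq_lock();   /* first lock for this thread */
	unsigned int inner = irq_lock();   /* nested lock, just bumps the count */

	/* ... short critical section ... */

	irq_unlock(inner);                 /* count drops, outer lock still in effect */
	irq_unlock(outer);                 /* back to the original interrupt state */
}
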
@@ -24,11 +24,11 @@ bool z_spin_unlock_valid(struct k_spinlock *l)

l->thread_cpu = 0;

-	if (arch_is_in_isr() && _current->base.thread_state & _THREAD_DUMMY) {
+	if (arch_is_in_isr() && arch_current_thread()->base.thread_state & _THREAD_DUMMY) {
-		/* Edge case where an ISR aborted _current */
+		/* Edge case where an ISR aborted arch_current_thread() */
return true;
}
-	if (tcpu != (_current_cpu->id | (uintptr_t)_current)) {
+	if (tcpu != (_current_cpu->id | (uintptr_t)arch_current_thread())) {
return false;
}
return true;

@@ -36,7 +36,7 @@ bool z_spin_unlock_valid(struct k_spinlock *l)

void z_spin_lock_set_owner(struct k_spinlock *l)
{
-	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
+	l->thread_cpu = _current_cpu->id | (uintptr_t)arch_current_thread();
}

#ifdef CONFIG_KERNEL_COHERENCE

@@ -182,7 +182,7 @@ int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data,
return -EAGAIN;
}

-	*data = (stack_data_t)_current->base.swap_data;
+	*data = (stack_data_t)arch_current_thread()->base.swap_data;

SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, 0);

@@ -82,7 +82,7 @@ EXPORT_SYMBOL(k_is_in_isr);
#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
-	_current->custom_data = value;
+	arch_current_thread()->custom_data = value;
}

#ifdef CONFIG_USERSPACE

@@ -95,7 +95,7 @@ static inline void z_vrfy_k_thread_custom_data_set(void *data)

void *z_impl_k_thread_custom_data_get(void)
{
-	return _current->custom_data;
+	return arch_current_thread()->custom_data;
}

#ifdef CONFIG_USERSPACE

@@ -110,7 +110,7 @@ static inline void *z_vrfy_k_thread_custom_data_get(void)

int z_impl_k_is_preempt_thread(void)
{
-	return !arch_is_in_isr() && thread_is_preemptible(_current);
+	return !arch_is_in_isr() && thread_is_preemptible(arch_current_thread());
}

#ifdef CONFIG_USERSPACE

@@ -139,7 +139,7 @@ int z_impl_k_thread_name_set(k_tid_t thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
if (thread == NULL) {
-	thread = _current;
+	thread = arch_current_thread();
}

strncpy(thread->name, str, CONFIG_THREAD_MAX_NAME_LEN - 1);

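Aside (illustrative sketch, not part of this commit): the custom-data slot read and written above is per-thread storage behind a small public API, available when CONFIG_THREAD_CUSTOM_DATA=y:

#include <zephyr/kernel.h>

struct request_ctx {
	uint32_t id;
};

void tag_current_thread(struct request_ctx *ctx)
{
	k_thread_custom_data_set(ctx);      /* stored in the current thread */
}

struct request_ctx *current_request(void)
{
	return k_thread_custom_data_get();  /* NULL until something was set */
}
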
@@ -331,11 +331,11 @@ void z_check_stack_sentinel(void)
{
uint32_t *stack;

-	if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
+	if ((arch_current_thread()->base.thread_state & _THREAD_DUMMY) != 0) {
return;
}

-	stack = (uint32_t *)_current->stack_info.start;
+	stack = (uint32_t *)arch_current_thread()->stack_info.start;
if (*stack != STACK_SENTINEL) {
/* Restore it so further checks don't trigger this same error */
*stack = STACK_SENTINEL;

@@ -627,8 +627,8 @@ char *z_setup_new_thread(struct k_thread *new_thread,
}
#endif /* CONFIG_SCHED_CPU_MASK */
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
-	/* _current may be null if the dummy thread is not used */
+	/* arch_current_thread() may be null if the dummy thread is not used */
-	if (!_current) {
+	if (!arch_current_thread()) {
new_thread->resource_pool = NULL;
return stack_ptr;
}

@@ -637,13 +637,13 @@ char *z_setup_new_thread(struct k_thread *new_thread,
z_mem_domain_init_thread(new_thread);

if ((options & K_INHERIT_PERMS) != 0U) {
-	k_thread_perms_inherit(_current, new_thread);
+	k_thread_perms_inherit(arch_current_thread(), new_thread);
}
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_SCHED_DEADLINE
new_thread->base.prio_deadline = 0;
#endif /* CONFIG_SCHED_DEADLINE */
-	new_thread->resource_pool = _current->resource_pool;
+	new_thread->resource_pool = arch_current_thread()->resource_pool;

#ifdef CONFIG_SMP
z_waitq_init(&new_thread->halt_queue);

@@ -738,7 +738,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
 */
K_OOPS(K_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
K_OOPS(K_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
-	_current->base.prio)));
+	arch_current_thread()->base.prio)));

z_setup_new_thread(new_thread, stack, stack_size,
entry, p1, p2, p3, prio, options, NULL);

@@ -783,25 +783,25 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
{
SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);

-	_current->base.user_options |= K_USER;
+	arch_current_thread()->base.user_options |= K_USER;
-	z_thread_essential_clear(_current);
+	z_thread_essential_clear(arch_current_thread());
#ifdef CONFIG_THREAD_MONITOR
-	_current->entry.pEntry = entry;
+	arch_current_thread()->entry.pEntry = entry;
-	_current->entry.parameter1 = p1;
+	arch_current_thread()->entry.parameter1 = p1;
-	_current->entry.parameter2 = p2;
+	arch_current_thread()->entry.parameter2 = p2;
-	_current->entry.parameter3 = p3;
+	arch_current_thread()->entry.parameter3 = p3;
#endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_USERSPACE
-	__ASSERT(z_stack_is_user_capable(_current->stack_obj),
+	__ASSERT(z_stack_is_user_capable(arch_current_thread()->stack_obj),
"dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
-	memset(_current->userspace_local_data, 0,
+	memset(arch_current_thread()->userspace_local_data, 0,
sizeof(struct _thread_userspace_local_data));
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
-	arch_tls_stack_setup(_current,
+	arch_tls_stack_setup(arch_current_thread(),
-		(char *)(_current->stack_info.start +
+		(char *)(arch_current_thread()->stack_info.start +
-			 _current->stack_info.size));
+			 arch_current_thread()->stack_info.size));
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
arch_user_mode_enter(entry, p1, p2, p3);
#else

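Aside (illustrative sketch, not part of this commit): k_thread_user_mode_enter() is the one-way door this function implements. A typical caller does its privileged setup first, then drops privileges for good:

#include <zephyr/kernel.h>

static void user_entry(void *p1, void *p2, void *p3)
{
	/* From here on the thread runs unprivileged (K_USER) */
}

static void supervisor_entry(void *p1, void *p2, void *p3)
{
	/* privileged setup: grant kernel objects, configure memory domains, ... */

	/* never returns */
	k_thread_user_mode_enter(user_entry, p1, p2, p3);
}
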
@@ -929,7 +929,7 @@ static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
void z_thread_mark_switched_in(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
-	z_sched_usage_start(_current);
+	z_sched_usage_start(arch_current_thread());
#endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */

#ifdef CONFIG_TRACING

@@ -946,8 +946,8 @@ void z_thread_mark_switched_out(void)
#ifdef CONFIG_TRACING
#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Dummy thread won't have TLS set up to run arbitrary code */
-	if (!_current ||
+	if (!arch_current_thread() ||
-	    (_current->base.thread_state & _THREAD_DUMMY) != 0)
+	    (arch_current_thread()->base.thread_state & _THREAD_DUMMY) != 0)
return;
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
SYS_PORT_TRACING_FUNC(k_thread, switched_out);

@@ -1097,7 +1097,7 @@ void k_thread_abort_cleanup(struct k_thread *thread)
thread_to_cleanup = NULL;
}

-	if (thread == _current) {
+	if (thread == arch_current_thread()) {
/* Need to defer for current running thread as the cleanup
 * might result in exception. Actual cleanup will be done
 * at the next time k_thread_abort() is called, or at thread

@@ -15,7 +15,7 @@ static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];

#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
- * to try to timeslice away _current after it has already pended
+ * to try to timeslice away arch_current_thread() after it has already pended
 * itself but before the corresponding context switch. Treat that as
 * a noop condition in z_time_slice().
 */

@@ -82,7 +82,7 @@ void k_sched_time_slice_set(int32_t slice, int prio)
K_SPINLOCK(&_sched_spinlock) {
slice_ticks = k_ms_to_ticks_ceil32(slice);
slice_max_prio = prio;
-	z_reset_time_slice(_current);
+	z_reset_time_slice(arch_current_thread());
}
}

@@ -103,7 +103,7 @@ void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks
void z_time_slice(void)
{
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
-	struct k_thread *curr = _current;
+	struct k_thread *curr = arch_current_thread();

#ifdef CONFIG_SWAP_NONATOMIC
if (pending_current == curr) {

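Aside (illustrative sketch, not part of this commit): slice_ticks and slice_max_prio above are driven by the public time-slicing API, typically a single call at startup:

#include <zephyr/kernel.h>

void enable_round_robin(void)
{
	/* 10 ms slices for preemptible threads at priority 0 and lower
	 * (numerically >= 0); cooperative threads are never sliced.
	 */
	k_sched_time_slice_set(10, 0);
}
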
@@ -437,7 +437,7 @@ static void *z_object_alloc(enum k_objects otype, size_t size)
/* The allocating thread implicitly gets permission on kernel objects
 * that it allocates
 */
-	k_thread_perms_set(zo, _current);
+	k_thread_perms_set(zo, arch_current_thread());

/* Activates reference counting logic for automatic disposal when
 * all permissions have been revoked

@@ -654,7 +654,7 @@ static int thread_perms_test(struct k_object *ko)
return 1;
}

-	index = thread_index_get(_current);
+	index = thread_index_get(arch_current_thread());
if (index != -1) {
return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
}

@@ -663,9 +663,9 @@ static int thread_perms_test(struct k_object *ko)

static void dump_permission_error(struct k_object *ko)
{
-	int index = thread_index_get(_current);
+	int index = thread_index_get(arch_current_thread());
LOG_ERR("thread %p (%d) does not have permission on %s %p",
-	_current, index,
+	arch_current_thread(), index,
otype_to_str(ko->type), ko->name);
LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}

@@ -718,7 +718,7 @@ void k_object_access_revoke(const void *object, struct k_thread *thread)

void z_impl_k_object_release(const void *object)
{
-	k_object_access_revoke(object, _current);
+	k_object_access_revoke(object, arch_current_thread());
}

void k_object_access_all_grant(const void *object)

@@ -794,7 +794,7 @@ void k_object_recycle(const void *obj)

if (ko != NULL) {
(void)memset(ko->perms, 0, sizeof(ko->perms));
-	k_thread_perms_set(ko, _current);
+	k_thread_perms_set(ko, arch_current_thread());
ko->flags |= K_OBJ_FLAG_INITIALIZED;
}
}

@@ -72,7 +72,7 @@ static inline void z_vrfy_k_object_release(const void *object)

ko = validate_any_object(object);
K_OOPS(K_SYSCALL_VERIFY_MSG(ko != NULL, "object %p access denied", object));
-	k_thread_perms_clear(ko, _current);
+	k_thread_perms_clear(ko, arch_current_thread());
}
#include <zephyr/syscalls/k_object_release_mrsh.c>

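Aside (illustrative sketch, not part of this commit): the per-object permission bitmaps manipulated above back the public grant/revoke API used when handing kernel objects to user-mode threads:

#include <zephyr/kernel.h>

K_SEM_DEFINE(shared_sem, 0, 1);

void grant_to_worker(struct k_thread *worker)
{
	/* Let an unprivileged thread use the semaphore... */
	k_object_access_grant(&shared_sem, worker);

	/* ...and take the permission away again when it is done */
	k_object_access_revoke(&shared_sem, worker);
}
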
@@ -262,7 +262,7 @@ static inline int queue_submit_locked(struct k_work_q *queue,
}

int ret;
-	bool chained = (_current == &queue->thread) && !k_is_in_isr();
+	bool chained = (arch_current_thread() == &queue->thread) && !k_is_in_isr();
bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);

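Aside (illustrative sketch, not part of this commit): the chained flag above is set when a handler running on the queue's own thread resubmits work; from the application side that is simply a submit call made inside a handler, which the queue treats specially (for example while draining):

#include <zephyr/kernel.h>

static struct k_work_q my_queue;
static K_THREAD_STACK_DEFINE(my_queue_stack, 1024);
static struct k_work trigger;
static struct k_work follow_up;

static void second_stage(struct k_work *work)
{
	/* runs after first_stage, on the same queue */
}

static void first_stage(struct k_work *work)
{
	/* Submitting from the queue's own thread is the "chained" case */
	k_work_submit_to_queue(&my_queue, &follow_up);
}

void start_pipeline(void)
{
	k_work_queue_start(&my_queue, my_queue_stack,
			   K_THREAD_STACK_SIZEOF(my_queue_stack), 5, NULL);
	k_work_init(&trigger, first_stage);
	k_work_init(&follow_up, second_stage);
	k_work_submit_to_queue(&my_queue, &trigger);
}
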
@@ -23,7 +23,7 @@ void __stdout_hook_install(int (*hook)(int))

volatile int *__aeabi_errno_addr(void)
{
-	return &_current->errno_var;
+	return &arch_current_thread()->errno_var;
}

int fputc(int c, FILE *f)

@@ -87,10 +87,10 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2)
= CONTAINER_OF(r, struct k_p4wq_work, rbnode);

rb_remove(&queue->queue, r);
-	w->thread = _current;
+	w->thread = arch_current_thread();
sys_dlist_append(&queue->active, &w->dlnode);
-	set_prio(_current, w);
+	set_prio(arch_current_thread(), w);
-	thread_clear_requeued(_current);
+	thread_clear_requeued(arch_current_thread());

k_spin_unlock(&queue->lock, k);

@@ -101,7 +101,7 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2)
/* Remove from the active list only if it
 * wasn't resubmitted already
 */
-	if (!thread_was_requeued(_current)) {
+	if (!thread_was_requeued(arch_current_thread())) {
sys_dlist_remove(&w->dlnode);
w->thread = NULL;
k_sem_give(&w->done_sem);

@@ -228,9 +228,9 @@ void k_p4wq_submit(struct k_p4wq *queue, struct k_p4wq_work *item)
item->deadline += k_cycle_get_32();

/* Resubmission from within handler? Remove from active list */
-	if (item->thread == _current) {
+	if (item->thread == arch_current_thread()) {
sys_dlist_remove(&item->dlnode);
-	thread_set_requeued(_current);
+	thread_set_requeued(arch_current_thread());
item->thread = NULL;
} else {
k_sem_init(&item->done_sem, 0, 1);

@@ -345,7 +345,7 @@ def marshall_defs(func_name, func_type, args):
else:
mrsh += "\t\t" + "uintptr_t arg3, uintptr_t arg4, void *more, void *ssf)\n"
mrsh += "{\n"
-	mrsh += "\t" + "_current->syscall_frame = ssf;\n"
+	mrsh += "\t" + "arch_current_thread()->syscall_frame = ssf;\n"

for unused_arg in range(nmrsh, 6):
mrsh += "\t(void) arg%d;\t/* unused */\n" % unused_arg

@@ -371,7 +371,7 @@ def marshall_defs(func_name, func_type, args):

if func_type == "void":
mrsh += "\t" + "%s;\n" % vrfy_call
-	mrsh += "\t" + "_current->syscall_frame = NULL;\n"
+	mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n"
mrsh += "\t" + "return 0;\n"
else:
mrsh += "\t" + "%s ret = %s;\n" % (func_type, vrfy_call)

@@ -380,10 +380,10 @@ def marshall_defs(func_name, func_type, args):
ptr = "((uint64_t *)%s)" % mrsh_rval(nmrsh - 1, nmrsh)
mrsh += "\t" + "K_OOPS(K_SYSCALL_MEMORY_WRITE(%s, 8));\n" % ptr
mrsh += "\t" + "*%s = ret;\n" % ptr
-	mrsh += "\t" + "_current->syscall_frame = NULL;\n"
+	mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n"
mrsh += "\t" + "return 0;\n"
else:
-	mrsh += "\t" + "_current->syscall_frame = NULL;\n"
+	mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n"
mrsh += "\t" + "return (uintptr_t) ret;\n"

mrsh += "}\n"

@@ -117,7 +117,7 @@ void IRAM_ATTR __esp_platform_start(void)
: "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));

/* Initialize the architecture CPU pointer. Some of the
- * initialization code wants a valid _current before
+ * initialization code wants a valid arch_current_thread() before
 * z_prep_c() is invoked.
 */
__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0]));

@@ -66,7 +66,7 @@ void __app_cpu_start(void)
: "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));

/* Initialize the architecture CPU pointer. Some of the
- * initialization code wants a valid _current before
+ * initialization code wants a valid arch_current_thread() before
 * z_prep_c() is invoked.
 */
__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0]));

@@ -62,7 +62,7 @@ void __attribute__((section(".iram1"))) __esp_platform_start(void)
__asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));

/* Initialize the architecture CPU pointer. Some of the
- * initialization code wants a valid _current before
+ * initialization code wants a valid arch_current_thread() before
 * arch_kernel_init() is invoked.
 */
__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0]));

@@ -97,7 +97,7 @@ void IRAM_ATTR __esp_platform_start(void)
__asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));

/* Initialize the architecture CPU pointer. Some of the
- * initialization code wants a valid _current before
+ * initialization code wants a valid arch_current_thread() before
 * arch_kernel_init() is invoked.
 */
__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0]));

@@ -65,7 +65,7 @@ void IRAM_ATTR __appcpu_start(void)
__asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));

/* Initialize the architecture CPU pointer. Some of the
- * initialization code wants a valid _current before
+ * initialization code wants a valid arch_current_thread() before
 * arch_kernel_init() is invoked.
 */
__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[1]));

@@ -68,7 +68,7 @@ static inline void *get_sock_vtable(int sock,

if (ctx == NULL) {
NET_DBG("Invalid access on sock %d by thread %p (%s)", sock,
-	_current, k_thread_name_get(_current));
+	arch_current_thread(), k_thread_name_get(arch_current_thread()));
}

return ctx;

@@ -39,7 +39,7 @@ osStatus_t osKernelGetInfo(osVersion_t *version, char *id_buf, uint32_t id_size)
 */
int32_t osKernelLock(void)
{
-	int temp = _current->base.sched_locked;
+	int temp = arch_current_thread()->base.sched_locked;

if (k_is_in_isr()) {
return osErrorISR;

@@ -55,7 +55,7 @@ int32_t osKernelLock(void)
 */
int32_t osKernelUnlock(void)
{
-	int temp = _current->base.sched_locked;
+	int temp = arch_current_thread()->base.sched_locked;

if (k_is_in_isr()) {
return osErrorISR;

@@ -71,7 +71,7 @@ int32_t osKernelUnlock(void)
 */
int32_t osKernelRestoreLock(int32_t lock)
{
-	_current->base.sched_locked = lock;
+	arch_current_thread()->base.sched_locked = lock;

if (k_is_in_isr()) {
return osErrorISR;

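Aside (illustrative sketch, not part of this commit): these wrappers map the CMSIS-RTOS2 kernel-lock API onto the thread's sched_locked counter; typical application usage saves and restores the lock state around a critical region:

#include <cmsis_os2.h>

void protected_region(void)
{
	int32_t prev = osKernelLock();      /* returns the previous lock state */

	/* ... no thread switches happen in here ... */

	osKernelRestoreLock(prev);          /* put the scheduler back as it was */
}
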
@@ -76,10 +76,10 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)
 * function prologue or epilogue.
 */
buf[idx++] = (uintptr_t)esf->ra;
-	if (valid_stack((uintptr_t)new_fp, _current)) {
+	if (valid_stack((uintptr_t)new_fp, arch_current_thread())) {
fp = new_fp;
}
-	while (valid_stack((uintptr_t)fp, _current)) {
+	while (valid_stack((uintptr_t)fp, arch_current_thread())) {
if (idx >= size) {
return 0;
}

@@ -67,7 +67,7 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)
 */

buf[idx++] = (uintptr_t)isf->eip;
-	while (valid_stack((uintptr_t)fp, _current)) {
+	while (valid_stack((uintptr_t)fp, arch_current_thread())) {
if (idx >= size) {
return 0;
}

@@ -35,13 +35,13 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)

/*
 * In x86_64 (arch/x86/core/intel64/locore.S) %rip and %rbp
- * are always saved in _current->callee_saved before calling
+ * are always saved in arch_current_thread()->callee_saved before calling
 * handler function if interrupt is not nested
 *
 * %rip points the location where interrupt was occurred
 */
-	buf[idx++] = (uintptr_t)_current->callee_saved.rip;
+	buf[idx++] = (uintptr_t)arch_current_thread()->callee_saved.rip;
-	void **fp = (void **)_current->callee_saved.rbp;
+	void **fp = (void **)arch_current_thread()->callee_saved.rbp;

/*
 * %rbp is frame pointer.

@@ -53,7 +53,7 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)
 * %rbp (next) <- %rbp (curr)
 * ....
 */
-	while (valid_stack((uintptr_t)fp, _current)) {
+	while (valid_stack((uintptr_t)fp, arch_current_thread())) {
if (idx >= size) {
return 0;
}

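Aside (illustrative sketch, not part of this commit): all three perf back-ends above perform the same classic frame-pointer walk; the generic shape, with the frame layout assumption spelled out (this matches x86_64 built with frame pointers; other ABIs differ), is roughly:

#include <stddef.h>
#include <stdint.h>

/* Walk a chain of frame pointers within [stack_lo, stack_hi).
 * Assumed layout: each frame stores [saved fp][return address].
 */
static size_t walk_frames(uintptr_t fp, uintptr_t stack_lo, uintptr_t stack_hi,
			  uintptr_t *buf, size_t size)
{
	size_t idx = 0;

	while (fp >= stack_lo && fp + 2 * sizeof(uintptr_t) <= stack_hi &&
	       idx < size) {
		const uintptr_t *frame = (const uintptr_t *)fp;

		buf[idx++] = frame[1];   /* saved return address */
		fp = frame[0];           /* caller's frame pointer */
	}

	return idx;
}
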
@@ -30,7 +30,7 @@ static int cmd_kernel_thread_unwind(const struct shell *sh, size_t argc, char **
int err = 0;

if (argc == 1) {
-	thread = _current;
+	thread = arch_current_thread();
} else {
thread = UINT_TO_POINTER(shell_strtoull(argv[1], 16, &err));
if (err != 0) {

@@ -177,7 +177,7 @@ ZTEST(arm_interrupt, test_arm_esf_collection)
 * crashy thread we create below runs to completion before we get
 * to the end of this function
 */
-	k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY));
+	k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY));

TC_PRINT("Testing ESF Reporting\n");
k_thread_create(&esf_collection_thread, esf_collection_stack,

@@ -366,9 +366,9 @@ ZTEST(arm_interrupt, test_arm_interrupt)
uint32_t fp_extra_size =
(__get_CONTROL() & CONTROL_FPCA_Msk) ?
FPU_STACK_EXTRA_SIZE : 0;
-	__set_PSP(_current->stack_info.start + 0x10 + fp_extra_size);
+	__set_PSP(arch_current_thread()->stack_info.start + 0x10 + fp_extra_size);
#else
-	__set_PSP(_current->stack_info.start + 0x10);
+	__set_PSP(arch_current_thread()->stack_info.start + 0x10);
#endif

__enable_irq();

@@ -42,20 +42,20 @@ void z_impl_test_arm_user_syscall(void)
 * - PSPLIM register guards the privileged stack
 * - MSPLIM register still guards the interrupt stack
 */
-	zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0,
+	zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0,
"mode variable not set to PRIV mode in system call\n");

zassert_false(arch_is_user_context(),
"arch_is_user_context() indicates nPRIV\n");

zassert_true(
-	((__get_PSP() >= _current->arch.priv_stack_start) &&
+	((__get_PSP() >= arch_current_thread()->arch.priv_stack_start) &&
-	 (__get_PSP() < (_current->arch.priv_stack_start +
+	 (__get_PSP() < (arch_current_thread()->arch.priv_stack_start +
CONFIG_PRIVILEGED_STACK_SIZE))),
"Process SP outside thread privileged stack limits\n");

#if defined(CONFIG_BUILTIN_STACK_GUARD)
-	zassert_true(__get_PSPLIM() == _current->arch.priv_stack_start,
+	zassert_true(__get_PSPLIM() == arch_current_thread()->arch.priv_stack_start,
"PSPLIM not guarding the thread's privileged stack\n");
zassert_true(__get_MSPLIM() == (uint32_t)z_interrupt_stacks,
"MSPLIM not guarding the interrupt stack\n");

@@ -82,16 +82,16 @@ void arm_isr_handler(const void *args)
 * - MSPLIM register still guards the interrupt stack
 */

-	zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) != 0,
+	zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) != 0,
"mode variable not set to nPRIV mode for user thread\n");

zassert_false(arch_is_user_context(),
"arch_is_user_context() indicates nPRIV in ISR\n");

zassert_true(
-	((__get_PSP() >= _current->stack_info.start) &&
+	((__get_PSP() >= arch_current_thread()->stack_info.start) &&
-	 (__get_PSP() < (_current->stack_info.start +
+	 (__get_PSP() < (arch_current_thread()->stack_info.start +
-	  _current->stack_info.size))),
+	  arch_current_thread()->stack_info.size))),
"Process SP outside thread stack limits\n");

static int first_call = 1;

@@ -101,7 +101,7 @@ void arm_isr_handler(const void *args)

/* Trigger thread yield() manually */
(void)irq_lock();
-	z_move_thread_to_end_of_prio_q(_current);
+	z_move_thread_to_end_of_prio_q(arch_current_thread());
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
irq_unlock(0);

@@ -169,20 +169,20 @@ ZTEST(arm_thread_swap, test_arm_syscalls)
 * - PSPLIM register guards the default stack
 * - MSPLIM register guards the interrupt stack
 */
-	zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0,
+	zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0,
"mode variable not set to PRIV mode for supervisor thread\n");

zassert_false(arch_is_user_context(),
"arch_is_user_context() indicates nPRIV\n");

zassert_true(
-	((__get_PSP() >= _current->stack_info.start) &&
+	((__get_PSP() >= arch_current_thread()->stack_info.start) &&
-	 (__get_PSP() < (_current->stack_info.start +
+	 (__get_PSP() < (arch_current_thread()->stack_info.start +
-	  _current->stack_info.size))),
+	  arch_current_thread()->stack_info.size))),
"Process SP outside thread stack limits\n");

#if defined(CONFIG_BUILTIN_STACK_GUARD)
-	zassert_true(__get_PSPLIM() == _current->stack_info.start,
+	zassert_true(__get_PSPLIM() == arch_current_thread()->stack_info.start,
"PSPLIM not guarding the default stack\n");
zassert_true(__get_MSPLIM() == (uint32_t)z_interrupt_stacks,
"MSPLIM not guarding the interrupt stack\n");

@@ -278,16 +278,16 @@ static void alt_thread_entry(void *p1, void *p2, void *p3)
/* Verify that the _current_ (alt) thread is
 * initialized with EXC_RETURN.Ftype set
 */
-	zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
+	zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Alt thread FPCA flag not clear at initialization\n");
#if defined(CONFIG_MPU_STACK_GUARD)
/* Alt thread is created with K_FP_REGS set, so we
 * expect lazy stacking and long guard to be enabled.
 */
-	zassert_true((_current->arch.mode &
+	zassert_true((arch_current_thread()->arch.mode &
Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0,
"Alt thread MPU GUAR DFLOAT flag not set at initialization\n");
-	zassert_true((_current->base.user_options & K_FP_REGS) != 0,
+	zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0,
"Alt thread K_FP_REGS not set at initialization\n");
zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0,
"Lazy FP Stacking not set at initialization\n");

@@ -330,7 +330,7 @@ static void alt_thread_entry(void *p1, void *p2, void *p3)
p_ztest_thread->arch.swap_return_value = SWAP_RETVAL;
#endif

-	z_move_thread_to_end_of_prio_q(_current);
+	z_move_thread_to_end_of_prio_q(arch_current_thread());

/* Modify the callee-saved registers by zero-ing them.
 * The main test thread will, later, assert that they

@@ -448,20 +448,20 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
 */
load_callee_saved_regs(&ztest_thread_callee_saved_regs_init);

-	k_thread_priority_set(_current, K_PRIO_COOP(PRIORITY));
+	k_thread_priority_set(arch_current_thread(), K_PRIO_COOP(PRIORITY));

/* Export current thread's callee-saved registers pointer
 * and arch.basepri variable pointer, into global pointer
 * variables, so they can be easily accessible by other
 * (alternative) test thread.
 */
-	p_ztest_thread = _current;
+	p_ztest_thread = arch_current_thread();

/* Confirm initial conditions before starting the test. */
test_flag = switch_flag;
zassert_true(test_flag == false,
"Switch flag not initialized properly\n");
-	zassert_true(_current->arch.basepri == 0,
+	zassert_true(arch_current_thread()->arch.basepri == 0,
"Thread BASEPRI flag not clear at thread start\n");
/* Verify, also, that the interrupts are unlocked. */
#if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI)

@@ -481,16 +481,16 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
"Main test thread does not start in privilege mode\n");

/* Assert that the mode status variable indicates privilege mode */
-	zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0,
+	zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0,
"Thread nPRIV flag not clear for supervisor thread: 0x%0x\n",
-	_current->arch.mode);
+	arch_current_thread()->arch.mode);
#endif /* CONFIG_USERSPACE */

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* The main test thread is not (yet) actively using the FP registers */
-	zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
+	zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Thread Ftype flag not set at initialization 0x%0x\n",
-	_current->arch.mode);
+	arch_current_thread()->arch.mode);

/* Verify that the main test thread is initialized with FPCA cleared. */
zassert_true((__get_CONTROL() & CONTROL_FPCA_Msk) == 0,

@@ -503,7 +503,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
/* Clear the thread's floating-point callee-saved registers' container.
 * The container will, later, be populated by the swap mechanism.
 */
-	memset(&_current->arch.preempt_float, 0,
+	memset(&arch_current_thread()->arch.preempt_float, 0,
sizeof(struct _preempt_float));

/* Randomize the FP callee-saved registers at test initialization */

@@ -517,13 +517,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
/* The main test thread is using the FP registers, but the .mode
 * flag is not updated until the next context switch.
 */
-	zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
+	zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Thread Ftype flag not set at initialization\n");
#if defined(CONFIG_MPU_STACK_GUARD)
-	zassert_true((_current->arch.mode &
+	zassert_true((arch_current_thread()->arch.mode &
Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0,
"Thread MPU GUAR DFLOAT flag not clear at initialization\n");
-	zassert_true((_current->base.user_options & K_FP_REGS) == 0,
+	zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) == 0,
"Thread K_FP_REGS not clear at initialization\n");
zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) == 0,
"Lazy FP Stacking not clear at initialization\n");

@ -552,13 +552,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
|
||||||
* explicitly required by the test.
|
* explicitly required by the test.
|
||||||
*/
|
*/
|
||||||
(void)irq_lock();
|
(void)irq_lock();
|
||||||
z_move_thread_to_end_of_prio_q(_current);
|
z_move_thread_to_end_of_prio_q(arch_current_thread());
|
||||||
|
|
||||||
/* Clear the thread's callee-saved registers' container.
|
/* Clear the thread's callee-saved registers' container.
|
||||||
* The container will, later, be populated by the swap
|
* The container will, later, be populated by the swap
|
||||||
* mechanism.
|
* mechanism.
|
||||||
*/
|
*/
|
||||||
memset(&_current->callee_saved, 0, sizeof(_callee_saved_t));
|
memset(&arch_current_thread()->callee_saved, 0, sizeof(_callee_saved_t));
|
||||||
|
|
||||||
/* Verify context-switch has not occurred yet. */
|
/* Verify context-switch has not occurred yet. */
|
||||||
test_flag = switch_flag;
|
test_flag = switch_flag;
|
||||||
|
@ -672,7 +672,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
|
||||||
*/
|
*/
|
||||||
verify_callee_saved(
|
verify_callee_saved(
|
||||||
&ztest_thread_callee_saved_regs_container,
|
&ztest_thread_callee_saved_regs_container,
|
||||||
&_current->callee_saved);
|
&arch_current_thread()->callee_saved);
|
||||||
|
|
||||||
/* Verify context-switch did occur. */
|
/* Verify context-switch did occur. */
|
||||||
test_flag = switch_flag;
|
test_flag = switch_flag;
|
||||||
|
@ -688,7 +688,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
|
||||||
* the alternative thread modified it, since the thread
|
* the alternative thread modified it, since the thread
|
||||||
* is now switched back in.
|
* is now switched back in.
|
||||||
*/
|
*/
|
||||||
zassert_true(_current->arch.basepri == 0,
|
zassert_true(arch_current_thread()->arch.basepri == 0,
|
||||||
"arch.basepri value not in accordance with the update\n");
|
"arch.basepri value not in accordance with the update\n");
|
||||||
|
|
||||||
#if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI)
|
#if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI)
|
||||||
|
@ -709,12 +709,12 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
|
||||||
|
|
||||||
#if !defined(CONFIG_NO_OPTIMIZATIONS)
|
#if !defined(CONFIG_NO_OPTIMIZATIONS)
|
||||||
/* The thread is now swapped-back in. */
|
/* The thread is now swapped-back in. */
|
||||||
zassert_equal(_current->arch.swap_return_value, SWAP_RETVAL,
|
zassert_equal(arch_current_thread()->arch.swap_return_value, SWAP_RETVAL,
|
||||||
"Swap value not set as expected: 0x%x (0x%x)\n",
|
"Swap value not set as expected: 0x%x (0x%x)\n",
|
||||||
_current->arch.swap_return_value, SWAP_RETVAL);
|
arch_current_thread()->arch.swap_return_value, SWAP_RETVAL);
|
||||||
zassert_equal(_current->arch.swap_return_value, ztest_swap_return_val,
|
zassert_equal(arch_current_thread()->arch.swap_return_value, ztest_swap_return_val,
|
||||||
"Swap value not returned as expected 0x%x (0x%x)\n",
|
"Swap value not returned as expected 0x%x (0x%x)\n",
|
||||||
_current->arch.swap_return_value, ztest_swap_return_val);
|
arch_current_thread()->arch.swap_return_value, ztest_swap_return_val);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||||
|
@@ -732,7 +732,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
 	 */
 	verify_fp_callee_saved(
 		&ztest_thread_fp_callee_saved_regs,
-		&_current->arch.preempt_float);
+		&arch_current_thread()->arch.preempt_float);

 	/* Verify that the main test thread restored the FPSCR bit-0. */
 	zassert_true((__get_FPSCR() & 0x1) == 0x1,
@@ -741,13 +741,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
 	/* The main test thread is using the FP registers, and the .mode
 	 * flag and MPU GUARD flag are now updated.
 	 */
-	zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0,
+	zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0,
 		"Thread Ftype flag not cleared after main returned back\n");
 #if defined(CONFIG_MPU_STACK_GUARD)
-	zassert_true((_current->arch.mode &
+	zassert_true((arch_current_thread()->arch.mode &
 		Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0,
 		"Thread MPU GUARD FLOAT flag not set\n");
-	zassert_true((_current->base.user_options & K_FP_REGS) != 0,
+	zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0,
 		"Thread K_FPREGS not set after main returned back\n");
 	zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0,
 		"Lazy FP Stacking not set after main returned back\n");
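The hunks above only swap the accessor; the fields they inspect are unchanged. A minimal sketch of the pattern, assuming a Zephyr tree where arch_current_thread() is already visible through the headers these tests pull in (the helper name below is illustrative, not part of the diff):

#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>

/* Illustrative helper, not from the diff: arch_current_thread() returns the
 * running thread's struct k_thread *, so any field formerly reached through
 * _current is reached the same way through the function call.
 */
static inline int current_thread_priority(void)
{
	struct k_thread *self = arch_current_thread();

	/* Expected to match the pointer the public API returns. */
	__ASSERT_NO_MSG(self == k_current_get());

	return self->base.prio;
}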
@ -28,12 +28,12 @@ void test_thread_entry(void *p, void *p1, void *p2)
|
||||||
|
|
||||||
void thread_swap(void *p1, void *p2, void *p3)
|
void thread_swap(void *p1, void *p2, void *p3)
|
||||||
{
|
{
|
||||||
k_thread_abort(_current);
|
k_thread_abort(arch_current_thread());
|
||||||
}
|
}
|
||||||
|
|
||||||
void thread_suspend(void *p1, void *p2, void *p3)
|
void thread_suspend(void *p1, void *p2, void *p3)
|
||||||
{
|
{
|
||||||
k_thread_suspend(_current);
|
k_thread_suspend(arch_current_thread());
|
||||||
}
|
}
|
||||||
|
|
||||||
void thread_yield0(void *p1, void *p2, void *p3)
|
void thread_yield0(void *p1, void *p2, void *p3)
|
||||||
|
|
|
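Outside of kernel and arch tests like the one above, the same self-targeting calls are normally written against the public k_current_get(); the test deliberately exercises arch_current_thread(). A hedged application-level sketch (helper names are illustrative):

#include <zephyr/kernel.h>

static void worker_abort_self(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	k_thread_abort(k_current_get());	/* does not return */
}

static void worker_suspend_self(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	/* Stays suspended until another thread calls k_thread_resume(). */
	k_thread_suspend(k_current_get());
}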
@@ -135,7 +135,7 @@ static void isr_handler(const void *data)
 			break;
 		}

-		if (_current->base.prio < 0) {
+		if (arch_current_thread()->base.prio < 0) {
 			isr_info.value = K_COOP_THREAD;
 			break;
 		}
@@ -643,9 +643,9 @@ ZTEST(context, test_ctx_thread)
 	TC_PRINT("Testing k_is_in_isr() from a preemptible thread\n");
 	zassert_false(k_is_in_isr(), "Should not be in ISR context");

-	zassert_false(_current->base.prio < 0,
+	zassert_false(arch_current_thread()->base.prio < 0,
 		      "Current thread should have preemptible priority: %d",
-		      _current->base.prio);
+		      arch_current_thread()->base.prio);

 }
@@ -683,7 +683,7 @@ static void _test_kernel_thread(k_tid_t _thread_id)

 	zassert_false(k_is_in_isr(), "k_is_in_isr() when called from a thread is true");

-	zassert_false((_current->base.prio >= 0),
+	zassert_false((arch_current_thread()->base.prio >= 0),
 		      "thread is not a cooperative thread");
 }
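Both context-test hunks rely on Zephyr's priority convention: cooperative threads carry negative priority values, preemptible threads zero or positive ones. A small sketch of that check (the helper name is illustrative):

#include <zephyr/kernel.h>

static bool current_thread_is_cooperative(void)
{
	/* Equivalent public-API form: k_thread_priority_get(k_current_get()) < 0 */
	return arch_current_thread()->base.prio < 0;
}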
@@ -314,7 +314,7 @@ ZTEST(fatal_exception, test_fatal)
 	 * priority -1. To run the test smoothly make both main and ztest
 	 * threads run at same priority level.
 	 */
-	k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY));
+	k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY));

 #ifndef CONFIG_ARCH_POSIX
 	TC_PRINT("test alt thread 1: generic CPU exception\n");
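The fatal-exception hunk lowers the calling thread to a preemptible priority before triggering the faults. A hedged sketch of that one step; MY_PRIO is a placeholder value, not a macro from the test:

#include <zephyr/kernel.h>

#define MY_PRIO 0	/* placeholder preemptible priority level */

static void drop_self_to_preemptible(void)
{
	k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MY_PRIO));
}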
@@ -86,7 +86,7 @@ int main(void)
 	 * panic and not an oops). Set the thread non-essential as a
 	 * workaround.
 	 */
-	z_thread_essential_clear(_current);
+	z_thread_essential_clear(arch_current_thread());

 	test_message_capture();
 	return 0;
@@ -116,7 +116,7 @@ void thread3_entry(void *p1, void *p2, void *p3)

 	/* 9.1 - T3 should be executing on the same CPU that T1 was. */

-	cpu_t3 = _current->base.cpu;
+	cpu_t3 = arch_current_thread()->base.cpu;

 	zassert_true(cpu_t3 == cpu_t1, "T3 not executing on T1's original CPU");
@@ -136,7 +136,7 @@ void thread4_entry(void *p1, void *p2, void *p3)
 	 * It is expected to execute on the same CPU that T2 did.
 	 */

-	cpu_t4 = _current->base.cpu;
+	cpu_t4 = arch_current_thread()->base.cpu;

 	zassert_true(cpu_t4 == cpu_t2, "T4 on unexpected CPU");
@@ -165,7 +165,7 @@ void thread2_entry(void *p1, void *p2, void *p3)
 		arch_irq_unlock(key);
 	}

-	cpu_t2 = _current->base.cpu;
+	cpu_t2 = arch_current_thread()->base.cpu;

 	zassert_false(cpu_t2 == cpu_t1, "T2 and T1 unexpectedly on the same CPU");
@@ -205,7 +205,7 @@ ZTEST(ipi_cascade, test_ipi_cascade)

 	/* 3. T3 and T4 are blocked. Pin T3 to this CPU */

-	cpu_t1 = _current->base.cpu;
+	cpu_t1 = arch_current_thread()->base.cpu;
 	status = k_thread_cpu_pin(&thread3, cpu_t1);

 	zassert_true(status == 0, "Failed to pin T3 to %d : %d\n", cpu_t1, status);
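The ipi_cascade hunks read base.cpu to learn which CPU the caller is running on and then pin another thread there. A sketch of that pattern, assuming CONFIG_SMP is enabled (the helper name is illustrative):

#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>

static void pin_peer_to_my_cpu(struct k_thread *peer)
{
	int my_cpu = arch_current_thread()->base.cpu;
	int ret = k_thread_cpu_pin(peer, my_cpu);

	__ASSERT(ret == 0, "k_thread_cpu_pin failed: %d", ret);
	ARG_UNUSED(ret);
}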
@@ -249,7 +249,7 @@ ZTEST(ipi_cascade, test_ipi_cascade)

 	zassert_false(timer_expired, "Test terminated by timer");

-	zassert_true(cpu_t1 != _current->base.cpu,
+	zassert_true(cpu_t1 != arch_current_thread()->base.cpu,
 		     "Main thread (T1) did not change CPUs\n");

 	show_executing_threads("Final");
@@ -125,7 +125,7 @@ ZTEST(mem_protect, test_permission_inheritance)

 struct k_heap *z_impl_ret_resource_pool_ptr(void)
 {
-	return _current->resource_pool;
+	return arch_current_thread()->resource_pool;
 }

 static inline struct k_heap *z_vrfy_ret_resource_pool_ptr(void)
@@ -132,7 +132,7 @@ ZTEST(object_validation, test_generic_object)
 ZTEST(object_validation, test_kobj_assign_perms_on_alloc_obj)
 {
 	static struct k_sem *test_dyn_sem;
-	struct k_thread *thread = _current;
+	struct k_thread *thread = arch_current_thread();

 	uintptr_t start_addr, end_addr;
 	size_t size_heap = K_HEAP_MEM_POOL_SIZE;
@@ -173,7 +173,7 @@ ZTEST(object_validation, test_no_ref_dyn_kobj_release_mem)
 	zassert_not_null(test_dyn_mutex,
 			 "Can not allocate dynamic kernel object");

-	struct k_thread *thread = _current;
+	struct k_thread *thread = arch_current_thread();

 	/* revoke access from the current thread */
 	k_object_access_revoke(test_dyn_mutex, thread);
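The object_validation hunks pass the current thread into the kernel-object permission APIs. A hedged sketch of that flow, assuming CONFIG_DYNAMIC_OBJECTS is enabled (the helper is illustrative and not part of the test):

#include <zephyr/kernel.h>

static void alloc_then_revoke_own_access(void)
{
	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);

	if (sem == NULL) {
		return;	/* allocation can fail if the object pool is exhausted */
	}

	k_sem_init(sem, 0, 1);

	/* The allocating thread is granted access implicitly; drop it again. */
	k_object_access_revoke(sem, arch_current_thread());
}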
Some files were not shown because too many files have changed in this diff.