arch: deprecate _current

`_current` is now functionally equal to `arch_curr_thread()`. Remove
its in-tree usage and deprecate it rather than removing it outright,
as it has been with us since forever.

Signed-off-by: Yong Cong Sin <ycsin@meta.com>
Signed-off-by: Yong Cong Sin <yongcong.sin@gmail.com>
This commit is contained in:
Yong Cong Sin 2024-11-19 13:57:54 +08:00 committed by Anas Nashif
commit b1def7145f
107 changed files with 490 additions and 479 deletions

View file

@ -177,7 +177,7 @@ ZTEST(arm_interrupt, test_arm_esf_collection)
* crashy thread we create below runs to completion before we get
* to the end of this function
*/
k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY));
k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY));
TC_PRINT("Testing ESF Reporting\n");
k_thread_create(&esf_collection_thread, esf_collection_stack,
@ -366,9 +366,9 @@ ZTEST(arm_interrupt, test_arm_interrupt)
uint32_t fp_extra_size =
(__get_CONTROL() & CONTROL_FPCA_Msk) ?
FPU_STACK_EXTRA_SIZE : 0;
__set_PSP(_current->stack_info.start + 0x10 + fp_extra_size);
__set_PSP(arch_current_thread()->stack_info.start + 0x10 + fp_extra_size);
#else
__set_PSP(_current->stack_info.start + 0x10);
__set_PSP(arch_current_thread()->stack_info.start + 0x10);
#endif
__enable_irq();

View file

@ -42,20 +42,20 @@ void z_impl_test_arm_user_syscall(void)
* - PSPLIM register guards the privileged stack
* - MSPLIM register still guards the interrupt stack
*/
zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0,
zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0,
"mode variable not set to PRIV mode in system call\n");
zassert_false(arch_is_user_context(),
"arch_is_user_context() indicates nPRIV\n");
zassert_true(
((__get_PSP() >= _current->arch.priv_stack_start) &&
(__get_PSP() < (_current->arch.priv_stack_start +
((__get_PSP() >= arch_current_thread()->arch.priv_stack_start) &&
(__get_PSP() < (arch_current_thread()->arch.priv_stack_start +
CONFIG_PRIVILEGED_STACK_SIZE))),
"Process SP outside thread privileged stack limits\n");
#if defined(CONFIG_BUILTIN_STACK_GUARD)
zassert_true(__get_PSPLIM() == _current->arch.priv_stack_start,
zassert_true(__get_PSPLIM() == arch_current_thread()->arch.priv_stack_start,
"PSPLIM not guarding the thread's privileged stack\n");
zassert_true(__get_MSPLIM() == (uint32_t)z_interrupt_stacks,
"MSPLIM not guarding the interrupt stack\n");
@ -82,16 +82,16 @@ void arm_isr_handler(const void *args)
* - MSPLIM register still guards the interrupt stack
*/
zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) != 0,
zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) != 0,
"mode variable not set to nPRIV mode for user thread\n");
zassert_false(arch_is_user_context(),
"arch_is_user_context() indicates nPRIV in ISR\n");
zassert_true(
((__get_PSP() >= _current->stack_info.start) &&
(__get_PSP() < (_current->stack_info.start +
_current->stack_info.size))),
((__get_PSP() >= arch_current_thread()->stack_info.start) &&
(__get_PSP() < (arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size))),
"Process SP outside thread stack limits\n");
static int first_call = 1;
@ -101,7 +101,7 @@ void arm_isr_handler(const void *args)
/* Trigger thread yield() manually */
(void)irq_lock();
z_move_thread_to_end_of_prio_q(_current);
z_move_thread_to_end_of_prio_q(arch_current_thread());
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
irq_unlock(0);
@ -169,20 +169,20 @@ ZTEST(arm_thread_swap, test_arm_syscalls)
* - PSPLIM register guards the default stack
* - MSPLIM register guards the interrupt stack
*/
zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0,
zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0,
"mode variable not set to PRIV mode for supervisor thread\n");
zassert_false(arch_is_user_context(),
"arch_is_user_context() indicates nPRIV\n");
zassert_true(
((__get_PSP() >= _current->stack_info.start) &&
(__get_PSP() < (_current->stack_info.start +
_current->stack_info.size))),
((__get_PSP() >= arch_current_thread()->stack_info.start) &&
(__get_PSP() < (arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size))),
"Process SP outside thread stack limits\n");
#if defined(CONFIG_BUILTIN_STACK_GUARD)
zassert_true(__get_PSPLIM() == _current->stack_info.start,
zassert_true(__get_PSPLIM() == arch_current_thread()->stack_info.start,
"PSPLIM not guarding the default stack\n");
zassert_true(__get_MSPLIM() == (uint32_t)z_interrupt_stacks,
"MSPLIM not guarding the interrupt stack\n");

View file

@ -278,16 +278,16 @@ static void alt_thread_entry(void *p1, void *p2, void *p3)
/* Verify that the _current_ (alt) thread is
* initialized with EXC_RETURN.Ftype set
*/
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Alt thread FPCA flag not clear at initialization\n");
#if defined(CONFIG_MPU_STACK_GUARD)
/* Alt thread is created with K_FP_REGS set, so we
* expect lazy stacking and long guard to be enabled.
*/
zassert_true((_current->arch.mode &
zassert_true((arch_current_thread()->arch.mode &
Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0,
"Alt thread MPU GUAR DFLOAT flag not set at initialization\n");
zassert_true((_current->base.user_options & K_FP_REGS) != 0,
zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0,
"Alt thread K_FP_REGS not set at initialization\n");
zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0,
"Lazy FP Stacking not set at initialization\n");
@ -330,7 +330,7 @@ static void alt_thread_entry(void *p1, void *p2, void *p3)
p_ztest_thread->arch.swap_return_value = SWAP_RETVAL;
#endif
z_move_thread_to_end_of_prio_q(_current);
z_move_thread_to_end_of_prio_q(arch_current_thread());
/* Modify the callee-saved registers by zero-ing them.
* The main test thread will, later, assert that they
@ -448,20 +448,20 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
*/
load_callee_saved_regs(&ztest_thread_callee_saved_regs_init);
k_thread_priority_set(_current, K_PRIO_COOP(PRIORITY));
k_thread_priority_set(arch_current_thread(), K_PRIO_COOP(PRIORITY));
/* Export current thread's callee-saved registers pointer
* and arch.basepri variable pointer, into global pointer
* variables, so they can be easily accessible by other
* (alternative) test thread.
*/
p_ztest_thread = _current;
p_ztest_thread = arch_current_thread();
/* Confirm initial conditions before starting the test. */
test_flag = switch_flag;
zassert_true(test_flag == false,
"Switch flag not initialized properly\n");
zassert_true(_current->arch.basepri == 0,
zassert_true(arch_current_thread()->arch.basepri == 0,
"Thread BASEPRI flag not clear at thread start\n");
/* Verify, also, that the interrupts are unlocked. */
#if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI)
@ -481,16 +481,16 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
"Main test thread does not start in privilege mode\n");
/* Assert that the mode status variable indicates privilege mode */
zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0,
zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0,
"Thread nPRIV flag not clear for supervisor thread: 0x%0x\n",
_current->arch.mode);
arch_current_thread()->arch.mode);
#endif /* CONFIG_USERSPACE */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* The main test thread is not (yet) actively using the FP registers */
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Thread Ftype flag not set at initialization 0x%0x\n",
_current->arch.mode);
arch_current_thread()->arch.mode);
/* Verify that the main test thread is initialized with FPCA cleared. */
zassert_true((__get_CONTROL() & CONTROL_FPCA_Msk) == 0,
@ -503,7 +503,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
/* Clear the thread's floating-point callee-saved registers' container.
* The container will, later, be populated by the swap mechanism.
*/
memset(&_current->arch.preempt_float, 0,
memset(&arch_current_thread()->arch.preempt_float, 0,
sizeof(struct _preempt_float));
/* Randomize the FP callee-saved registers at test initialization */
@ -517,13 +517,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
/* The main test thread is using the FP registers, but the .mode
* flag is not updated until the next context switch.
*/
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Thread Ftype flag not set at initialization\n");
#if defined(CONFIG_MPU_STACK_GUARD)
zassert_true((_current->arch.mode &
zassert_true((arch_current_thread()->arch.mode &
Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0,
"Thread MPU GUAR DFLOAT flag not clear at initialization\n");
zassert_true((_current->base.user_options & K_FP_REGS) == 0,
zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) == 0,
"Thread K_FP_REGS not clear at initialization\n");
zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) == 0,
"Lazy FP Stacking not clear at initialization\n");
@ -552,13 +552,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
* explicitly required by the test.
*/
(void)irq_lock();
z_move_thread_to_end_of_prio_q(_current);
z_move_thread_to_end_of_prio_q(arch_current_thread());
/* Clear the thread's callee-saved registers' container.
* The container will, later, be populated by the swap
* mechanism.
*/
memset(&_current->callee_saved, 0, sizeof(_callee_saved_t));
memset(&arch_current_thread()->callee_saved, 0, sizeof(_callee_saved_t));
/* Verify context-switch has not occurred yet. */
test_flag = switch_flag;
@ -672,7 +672,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
*/
verify_callee_saved(
&ztest_thread_callee_saved_regs_container,
&_current->callee_saved);
&arch_current_thread()->callee_saved);
/* Verify context-switch did occur. */
test_flag = switch_flag;
@ -688,7 +688,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
* the alternative thread modified it, since the thread
* is now switched back in.
*/
zassert_true(_current->arch.basepri == 0,
zassert_true(arch_current_thread()->arch.basepri == 0,
"arch.basepri value not in accordance with the update\n");
#if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI)
@ -709,12 +709,12 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
#if !defined(CONFIG_NO_OPTIMIZATIONS)
/* The thread is now swapped-back in. */
zassert_equal(_current->arch.swap_return_value, SWAP_RETVAL,
zassert_equal(arch_current_thread()->arch.swap_return_value, SWAP_RETVAL,
"Swap value not set as expected: 0x%x (0x%x)\n",
_current->arch.swap_return_value, SWAP_RETVAL);
zassert_equal(_current->arch.swap_return_value, ztest_swap_return_val,
arch_current_thread()->arch.swap_return_value, SWAP_RETVAL);
zassert_equal(arch_current_thread()->arch.swap_return_value, ztest_swap_return_val,
"Swap value not returned as expected 0x%x (0x%x)\n",
_current->arch.swap_return_value, ztest_swap_return_val);
arch_current_thread()->arch.swap_return_value, ztest_swap_return_val);
#endif
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
@ -732,7 +732,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
*/
verify_fp_callee_saved(
&ztest_thread_fp_callee_saved_regs,
&_current->arch.preempt_float);
&arch_current_thread()->arch.preempt_float);
/* Verify that the main test thread restored the FPSCR bit-0. */
zassert_true((__get_FPSCR() & 0x1) == 0x1,
@ -741,13 +741,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
/* The main test thread is using the FP registers, and the .mode
* flag and MPU GUARD flag are now updated.
*/
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0,
zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0,
"Thread Ftype flag not cleared after main returned back\n");
#if defined(CONFIG_MPU_STACK_GUARD)
zassert_true((_current->arch.mode &
zassert_true((arch_current_thread()->arch.mode &
Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0,
"Thread MPU GUARD FLOAT flag not set\n");
zassert_true((_current->base.user_options & K_FP_REGS) != 0,
zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0,
"Thread K_FPREGS not set after main returned back\n");
zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0,
"Lazy FP Stacking not set after main returned back\n");

View file

@ -28,12 +28,12 @@ void test_thread_entry(void *p, void *p1, void *p2)
void thread_swap(void *p1, void *p2, void *p3)
{
k_thread_abort(_current);
k_thread_abort(arch_current_thread());
}
void thread_suspend(void *p1, void *p2, void *p3)
{
k_thread_suspend(_current);
k_thread_suspend(arch_current_thread());
}
void thread_yield0(void *p1, void *p2, void *p3)

View file

@ -135,7 +135,7 @@ static void isr_handler(const void *data)
break;
}
if (_current->base.prio < 0) {
if (arch_current_thread()->base.prio < 0) {
isr_info.value = K_COOP_THREAD;
break;
}
@ -643,9 +643,9 @@ ZTEST(context, test_ctx_thread)
TC_PRINT("Testing k_is_in_isr() from a preemptible thread\n");
zassert_false(k_is_in_isr(), "Should not be in ISR context");
zassert_false(_current->base.prio < 0,
zassert_false(arch_current_thread()->base.prio < 0,
"Current thread should have preemptible priority: %d",
_current->base.prio);
arch_current_thread()->base.prio);
}
@ -683,7 +683,7 @@ static void _test_kernel_thread(k_tid_t _thread_id)
zassert_false(k_is_in_isr(), "k_is_in_isr() when called from a thread is true");
zassert_false((_current->base.prio >= 0),
zassert_false((arch_current_thread()->base.prio >= 0),
"thread is not a cooperative thread");
}

View file

@ -314,7 +314,7 @@ ZTEST(fatal_exception, test_fatal)
* priority -1. To run the test smoothly make both main and ztest
* threads run at same priority level.
*/
k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY));
k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY));
#ifndef CONFIG_ARCH_POSIX
TC_PRINT("test alt thread 1: generic CPU exception\n");

View file

@ -86,7 +86,7 @@ int main(void)
* panic and not an oops). Set the thread non-essential as a
* workaround.
*/
z_thread_essential_clear(_current);
z_thread_essential_clear(arch_current_thread());
test_message_capture();
return 0;

View file

@ -116,7 +116,7 @@ void thread3_entry(void *p1, void *p2, void *p3)
/* 9.1 - T3 should be executing on the same CPU that T1 was. */
cpu_t3 = _current->base.cpu;
cpu_t3 = arch_current_thread()->base.cpu;
zassert_true(cpu_t3 == cpu_t1, "T3 not executing on T1's original CPU");
@ -136,7 +136,7 @@ void thread4_entry(void *p1, void *p2, void *p3)
* It is expected to execute on the same CPU that T2 did.
*/
cpu_t4 = _current->base.cpu;
cpu_t4 = arch_current_thread()->base.cpu;
zassert_true(cpu_t4 == cpu_t2, "T4 on unexpected CPU");
@ -165,7 +165,7 @@ void thread2_entry(void *p1, void *p2, void *p3)
arch_irq_unlock(key);
}
cpu_t2 = _current->base.cpu;
cpu_t2 = arch_current_thread()->base.cpu;
zassert_false(cpu_t2 == cpu_t1, "T2 and T1 unexpectedly on the same CPU");
@ -205,7 +205,7 @@ ZTEST(ipi_cascade, test_ipi_cascade)
/* 3. T3 and T4 are blocked. Pin T3 to this CPU */
cpu_t1 = _current->base.cpu;
cpu_t1 = arch_current_thread()->base.cpu;
status = k_thread_cpu_pin(&thread3, cpu_t1);
zassert_true(status == 0, "Failed to pin T3 to %d : %d\n", cpu_t1, status);
@ -249,7 +249,7 @@ ZTEST(ipi_cascade, test_ipi_cascade)
zassert_false(timer_expired, "Test terminated by timer");
zassert_true(cpu_t1 != _current->base.cpu,
zassert_true(cpu_t1 != arch_current_thread()->base.cpu,
"Main thread (T1) did not change CPUs\n");
show_executing_threads("Final");

View file

@ -125,7 +125,7 @@ ZTEST(mem_protect, test_permission_inheritance)
struct k_heap *z_impl_ret_resource_pool_ptr(void)
{
return _current->resource_pool;
return arch_current_thread()->resource_pool;
}
static inline struct k_heap *z_vrfy_ret_resource_pool_ptr(void)

View file

@ -132,7 +132,7 @@ ZTEST(object_validation, test_generic_object)
ZTEST(object_validation, test_kobj_assign_perms_on_alloc_obj)
{
static struct k_sem *test_dyn_sem;
struct k_thread *thread = _current;
struct k_thread *thread = arch_current_thread();
uintptr_t start_addr, end_addr;
size_t size_heap = K_HEAP_MEM_POOL_SIZE;
@ -173,7 +173,7 @@ ZTEST(object_validation, test_no_ref_dyn_kobj_release_mem)
zassert_not_null(test_dyn_mutex,
"Can not allocate dynamic kernel object");
struct k_thread *thread = _current;
struct k_thread *thread = arch_current_thread();
/* revoke access from the current thread */
k_object_access_revoke(test_dyn_mutex, thread);

View file

@ -312,7 +312,7 @@ ZTEST_USER(userspace, test_read_kernram)
set_fault(K_ERR_CPU_EXCEPTION);
p = _current->init_data;
p = arch_current_thread()->init_data;
printk("%p\n", p);
zassert_unreachable("Read from kernel RAM did not fault");
}
@ -327,7 +327,7 @@ ZTEST_USER(userspace, test_write_kernram)
/* Try to write to kernel RAM. */
set_fault(K_ERR_CPU_EXCEPTION);
_current->init_data = NULL;
arch_current_thread()->init_data = NULL;
zassert_unreachable("Write to kernel RAM did not fault");
}
@ -1038,11 +1038,11 @@ ZTEST(userspace, test_tls_leakage)
* supervisor mode to be leaked
*/
memset(_current->userspace_local_data, 0xff,
memset(arch_current_thread()->userspace_local_data, 0xff,
sizeof(struct _thread_userspace_local_data));
k_thread_user_mode_enter(tls_leakage_user_part,
_current->userspace_local_data, NULL, NULL);
arch_current_thread()->userspace_local_data, NULL, NULL);
#else
ztest_test_skip();
#endif

View file

@ -318,8 +318,9 @@ ZTEST(smp, test_coop_switch_in_abort)
unsigned int num_threads = arch_num_cpus();
unsigned int i;
zassert_true(_current->base.prio < 0, "test case relies on ztest thread be cooperative");
zassert_true(_current->base.prio > SPAWN_AB_PRIO,
zassert_true(arch_current_thread()->base.prio < 0,
"test case relies on ztest thread be cooperative");
zassert_true(arch_current_thread()->base.prio > SPAWN_AB_PRIO,
"spawn test need to have higher priority than ztest thread");
/* Spawn N number of cooperative threads, where N = number of CPUs */
@ -869,15 +870,15 @@ static void t2_mutex_lock(void *p1, void *p2, void *p3)
ARG_UNUSED(p2);
ARG_UNUSED(p3);
zassert_equal(_current->base.global_lock_count, 0,
zassert_equal(arch_current_thread()->base.global_lock_count, 0,
"thread global lock cnt %d is incorrect",
_current->base.global_lock_count);
arch_current_thread()->base.global_lock_count);
k_mutex_lock((struct k_mutex *)p1, K_FOREVER);
zassert_equal(_current->base.global_lock_count, 0,
zassert_equal(arch_current_thread()->base.global_lock_count, 0,
"thread global lock cnt %d is incorrect",
_current->base.global_lock_count);
arch_current_thread()->base.global_lock_count);
k_mutex_unlock((struct k_mutex *)p1);
@ -885,9 +886,9 @@ static void t2_mutex_lock(void *p1, void *p2, void *p3)
* context switch but global_lock_cnt has not been decrease
* because no irq_lock() was called.
*/
zassert_equal(_current->base.global_lock_count, 0,
zassert_equal(arch_current_thread()->base.global_lock_count, 0,
"thread global lock cnt %d is incorrect",
_current->base.global_lock_count);
arch_current_thread()->base.global_lock_count);
}
/**

View file

@ -232,7 +232,7 @@ static void umode_entry(void *thread_id, void *p2, void *p3)
ARG_UNUSED(p2);
ARG_UNUSED(p3);
if (!z_is_thread_essential(_current) &&
if (!z_is_thread_essential(arch_current_thread()) &&
(k_current_get() == (k_tid_t)thread_id)) {
ztest_test_pass();
} else {
@ -249,9 +249,9 @@ static void umode_entry(void *thread_id, void *p2, void *p3)
*/
static void enter_user_mode_entry(void *p1, void *p2, void *p3)
{
z_thread_essential_set(_current);
z_thread_essential_set(arch_current_thread());
zassert_true(z_is_thread_essential(_current), "Thread isn't set"
zassert_true(z_is_thread_essential(arch_current_thread()), "Thread isn't set"
" as essential\n");
k_thread_user_mode_enter(umode_entry,

View file

@ -27,16 +27,16 @@ static void thread_entry(void *p1, void *p2, void *p3)
ARG_UNUSED(p2);
ARG_UNUSED(p3);
z_thread_essential_set(_current);
z_thread_essential_set(arch_current_thread());
if (z_is_thread_essential(_current)) {
if (z_is_thread_essential(arch_current_thread())) {
k_busy_wait(100);
} else {
zassert_unreachable("The thread is not set as essential");
}
z_thread_essential_clear(_current);
zassert_false(z_is_thread_essential(_current),
z_thread_essential_clear(arch_current_thread());
zassert_false(z_is_thread_essential(arch_current_thread()),
"Essential flag of the thread is not cleared");
k_sem_give(&sync_sem);
@ -68,7 +68,7 @@ void k_sys_fatal_error_handler(unsigned int reason,
fatal_error_signaled = true;
z_thread_essential_clear(_current);
z_thread_essential_clear(arch_current_thread());
}
static void abort_thread_entry(void *p1, void *p2, void *p3)
@ -77,9 +77,9 @@ static void abort_thread_entry(void *p1, void *p2, void *p3)
ARG_UNUSED(p2);
ARG_UNUSED(p3);
z_thread_essential_set(_current);
z_thread_essential_set(arch_current_thread());
if (z_is_thread_essential(_current)) {
if (z_is_thread_essential(arch_current_thread())) {
k_busy_wait(100);
} else {
zassert_unreachable("The thread is not set as essential");

View file

@ -72,7 +72,7 @@ ZTEST(usage_api, test_all_stats_usage)
k_thread_runtime_stats_t stats4;
k_thread_runtime_stats_t stats5;
priority = k_thread_priority_get(_current);
priority = k_thread_priority_get(arch_current_thread());
tid = k_thread_create(&helper_thread, helper_stack,
K_THREAD_STACK_SIZEOF(helper_stack),
helper1, NULL, NULL, NULL,
@ -196,7 +196,7 @@ ZTEST(usage_api, test_thread_stats_enable_disable)
k_thread_runtime_stats_t helper_stats3;
int priority;
priority = k_thread_priority_get(_current);
priority = k_thread_priority_get(arch_current_thread());
tid = k_thread_create(&helper_thread, helper_stack,
K_THREAD_STACK_SIZEOF(helper_stack),
helper1, NULL, NULL, NULL,
@ -209,7 +209,7 @@ ZTEST(usage_api, test_thread_stats_enable_disable)
k_sleep(K_TICKS(5));
k_thread_runtime_stats_get(_current, &stats1);
k_thread_runtime_stats_get(arch_current_thread(), &stats1);
k_thread_runtime_stats_get(tid, &helper_stats1);
k_thread_runtime_stats_disable(tid);
@ -225,7 +225,7 @@ ZTEST(usage_api, test_thread_stats_enable_disable)
k_sleep(K_TICKS(2));
k_thread_runtime_stats_enable(tid);
k_thread_runtime_stats_get(_current, &stats2);
k_thread_runtime_stats_get(arch_current_thread(), &stats2);
k_thread_runtime_stats_get(tid, &helper_stats2);
/* Sleep for two ticks to let the helper thread execute again. */
@ -280,12 +280,12 @@ ZTEST(usage_api, test_sys_stats_enable_disable)
k_sys_runtime_stats_disable();
k_thread_runtime_stats_get(_current, &thread_stats1);
k_thread_runtime_stats_get(arch_current_thread(), &thread_stats1);
k_thread_runtime_stats_all_get(&sys_stats1);
busy_loop(2);
k_thread_runtime_stats_get(_current, &thread_stats2);
k_thread_runtime_stats_get(arch_current_thread(), &thread_stats2);
k_thread_runtime_stats_all_get(&sys_stats2);
/*
@ -297,7 +297,7 @@ ZTEST(usage_api, test_sys_stats_enable_disable)
busy_loop(2);
k_thread_runtime_stats_get(_current, &thread_stats3);
k_thread_runtime_stats_get(arch_current_thread(), &thread_stats3);
k_thread_runtime_stats_all_get(&sys_stats3);
/*
@ -398,7 +398,7 @@ ZTEST(usage_api, test_thread_stats_usage)
k_thread_runtime_stats_t stats2;
k_thread_runtime_stats_t stats3;
priority = k_thread_priority_get(_current);
priority = k_thread_priority_get(arch_current_thread());
/*
* Verify that k_thread_runtime_stats_get() returns the expected
@ -408,7 +408,7 @@ ZTEST(usage_api, test_thread_stats_usage)
status = k_thread_runtime_stats_get(NULL, &stats1);
zassert_true(status == -EINVAL);
status = k_thread_runtime_stats_get(_current, NULL);
status = k_thread_runtime_stats_get(arch_current_thread(), NULL);
zassert_true(status == -EINVAL);
/* Align to the next tick */
@ -422,7 +422,7 @@ ZTEST(usage_api, test_thread_stats_usage)
helper1, NULL, NULL, NULL,
priority + 2, 0, K_TICKS(1));
main_thread = _current;
main_thread = arch_current_thread();
k_timer_init(&timer, resume_main, NULL);
k_timer_start(&timer, K_TICKS(1), K_TICKS(10));
@ -440,7 +440,7 @@ ZTEST(usage_api, test_thread_stats_usage)
* the helper threads runtime stats.
*/
k_thread_suspend(_current);
k_thread_suspend(arch_current_thread());
/*
* T = 1.
@ -449,14 +449,14 @@ ZTEST(usage_api, test_thread_stats_usage)
*/
k_thread_runtime_stats_get(tid, &stats1);
k_thread_suspend(_current);
k_thread_suspend(arch_current_thread());
/*
* T = 11.
* Timer woke the main thread. Suspend main thread again.
*/
k_thread_suspend(_current);
k_thread_suspend(arch_current_thread());
/*
* T = 21.
@ -465,7 +465,7 @@ ZTEST(usage_api, test_thread_stats_usage)
*/
k_thread_runtime_stats_get(tid, &stats2);
k_thread_suspend(_current);
k_thread_suspend(arch_current_thread());
/*
* T = 31.

View file

@ -254,7 +254,7 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
"There is no power state defined");
/* make sure this is idle thread */
zassert_true(z_is_idle_thread_object(_current));
zassert_true(z_is_idle_thread_object(arch_current_thread()));
zassert_true(ticks == _kernel.idle);
zassert_false(k_can_yield());
idle_entered = true;
@ -276,7 +276,7 @@ static void notify_pm_state_entry(enum pm_state state)
/* enter suspend */
zassert_true(notify_app_entry == true,
"Notification to enter suspend was not sent to the App");
zassert_true(z_is_idle_thread_object(_current));
zassert_true(z_is_idle_thread_object(arch_current_thread()));
zassert_equal(state, PM_STATE_SUSPEND_TO_IDLE);
pm_device_state_get(device_dummy, &device_power_state);
@ -301,7 +301,7 @@ static void notify_pm_state_exit(enum pm_state state)
/* leave suspend */
zassert_true(notify_app_exit == true,
"Notification to leave suspend was not sent to the App");
zassert_true(z_is_idle_thread_object(_current));
zassert_true(z_is_idle_thread_object(arch_current_thread()));
zassert_equal(state, PM_STATE_SUSPEND_TO_IDLE);
/* at this point, devices are active again*/

View file

@ -71,10 +71,10 @@ __no_optimization static void trigger_fault_access(void)
#elif defined(CONFIG_CPU_CORTEX_M) || defined(CONFIG_CPU_AARCH32_CORTEX_R) || \
defined(CONFIG_CPU_AARCH64_CORTEX_R)
/* As this test case only runs when User Mode is enabled,
* accessing _current always triggers a memory access fault,
* accessing arch_current_thread() always triggers a memory access fault,
* and is guaranteed not to trigger SecureFault exceptions.
*/
void *a = (void *)_current;
void *a = (void *)arch_current_thread();
#else
/* For most arch which support userspace, dereferencing NULL
* pointer will be caught by exception.
@ -338,7 +338,7 @@ ZTEST(error_hook_tests, test_catch_assert_in_isr)
static void trigger_z_oops(void)
{
/* Set up a dummy syscall frame, pointing to a valid area in memory. */
_current->syscall_frame = _image_ram_start;
arch_current_thread()->syscall_frame = _image_ram_start;
K_OOPS(true);
}