From 4f77c2ad5357d388daad3841928ea038698015e8 Mon Sep 17 00:00:00 2001 From: Andrew Boie Date: Thu, 7 Nov 2019 12:43:29 -0800 Subject: [PATCH] kernel: rename z_arch_ to arch_ Promote the private z_arch_* namespace, which specifies the interface between the core kernel and the architecture code, to a new top-level namespace named arch_*. This allows our documentation generation to create online documentation for this set of interfaces, and this set of interfaces is worth treating in a more formal way anyway. Signed-off-by: Andrew Boie --- arch/arc/core/arc_connect.c | 4 +- arch/arc/core/arc_smp.c | 10 +- arch/arc/core/cpu_idle.S | 10 +- arch/arc/core/fatal.c | 4 +- arch/arc/core/irq_manage.c | 12 +- arch/arc/core/irq_offload.c | 2 +- arch/arc/core/isr_wrapper.S | 4 +- arch/arc/core/mpu/arc_core_mpu.c | 20 +-- arch/arc/core/regular_irq.S | 2 +- arch/arc/core/switch.S | 20 +-- arch/arc/core/thread.c | 24 +-- arch/arc/core/thread_entry_wrapper.S | 2 +- arch/arc/core/userspace.S | 6 +- arch/arc/include/kernel_arch_func.h | 8 +- arch/arc/include/swap_macros.h | 2 +- arch/arm/core/cortex_m/fault.c | 6 +- arch/arm/core/cortex_m/mpu/arm_core_mpu.c | 20 +-- arch/arm/core/cpu_idle.S | 10 +- arch/arm/core/fatal.c | 2 +- arch/arm/core/irq_manage.c | 20 +-- arch/arm/core/irq_offload.c | 2 +- arch/arm/core/swap.c | 6 +- arch/arm/core/swap_helper.S | 2 +- arch/arm/core/thread.c | 32 ++-- arch/arm/core/userspace.S | 6 +- arch/arm/include/cortex_m/exc.h | 5 +- arch/arm/include/cortex_r/exc.h | 2 +- arch/arm/include/kernel_arch_func.h | 4 +- arch/common/sw_isr_common.c | 10 +- arch/common/timing_info_bench.c | 41 ++--- arch/nios2/core/cpu_idle.c | 4 +- arch/nios2/core/exception.S | 6 +- arch/nios2/core/fatal.c | 2 +- arch/nios2/core/irq_manage.c | 10 +- arch/nios2/core/irq_offload.c | 2 +- arch/nios2/core/swap.S | 14 +- arch/nios2/core/thread.c | 8 +- arch/nios2/include/kernel_arch_func.h | 6 +- arch/posix/core/cpuhalt.c | 8 +- arch/posix/core/fatal.c | 2 +- arch/posix/core/irq.c | 14 +- arch/posix/core/posix_core.c | 10 +- arch/posix/core/swap.c | 14 +- arch/posix/core/thread.c | 8 +- arch/posix/include/kernel_arch_func.h | 12 +- arch/riscv/core/cpu_idle.c | 8 +- arch/riscv/core/irq_manage.c | 6 +- arch/riscv/core/irq_offload.c | 2 +- arch/riscv/core/swap.S | 14 +- arch/riscv/core/thread.c | 8 +- arch/riscv/include/kernel_arch_func.h | 6 +- arch/x86/Kconfig | 2 +- arch/x86/core/cpuhalt.c | 6 +- arch/x86/core/fatal.c | 4 +- arch/x86/core/ia32/fatal.c | 6 +- arch/x86/core/ia32/intstub.S | 22 +-- arch/x86/core/ia32/irq_manage.c | 10 +- arch/x86/core/ia32/irq_offload.c | 2 +- arch/x86/core/ia32/swap.S | 38 ++--- arch/x86/core/ia32/thread.c | 14 +- arch/x86/core/ia32/userspace.S | 10 +- arch/x86/core/intel64/cpu.c | 2 +- arch/x86/core/intel64/irq.c | 6 +- arch/x86/core/intel64/thread.c | 8 +- arch/x86/core/x86_mmu.c | 22 +-- arch/x86/include/ia32/kernel_arch_func.h | 6 +- arch/x86/include/intel64/kernel_arch_func.h | 4 +- arch/x86/include/kernel_arch_func.h | 4 +- arch/xtensa/core/cpu_idle.c | 4 +- arch/xtensa/core/irq_offload.c | 6 +- arch/xtensa/core/xtensa-asm2.c | 10 +- arch/xtensa/include/kernel_arch_func.h | 8 +- boards/posix/native_posix/board_irq.h | 14 +- boards/posix/nrf52_bsim/board_irq.h | 14 +- boards/posix/nrf52_bsim/k_busy_wait.c | 2 +- doc/guides/porting/arch.rst | 24 +-- doc/reference/kernel/smp/smp.rst | 10 +- doc/reference/usermode/syscalls.rst | 2 +- drivers/interrupt_controller/plic.c | 6 +- drivers/interrupt_controller/system_apic.c | 4 +- drivers/interrupt_controller/vexriscv_litex.c | 6 
+- drivers/timer/arcv2_timer0.c | 4 +- drivers/timer/loapic_timer.c | 8 +- drivers/timer/mchp_xec_rtos_timer.c | 4 +- drivers/timer/native_posix_timer.c | 2 +- include/arch/arc/arch.h | 34 ++-- include/arch/arc/arch_inlines.h | 2 +- include/arch/arc/syscall.h | 38 ++--- include/arch/arc/v2/error.h | 2 +- include/arch/arc/v2/irq.h | 32 ++-- include/arch/arc/v2/misc.h | 2 +- include/arch/arm/arch.h | 28 ++-- include/arch/arm/asm_inline_gcc.h | 6 +- include/arch/arm/error.h | 4 +- include/arch/arm/irq.h | 30 ++-- include/arch/arm/misc.h | 4 +- include/arch/arm/syscall.h | 40 ++--- include/arch/nios2/arch.h | 16 +- include/arch/posix/arch.h | 10 +- include/arch/riscv/arch.h | 28 ++-- include/arch/x86/arch.h | 12 +- include/arch/x86/arch_inlines.h | 2 +- include/arch/x86/ia32/arch.h | 24 +-- include/arch/x86/ia32/syscall.h | 40 ++--- include/arch/x86/ia32/thread.h | 2 +- include/arch/x86/intel64/arch.h | 4 +- include/arch/x86/thread_stack.h | 26 +-- include/arch/xtensa/arch.h | 6 +- include/arch/xtensa/arch_inlines.h | 2 +- include/arch/xtensa/irq.h | 18 +-- include/exc_handle.h | 2 +- include/irq.h | 25 +-- include/irq_offload.h | 2 +- include/kernel.h | 32 ++-- include/kernel_structs.h | 4 +- include/spinlock.h | 4 +- include/sys/arch_interface.h | 152 +++++++++--------- include/syscall.h | 4 +- include/syscall_handler.h | 6 +- kernel/Kconfig | 8 +- kernel/fatal.c | 16 +- kernel/futex.c | 2 +- kernel/idle.c | 2 +- kernel/include/kernel_arch_interface.h | 44 ++--- kernel/include/kernel_internal.h | 4 +- kernel/include/ksched.h | 8 +- kernel/include/kswap.h | 14 +- kernel/init.c | 10 +- kernel/mailbox.c | 4 +- kernel/mem_domain.c | 12 +- kernel/mempool.c | 2 +- kernel/msg_q.c | 10 +- kernel/mutex.c | 2 +- kernel/poll.c | 4 +- kernel/sched.c | 18 +-- kernel/sem.c | 4 +- kernel/smp.c | 14 +- kernel/thread.c | 16 +- kernel/thread_abort.c | 2 +- kernel/timer.c | 4 +- kernel/userspace.c | 4 +- lib/os/printk.c | 4 +- lib/posix/pthread_mutex.c | 2 +- scripts/gen_syscalls.py | 2 +- soc/arm/nordic_nrf/nrf51/soc.c | 2 +- soc/arm/nordic_nrf/nrf52/soc.c | 2 +- soc/arm/nordic_nrf/nrf91/soc.c | 2 +- soc/posix/inf_clock/soc.c | 19 ++- soc/riscv/openisa_rv32m1/soc.c | 6 +- soc/riscv/riscv-privilege/common/idle.c | 6 +- .../riscv-privilege/common/soc_common_irq.c | 8 +- soc/xtensa/esp32/esp32-mp.c | 4 +- soc/xtensa/esp32/soc.c | 4 +- subsys/logging/log_msg.c | 6 +- subsys/testsuite/ztest/include/arch/cpu.h | 8 +- subsys/testsuite/ztest/src/ztest.c | 4 +- tests/arch/arm/arm_interrupt/README.txt | 4 +- .../arm/arm_interrupt/src/arm_interrupt.c | 2 +- tests/arch/arm/arm_ramfunc/src/arm_ramfunc.c | 2 +- tests/arch/arm/arm_thread_swap/README.txt | 2 +- .../arm/arm_thread_swap/src/arm_thread_arch.c | 6 +- .../src/arm_zero_latency_irqs.c | 2 +- .../timing_info/src/msg_passing_bench.c | 20 +-- .../timing_info/src/semaphore_bench.c | 12 +- .../benchmarks/timing_info/src/thread_bench.c | 34 ++-- .../timing_info/src/userspace_bench.c | 4 +- .../benchmarks/timing_info/src/yield_bench.c | 4 +- tests/kernel/common/src/irq_offload.c | 12 +- .../float_disable/src/k_float_disable.c | 2 +- tests/kernel/gen_isr_table/src/main.c | 2 +- tests/kernel/interrupt/src/main.c | 2 +- tests/kernel/interrupt/src/nested_irq.c | 4 +- .../mem_protect/mem_protect/src/mem_domain.c | 4 +- tests/kernel/mem_protect/syscalls/src/main.c | 2 +- tests/kernel/mem_protect/userspace/src/main.c | 8 +- tests/kernel/mp/src/main.c | 4 +- tests/kernel/smp/src/main.c | 6 +- tests/kernel/spinlock/src/main.c | 2 +- 178 files changed, 912 insertions(+), 910 
deletions(-) diff --git a/arch/arc/core/arc_connect.c b/arch/arc/core/arc_connect.c index 7a673107d92..030f0e52b05 100644 --- a/arch/arc/core/arc_connect.c +++ b/arch/arc/core/arc_connect.c @@ -223,7 +223,7 @@ u64_t z_arc_connect_gfrc_read(void) * sub-components. For GFRC, HW allows simultaneous access to * counters. So an irq lock is enough. */ - key = z_arch_irq_lock(); + key = arch_irq_lock(); z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_LO, 0); low = z_arc_connect_cmd_readback(); @@ -231,7 +231,7 @@ u64_t z_arc_connect_gfrc_read(void) z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_HI, 0); high = z_arc_connect_cmd_readback(); - z_arch_irq_unlock(key); + arch_irq_unlock(key); return (((u64_t)high) << 32) | low; } diff --git a/arch/arc/core/arc_smp.c b/arch/arc/core/arc_smp.c index ed851927da9..9bf4b4ccb63 100644 --- a/arch/arc/core/arc_smp.c +++ b/arch/arc/core/arc_smp.c @@ -55,7 +55,7 @@ u64_t z_arc_smp_switch_in_isr(void) if (new_thread != old_thread) { _current_cpu->swap_ok = 0; ((struct k_thread *)new_thread)->base.cpu = - z_arch_curr_cpu()->id; + arch_curr_cpu()->id; _current = (struct k_thread *) new_thread; ret = new_thread | ((u64_t)(old_thread) << 32); } @@ -83,8 +83,8 @@ volatile u32_t arc_cpu_wake_flag; volatile _cpu_t *_curr_cpu[CONFIG_MP_NUM_CPUS]; /* Called from Zephyr initialization */ -void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz, - void (*fn)(int, void *), void *arg) +void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz, + void (*fn)(int, void *), void *arg) { _curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]); arc_cpu_init[cpu_num].fn = fn; @@ -109,14 +109,14 @@ void z_arc_slave_start(int cpu_num) z_irq_priority_set(IRQ_ICI, ARCV2_ICI_IRQ_PRIORITY, 0); irq_enable(IRQ_ICI); - /* call the function set by z_arch_start_cpu */ + /* call the function set by arch_start_cpu */ fn = arc_cpu_init[cpu_num].fn; fn(cpu_num, arc_cpu_init[cpu_num].arg); } /* arch implementation of sched_ipi */ -void z_arch_sched_ipi(void) +void arch_sched_ipi(void) { u32_t i; diff --git a/arch/arc/core/cpu_idle.S b/arch/arc/core/cpu_idle.S index bbd0dea5123..78428ac5712 100644 --- a/arch/arc/core/cpu_idle.S +++ b/arch/arc/core/cpu_idle.S @@ -17,8 +17,8 @@ #include #include -GTEXT(z_arch_cpu_idle) -GTEXT(z_arch_cpu_atomic_idle) +GTEXT(arch_cpu_idle) +GTEXT(arch_cpu_atomic_idle) GDATA(z_arc_cpu_sleep_mode) SECTION_VAR(BSS, z_arc_cpu_sleep_mode) @@ -33,7 +33,7 @@ SECTION_VAR(BSS, z_arc_cpu_sleep_mode) * void nanCpuIdle(void) */ -SECTION_FUNC(TEXT, z_arch_cpu_idle) +SECTION_FUNC(TEXT, arch_cpu_idle) #ifdef CONFIG_TRACING push_s blink @@ -52,9 +52,9 @@ SECTION_FUNC(TEXT, z_arch_cpu_idle) * * This function exits with interrupts restored to <key>.
* - * void z_arch_cpu_atomic_idle(unsigned int key) + * void arch_cpu_atomic_idle(unsigned int key) */ -SECTION_FUNC(TEXT, z_arch_cpu_atomic_idle) +SECTION_FUNC(TEXT, arch_cpu_atomic_idle) #ifdef CONFIG_TRACING push_s blink diff --git a/arch/arc/core/fatal.c b/arch/arc/core/fatal.c index 774b4291ed3..a6ad1edb7db 100644 --- a/arch/arc/core/fatal.c +++ b/arch/arc/core/fatal.c @@ -28,13 +28,13 @@ void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf) z_fatal_error(reason, esf); } -FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr) +FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr) { z_arc_fatal_error(K_ERR_KERNEL_OOPS, ssf_ptr); CODE_UNREACHABLE; } -FUNC_NORETURN void z_arch_system_halt(unsigned int reason) +FUNC_NORETURN void arch_system_halt(unsigned int reason) { ARG_UNUSED(reason); diff --git a/arch/arc/core/irq_manage.c b/arch/arc/core/irq_manage.c index 7a7877497b1..f69ce80bfc2 100644 --- a/arch/arc/core/irq_manage.c +++ b/arch/arc/core/irq_manage.c @@ -93,7 +93,7 @@ void z_arc_firq_stack_set(void) * @return N/A */ -void z_arch_irq_enable(unsigned int irq) +void arch_irq_enable(unsigned int irq) { unsigned int key = irq_lock(); @@ -110,7 +110,7 @@ void z_arch_irq_enable(unsigned int irq) * @return N/A */ -void z_arch_irq_disable(unsigned int irq) +void arch_irq_disable(unsigned int irq) { unsigned int key = irq_lock(); @@ -124,7 +124,7 @@ void z_arch_irq_disable(unsigned int irq) * @param irq IRQ line * @return interrupt enable state, true or false */ -int z_arch_irq_is_enabled(unsigned int irq) +int arch_irq_is_enabled(unsigned int irq) { return z_arc_v2_irq_unit_int_enabled(irq); } @@ -181,9 +181,9 @@ void z_irq_spurious(void *unused) } #ifdef CONFIG_DYNAMIC_INTERRUPTS -int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, - void (*routine)(void *parameter), void *parameter, - u32_t flags) +int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, + void (*routine)(void *parameter), void *parameter, + u32_t flags) { z_isr_install(irq, routine, parameter); z_irq_priority_set(irq, priority, flags); diff --git a/arch/arc/core/irq_offload.c b/arch/arc/core/irq_offload.c index f594536a7ce..74372c8b598 100644 --- a/arch/arc/core/irq_offload.c +++ b/arch/arc/core/irq_offload.c @@ -20,7 +20,7 @@ void z_irq_do_offload(void) offload_routine(offload_param); } -void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) +void arch_irq_offload(irq_offload_routine_t routine, void *parameter) { unsigned int key; diff --git a/arch/arc/core/isr_wrapper.S b/arch/arc/core/isr_wrapper.S index d4cca4eff85..ad035f808eb 100644 --- a/arch/arc/core/isr_wrapper.S +++ b/arch/arc/core/isr_wrapper.S @@ -68,7 +68,7 @@ The context switch code adopts this standard so that it is easier to follow: transition from outgoing thread to incoming thread Not loading _kernel into r0 allows loading _kernel without stomping on -the parameter in r0 in z_arch_switch(). +the parameter in r0 in arch_switch(). ARCv2 processors have two kinds of interrupts: fast (FIRQ) and regular. The @@ -168,7 +168,7 @@ From FIRQ: o to coop - The address of the returning instruction from z_arch_switch() is loaded + The address of the returning instruction from arch_switch() is loaded in ilink and the saved status32 in status32_p0. 
o to any irq diff --git a/arch/arc/core/mpu/arc_core_mpu.c b/arch/arc/core/mpu/arc_core_mpu.c index a644a81180d..db4c7d6f0be 100644 --- a/arch/arc/core/mpu/arc_core_mpu.c +++ b/arch/arc/core/mpu/arc_core_mpu.c @@ -27,7 +27,7 @@ void configure_mpu_thread(struct k_thread *thread) #if defined(CONFIG_USERSPACE) -int z_arch_mem_domain_max_partitions_get(void) +int arch_mem_domain_max_partitions_get(void) { return arc_core_mpu_get_max_domain_partition_regions(); } @@ -35,8 +35,8 @@ int z_arch_mem_domain_max_partitions_get(void) /* * Reset MPU region for a single memory partition */ -void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain, - u32_t partition_id) +void arch_mem_domain_partition_remove(struct k_mem_domain *domain, + u32_t partition_id) { if (_current->mem_domain_info.mem_domain != domain) { return; @@ -50,7 +50,7 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain, /* * Configure MPU memory domain */ -void z_arch_mem_domain_thread_add(struct k_thread *thread) +void arch_mem_domain_thread_add(struct k_thread *thread) { if (_current != thread) { return; @@ -64,7 +64,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread) /* * Destroy MPU regions for the mem domain */ -void z_arch_mem_domain_destroy(struct k_mem_domain *domain) +void arch_mem_domain_destroy(struct k_mem_domain *domain) { if (_current->mem_domain_info.mem_domain != domain) { return; @@ -75,25 +75,25 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain) arc_core_mpu_enable(); } -void z_arch_mem_domain_partition_add(struct k_mem_domain *domain, - u32_t partition_id) +void arch_mem_domain_partition_add(struct k_mem_domain *domain, + u32_t partition_id) { /* No-op on this architecture */ } -void z_arch_mem_domain_thread_remove(struct k_thread *thread) +void arch_mem_domain_thread_remove(struct k_thread *thread) { if (_current != thread) { return; } - z_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain); + arch_mem_domain_destroy(thread->mem_domain_info.mem_domain); } /* * Validate the given buffer is user accessible or not */ -int z_arch_buffer_validate(void *addr, size_t size, int write) +int arch_buffer_validate(void *addr, size_t size, int write) { return arc_core_mpu_buffer_validate(addr, size, write); } diff --git a/arch/arc/core/regular_irq.S b/arch/arc/core/regular_irq.S index 8c3de892b1c..7f577a8ceb2 100644 --- a/arch/arc/core/regular_irq.S +++ b/arch/arc/core/regular_irq.S @@ -64,7 +64,7 @@ PRE-CONTEXT-SWITCH STACK -------------------------------------- SP -> | Return address; PC (Program Counter), in fact value taken from - | BLINK register in z_arch_switch() + | BLINK register in arch_switch() -------------------------------------- | STATUS32 value, we explicitly save it here for later usage, read-on -------------------------------------- diff --git a/arch/arc/core/switch.S b/arch/arc/core/switch.S index e312564e2e1..544edb889ff 100644 --- a/arch/arc/core/switch.S +++ b/arch/arc/core/switch.S @@ -22,37 +22,37 @@ #include #include -GTEXT(z_arch_switch) +GTEXT(arch_switch) /** * * @brief Initiate a cooperative context switch * - * The z_arch_switch routine is invoked by various kernel services to effect - * a cooperative context switch. Prior to invoking z_arch_switch, the caller + * The arch_switch routine is invoked by various kernel services to effect + * a cooperative context switch. 
Prior to invoking arch_switch, the caller * disables interrupts via irq_lock() - * Given that z_arch_switch() is called to effect a cooperative context switch, + * Given that arch_switch() is called to effect a cooperative context switch, * the caller-saved integer registers are saved on the stack by the function - * call preamble to z_arch_switch. This creates a custom stack frame that will - * be popped when returning from z_arch_switch, but is not suitable for handling + * call preamble to arch_switch. This creates a custom stack frame that will + * be popped when returning from arch_switch, but is not suitable for handling * a return from an exception. Thus, the fact that the thread is pending because - * of a cooperative call to z_arch_switch() has to be recorded via the + * of a cooperative call to arch_switch() has to be recorded via the * _CAUSE_COOP code in the relinquish_cause of the thread's k_thread structure. * The _rirq_exit()/_firq_exit() code will take care of doing the right thing * to restore the thread status. * - * When z_arch_switch() is invoked, we know the decision to perform a context + * When arch_switch() is invoked, we know the decision to perform a context * switch or not has already been taken and a context switch must happen. * * * C function prototype: * - * void z_arch_switch(void *switch_to, void **switched_from); + * void arch_switch(void *switch_to, void **switched_from); * */ -SECTION_FUNC(TEXT, z_arch_switch) +SECTION_FUNC(TEXT, arch_switch) #ifdef CONFIG_EXECUTION_BENCHMARKING push_s r0 diff --git a/arch/arc/core/thread.c b/arch/arc/core/thread.c index e6758076628..bf4a1ca4236 100644 --- a/arch/arc/core/thread.c +++ b/arch/arc/core/thread.c @@ -58,10 +58,10 @@ struct init_stack_frame { * * @return N/A */ -void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, - size_t stackSize, k_thread_entry_t pEntry, - void *parameter1, void *parameter2, void *parameter3, - int priority, unsigned int options) +void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, + size_t stackSize, k_thread_entry_t pEntry, + void *parameter1, void *parameter2, void *parameter3, + int priority, unsigned int options) { char *pStackMem = Z_THREAD_STACK_BUFFER(stack); Z_ASSERT_VALID_PRIO(priority, pEntry); @@ -92,7 +92,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, (u32_t)(stackEnd + STACK_GUARD_SIZE); stackAdjEnd = (char *)STACK_ROUND_DOWN(stackEnd + - Z_ARCH_THREAD_STACK_RESERVED); + ARCH_THREAD_STACK_RESERVED); /* reserve 4 bytes for the start of user sp */ stackAdjEnd -= 4; @@ -122,7 +122,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, */ pStackMem += STACK_GUARD_SIZE; stackAdjSize = stackAdjSize + CONFIG_PRIVILEGED_STACK_SIZE; - stackEnd += Z_ARCH_THREAD_STACK_RESERVED; + stackEnd += ARCH_THREAD_STACK_RESERVED; thread->arch.priv_stack_start = 0; @@ -161,7 +161,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, */ pInitCtx->status32 |= _ARC_V2_STATUS32_US; #else /* For no USERSPACE feature */ - pStackMem += Z_ARCH_THREAD_STACK_RESERVED; + pStackMem += ARCH_THREAD_STACK_RESERVED; stackEnd = pStackMem + stackSize; z_new_thread_init(thread, pStackMem, stackSize, priority, options); @@ -199,7 +199,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, thread->arch.k_stack_top = (u32_t)(stackEnd + STACK_GUARD_SIZE); thread->arch.k_stack_base = (u32_t) - (stackEnd + Z_ARCH_THREAD_STACK_RESERVED); + (stackEnd + ARCH_THREAD_STACK_RESERVED); } 
else { thread->arch.k_stack_top = (u32_t)pStackMem; thread->arch.k_stack_base = (u32_t)stackEnd; @@ -227,8 +227,8 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, #ifdef CONFIG_USERSPACE -FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry, - void *p1, void *p2, void *p3) +FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, + void *p1, void *p2, void *p3) { /* @@ -270,7 +270,7 @@ FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry, #endif #if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING) -int z_arch_float_disable(struct k_thread *thread) +int arch_float_disable(struct k_thread *thread) { unsigned int key; @@ -287,7 +287,7 @@ int z_arch_float_disable(struct k_thread *thread) } -int z_arch_float_enable(struct k_thread *thread) +int arch_float_enable(struct k_thread *thread) { unsigned int key; diff --git a/arch/arc/core/thread_entry_wrapper.S b/arch/arc/core/thread_entry_wrapper.S index 47b15a87180..f0e79d9010f 100644 --- a/arch/arc/core/thread_entry_wrapper.S +++ b/arch/arc/core/thread_entry_wrapper.S @@ -22,7 +22,7 @@ GTEXT(z_thread_entry_wrapper1) * @brief Wrapper for z_thread_entry * * The routine pops parameters for the z_thread_entry from stack frame, prepared - * by the z_arch_new_thread() routine. + * by the arch_new_thread() routine. * * @return N/A */ diff --git a/arch/arc/core/userspace.S b/arch/arc/core/userspace.S index 93cd7c7971d..20169536708 100644 --- a/arch/arc/core/userspace.S +++ b/arch/arc/core/userspace.S @@ -48,7 +48,7 @@ GTEXT(z_arc_userspace_enter) GTEXT(_arc_do_syscall) GTEXT(z_user_thread_entry_wrapper) -GTEXT(z_arch_user_string_nlen) +GTEXT(arch_user_string_nlen) GTEXT(z_arc_user_string_nlen_fault_start) GTEXT(z_arc_user_string_nlen_fault_end) GTEXT(z_arc_user_string_nlen_fixup) @@ -248,9 +248,9 @@ SECTION_FUNC(TEXT, _arc_do_syscall) rtie /* - * size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) + * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) */ -SECTION_FUNC(TEXT, z_arch_user_string_nlen) +SECTION_FUNC(TEXT, arch_user_string_nlen) /* int err; */ sub_s sp,sp,0x4 diff --git a/arch/arc/include/kernel_arch_func.h b/arch/arc/include/kernel_arch_func.h index 9883b538060..c0b235cac4f 100644 --- a/arch/arc/include/kernel_arch_func.h +++ b/arch/arc/include/kernel_arch_func.h @@ -33,7 +33,7 @@ extern "C" { #endif -static ALWAYS_INLINE void z_arch_kernel_init(void) +static ALWAYS_INLINE void arch_kernel_init(void) { z_irq_setup(); _current_cpu->irq_stack = @@ -55,7 +55,7 @@ static ALWAYS_INLINE int Z_INTERRUPT_CAUSE(void) return irq_num; } -static inline bool z_arch_is_in_isr(void) +static inline bool arch_is_in_isr(void) { return z_arc_v2_irq_unit_is_in_isr(); } @@ -67,10 +67,10 @@ extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3, u32_t stack, u32_t size); -extern void z_arch_switch(void *switch_to, void **switched_from); +extern void arch_switch(void *switch_to, void **switched_from); extern void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf); -extern void z_arch_sched_ipi(void); +extern void arch_sched_ipi(void); #ifdef __cplusplus } diff --git a/arch/arc/include/swap_macros.h b/arch/arc/include/swap_macros.h index f92ac12f71c..4cec10855cb 100644 --- a/arch/arc/include/swap_macros.h +++ b/arch/arc/include/swap_macros.h @@ -258,7 +258,7 @@ * The pc and status32 values will still be on the stack. 
We cannot * pop them yet because the callers of _pop_irq_stack_frame must reload * status32 differently depending on the execution context they are - * running in (z_arch_switch(), firq or exception). + * running in (arch_switch(), firq or exception). */ add_s sp, sp, ___isf_t_SIZEOF diff --git a/arch/arm/core/cortex_m/fault.c b/arch/arm/core/cortex_m/fault.c index 21bfa04a3fe..117eb0257c4 100644 --- a/arch/arm/core/cortex_m/fault.c +++ b/arch/arm/core/cortex_m/fault.c @@ -607,7 +607,7 @@ static u32_t hard_fault(z_arch_esf_t *esf, bool *recoverable) /* Workaround for #18712: * HardFault may be due to escalation, as a result of * an SVC instruction that could not be executed; this - * can occur if Z_ARCH_EXCEPT() is called by an ISR, + * can occur if ARCH_EXCEPT() is called by an ISR, * which executes at priority equal to the SVC handler * priority. We handle the case of Kernel OOPS and Stack * Fail here. @@ -623,7 +623,7 @@ static u32_t hard_fault(z_arch_esf_t *esf, bool *recoverable) if (((fault_insn & 0xff00) == _SVC_OPCODE) && ((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) { - PR_EXC("Z_ARCH_EXCEPT with reason %x\n", esf->basic.r0); + PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0); reason = esf->basic.r0; } #undef _SVC_OPCODE @@ -935,7 +935,7 @@ void z_arm_fault(u32_t msp, u32_t psp, u32_t exc_return) z_arch_esf_t esf_copy; /* Force unlock interrupts */ - z_arch_irq_unlock(0); + arch_irq_unlock(0); /* Retrieve the Exception Stack Frame (ESF) to be supplied * as argument to the remainder of the fault handling process. diff --git a/arch/arm/core/cortex_m/mpu/arm_core_mpu.c b/arch/arm/core/cortex_m/mpu/arm_core_mpu.c index 6496adfa600..119113b4b0c 100644 --- a/arch/arm/core/cortex_m/mpu/arm_core_mpu.c +++ b/arch/arm/core/cortex_m/mpu/arm_core_mpu.c @@ -259,7 +259,7 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread) } #if defined(CONFIG_USERSPACE) -int z_arch_mem_domain_max_partitions_get(void) +int arch_mem_domain_max_partitions_get(void) { int available_regions = arm_core_mpu_get_max_available_dyn_regions(); @@ -274,7 +274,7 @@ int z_arch_mem_domain_max_partitions_get(void) return ARM_CORE_MPU_MAX_DOMAIN_PARTITIONS_GET(available_regions); } -void z_arch_mem_domain_thread_add(struct k_thread *thread) +void arch_mem_domain_thread_add(struct k_thread *thread) { if (_current != thread) { return; @@ -287,7 +287,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread) z_arm_configure_dynamic_mpu_regions(thread); } -void z_arch_mem_domain_destroy(struct k_mem_domain *domain) +void arch_mem_domain_destroy(struct k_mem_domain *domain) { /* This function will reset the access permission configuration * of the active partitions of the memory domain. @@ -317,8 +317,8 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain) } } -void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain, - u32_t partition_id) +void arch_mem_domain_partition_remove(struct k_mem_domain *domain, + u32_t partition_id) { /* Request to remove a partition from a memory domain. 
* This resets the access permissions of the partition @@ -334,22 +334,22 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain, &domain->partitions[partition_id], &reset_attr); } -void z_arch_mem_domain_partition_add(struct k_mem_domain *domain, - u32_t partition_id) +void arch_mem_domain_partition_add(struct k_mem_domain *domain, + u32_t partition_id) { /* No-op on this architecture */ } -void z_arch_mem_domain_thread_remove(struct k_thread *thread) +void arch_mem_domain_thread_remove(struct k_thread *thread) { if (_current != thread) { return; } - z_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain); + arch_mem_domain_destroy(thread->mem_domain_info.mem_domain); } -int z_arch_buffer_validate(void *addr, size_t size, int write) +int arch_buffer_validate(void *addr, size_t size, int write) { return arm_core_mpu_buffer_validate(addr, size, write); } diff --git a/arch/arm/core/cpu_idle.S b/arch/arm/core/cpu_idle.S index 172dd4867d4..45aa67a2412 100644 --- a/arch/arm/core/cpu_idle.S +++ b/arch/arm/core/cpu_idle.S @@ -16,8 +16,8 @@ _ASM_FILE_PROLOGUE GTEXT(z_arm_cpu_idle_init) -GTEXT(z_arch_cpu_idle) -GTEXT(z_arch_cpu_atomic_idle) +GTEXT(arch_cpu_idle) +GTEXT(arch_cpu_atomic_idle) #if defined(CONFIG_CPU_CORTEX_M) #define _SCB_SCR 0xE000ED10 @@ -32,7 +32,7 @@ GTEXT(z_arch_cpu_atomic_idle) * * @brief Initialization of CPU idle * - * Only called by z_arch_kernel_init(). Sets SEVONPEND bit once for the system's + * Only called by arch_kernel_init(). Sets SEVONPEND bit once for the system's * duration. * * @return N/A @@ -50,7 +50,7 @@ SECTION_FUNC(TEXT, z_arm_cpu_idle_init) #endif bx lr -SECTION_FUNC(TEXT, z_arch_cpu_idle) +SECTION_FUNC(TEXT, arch_cpu_idle) #ifdef CONFIG_TRACING push {r0, lr} bl sys_trace_idle @@ -77,7 +77,7 @@ SECTION_FUNC(TEXT, z_arch_cpu_idle) bx lr -SECTION_FUNC(TEXT, z_arch_cpu_atomic_idle) +SECTION_FUNC(TEXT, arch_cpu_atomic_idle) #ifdef CONFIG_TRACING push {r0, lr} bl sys_trace_idle diff --git a/arch/arm/core/fatal.c b/arch/arm/core/fatal.c index 256d5cebd61..7d98367c8a6 100644 --- a/arch/arm/core/fatal.c +++ b/arch/arm/core/fatal.c @@ -86,7 +86,7 @@ void z_do_kernel_oops(const z_arch_esf_t *esf) z_arm_fatal_error(reason, esf); } -FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr) +FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr) { u32_t *ssf_contents = ssf_ptr; z_arch_esf_t oops_esf = { 0 }; diff --git a/arch/arm/core/irq_manage.c b/arch/arm/core/irq_manage.c index 6c9c8334302..cb98f344c69 100644 --- a/arch/arm/core/irq_manage.c +++ b/arch/arm/core/irq_manage.c @@ -36,17 +36,17 @@ extern void z_arm_reserved(void); #define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG) #define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG) -void z_arch_irq_enable(unsigned int irq) +void arch_irq_enable(unsigned int irq) { NVIC_EnableIRQ((IRQn_Type)irq); } -void z_arch_irq_disable(unsigned int irq) +void arch_irq_disable(unsigned int irq) { NVIC_DisableIRQ((IRQn_Type)irq); } -int z_arch_irq_is_enabled(unsigned int irq) +int arch_irq_is_enabled(unsigned int irq) { return NVIC->ISER[REG_FROM_IRQ(irq)] & BIT(BIT_FROM_IRQ(irq)); } @@ -97,21 +97,21 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags) } #elif defined(CONFIG_CPU_CORTEX_R) -void z_arch_irq_enable(unsigned int irq) +void arch_irq_enable(unsigned int irq) { struct device *dev = _sw_isr_table[0].arg; irq_enable_next_level(dev, (irq >> 8) - 1); } -void z_arch_irq_disable(unsigned int irq) +void arch_irq_disable(unsigned int irq) { struct device *dev = _sw_isr_table[0].arg; 
irq_disable_next_level(dev, (irq >> 8) - 1); } -int z_arch_irq_is_enabled(unsigned int irq) +int arch_irq_is_enabled(unsigned int irq) { struct device *dev = _sw_isr_table[0].arg; @@ -206,7 +206,7 @@ void _arch_isr_direct_pm(void) } #endif -void z_arch_isr_direct_header(void) +void arch_isr_direct_header(void) { sys_trace_isr_enter(); } @@ -268,9 +268,9 @@ int irq_target_state_is_secure(unsigned int irq) #endif /* CONFIG_ARM_SECURE_FIRMWARE */ #ifdef CONFIG_DYNAMIC_INTERRUPTS -int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, - void (*routine)(void *parameter), void *parameter, - u32_t flags) +int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, + void (*routine)(void *parameter), void *parameter, + u32_t flags) { z_isr_install(irq, routine, parameter); z_arm_irq_priority_set(irq, priority, flags); diff --git a/arch/arm/core/irq_offload.c b/arch/arm/core/irq_offload.c index 41ea8238af4..df6011623ee 100644 --- a/arch/arm/core/irq_offload.c +++ b/arch/arm/core/irq_offload.c @@ -20,7 +20,7 @@ void z_irq_do_offload(void) { offload_routine(offload_param); } -void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) +void arch_irq_offload(irq_offload_routine_t routine, void *parameter) { #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && defined(CONFIG_ASSERT) /* ARMv6-M/ARMv8-M Baseline HardFault if you make a SVC call with diff --git a/arch/arm/core/swap.c b/arch/arm/core/swap.c index 79cdea2bbfb..3811d819108 100644 --- a/arch/arm/core/swap.c +++ b/arch/arm/core/swap.c @@ -15,7 +15,7 @@ extern const int _k_neg_eagain; /* The 'key' actually represents the BASEPRI register * prior to disabling interrupts via the BASEPRI mechanism. * - * z_arch_swap() itself does not do much. + * arch_swap() itself does not do much. * * It simply stores the intlock key (the BASEPRI value) parameter into * current->basepri, and then triggers a PendSV exception, which does @@ -25,7 +25,7 @@ extern const int _k_neg_eagain; * z_arm_pendsv all come from handling an interrupt, which means we know the * interrupts were not locked: in that case the BASEPRI value is 0. * - * Given that z_arch_swap() is called to effect a cooperative context switch, + * Given that arch_swap() is called to effect a cooperative context switch, * only the caller-saved integer registers need to be saved in the context of * the outgoing thread. This is all performed by the hardware, which stores it in * its exception stack frame, created when handling the z_arm_pendsv exception. * * On ARMv6-M, the intlock key is represented by the PRIMASK register, * as BASEPRI is not available.
*/ -int z_arch_swap(unsigned int key) +int arch_swap(unsigned int key) { #ifdef CONFIG_EXECUTION_BENCHMARKING read_timer_start_of_swap(); diff --git a/arch/arm/core/swap_helper.S b/arch/arm/core/swap_helper.S index 44ee723cc49..ab79b94801e 100644 --- a/arch/arm/core/swap_helper.S +++ b/arch/arm/core/swap_helper.S @@ -125,7 +125,7 @@ out_fp_endif: isb /* Make the effect of disabling interrupts be realized immediately */ #elif defined(CONFIG_ARMV7_R) /* - * Interrupts are still disabled from z_arch_swap so empty clause + * Interrupts are still disabled from arch_swap so empty clause * here to avoid the preprocessor error below */ #else diff --git a/arch/arm/core/thread.c b/arch/arm/core/thread.c index 42672f77eab..2109f82e8b6 100644 --- a/arch/arm/core/thread.c +++ b/arch/arm/core/thread.c @@ -32,10 +32,10 @@ extern u8_t *z_priv_stack_find(void *obj); * addresses, we have to unset it manually before storing it in the 'pc' field * of the ESF. */ -void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, - size_t stackSize, k_thread_entry_t pEntry, - void *parameter1, void *parameter2, void *parameter3, - int priority, unsigned int options) +void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, + size_t stackSize, k_thread_entry_t pEntry, + void *parameter1, void *parameter2, void *parameter3, + int priority, unsigned int options) { char *pStackMem = Z_THREAD_STACK_BUFFER(stack); char *stackEnd; @@ -112,7 +112,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, #if defined(CONFIG_USERSPACE) if ((options & K_USER) != 0) { - pInitCtx->basic.pc = (u32_t)z_arch_user_mode_enter; + pInitCtx->basic.pc = (u32_t)arch_user_mode_enter; } else { pInitCtx->basic.pc = (u32_t)z_thread_entry; } @@ -157,8 +157,8 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, #ifdef CONFIG_USERSPACE -FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry, - void *p1, void *p2, void *p3) +FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, + void *p1, void *p2, void *p3) { /* Set up privileged stack before entering user mode */ @@ -328,13 +328,13 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr, const u32_t psp) #endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */ #if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING) -int z_arch_float_disable(struct k_thread *thread) +int arch_float_disable(struct k_thread *thread) { if (thread != _current) { return -EINVAL; } - if (z_arch_is_in_isr()) { + if (arch_is_in_isr()) { return -EINVAL; } @@ -345,26 +345,26 @@ int z_arch_float_disable(struct k_thread *thread) * fault to take an outdated thread user_options flag into * account. */ - int key = z_arch_irq_lock(); + int key = arch_irq_lock(); thread->base.user_options &= ~K_FP_REGS; __set_CONTROL(__get_CONTROL() & (~CONTROL_FPCA_Msk)); /* No need to add an ISB barrier after setting the CONTROL - * register; z_arch_irq_unlock() already adds one. + * register; arch_irq_unlock() already adds one. 
*/ - z_arch_irq_unlock(key); + arch_irq_unlock(key); return 0; } #endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */ -void z_arch_switch_to_main_thread(struct k_thread *main_thread, - k_thread_stack_t *main_stack, - size_t main_stack_size, - k_thread_entry_t _main) +void arch_switch_to_main_thread(struct k_thread *main_thread, + k_thread_stack_t *main_stack, + size_t main_stack_size, + k_thread_entry_t _main) { #if defined(CONFIG_FLOAT) /* Initialize the Floating Point Status and Control Register when in diff --git a/arch/arm/core/userspace.S b/arch/arm/core/userspace.S index 64e519546e2..39069914cac 100644 --- a/arch/arm/core/userspace.S +++ b/arch/arm/core/userspace.S @@ -16,7 +16,7 @@ _ASM_FILE_PROLOGUE GTEXT(z_arm_userspace_enter) GTEXT(z_arm_do_syscall) -GTEXT(z_arch_user_string_nlen) +GTEXT(arch_user_string_nlen) GTEXT(z_arm_user_string_nlen_fault_start) GTEXT(z_arm_user_string_nlen_fault_end) GTEXT(z_arm_user_string_nlen_fixup) @@ -497,9 +497,9 @@ dispatch_syscall: /* - * size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) + * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) */ -SECTION_FUNC(TEXT, z_arch_user_string_nlen) +SECTION_FUNC(TEXT, arch_user_string_nlen) push {r0, r1, r2, r4, r5, lr} /* sp+4 is error value, init to -1 */ diff --git a/arch/arm/include/cortex_m/exc.h b/arch/arm/include/cortex_m/exc.h index e4f57aab646..929c2816ff7 100644 --- a/arch/arm/include/cortex_m/exc.h +++ b/arch/arm/include/cortex_m/exc.h @@ -43,7 +43,7 @@ extern volatile irq_offload_routine_t offload_routine; * The current executing vector is found in the IPSR register. All * IRQs and system exceptions are considered as interrupt context. */ -static ALWAYS_INLINE bool z_arch_is_in_isr(void) +static ALWAYS_INLINE bool arch_is_in_isr(void) { return (__get_IPSR()) ? (true) : (false); } @@ -68,8 +68,7 @@ static ALWAYS_INLINE bool z_arch_is_in_isr(void) * @return true if execution state was in handler mode, before * the current exception occurred, otherwise false. */ -static ALWAYS_INLINE bool z_arch_is_in_nested_exception( - const z_arch_esf_t *esf) +static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf) { return (esf->basic.xpsr & IPSR_ISR_Msk) ? 
(true) : (false); } diff --git a/arch/arm/include/cortex_r/exc.h b/arch/arm/include/cortex_r/exc.h index 50b42c8d183..05b0235204b 100644 --- a/arch/arm/include/cortex_r/exc.h +++ b/arch/arm/include/cortex_r/exc.h @@ -33,7 +33,7 @@ extern volatile irq_offload_routine_t offload_routine; #endif /* Check the CPSR mode bits to see if we are in IRQ or FIQ mode */ -static ALWAYS_INLINE bool z_arch_is_in_isr(void) +static ALWAYS_INLINE bool arch_is_in_isr(void) { unsigned int status; diff --git a/arch/arm/include/kernel_arch_func.h b/arch/arm/include/kernel_arch_func.h index fc7476d13ab..402a0d7d50b 100644 --- a/arch/arm/include/kernel_arch_func.h +++ b/arch/arm/include/kernel_arch_func.h @@ -34,7 +34,7 @@ extern void z_arm_configure_static_mpu_regions(void); extern void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread); #endif /* CONFIG_ARM_MPU */ -static ALWAYS_INLINE void z_arch_kernel_init(void) +static ALWAYS_INLINE void arch_kernel_init(void) { z_arm_interrupt_stack_setup(); z_arm_exc_setup(); @@ -44,7 +44,7 @@ static ALWAYS_INLINE void z_arch_kernel_init(void) } static ALWAYS_INLINE void -z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) +arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { thread->arch.swap_return_value = value; } diff --git a/arch/common/sw_isr_common.c b/arch/common/sw_isr_common.c index 79be8657328..25d14ce903a 100644 --- a/arch/common/sw_isr_common.c +++ b/arch/common/sw_isr_common.c @@ -28,11 +28,11 @@ void z_isr_install(unsigned int irq, void (*routine)(void *), void *param) /* Some architectures don't/can't interpret flags or priority and have * no more processing to do than this. Provide a generic fallback. */ -int __weak z_arch_irq_connect_dynamic(unsigned int irq, - unsigned int priority, - void (*routine)(void *), - void *parameter, - u32_t flags) +int __weak arch_irq_connect_dynamic(unsigned int irq, + unsigned int priority, + void (*routine)(void *), + void *parameter, + u32_t flags) { ARG_UNUSED(flags); ARG_UNUSED(priority); diff --git a/arch/common/timing_info_bench.c b/arch/common/timing_info_bench.c index 45fb26e3227..1c89b87f503 100644 --- a/arch/common/timing_info_bench.c +++ b/arch/common/timing_info_bench.c @@ -6,18 +6,18 @@ #include #include -u64_t z_arch_timing_swap_start; -u64_t z_arch_timing_swap_end; -u64_t z_arch_timing_irq_start; -u64_t z_arch_timing_irq_end; -u64_t z_arch_timing_tick_start; -u64_t z_arch_timing_tick_end; -u64_t z_arch_timing_enter_user_mode_end; +u64_t arch_timing_swap_start; +u64_t arch_timing_swap_end; +u64_t arch_timing_irq_start; +u64_t arch_timing_irq_end; +u64_t arch_timing_tick_start; +u64_t arch_timing_tick_end; +u64_t arch_timing_enter_user_mode_end; /* location of the time stamps*/ -u32_t z_arch_timing_value_swap_end; -u64_t z_arch_timing_value_swap_common; -u64_t z_arch_timing_value_swap_temp; +u32_t arch_timing_value_swap_end; +u64_t arch_timing_value_swap_common; +u64_t arch_timing_value_swap_temp; #ifdef CONFIG_NRF_RTC_TIMER #include @@ -79,18 +79,19 @@ u64_t z_arch_timing_value_swap_temp; void read_timer_start_of_swap(void) { - if (z_arch_timing_value_swap_end == 1U) { + if (arch_timing_value_swap_end == 1U) { TIMING_INFO_PRE_READ(); - z_arch_timing_swap_start = (u32_t) TIMING_INFO_OS_GET_TIME(); + arch_timing_swap_start = (u32_t) TIMING_INFO_OS_GET_TIME(); } } void read_timer_end_of_swap(void) { - if (z_arch_timing_value_swap_end == 1U) { + if (arch_timing_value_swap_end == 1U) { TIMING_INFO_PRE_READ(); - z_arch_timing_value_swap_end = 2U; - 
z_arch_timing_value_swap_common = (u64_t)TIMING_INFO_OS_GET_TIME(); + arch_timing_value_swap_end = 2U; + arch_timing_value_swap_common = + (u64_t)TIMING_INFO_OS_GET_TIME(); } } @@ -100,29 +101,29 @@ void read_timer_end_of_swap(void) void read_timer_start_of_isr(void) { TIMING_INFO_PRE_READ(); - z_arch_timing_irq_start = (u32_t) TIMING_INFO_GET_TIMER_VALUE(); + arch_timing_irq_start = (u32_t) TIMING_INFO_GET_TIMER_VALUE(); } void read_timer_end_of_isr(void) { TIMING_INFO_PRE_READ(); - z_arch_timing_irq_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE(); + arch_timing_irq_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE(); } void read_timer_start_of_tick_handler(void) { TIMING_INFO_PRE_READ(); - z_arch_timing_tick_start = (u32_t)TIMING_INFO_GET_TIMER_VALUE(); + arch_timing_tick_start = (u32_t)TIMING_INFO_GET_TIMER_VALUE(); } void read_timer_end_of_tick_handler(void) { TIMING_INFO_PRE_READ(); - z_arch_timing_tick_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE(); + arch_timing_tick_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE(); } void read_timer_end_of_userspace_enter(void) { TIMING_INFO_PRE_READ(); - z_arch_timing_enter_user_mode_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE(); + arch_timing_enter_user_mode_end = (u32_t)TIMING_INFO_GET_TIMER_VALUE(); } diff --git a/arch/nios2/core/cpu_idle.c b/arch/nios2/core/cpu_idle.c index 251c405fbdb..e98cecffc95 100644 --- a/arch/nios2/core/cpu_idle.c +++ b/arch/nios2/core/cpu_idle.c @@ -7,7 +7,7 @@ #include #include -void z_arch_cpu_idle(void) +void arch_cpu_idle(void) { /* Do nothing but unconditionally unlock interrupts and return to the * caller. This CPU does not have any kind of power saving instruction. @@ -15,7 +15,7 @@ void z_arch_cpu_idle(void) irq_unlock(NIOS2_STATUS_PIE_MSK); } -void z_arch_cpu_atomic_idle(unsigned int key) +void arch_cpu_atomic_idle(unsigned int key) { /* Do nothing but restore IRQ state. This CPU does not have any * kind of power saving instruction. diff --git a/arch/nios2/core/exception.S b/arch/nios2/core/exception.S index 6090753ba6e..1a78e1cec6b 100644 --- a/arch/nios2/core/exception.S +++ b/arch/nios2/core/exception.S @@ -13,7 +13,7 @@ GTEXT(_exception) /* import */ GTEXT(_Fault) -GTEXT(z_arch_swap) +GTEXT(arch_swap) #ifdef CONFIG_IRQ_OFFLOAD GTEXT(z_irq_do_offload) GTEXT(_offload_routine) @@ -127,7 +127,7 @@ on_irq_stack: /* * A context reschedule is required: keep the volatile registers of * the interrupted thread on the context's stack. Utilize - * the existing z_arch_swap() primitive to save the remaining + * the existing arch_swap() primitive to save the remaining * thread's registers (including floating point) and perform * a switch to the new thread. 
*/ @@ -144,7 +144,7 @@ on_irq_stack: */ mov r4, et - call z_arch_swap + call arch_swap jmpi _exception_exit #else jmpi no_reschedule diff --git a/arch/nios2/core/fatal.c b/arch/nios2/core/fatal.c index 6438a4f2278..eef778eef13 100644 --- a/arch/nios2/core/fatal.c +++ b/arch/nios2/core/fatal.c @@ -132,7 +132,7 @@ FUNC_NORETURN void _Fault(const z_arch_esf_t *esf) } #ifdef ALT_CPU_HAS_DEBUG_STUB -FUNC_NORETURN void z_arch_system_halt(unsigned int reason) +FUNC_NORETURN void arch_system_halt(unsigned int reason) { ARG_UNUSED(reason); diff --git a/arch/nios2/core/irq_manage.c b/arch/nios2/core/irq_manage.c index c1ee42f0ba6..cfffba82d39 100644 --- a/arch/nios2/core/irq_manage.c +++ b/arch/nios2/core/irq_manage.c @@ -31,7 +31,7 @@ FUNC_NORETURN void z_irq_spurious(void *unused) } -void z_arch_irq_enable(unsigned int irq) +void arch_irq_enable(unsigned int irq) { u32_t ienable; unsigned int key; @@ -47,7 +47,7 @@ void z_arch_irq_enable(unsigned int irq) -void z_arch_irq_disable(unsigned int irq) +void arch_irq_disable(unsigned int irq) { u32_t ienable; unsigned int key; @@ -109,9 +109,9 @@ void _enter_irq(u32_t ipending) } #ifdef CONFIG_DYNAMIC_INTERRUPTS -int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, - void (*routine)(void *parameter), void *parameter, - u32_t flags) +int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, + void (*routine)(void *parameter), void *parameter, + u32_t flags) { ARG_UNUSED(flags); ARG_UNUSED(priority); diff --git a/arch/nios2/core/irq_offload.c b/arch/nios2/core/irq_offload.c index 97a3169b17a..cc09beca315 100644 --- a/arch/nios2/core/irq_offload.c +++ b/arch/nios2/core/irq_offload.c @@ -29,7 +29,7 @@ void z_irq_do_offload(void) tmp((void *)offload_param); } -void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) +void arch_irq_offload(irq_offload_routine_t routine, void *parameter) { unsigned int key; diff --git a/arch/nios2/core/swap.S b/arch/nios2/core/swap.S index c4f2ccf1171..e26acfd96c7 100644 --- a/arch/nios2/core/swap.S +++ b/arch/nios2/core/swap.S @@ -9,18 +9,18 @@ #include /* exports */ -GTEXT(z_arch_swap) +GTEXT(arch_swap) GTEXT(z_thread_entry_wrapper) /* imports */ GTEXT(sys_trace_thread_switched_in) GTEXT(_k_neg_eagain) -/* unsigned int z_arch_swap(unsigned int key) +/* unsigned int arch_swap(unsigned int key) * * Always called with interrupts locked */ -SECTION_FUNC(exception.other, z_arch_swap) +SECTION_FUNC(exception.other, arch_swap) #ifdef CONFIG_EXECUTION_BENCHMARKING /* Get a reference to _kernel in r10 */ @@ -57,7 +57,7 @@ SECTION_FUNC(exception.other, z_arch_swap) ldw r11, _kernel_offset_to_current(r10) /* Store all the callee saved registers. We either got here via - * an exception or from a cooperative invocation of z_arch_swap() from C + * an exception or from a cooperative invocation of arch_swap() from C * domain, so all the caller-saved registers have already been * saved by the exception asm or the calling C code already. */ @@ -115,14 +115,14 @@ SECTION_FUNC(exception.other, z_arch_swap) ldw sp, _thread_offset_to_sp(r2) /* We need to irq_unlock(current->coopReg.key); - * key was supplied as argument to z_arch_swap(). Fetch it. + * key was supplied as argument to arch_swap(). Fetch it. */ ldw r3, _thread_offset_to_key(r2) /* * Load return value into r2 (return value register). -EAGAIN unless - * someone previously called z_arch_thread_return_value_set(). Do this before - * we potentially unlock interrupts. + * someone previously called arch_thread_return_value_set(). 
Do this + * before we potentially unlock interrupts. */ ldw r2, _thread_offset_to_retval(r2) diff --git a/arch/nios2/core/thread.c b/arch/nios2/core/thread.c index 8ea1337ed4c..07363c5d532 100644 --- a/arch/nios2/core/thread.c +++ b/arch/nios2/core/thread.c @@ -28,10 +28,10 @@ struct init_stack_frame { }; -void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, - size_t stack_size, k_thread_entry_t thread_func, - void *arg1, void *arg2, void *arg3, - int priority, unsigned int options) +void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, + size_t stack_size, k_thread_entry_t thread_func, + void *arg1, void *arg2, void *arg3, + int priority, unsigned int options) { char *stack_memory = Z_THREAD_STACK_BUFFER(stack); Z_ASSERT_VALID_PRIO(priority, thread_func); diff --git a/arch/nios2/include/kernel_arch_func.h b/arch/nios2/include/kernel_arch_func.h index 84ab183b5a0..735bb7e3020 100644 --- a/arch/nios2/include/kernel_arch_func.h +++ b/arch/nios2/include/kernel_arch_func.h @@ -28,14 +28,14 @@ extern "C" { #ifndef _ASMLANGUAGE -static ALWAYS_INLINE void z_arch_kernel_init(void) +static ALWAYS_INLINE void arch_kernel_init(void) { _kernel.irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE; } static ALWAYS_INLINE void -z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) +arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { thread->callee_saved.retval = value; } @@ -43,7 +43,7 @@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason, const z_arch_esf_t *esf); -static inline bool z_arch_is_in_isr(void) +static inline bool arch_is_in_isr(void) { return _kernel.nested != 0U; } diff --git a/arch/posix/core/cpuhalt.c b/arch/posix/core/cpuhalt.c index 57356ba97fb..adae4182ac9 100644 --- a/arch/posix/core/cpuhalt.c +++ b/arch/posix/core/cpuhalt.c @@ -10,11 +10,11 @@ * This module provides: * * An implementation of the architecture-specific - * z_arch_cpu_idle() primitive required by the kernel idle loop component. + * arch_cpu_idle() primitive required by the kernel idle loop component. * It can be called within an implementation of _sys_power_save_idle(), * which is provided for the kernel by the platform. * - * An implementation of z_arch_cpu_atomic_idle(), which + * An implementation of arch_cpu_atomic_idle(), which * atomically re-enables interrupts and enters low power mode. 
* * A weak stub for sys_arch_reboot(), which does nothing @@ -24,14 +24,14 @@ #include #include -void z_arch_cpu_idle(void) +void arch_cpu_idle(void) { sys_trace_idle(); posix_irq_full_unlock(); posix_halt_cpu(); } -void z_arch_cpu_atomic_idle(unsigned int key) +void arch_cpu_atomic_idle(unsigned int key) { sys_trace_idle(); posix_atomic_halt_cpu(key); } diff --git a/arch/posix/core/fatal.c b/arch/posix/core/fatal.c index 6fa8df680dd..7d942f6b9ce 100644 --- a/arch/posix/core/fatal.c +++ b/arch/posix/core/fatal.c @@ -13,7 +13,7 @@ #include #include -FUNC_NORETURN void z_arch_system_halt(unsigned int reason) +FUNC_NORETURN void arch_system_halt(unsigned int reason) { ARG_UNUSED(reason); diff --git a/arch/posix/core/irq.c b/arch/posix/core/irq.c index 5fffad7a70d..0a4c00f6668 100644 --- a/arch/posix/core/irq.c +++ b/arch/posix/core/irq.c @@ -10,23 +10,23 @@ #ifdef CONFIG_IRQ_OFFLOAD #include "irq_offload.h" -void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) +void arch_irq_offload(irq_offload_routine_t routine, void *parameter) { posix_irq_offload(routine, parameter); } #endif -void z_arch_irq_enable(unsigned int irq) +void arch_irq_enable(unsigned int irq) { posix_irq_enable(irq); } -void z_arch_irq_disable(unsigned int irq) +void arch_irq_disable(unsigned int irq) { posix_irq_disable(irq); } -int z_arch_irq_is_enabled(unsigned int irq) +int arch_irq_is_enabled(unsigned int irq) { return posix_irq_is_enabled(irq); } @@ -45,9 +45,9 @@ int z_arch_irq_is_enabled(unsigned int irq) * * @return The vector assigned to this interrupt */ -int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, - void (*routine)(void *parameter), - void *parameter, u32_t flags) +int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, + void (*routine)(void *parameter), + void *parameter, u32_t flags) { posix_isr_declare(irq, (int)flags, routine, parameter); posix_irq_priority_set(irq, priority, flags); diff --git a/arch/posix/core/posix_core.c b/arch/posix/core/posix_core.c index 417b824d051..9ee87626323 100644 --- a/arch/posix/core/posix_core.c +++ b/arch/posix/core/posix_core.c @@ -187,7 +187,7 @@ static void posix_preexit_cleanup(void) /** * Let the ready thread run and block this thread until it is allowed again * - * called from z_arch_swap() which does the picking from the kernel structures + * called from arch_swap() which does the picking from the kernel structures */ void posix_swap(int next_allowed_thread_nbr, int this_th_nbr) { @@ -207,7 +207,7 @@ void posix_swap(int next_allowed_thread_nbr, int this_th_nbr) /** * Let the ready thread (main) run, and exit this thread (init) * - * Called from z_arch_switch_to_main_thread() which does the picking from the + * Called from arch_switch_to_main_thread() which does the picking from the * kernel structures * * Note that we could have just done a swap(), but that would have left the @@ -256,7 +256,7 @@ static void posix_cleanup_handler(void *arg) /** * Helper function to start a Zephyr thread as a POSIX thread: - * It will block the thread until a z_arch_swap() is called for it + * It will block the thread until an arch_swap() is called for it * * Spawned from posix_new_thread() below */ @@ -361,9 +361,9 @@ static int ttable_get_empty_slot(void) } /** - * Called from z_arch_new_thread(), + * Called from arch_new_thread(), * Create a new POSIX thread for the new Zephyr thread.
- z_arch_new_thread() picks from the kernel structures what it is that we need + arch_new_thread() picks from the kernel structures what it is that we need * to call with what parameters */ void posix_new_thread(posix_thread_status_t *ptr) diff --git a/arch/posix/core/swap.c b/arch/posix/core/swap.c index 382b9c249e4..08f1eabd59c 100644 --- a/arch/posix/core/swap.c +++ b/arch/posix/core/swap.c @@ -9,7 +9,7 @@ * @file * @brief Kernel swapper code for POSIX * - * This module implements the z_arch_swap() routine for the POSIX architecture. + * This module implements the arch_swap() routine for the POSIX architecture. * */ #include #include #include "irq.h" #include "kswap.h" -int z_arch_swap(unsigned int key) +int arch_swap(unsigned int key) { /* * struct k_thread * _kernel.current is the currently running thread @@ -34,7 +34,7 @@ _kernel.current->callee_saved.retval = -EAGAIN; /* retval may be modified with a call to - * z_arch_thread_return_value_set() + * arch_thread_return_value_set() */ posix_thread_status_t *ready_thread_ptr = @@ -67,15 +67,15 @@ #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN -/* This is just a version of z_arch_swap() in which we do not save anything +/* This is just a version of arch_swap() in which we do not save anything * about the current thread. * * Note that we will never come back to this thread: posix_main_thread_start() * never returns. */ -void z_arch_switch_to_main_thread(struct k_thread *main_thread, - k_thread_stack_t *main_stack, - size_t main_stack_size, k_thread_entry_t _main) +void arch_switch_to_main_thread(struct k_thread *main_thread, + k_thread_stack_t *main_stack, + size_t main_stack_size, k_thread_entry_t _main) { posix_thread_status_t *ready_thread_ptr = (posix_thread_status_t *) diff --git a/arch/posix/core/thread.c b/arch/posix/core/thread.c index 0d207db1237..ae4807e1c9f 100644 --- a/arch/posix/core/thread.c +++ b/arch/posix/core/thread.c @@ -24,10 +24,10 @@ /* Note that in this arch we cheat quite a bit: we use as stack a normal * pthreads stack and therefore we ignore the stack size */ -void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, - size_t stack_size, k_thread_entry_t thread_func, - void *arg1, void *arg2, void *arg3, - int priority, unsigned int options) +void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, + size_t stack_size, k_thread_entry_t thread_func, + void *arg1, void *arg2, void *arg3, + int priority, unsigned int options) { char *stack_memory = Z_THREAD_STACK_BUFFER(stack); diff --git a/arch/posix/include/kernel_arch_func.h b/arch/posix/include/kernel_arch_func.h index c5b80e90575..efa5d91493c 100644 --- a/arch/posix/include/kernel_arch_func.h +++ b/arch/posix/include/kernel_arch_func.h @@ -19,18 +19,18 @@ extern "C" { #endif #if defined(CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN) -void z_arch_switch_to_main_thread(struct k_thread *main_thread, - k_thread_stack_t *main_stack, - size_t main_stack_size, k_thread_entry_t _main); +void arch_switch_to_main_thread(struct k_thread *main_thread, + k_thread_stack_t *main_stack, + size_t main_stack_size, k_thread_entry_t _main); #endif -static inline void z_arch_kernel_init(void) +static inline void arch_kernel_init(void) { /* Nothing to be done */ } static ALWAYS_INLINE void -z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) +arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { thread->callee_saved.retval = value; } @@ -39,7 +39,7
@@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) } #endif -static inline bool z_arch_is_in_isr(void) +static inline bool arch_is_in_isr(void) { return _kernel.nested != 0U; } diff --git a/arch/riscv/core/cpu_idle.c b/arch/riscv/core/cpu_idle.c index 051f171eddc..ecfb4861b69 100644 --- a/arch/riscv/core/cpu_idle.c +++ b/arch/riscv/core/cpu_idle.c @@ -9,20 +9,20 @@ /* * In RISC-V there is no conventional way to handle CPU power save. * Each RISC-V SOC handles it in its own way. - * Hence, by default, z_arch_cpu_idle and z_arch_cpu_atomic_idle functions just + * Hence, by default, arch_cpu_idle and arch_cpu_atomic_idle functions just * unlock interrupts and return to the caller, without issuing any CPU power * saving instruction. * - * Nonetheless, define the default z_arch_cpu_idle and z_arch_cpu_atomic_idle + * Nonetheless, define the default arch_cpu_idle and arch_cpu_atomic_idle * functions as weak functions, so that they can be replaced at the SOC-level. */ -void __weak z_arch_cpu_idle(void) +void __weak arch_cpu_idle(void) { irq_unlock(SOC_MSTATUS_IEN); } -void __weak z_arch_cpu_atomic_idle(unsigned int key) +void __weak arch_cpu_atomic_idle(unsigned int key) { irq_unlock(key); } diff --git a/arch/riscv/core/irq_manage.c b/arch/riscv/core/irq_manage.c index 43b7e5850f1..b8f4a74057e 100644 --- a/arch/riscv/core/irq_manage.c +++ b/arch/riscv/core/irq_manage.c @@ -30,9 +30,9 @@ FUNC_NORETURN void z_irq_spurious(void *unused) } #ifdef CONFIG_DYNAMIC_INTERRUPTS -int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, - void (*routine)(void *parameter), void *parameter, - u32_t flags) +int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, + void (*routine)(void *parameter), void *parameter, + u32_t flags) { ARG_UNUSED(flags); diff --git a/arch/riscv/core/irq_offload.c b/arch/riscv/core/irq_offload.c index d4bd4fc22ab..2ba4d413f75 100644 --- a/arch/riscv/core/irq_offload.c +++ b/arch/riscv/core/irq_offload.c @@ -31,7 +31,7 @@ void z_irq_do_offload(void) tmp((void *)offload_param); } -void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) +void arch_irq_offload(irq_offload_routine_t routine, void *parameter) { unsigned int key; diff --git a/arch/riscv/core/swap.S b/arch/riscv/core/swap.S index c81910a8348..661175a804f 100644 --- a/arch/riscv/core/swap.S +++ b/arch/riscv/core/swap.S @@ -10,18 +10,18 @@ #include /* exports */ -GTEXT(z_arch_swap) +GTEXT(arch_swap) GTEXT(z_thread_entry_wrapper) /* Use ABI name of registers for the sake of simplicity */ /* - * unsigned int z_arch_swap(unsigned int key) + * unsigned int arch_swap(unsigned int key) * * Always called with interrupts locked * key is stored in a0 register */ -SECTION_FUNC(exception.other, z_arch_swap) +SECTION_FUNC(exception.other, arch_swap) /* Make a system call to perform context switch */ #ifdef CONFIG_EXECUTION_BENCHMARKING @@ -77,16 +77,16 @@ SECTION_FUNC(exception.other, z_arch_swap) * Restored register a0 contains IRQ lock state of thread. * * Prior to unlocking irq, load return value of - * z_arch_swap to temp register t2 (from + * arch_swap to temp register t2 (from * _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN, - * unless someone has previously called z_arch_thread_return_value_set(..). + * unless someone has previously called arch_thread_return_value_set(..). 
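The -EAGAIN convention called out just above is worth a concrete illustration. Below is a minimal sketch of a blocking take/give pair; pend_current_thread(), unpend_first_thread(), ready_thread(), and struct my_obj are hypothetical helpers, while arch_swap(), arch_irq_lock()/arch_irq_unlock(), and arch_thread_return_value_set() are the interfaces being renamed here:

#include <kernel.h>

struct my_obj {
	bool available;	/* hypothetical one-slot resource */
};

int my_obj_take(struct my_obj *obj)
{
	unsigned int key = arch_irq_lock();

	if (obj->available) {
		obj->available = false;
		arch_irq_unlock(key);
		return 0;
	}

	pend_current_thread(obj);	/* hypothetical: queue _current on obj */

	/* Yields -EAGAIN on timeout, or whatever a waker set below */
	return arch_swap(key);
}

void my_obj_give(struct my_obj *obj)
{
	struct k_thread *waiter = unpend_first_thread(obj);	/* hypothetical */

	if (waiter != NULL) {
		/* Make the waiter's arch_swap() call return 0, not -EAGAIN */
		arch_thread_return_value_set(waiter, 0);
		ready_thread(waiter);	/* hypothetical */
	} else {
		obj->available = true;
	}
}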
*/ la t0, _kernel /* Get pointer to _kernel.current */ RV_OP_LOADREG t1, _kernel_offset_to_current(t0) - /* Load return value of z_arch_swap function in temp register t2 */ + /* Load return value of arch_swap function in temp register t2 */ lw t2, _thread_offset_to_swap_return_value(t1) /* @@ -109,7 +109,7 @@ SECTION_FUNC(exception.other, z_arch_swap) SECTION_FUNC(TEXT, z_thread_entry_wrapper) /* * z_thread_entry_wrapper is called for every new thread upon the return - * of z_arch_swap or ISR. Its address, as well as its input function + * of arch_swap or ISR. Its address, as well as its input function * arguments thread_entry_t, void *, void *, void * are restored from * the thread stack (initialized via function _thread). * In this case, thread_entry_t, * void *, void * and void * are stored diff --git a/arch/riscv/core/thread.c b/arch/riscv/core/thread.c index f9bcc4813c4..612868022c9 100644 --- a/arch/riscv/core/thread.c +++ b/arch/riscv/core/thread.c @@ -12,10 +12,10 @@ void z_thread_entry_wrapper(k_thread_entry_t thread, void *arg2, void *arg3); -void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, - size_t stack_size, k_thread_entry_t thread_func, - void *arg1, void *arg2, void *arg3, - int priority, unsigned int options) +void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, + size_t stack_size, k_thread_entry_t thread_func, + void *arg1, void *arg2, void *arg3, + int priority, unsigned int options) { char *stack_memory = Z_THREAD_STACK_BUFFER(stack); Z_ASSERT_VALID_PRIO(priority, thread_func); diff --git a/arch/riscv/include/kernel_arch_func.h b/arch/riscv/include/kernel_arch_func.h index bca8f1d7959..e5a0b9decf5 100644 --- a/arch/riscv/include/kernel_arch_func.h +++ b/arch/riscv/include/kernel_arch_func.h @@ -22,14 +22,14 @@ extern "C" { #endif #ifndef _ASMLANGUAGE -static ALWAYS_INLINE void z_arch_kernel_init(void) +static ALWAYS_INLINE void arch_kernel_init(void) { _kernel.irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE; } static ALWAYS_INLINE void -z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) +arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { thread->arch.swap_return_value = value; } @@ -37,7 +37,7 @@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason, const z_arch_esf_t *esf); -static inline bool z_arch_is_in_isr(void) +static inline bool arch_is_in_isr(void) { return _kernel.nested != 0U; } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d2ea1b41a8d..76c9980787a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -162,7 +162,7 @@ config X86_VERY_EARLY_CONSOLE Non-emulated X86 devices often require special hardware to attach a debugger, which may not be easily available. This option adds a very minimal serial driver which gets initialized at the very - beginning of z_cstart(), via z_arch_kernel_init(). This driver enables + beginning of z_cstart(), via arch_kernel_init(). This driver enables printk to emit messages to the 16550 UART port 0 instance in device tree. This mini-driver assumes I/O to the UART is done via ports. 
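Returning briefly to the RISC-V idle hooks a few hunks up: because they are defined __weak, an SoC can ship a strong definition that issues a real power-save instruction. A minimal sketch of such an override; the unlock-then-wfi pairing is illustrative and must be validated against the target SoC's interrupt semantics:

/* soc.c: a strong definition overrides the weak default */
void arch_cpu_idle(void)
{
	/* re-enable interrupts, then stall until one arrives */
	irq_unlock(SOC_MSTATUS_IEN);
	__asm__ volatile ("wfi");
}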
diff --git a/arch/x86/core/cpuhalt.c b/arch/x86/core/cpuhalt.c index cae26ec486a..af6247fdbc5 100644 --- a/arch/x86/core/cpuhalt.c +++ b/arch/x86/core/cpuhalt.c @@ -7,7 +7,7 @@ #include #include -void z_arch_cpu_idle(void) +void arch_cpu_idle(void) { sys_trace_idle(); __asm__ volatile ( @@ -15,7 +15,7 @@ void z_arch_cpu_idle(void) "hlt\n\t"); } -void z_arch_cpu_atomic_idle(unsigned int key) +void arch_cpu_atomic_idle(unsigned int key) { sys_trace_idle(); @@ -30,7 +30,7 @@ void z_arch_cpu_atomic_idle(unsigned int key) * external, maskable interrupts after the next instruction is * executed." * - * Thus the IA-32 implementation of z_arch_cpu_atomic_idle() will + * Thus the IA-32 implementation of arch_cpu_atomic_idle() will * atomically re-enable interrupts and enter a low-power mode. */ "hlt\n\t"); diff --git a/arch/x86/core/fatal.c b/arch/x86/core/fatal.c index 47ba60b5df8..2769af002de 100644 --- a/arch/x86/core/fatal.c +++ b/arch/x86/core/fatal.c @@ -40,9 +40,9 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, u16_t cs) { uintptr_t start, end; - if (z_arch_is_in_isr()) { + if (arch_is_in_isr()) { /* We were servicing an interrupt */ - start = (uintptr_t)Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack); + start = (uintptr_t)ARCH_THREAD_STACK_BUFFER(_interrupt_stack); end = start + CONFIG_ISR_STACK_SIZE; } else if ((cs & 0x3U) != 0U || (_current->base.user_options & K_USER) == 0) { diff --git a/arch/x86/core/ia32/fatal.c b/arch/x86/core/ia32/fatal.c index 888f4efe649..7f1c9fa4379 100644 --- a/arch/x86/core/ia32/fatal.c +++ b/arch/x86/core/ia32/fatal.c @@ -23,7 +23,7 @@ __weak void z_debug_fatal_hook(const z_arch_esf_t *esf) { ARG_UNUSED(esf); } #ifdef CONFIG_BOARD_QEMU_X86 -FUNC_NORETURN void z_arch_system_halt(unsigned int reason) +FUNC_NORETURN void arch_system_halt(unsigned int reason) { ARG_UNUSED(reason); @@ -46,7 +46,7 @@ void z_x86_spurious_irq(const z_arch_esf_t *esf) z_x86_fatal_error(K_ERR_SPURIOUS_IRQ, esf); } -void z_arch_syscall_oops(void *ssf_ptr) +void arch_syscall_oops(void *ssf_ptr) { struct _x86_syscall_stack_frame *ssf = (struct _x86_syscall_stack_frame *)ssf_ptr; @@ -229,7 +229,7 @@ static FUNC_NORETURN __used void df_handler_top(void) _df_esf.eflags = _main_tss.eflags; /* Restore the main IA task to a runnable state */ - _main_tss.esp = (u32_t)(Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack) + + _main_tss.esp = (u32_t)(ARCH_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE); _main_tss.cs = CODE_SEG; _main_tss.ds = DATA_SEG; diff --git a/arch/x86/core/ia32/intstub.S b/arch/x86/core/ia32/intstub.S index 6ed8f597b5a..1e46ed0e59a 100644 --- a/arch/x86/core/ia32/intstub.S +++ b/arch/x86/core/ia32/intstub.S @@ -29,7 +29,7 @@ /* externs */ - GTEXT(z_arch_swap) + GTEXT(arch_swap) #ifdef CONFIG_SYS_POWER_MANAGEMENT GTEXT(z_sys_power_save_idle_exit) @@ -83,8 +83,8 @@ SECTION_FUNC(TEXT, _interrupt_enter) pushl %eax pushl %edx rdtsc - mov %eax, z_arch_timing_irq_start - mov %edx, z_arch_timing_irq_start+4 + mov %eax, arch_timing_irq_start + mov %edx, arch_timing_irq_start+4 pop %edx pop %eax #endif @@ -125,7 +125,7 @@ SECTION_FUNC(TEXT, _interrupt_enter) /* Push EDI as we will use it for scratch space. 
* Rest of the callee-saved regs get saved by invocation of C - * functions (isr handler, z_arch_swap(), etc) + * functions (isr handler, arch_swap(), etc) */ pushl %edi @@ -186,8 +186,8 @@ alreadyOnIntStack: pushl %eax pushl %edx rdtsc - mov %eax,z_arch_timing_irq_end - mov %edx,z_arch_timing_irq_end+4 + mov %eax,arch_timing_irq_end + mov %edx,arch_timing_irq_end+4 pop %edx pop %eax #endif @@ -227,7 +227,7 @@ alreadyOnIntStack: /* * Set X86_THREAD_FLAG_INT bit in k_thread to allow the upcoming call - * to z_arch_swap() to determine whether non-floating registers need to be + * to arch_swap() to determine whether non-floating registers need to be * preserved using the lazy save/restore algorithm, or to indicate to * debug tools that a preemptive context switch has occurred. */ @@ -239,7 +239,7 @@ alreadyOnIntStack: /* * A context reschedule is required: keep the volatile registers of * the interrupted thread on the context's stack. Utilize - * the existing z_arch_swap() primitive to save the remaining + * the existing arch_swap() primitive to save the remaining * thread's registers (including floating point) and perform * a switch to the new thread. */ @@ -250,12 +250,12 @@ alreadyOnIntStack: call z_check_stack_sentinel #endif pushfl /* push KERNEL_LOCK_KEY argument */ - call z_arch_swap + call arch_swap addl $4, %esp /* pop KERNEL_LOCK_KEY argument */ /* * The interrupted thread has now been scheduled, - * as the result of a _later_ invocation of z_arch_swap(). + * as the result of a _later_ invocation of arch_swap(). * * Now need to restore the interrupted thread's environment before * returning control to it at the point where it was interrupted ... @@ -263,7 +263,7 @@ alreadyOnIntStack: #if defined(CONFIG_LAZY_FP_SHARING) /* - * z_arch_swap() has restored the floating point registers, if needed. + * arch_swap() has restored the floating point registers, if needed. * Clear X86_THREAD_FLAG_INT in the interrupted thread's state * since it has served its purpose. 
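In C terms, the reschedule path of this exit stub behaves roughly as sketched below; reschedule_needed(), set_x86_thread_flag_int(), and read_eflags() are descriptive stand-ins for the assembly, not real symbols:

void interrupt_exit_sketch(void)
{
	if (reschedule_needed()) {
		/* mark the preemptive switch in the thread's arch state */
		set_x86_thread_flag_int(_current);	/* the X86_THREAD_FLAG_INT bit */

		arch_swap(read_eflags());	/* the pushfl/call sequence above */

		/* control returns here only when this thread runs again,
		 * at which point the flag has served its purpose and is
		 * cleared
		 */
	}
}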
*/ diff --git a/arch/x86/core/ia32/irq_manage.c b/arch/x86/core/ia32/irq_manage.c index 5a1cad4eb6e..97fd0574356 100644 --- a/arch/x86/core/ia32/irq_manage.c +++ b/arch/x86/core/ia32/irq_manage.c @@ -48,7 +48,7 @@ void *__attribute__((section(".spurNoErrIsr"))) */ #ifdef CONFIG_SYS_POWER_MANAGEMENT -void z_arch_irq_direct_pm(void) +void arch_irq_direct_pm(void) { if (_kernel.idle) { s32_t idle_val = _kernel.idle; @@ -59,17 +59,17 @@ void z_arch_irq_direct_pm(void) } #endif -void z_arch_isr_direct_header(void) +void arch_isr_direct_header(void) { sys_trace_isr_enter(); /* We're not going to unlock IRQs, but we still need to increment this - * so that z_arch_is_in_isr() works + * so that arch_is_in_isr() works */ ++_kernel.nested; } -void z_arch_isr_direct_footer(int swap) +void arch_isr_direct_footer(int swap) { z_irq_controller_eoi(); sys_trace_isr_exit(); @@ -250,7 +250,7 @@ static void idt_vector_install(int vector, void *irq_handler) irq_unlock(key); } -int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, +int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, void (*routine)(void *parameter), void *parameter, u32_t flags) { diff --git a/arch/x86/core/ia32/irq_offload.c b/arch/x86/core/ia32/irq_offload.c index 4c5265ffb94..c6a26b34930 100644 --- a/arch/x86/core/ia32/irq_offload.c +++ b/arch/x86/core/ia32/irq_offload.c @@ -25,7 +25,7 @@ void z_irq_do_offload(void) offload_routine(offload_param); } -void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) +void arch_irq_offload(irq_offload_routine_t routine, void *parameter) { unsigned int key; diff --git a/arch/x86/core/ia32/swap.S b/arch/x86/core/ia32/swap.S index f37ff8c8702..69f42c60bd9 100644 --- a/arch/x86/core/ia32/swap.S +++ b/arch/x86/core/ia32/swap.S @@ -8,7 +8,7 @@ * @file * @brief Kernel swapper code for IA-32 * - * This module implements the z_arch_swap() routine for the IA-32 architecture. + * This module implements the arch_swap() routine for the IA-32 architecture. */ #include @@ -19,7 +19,7 @@ /* exports (internal APIs) */ - GTEXT(z_arch_swap) + GTEXT(arch_swap) GTEXT(z_x86_thread_entry_wrapper) GTEXT(_x86_user_thread_entry_wrapper) @@ -30,7 +30,7 @@ GDATA(_k_neg_eagain) /* - * Given that z_arch_swap() is called to effect a cooperative context switch, + * Given that arch_swap() is called to effect a cooperative context switch, * only the non-volatile integer registers need to be saved in the TCS of the * outgoing thread. The restoration of the integer registers of the incoming * thread depends on whether that thread was preemptively context switched out. @@ -62,7 +62,7 @@ * * C function prototype: * - * unsigned int z_arch_swap (unsigned int eflags); + * unsigned int arch_swap (unsigned int eflags); */ .macro read_tsc var_name @@ -74,7 +74,7 @@ pop %edx pop %eax .endm -SECTION_FUNC(TEXT, z_arch_swap) +SECTION_FUNC(TEXT, arch_swap) #ifdef CONFIG_EXECUTION_BENCHMARKING /* Save the eax and edx registers before reading the time stamp * once done pop the values. @@ -82,8 +82,8 @@ SECTION_FUNC(TEXT, z_arch_swap) push %eax push %edx rdtsc - mov %eax,z_arch_timing_swap_start - mov %edx,z_arch_timing_swap_start+4 + mov %eax,arch_timing_swap_start + mov %edx,arch_timing_swap_start+4 pop %edx pop %eax #endif @@ -106,7 +106,7 @@ SECTION_FUNC(TEXT, z_arch_swap) * Carve space for the return value. Setting it to a default of * -EAGAIN eliminates the need for the timeout code to set it. * If another value is ever needed, it can be modified with - * z_arch_thread_return_value_set(). 
+ * arch_thread_return_value_set(). */ pushl _k_neg_eagain @@ -331,7 +331,7 @@ CROHandlingDone: movl _thread_offset_to_esp(%eax), %esp - /* load return value from a possible z_arch_thread_return_value_set() */ + /* load return value from a possible arch_thread_return_value_set() */ popl %eax @@ -345,23 +345,23 @@ CROHandlingDone: /* * %eax may contain one of these values: * - * - the return value for z_arch_swap() that was set up by a call to - * z_arch_thread_return_value_set() + * - the return value for arch_swap() that was set up by a call to + * arch_thread_return_value_set() * - -EINVAL */ - /* Utilize the 'eflags' parameter to z_arch_swap() */ + /* Utilize the 'eflags' parameter to arch_swap() */ pushl 4(%esp) popfl #ifdef CONFIG_EXECUTION_BENCHMARKING - cmp $0x1,z_arch_timing_value_swap_end + cmp $0x1,arch_timing_value_swap_end jne time_read_not_needed - movw $0x2,z_arch_timing_value_swap_end - read_tsc z_arch_timing_value_swap_common - pushl z_arch_timing_swap_start - popl z_arch_timing_value_swap_temp + movw $0x2,arch_timing_value_swap_end + read_tsc arch_timing_value_swap_common + pushl arch_timing_swap_start + popl arch_timing_value_swap_temp time_read_not_needed: #endif ret @@ -371,7 +371,7 @@ time_read_not_needed: * * @brief Adjust stack/parameters before invoking thread entry function * - * This function adjusts the initial stack frame created by z_arch_new_thread() + * This function adjusts the initial stack frame created by arch_new_thread() * such that the GDB stack frame unwinders recognize it as the outermost frame * in the thread's stack. * @@ -380,7 +380,7 @@ time_read_not_needed: * a main() function, and there does not appear to be a simple way of stopping * the unwinding of the stack. * - * Given the initial thread created by z_arch_new_thread(), GDB expects to find + * Given the initial thread created by arch_new_thread(), GDB expects to find * a return address on the stack immediately above the thread entry routine * z_thread_entry, in the location occupied by the initial EFLAGS. 
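One consumer worth noting before the next file: arch_float_disable(), renamed in the ia32/thread.c hunk below, is normally reached through the public k_float_disable() API. A sketch, with heavy_float_math() hypothetical:

void fp_worker(void *p1, void *p2, void *p3)
{
	heavy_float_math();	/* hypothetical FP-heavy phase */

	/* Done with FP: the kernel may stop preserving x87/SSE state for
	 * this thread. Expect a nonzero error where unsupported.
	 */
	(void)k_float_disable(k_current_get());
}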
GDB * attempts to examine the memory at this return address, which typically diff --git a/arch/x86/core/ia32/thread.c b/arch/x86/core/ia32/thread.c index 711e253e8e8..9faacfa912b 100644 --- a/arch/x86/core/ia32/thread.c +++ b/arch/x86/core/ia32/thread.c @@ -109,8 +109,8 @@ static FUNC_NORETURN void drop_to_user(k_thread_entry_t user_entry, CODE_UNREACHABLE; } -FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry, - void *p1, void *p2, void *p3) +FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, + void *p1, void *p2, void *p3) { struct z_x86_thread_stack_header *header = (struct z_x86_thread_stack_header *)_current->stack_obj; @@ -161,7 +161,7 @@ NANO_CPU_INT_REGISTER(z_x86_syscall_entry_stub, -1, -1, 0x80, 3); extern int z_float_disable(struct k_thread *thread); -int z_arch_float_disable(struct k_thread *thread) +int arch_float_disable(struct k_thread *thread) { #if defined(CONFIG_LAZY_FP_SHARING) return z_float_disable(thread); @@ -171,10 +171,10 @@ int z_arch_float_disable(struct k_thread *thread) } #endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */ -void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, - size_t stack_size, k_thread_entry_t entry, - void *parameter1, void *parameter2, void *parameter3, - int priority, unsigned int options) +void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, + size_t stack_size, k_thread_entry_t entry, + void *parameter1, void *parameter2, void *parameter3, + int priority, unsigned int options) { char *stack_buf; char *stack_high; diff --git a/arch/x86/core/ia32/userspace.S b/arch/x86/core/ia32/userspace.S index 67dbd1c3d9c..c37fe920888 100644 --- a/arch/x86/core/ia32/userspace.S +++ b/arch/x86/core/ia32/userspace.S @@ -12,7 +12,7 @@ /* Exports */ GTEXT(z_x86_syscall_entry_stub) GTEXT(z_x86_userspace_enter) -GTEXT(z_arch_user_string_nlen) +GTEXT(arch_user_string_nlen) GTEXT(z_x86_user_string_nlen_fault_start) GTEXT(z_x86_user_string_nlen_fault_end) GTEXT(z_x86_user_string_nlen_fixup) @@ -254,9 +254,9 @@ _bad_syscall: /* - * size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) + * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) */ -SECTION_FUNC(TEXT, z_arch_user_string_nlen) +SECTION_FUNC(TEXT, arch_user_string_nlen) push %ebp mov %esp, %ebp @@ -393,8 +393,8 @@ SECTION_FUNC(TEXT, z_x86_userspace_enter) push %eax push %edx rdtsc - mov %eax,z_arch_timing_enter_user_mode_end - mov %edx,z_arch_timing_enter_user_mode_end+4 + mov %eax,arch_timing_enter_user_mode_end + mov %edx,arch_timing_enter_user_mode_end+4 pop %edx pop %eax #endif diff --git a/arch/x86/core/intel64/cpu.c b/arch/x86/core/intel64/cpu.c index 60291c57a39..9f4a627e074 100644 --- a/arch/x86/core/intel64/cpu.c +++ b/arch/x86/core/intel64/cpu.c @@ -97,7 +97,7 @@ struct x86_cpuboot x86_cpuboot[] = { * will enter the kernel at fn(---, arg), running on the specified stack. */ -void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz, +void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz, void (*fn)(int key, void *data), void *arg) { u8_t vector = ((unsigned long) x86_ap_start) >> 12; diff --git a/arch/x86/core/intel64/irq.c b/arch/x86/core/intel64/irq.c index e3051b9958c..dd01d9cffec 100644 --- a/arch/x86/core/intel64/irq.c +++ b/arch/x86/core/intel64/irq.c @@ -66,7 +66,7 @@ static int allocate_vector(unsigned int priority) * allocated. Whether it should simply __ASSERT instead is up for debate. 
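The dynamic-connect entry point renamed in the next hunk is reached through the public irq_connect_dynamic() wrapper. A sketch of typical driver usage; the IRQ line, priority, and handler are illustrative:

#define MY_DEV_IRQ	11	/* hypothetical */
#define MY_DEV_PRIO	2

static void my_dev_isr(void *arg)
{
	/* acknowledge and service the hypothetical device */
}

int my_dev_init(void)
{
	int vector = irq_connect_dynamic(MY_DEV_IRQ, MY_DEV_PRIO,
					 my_dev_isr, NULL, 0);
	if (vector < 0) {
		return vector;	/* e.g. no free vector was available */
	}

	irq_enable(MY_DEV_IRQ);
	return 0;
}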
*/ -int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, +int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, void (*func)(void *arg), void *arg, u32_t flags) { u32_t key; @@ -91,7 +91,7 @@ int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, #ifdef CONFIG_IRQ_OFFLOAD #include -void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) +void arch_irq_offload(irq_offload_routine_t routine, void *parameter) { x86_irq_funcs[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = routine; x86_irq_args[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = parameter; @@ -119,7 +119,7 @@ void z_x86_ipi_setup(void) * it is not clear exactly how/where/why to abstract this, as it * assumes the use of a local APIC (but there's no other mechanism). */ -void z_arch_sched_ipi(void) +void arch_sched_ipi(void) { z_loapic_ipi(0, LOAPIC_ICR_IPI_OTHERS, CONFIG_SCHED_IPI_VECTOR); } diff --git a/arch/x86/core/intel64/thread.c b/arch/x86/core/intel64/thread.c index 78d9981ad9f..44d284ed8a1 100644 --- a/arch/x86/core/intel64/thread.c +++ b/arch/x86/core/intel64/thread.c @@ -10,10 +10,10 @@ extern void x86_sse_init(struct k_thread *); /* in locore.S */ -void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, - size_t stack_size, k_thread_entry_t entry, - void *parameter1, void *parameter2, void *parameter3, - int priority, unsigned int options) +void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, + size_t stack_size, k_thread_entry_t entry, + void *parameter1, void *parameter2, void *parameter3, + int priority, unsigned int options) { #if defined(CONFIG_X86_USERSPACE) || defined(CONFIG_X86_STACK_PROTECTION) struct z_x86_thread_stack_header *header = diff --git a/arch/x86/core/x86_mmu.c b/arch/x86/core/x86_mmu.c index b4bf12ab290..a8075259cab 100644 --- a/arch/x86/core/x86_mmu.c +++ b/arch/x86/core/x86_mmu.c @@ -746,7 +746,7 @@ static void add_mmu_region(struct x86_page_tables *ptables, } } -/* Called from x86's z_arch_kernel_init() */ +/* Called from x86's arch_kernel_init() */ void z_x86_paging_init(void) { size_t pages_free; @@ -777,7 +777,7 @@ void z_x86_paging_init(void) } #ifdef CONFIG_X86_USERSPACE -int z_arch_buffer_validate(void *addr, size_t size, int write) +int arch_buffer_validate(void *addr, size_t size, int write) { return z_x86_mmu_validate(z_x86_thread_page_tables_get(_current), addr, size, write != 0); @@ -1003,8 +1003,8 @@ void z_x86_thread_pt_init(struct k_thread *thread) * mode the per-thread page tables will be generated and the memory domain * configuration applied. 
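The hunks that follow rename the x86 memory-domain hooks; the kernel invokes them on behalf of the public k_mem_domain API, roughly as in this sketch (the partition itself is illustrative):

#include <kernel.h>
#include <app_memory/app_memdomain.h>

K_APPMEM_PARTITION_DEFINE(my_part);	/* hypothetical app partition */

static struct k_mem_partition *parts[] = { &my_part };
static struct k_mem_domain my_dom;

void domain_setup_sketch(k_tid_t tid)
{
	k_mem_domain_init(&my_dom, ARRAY_SIZE(parts), parts);

	/* reaches arch_mem_domain_thread_add() for user threads */
	k_mem_domain_add_thread(&my_dom, tid);

	/* reaches arch_mem_domain_partition_remove() */
	k_mem_domain_remove_partition(&my_dom, &my_part);
}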
*/ -void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain, - u32_t partition_id) +void arch_mem_domain_partition_remove(struct k_mem_domain *domain, + u32_t partition_id) { sys_dnode_t *node, *next_node; @@ -1024,7 +1024,7 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain, } } -void z_arch_mem_domain_destroy(struct k_mem_domain *domain) +void arch_mem_domain_destroy(struct k_mem_domain *domain) { for (int i = 0, pcount = 0; pcount < domain->num_partitions; i++) { struct k_mem_partition *partition; @@ -1035,11 +1035,11 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain) } pcount++; - z_arch_mem_domain_partition_remove(domain, i); + arch_mem_domain_partition_remove(domain, i); } } -void z_arch_mem_domain_thread_remove(struct k_thread *thread) +void arch_mem_domain_thread_remove(struct k_thread *thread) { struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; @@ -1062,8 +1062,8 @@ void z_arch_mem_domain_thread_remove(struct k_thread *thread) } } -void z_arch_mem_domain_partition_add(struct k_mem_domain *domain, - u32_t partition_id) +void arch_mem_domain_partition_add(struct k_mem_domain *domain, + u32_t partition_id) { sys_dnode_t *node, *next_node; @@ -1080,7 +1080,7 @@ void z_arch_mem_domain_partition_add(struct k_mem_domain *domain, } } -void z_arch_mem_domain_thread_add(struct k_thread *thread) +void arch_mem_domain_thread_add(struct k_thread *thread) { if ((thread->base.user_options & K_USER) == 0) { return; @@ -1090,7 +1090,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread) thread->mem_domain_info.mem_domain); } -int z_arch_mem_domain_max_partitions_get(void) +int arch_mem_domain_max_partitions_get(void) { return CONFIG_MAX_DOMAIN_PARTITIONS; } diff --git a/arch/x86/include/ia32/kernel_arch_func.h b/arch/x86/include/ia32/kernel_arch_func.h index 4ed7bd68a37..4410a88f94b 100644 --- a/arch/x86/include/ia32/kernel_arch_func.h +++ b/arch/x86/include/ia32/kernel_arch_func.h @@ -18,20 +18,20 @@ extern "C" { #endif -static inline void z_arch_kernel_init(void) +static inline void arch_kernel_init(void) { /* No-op on this arch */ } static ALWAYS_INLINE void -z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) +arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { /* write into 'eax' slot created in z_swap() entry */ *(unsigned int *)(thread->callee_saved.esp) = value; } -extern void z_arch_cpu_atomic_idle(unsigned int key); +extern void arch_cpu_atomic_idle(unsigned int key); #ifdef CONFIG_USERSPACE extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry, diff --git a/arch/x86/include/intel64/kernel_arch_func.h b/arch/x86/include/intel64/kernel_arch_func.h index 7c717fdc2a6..3f7f2ba1167 100644 --- a/arch/x86/include/intel64/kernel_arch_func.h +++ b/arch/x86/include/intel64/kernel_arch_func.h @@ -12,7 +12,7 @@ extern void z_x86_switch(void *switch_to, void **switched_from); -static inline void z_arch_switch(void *switch_to, void **switched_from) +static inline void arch_switch(void *switch_to, void **switched_from) { z_x86_switch(switch_to, switched_from); } @@ -25,7 +25,7 @@ static inline void z_arch_switch(void *switch_to, void **switched_from) extern void z_x86_ipi_setup(void); -static inline void z_arch_kernel_init(void) +static inline void arch_kernel_init(void) { /* nothing */; } diff --git a/arch/x86/include/kernel_arch_func.h b/arch/x86/include/kernel_arch_func.h index 616dae6e165..7537474813f 100644 --- a/arch/x86/include/kernel_arch_func.h +++ 
b/arch/x86/include/kernel_arch_func.h @@ -15,10 +15,10 @@ #endif #ifndef _ASMLANGUAGE -static inline bool z_arch_is_in_isr(void) +static inline bool arch_is_in_isr(void) { #ifdef CONFIG_SMP - return z_arch_curr_cpu()->nested != 0; + return arch_curr_cpu()->nested != 0; #else return _kernel.nested != 0U; #endif diff --git a/arch/xtensa/core/cpu_idle.c b/arch/xtensa/core/cpu_idle.c index c14502b6cc7..8f0c04a4c18 100644 --- a/arch/xtensa/core/cpu_idle.c +++ b/arch/xtensa/core/cpu_idle.c @@ -5,12 +5,12 @@ #include -void z_arch_cpu_idle(void) +void arch_cpu_idle(void) { sys_trace_idle(); __asm__ volatile ("waiti 0"); } -void z_arch_cpu_atomic_idle(unsigned int key) +void arch_cpu_atomic_idle(unsigned int key) { sys_trace_idle(); __asm__ volatile ("waiti 0\n\t" diff --git a/arch/xtensa/core/irq_offload.c b/arch/xtensa/core/irq_offload.c index 981e70d74e1..9fcfb1be568 100644 --- a/arch/xtensa/core/irq_offload.c +++ b/arch/xtensa/core/irq_offload.c @@ -23,11 +23,11 @@ void z_irq_do_offload(void *unused) offload_routine(offload_param); } -void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) +void arch_irq_offload(irq_offload_routine_t routine, void *parameter) { IRQ_CONNECT(CONFIG_IRQ_OFFLOAD_INTNUM, XCHAL_EXCM_LEVEL, z_irq_do_offload, NULL, 0); - z_arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM); + arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM); offload_routine = routine; offload_param = parameter; z_xt_set_intset(BIT(CONFIG_IRQ_OFFLOAD_INTNUM)); @@ -35,5 +35,5 @@ void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) * Enable the software interrupt, in case it is disabled, so that IRQ * offload is serviced. */ - z_arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM); + arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM); } diff --git a/arch/xtensa/core/xtensa-asm2.c b/arch/xtensa/core/xtensa-asm2.c index 9d64c74c193..33bb309b3a4 100644 --- a/arch/xtensa/core/xtensa-asm2.c +++ b/arch/xtensa/core/xtensa-asm2.c @@ -56,10 +56,10 @@ void *xtensa_init_stack(int *stack_top, return &bsa[-9]; } -void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, - size_t sz, k_thread_entry_t entry, - void *p1, void *p2, void *p3, - int prio, unsigned int opts) +void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, + size_t sz, k_thread_entry_t entry, + void *p1, void *p2, void *p3, + int prio, unsigned int opts) { char *base = Z_THREAD_STACK_BUFFER(stack); char *top = base + sz; @@ -194,7 +194,7 @@ void *xtensa_excint1_c(int *interrupted_stack) LOG_ERR(" ** FATAL EXCEPTION"); LOG_ERR(" ** CPU %d EXCCAUSE %d (%s)", - z_arch_curr_cpu()->id, cause, + arch_curr_cpu()->id, cause, z_xtensa_exccause(cause)); LOG_ERR(" ** PC %p VADDR %p", (void *)bsa[BSA_PC_OFF/4], (void *)vaddr); diff --git a/arch/xtensa/include/kernel_arch_func.h b/arch/xtensa/include/kernel_arch_func.h index 71762205841..73e243d3fcc 100644 --- a/arch/xtensa/include/kernel_arch_func.h +++ b/arch/xtensa/include/kernel_arch_func.h @@ -31,7 +31,7 @@ extern void z_xt_coproc_init(void); extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE); -static ALWAYS_INLINE void z_arch_kernel_init(void) +static ALWAYS_INLINE void arch_kernel_init(void) { _cpu_t *cpu0 = &_kernel.cpus[0]; @@ -55,7 +55,7 @@ static ALWAYS_INLINE void z_arch_kernel_init(void) void xtensa_switch(void *switch_to, void **switched_from); -static inline void z_arch_switch(void *switch_to, void **switched_from) +static inline void arch_switch(void *switch_to, void **switched_from) { return xtensa_switch(switch_to, switched_from); } @@ -64,9 
+64,9 @@ static inline void z_arch_switch(void *switch_to, void **switched_from) } #endif -static inline bool z_arch_is_in_isr(void) +static inline bool arch_is_in_isr(void) { - return z_arch_curr_cpu()->nested != 0U; + return arch_curr_cpu()->nested != 0U; } #endif /* _ASMLANGUAGE */ diff --git a/boards/posix/native_posix/board_irq.h b/boards/posix/native_posix/board_irq.h index ccb95ef2207..b05dbbdef6f 100644 --- a/boards/posix/native_posix/board_irq.h +++ b/boards/posix/native_posix/board_irq.h @@ -30,7 +30,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags); * * @return The vector assigned to this interrupt */ -#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ +#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ ({ \ posix_isr_declare(irq_p, 0, isr_p, isr_param_p); \ posix_irq_priority_set(irq_p, priority_p, flags_p); \ @@ -43,7 +43,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags); * * See include/irq.h for details. */ -#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ +#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ ({ \ posix_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, \ NULL); \ @@ -63,7 +63,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags); * All pre/post irq work of the interrupt is handled in the board * posix_irq_handler() both for direct and normal interrupts together */ -#define Z_ARCH_ISR_DIRECT_DECLARE(name) \ +#define ARCH_ISR_DIRECT_DECLARE(name) \ static inline int name##_body(void); \ int name(void) \ { \ @@ -73,14 +73,14 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags); } \ static inline int name##_body(void) -#define Z_ARCH_ISR_DIRECT_HEADER() do { } while (0) -#define Z_ARCH_ISR_DIRECT_FOOTER(a) do { } while (0) +#define ARCH_ISR_DIRECT_HEADER() do { } while (0) +#define ARCH_ISR_DIRECT_FOOTER(a) do { } while (0) #ifdef CONFIG_SYS_POWER_MANAGEMENT extern void posix_irq_check_idle_exit(void); -#define Z_ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit() +#define ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit() #else -#define Z_ARCH_ISR_DIRECT_PM() do { } while (0) +#define ARCH_ISR_DIRECT_PM() do { } while (0) #endif #ifdef __cplusplus diff --git a/boards/posix/nrf52_bsim/board_irq.h b/boards/posix/nrf52_bsim/board_irq.h index ec4998956d3..8392f3053a6 100644 --- a/boards/posix/nrf52_bsim/board_irq.h +++ b/boards/posix/nrf52_bsim/board_irq.h @@ -30,7 +30,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags); * * @return The vector assigned to this interrupt */ -#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ +#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ ({ \ posix_isr_declare(irq_p, 0, isr_p, isr_param_p); \ posix_irq_priority_set(irq_p, priority_p, flags_p); \ @@ -43,7 +43,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags); * * See include/irq.h for details. 
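As with the normal connect path, applications reach this ARCH_ISR_DIRECT_* machinery only through the public macros. A sketch; the IRQ line and the handler body are illustrative:

ISR_DIRECT_DECLARE(radio_isr_sketch)
{
	do_time_critical_work();	/* hypothetical */

	return 1;	/* nonzero: run the scheduler on the way out */
}

void radio_setup_sketch(void)
{
	IRQ_DIRECT_CONNECT(MY_RADIO_IRQ, 0, radio_isr_sketch, 0);
	irq_enable(MY_RADIO_IRQ);
}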
*/ -#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ +#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ ({ \ posix_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, \ NULL); \ @@ -63,7 +63,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags); * All pre/post irq work of the interrupt is handled in the board * posix_irq_handler() both for direct and normal interrupts together */ -#define Z_ARCH_ISR_DIRECT_DECLARE(name) \ +#define ARCH_ISR_DIRECT_DECLARE(name) \ static inline int name##_body(void); \ int name(void) \ { \ @@ -73,14 +73,14 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags); } \ static inline int name##_body(void) -#define Z_ARCH_ISR_DIRECT_HEADER() do { } while (0) -#define Z_ARCH_ISR_DIRECT_FOOTER(a) do { } while (0) +#define ARCH_ISR_DIRECT_HEADER() do { } while (0) +#define ARCH_ISR_DIRECT_FOOTER(a) do { } while (0) #ifdef CONFIG_SYS_POWER_MANAGEMENT extern void posix_irq_check_idle_exit(void); -#define Z_ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit() +#define ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit() #else -#define Z_ARCH_ISR_DIRECT_PM() do { } while (0) +#define ARCH_ISR_DIRECT_PM() do { } while (0) #endif #ifdef __cplusplus diff --git a/boards/posix/nrf52_bsim/k_busy_wait.c b/boards/posix/nrf52_bsim/k_busy_wait.c index 2787a5f592d..5dcb68824ba 100644 --- a/boards/posix/nrf52_bsim/k_busy_wait.c +++ b/boards/posix/nrf52_bsim/k_busy_wait.c @@ -16,7 +16,7 @@ * Note that interrupts may be received in the meanwhile and that therefore this * thread may lose context */ -void z_arch_busy_wait(u32_t usec_to_wait) +void arch_busy_wait(u32_t usec_to_wait) { bs_time_t time_end = tm_get_hw_time() + usec_to_wait; diff --git a/doc/guides/porting/arch.rst b/doc/guides/porting/arch.rst index 0552f99aea0..8dc2bf540a5 100644 --- a/doc/guides/porting/arch.rst +++ b/doc/guides/porting/arch.rst @@ -407,9 +407,9 @@ CPU Idling/Power Management *************************** The kernel provides support for CPU power management with two functions: -:c:func:`z_arch_cpu_idle` and :c:func:`z_arch_cpu_atomic_idle`. +:c:func:`arch_cpu_idle` and :c:func:`arch_cpu_atomic_idle`. -:c:func:`z_arch_cpu_idle` can be as simple as calling the power saving +:c:func:`arch_cpu_idle` can be as simple as calling the power saving instruction for the architecture with interrupts unlocked, for example :code:`hlt` on x86, :code:`wfi` or :code:`wfe` on ARM, :code:`sleep` on ARC. This function can be called in a loop within a context that does not care if it @@ -422,7 +422,7 @@ basically two scenarios when it is correct to use this function: * In the idle thread. -:c:func:`z_arch_cpu_atomic_idle`, on the other hand, must be able to atomically +:c:func:`arch_cpu_atomic_idle`, on the other hand, must be able to atomically re-enable interrupts and invoke the power saving instruction. It can thus be used in real application code, again in single-threaded systems. @@ -511,32 +511,32 @@ implemented, and the system must enable the :option:`CONFIG_ARCH_HAS_USERSPACE` option. 
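The "real application code" scenario the idle discussion above describes amounts to the loop sketched below, via the public k_cpu_atomic_idle() wrapper; data_ready() and process_data() are hypothetical:

void consumer_main_sketch(void)
{
	for (;;) {
		unsigned int key = irq_lock();

		if (data_ready()) {
			irq_unlock(key);
			process_data();
		} else {
			/* atomically re-enables interrupts and waits */
			k_cpu_atomic_idle(key);
		}
	}
}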
Please see the documentation for each of these functions for more details: -* :cpp:func:`z_arch_buffer_validate()` to test whether the current thread has +* :cpp:func:`arch_buffer_validate()` to test whether the current thread has access permissions to a particular memory region -* :cpp:func:`z_arch_user_mode_enter()` which will irreversibly drop a supervisor +* :cpp:func:`arch_user_mode_enter()` which will irreversibly drop a supervisor thread to user mode privileges. The stack must be wiped. -* :cpp:func:`z_arch_syscall_oops()` which generates a kernel oops when system +* :cpp:func:`arch_syscall_oops()` which generates a kernel oops when system call parameters can't be validated, in such a way that the oops appears to be generated from where the system call was invoked in the user thread -* :cpp:func:`z_arch_syscall_invoke0()` through - :cpp:func:`z_arch_syscall_invoke6()` invoke a system call with the +* :cpp:func:`arch_syscall_invoke0()` through + :cpp:func:`arch_syscall_invoke6()` invoke a system call with the appropriate number of arguments which must all be passed in during the privilege elevation via registers. -* :cpp:func:`z_arch_is_user_context()` return nonzero if the CPU is currently +* :cpp:func:`arch_is_user_context()` return nonzero if the CPU is currently running in user mode -* :cpp:func:`z_arch_mem_domain_max_partitions_get()` which indicates the max +* :cpp:func:`arch_mem_domain_max_partitions_get()` which indicates the max number of regions for a memory domain. MMU systems have an unlimited amount, MPU systems have constraints on this. -* :cpp:func:`z_arch_mem_domain_partition_remove()` Remove a partition from +* :cpp:func:`arch_mem_domain_partition_remove()` Remove a partition from a memory domain if the currently executing thread was part of that domain. -* :cpp:func:`z_arch_mem_domain_destroy()` Reset the thread's memory domain +* :cpp:func:`arch_mem_domain_destroy()` Reset the thread's memory domain configuration In addition to implementing these APIs, there are some other tasks as well: diff --git a/doc/reference/kernel/smp/smp.rst b/doc/reference/kernel/smp/smp.rst index ed078f1b3fe..9c1d41b22c5 100644 --- a/doc/reference/kernel/smp/smp.rst +++ b/doc/reference/kernel/smp/smp.rst @@ -132,7 +132,7 @@ happens on a single CPU before other CPUs are brought online. Just before entering the application ``main()`` function, the kernel calls ``z_smp_init()`` to launch the SMP initialization process. This enumerates over the configured CPUs, calling into the architecture -layer using ``z_arch_start_cpu()`` for each one. This function is +layer using ``arch_start_cpu()`` for each one. This function is passed a memory region to use as a stack on the foreign CPU (in practice it uses the area that will become that CPU's interrupt stack), the address of a local ``smp_init_top()`` callback function to @@ -172,7 +172,7 @@ handle the newly-runnable load. So where possible, Zephyr SMP architectures should implement an interprocessor interrupt. The current framework is very simple: the -architecture provides a ``z_arch_sched_ipi()`` call, which when invoked +architecture provides a ``arch_sched_ipi()`` call, which when invoked will flag an interrupt on all CPUs (except the current one, though that is allowed behavior) which will then invoke the ``z_sched_ipi()`` function implemented in the scheduler. The expectation is that these @@ -239,7 +239,7 @@ offsets. 
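The bring-up sequence described in the SMP hunk above reduces to a loop like the following sketch; the per-CPU stack array is hypothetical, and the real z_smp_init() additionally synchronizes on a per-CPU start flag:

extern k_thread_stack_t *smp_stack[];	/* hypothetical per-CPU stacks */

static void smp_init_top(int key, void *arg)
{
	/* first C code to run on the newly started CPU */
}

void smp_init_sketch(void)
{
	for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
		arch_start_cpu(i, smp_stack[i], CONFIG_ISR_STACK_SIZE,
			       smp_init_top, NULL);
	}
}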
Note that an important requirement on the architecture layer is that the pointer to this CPU struct be available rapidly when in kernel -context. The expectation is that ``z_arch_curr_cpu()`` will be +context. The expectation is that ``arch_curr_cpu()`` will be implemented using a CPU-provided register or addressing mode that can store this value across arbitrary context switches or interrupts and make it available to any kernel-mode code. @@ -270,7 +270,7 @@ Instead, the SMP "switch to" decision needs to be made synchronously with the swap call, and as we don't want per-architecture assembly code to be handling scheduler internal state, Zephyr requires a somewhat lower-level context switch primitive for SMP systems: -``z_arch_switch()`` is always called with interrupts masked, and takes +``arch_switch()`` is always called with interrupts masked, and takes exactly two arguments. The first is an opaque (architecture defined) handle to the context to which it should switch, and the second is a pointer to such a handle into which it should store the handle @@ -288,4 +288,4 @@ in the interrupted thread struct. Note that while SMP requires :option:`CONFIG_USE_SWITCH`, the reverse is not true. A uniprocessor architecture built with :option:`CONFIG_SMP` = n might still decide to implement its context switching using -``z_arch_switch()``. +``arch_switch()``. diff --git a/doc/reference/usermode/syscalls.rst b/doc/reference/usermode/syscalls.rst index 0ae042ae27a..61d7aa2d816 100644 --- a/doc/reference/usermode/syscalls.rst +++ b/doc/reference/usermode/syscalls.rst @@ -149,7 +149,7 @@ Inside this header is the body of :c:func:`k_sem_init()`:: { #ifdef CONFIG_USERSPACE if (z_syscall_trap()) { - z_arch_syscall_invoke3(*(uintptr_t *)&sem, *(uintptr_t *)&initial_count, *(uintptr_t *)&limit, K_SYSCALL_K_SEM_INIT); + arch_syscall_invoke3(*(uintptr_t *)&sem, *(uintptr_t *)&initial_count, *(uintptr_t *)&limit, K_SYSCALL_K_SEM_INIT); return; } compiler_barrier(); diff --git a/drivers/interrupt_controller/plic.c b/drivers/interrupt_controller/plic.c index ea12737f50f..5848bc38295 100644 --- a/drivers/interrupt_controller/plic.c +++ b/drivers/interrupt_controller/plic.c @@ -33,7 +33,7 @@ static int save_irq; * * This routine enables a RISCV PLIC-specific interrupt line. * riscv_plic_irq_enable is called by SOC_FAMILY_RISCV_PRIVILEGE - * z_arch_irq_enable function to enable external interrupts for + * arch_irq_enable function to enable external interrupts for * IRQS level == 2, whenever CONFIG_RISCV_HAS_PLIC variable is set. * @param irq IRQ number to enable * @@ -57,7 +57,7 @@ void riscv_plic_irq_enable(u32_t irq) * * This routine disables a RISCV PLIC-specific interrupt line. * riscv_plic_irq_disable is called by SOC_FAMILY_RISCV_PRIVILEGE - * z_arch_irq_disable function to disable external interrupts, for + * arch_irq_disable function to disable external interrupts, for * IRQS level == 2, whenever CONFIG_RISCV_HAS_PLIC variable is set. * @param irq IRQ number to disable * @@ -98,7 +98,7 @@ int riscv_plic_irq_is_enabled(u32_t irq) * @brief Set priority of a riscv PLIC-specific interrupt line * * This routine sets the priority of a RISCV PLIC-specific interrupt line. - * riscv_plic_irq_set_prio is called by riscv z_arch_irq_priority_set to set + * riscv_plic_irq_set_prio is called by riscv arch_irq_priority_set to set * the priority of an interrupt whenever CONFIG_RISCV_HAS_PLIC variable is set.
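The dispatch these PLIC comments describe is two-level; a simplified sketch of how a RISC-V arch_irq_enable() might route level-2 lines to the driver (the level test and the level-1 helper are illustrative, not the real implementation):

void arch_irq_enable_sketch(unsigned int irq)
{
#if defined(CONFIG_RISCV_HAS_PLIC)
	if (irq_is_level2(irq)) {	/* hypothetical level test */
		riscv_plic_irq_enable(irq);
		return;
	}
#endif
	/* level-1 lines: set the bit directly in the mie CSR */
	riscv_mie_enable(irq);		/* hypothetical */
}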
* @param irq IRQ number for which to set priority * diff --git a/drivers/interrupt_controller/system_apic.c b/drivers/interrupt_controller/system_apic.c index 2a68ec6d32d..92cb71f7e66 100644 --- a/drivers/interrupt_controller/system_apic.c +++ b/drivers/interrupt_controller/system_apic.c @@ -72,7 +72,7 @@ void z_irq_controller_irq_config(unsigned int vector, unsigned int irq, * * @return N/A */ -void z_arch_irq_enable(unsigned int irq) +void arch_irq_enable(unsigned int irq) { if (IS_IOAPIC_IRQ(irq)) { z_ioapic_irq_enable(irq); @@ -92,7 +92,7 @@ void z_arch_irq_enable(unsigned int irq) * * @return N/A */ -void z_arch_irq_disable(unsigned int irq) +void arch_irq_disable(unsigned int irq) { if (IS_IOAPIC_IRQ(irq)) { z_ioapic_irq_disable(irq); diff --git a/drivers/interrupt_controller/vexriscv_litex.c b/drivers/interrupt_controller/vexriscv_litex.c index c2090f2fe85..8c901fdc691 100644 --- a/drivers/interrupt_controller/vexriscv_litex.c +++ b/drivers/interrupt_controller/vexriscv_litex.c @@ -83,17 +83,17 @@ static void vexriscv_litex_irq_handler(void *device) #endif } -void z_arch_irq_enable(unsigned int irq) +void arch_irq_enable(unsigned int irq) { vexriscv_litex_irq_setmask(vexriscv_litex_irq_getmask() | (1 << irq)); } -void z_arch_irq_disable(unsigned int irq) +void arch_irq_disable(unsigned int irq) { vexriscv_litex_irq_setmask(vexriscv_litex_irq_getmask() & ~(1 << irq)); } -int z_arch_irq_is_enabled(unsigned int irq) +int arch_irq_is_enabled(unsigned int irq) { return vexriscv_litex_irq_getmask() & (1 << irq); } diff --git a/drivers/timer/arcv2_timer0.c b/drivers/timer/arcv2_timer0.c index bcbe4f919ad..bab77af93e3 100644 --- a/drivers/timer/arcv2_timer0.c +++ b/drivers/timer/arcv2_timer0.c @@ -251,14 +251,14 @@ void z_clock_set_timeout(s32_t ticks, bool idle) /* Desired delay in the future */ delay = (ticks == 0) ? CYC_PER_TICK : ticks * CYC_PER_TICK; - key = z_arch_irq_lock(); + key = arch_irq_lock(); timer0_limit_register_set(delay - 1); timer0_count_register_set(0); timer0_control_register_set(_ARC_V2_TMR_CTRL_NH | _ARC_V2_TMR_CTRL_IE); - z_arch_irq_unlock(key); + arch_irq_unlock(key); #endif #else if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) { diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c index a53a568ae01..ed795d4415a 100644 --- a/drivers/timer/loapic_timer.c +++ b/drivers/timer/loapic_timer.c @@ -206,8 +206,8 @@ void timer_int_handler(void *unused /* parameter is not used */ "pushl %eax\n\t" "pushl %edx\n\t" "rdtsc\n\t" - "mov %eax, z_arch_timing_tick_start\n\t" - "mov %edx, z_arch_timing_tick_start+4\n\t" + "mov %eax, arch_timing_tick_start\n\t" + "mov %edx, arch_timing_tick_start+4\n\t" "pop %edx\n\t" "pop %eax\n\t"); #endif @@ -293,8 +293,8 @@ void timer_int_handler(void *unused /* parameter is not used */ "pushl %eax\n\t" "pushl %edx\n\t" "rdtsc\n\t" - "mov %eax, z_arch_timing_tick_end\n\t" - "mov %edx, z_arch_timing_tick_end+4\n\t" + "mov %eax, arch_timing_tick_end\n\t" + "mov %edx, arch_timing_tick_end+4\n\t" "pop %edx\n\t" "pop %eax\n\t"); #endif /* CONFIG_EXECUTION_BENCHMARKING */ diff --git a/drivers/timer/mchp_xec_rtos_timer.c b/drivers/timer/mchp_xec_rtos_timer.c index 10489d0683c..cf65a2d6096 100644 --- a/drivers/timer/mchp_xec_rtos_timer.c +++ b/drivers/timer/mchp_xec_rtos_timer.c @@ -269,7 +269,7 @@ u32_t z_clock_elapsed(void) /* * Warning RTOS timer resolution is 30.5 us. * This is called by two code paths: - * 1. Kernel call to k_cycle_get_32() -> z_arch_k_cycle_get_32() -> here. + * 1. 
Kernel call to k_cycle_get_32() -> arch_k_cycle_get_32() -> here. * The kernel is casting return to (int) and using it uncasted in math * expressions with int types. Expression result is stored in an int. * 2. If CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT is not defined then @@ -359,7 +359,7 @@ int z_clock_driver_init(struct device *device) * 32-bit basic timer 0 configured for 1MHz count up, auto-reload, * and no interrupt generation. */ -void z_arch_busy_wait(u32_t usec_to_wait) +void arch_busy_wait(u32_t usec_to_wait) { if (usec_to_wait == 0) { return; diff --git a/drivers/timer/native_posix_timer.c b/drivers/timer/native_posix_timer.c index 2bb1e5d59c6..e00373c2d6b 100644 --- a/drivers/timer/native_posix_timer.c +++ b/drivers/timer/native_posix_timer.c @@ -123,7 +123,7 @@ u32_t z_clock_elapsed(void) * Note that interrupts may be received in the meanwhile and that therefore this * thread may lose context */ -void z_arch_busy_wait(u32_t usec_to_wait) +void arch_busy_wait(u32_t usec_to_wait) { u64_t time_end = hwm_get_time() + usec_to_wait; diff --git a/include/arch/arc/arch.h b/include/arch/arc/arch.h index c116eb7bd36..e85708eea78 100644 --- a/include/arch/arc/arch.h +++ b/include/arch/arc/arch.h @@ -89,10 +89,10 @@ extern "C" { #if defined(CONFIG_USERSPACE) -#define Z_ARCH_THREAD_STACK_RESERVED \ (STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE) +#define ARCH_THREAD_STACK_RESERVED \ (STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE) #else -#define Z_ARCH_THREAD_STACK_RESERVED (STACK_GUARD_SIZE) +#define ARCH_THREAD_STACK_RESERVED (STACK_GUARD_SIZE) #endif @@ -107,8 +107,8 @@ extern "C" { * MPU start, size alignment */ #define Z_ARC_THREAD_STACK_ALIGN(size) Z_ARC_MPUV2_SIZE_ALIGN(size) -#define Z_ARCH_THREAD_STACK_LEN(size) \ - (Z_ARC_MPUV2_SIZE_ALIGN(size) + Z_ARCH_THREAD_STACK_RESERVED) +#define ARCH_THREAD_STACK_LEN(size) \ + (Z_ARC_MPUV2_SIZE_ALIGN(size) + ARCH_THREAD_STACK_RESERVED) /* * for stack array, each array member should be aligned both in size * and start @@ -116,7 +116,7 @@ extern "C" { #define Z_ARC_THREAD_STACK_ARRAY_LEN(size) \ (Z_ARC_MPUV2_SIZE_ALIGN(size) + \ MAX(Z_ARC_MPUV2_SIZE_ALIGN(size), \ - POW2_CEIL(Z_ARCH_THREAD_STACK_RESERVED))) + POW2_CEIL(ARCH_THREAD_STACK_RESERVED))) #else /* * MPUv3, no-mpu and no USERSPACE share the same macro definitions.
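Applications never spell out these ARCH_* stack macros; the portable wrappers in kernel.h expand to them, as in this sketch (worker_entry() is hypothetical):

K_THREAD_STACK_DEFINE(worker_stack, 1024);	/* -> ARCH_THREAD_STACK_DEFINE */
static struct k_thread worker_thread;

extern void worker_entry(void *p1, void *p2, void *p3);	/* hypothetical */

void start_worker_sketch(void)
{
	k_thread_create(&worker_thread, worker_stack,
			K_THREAD_STACK_SIZEOF(worker_stack),
			worker_entry, NULL, NULL, NULL,
			5, 0, K_NO_WAIT);
}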
@@ -130,33 +130,33 @@ extern "C" { * aligned */ #define Z_ARC_THREAD_STACK_ALIGN(size) (STACK_ALIGN) -#define Z_ARCH_THREAD_STACK_LEN(size) \ - (STACK_SIZE_ALIGN(size) + Z_ARCH_THREAD_STACK_RESERVED) +#define ARCH_THREAD_STACK_LEN(size) \ + (STACK_SIZE_ALIGN(size) + ARCH_THREAD_STACK_RESERVED) #define Z_ARC_THREAD_STACK_ARRAY_LEN(size) \ - Z_ARCH_THREAD_STACK_LEN(size) + ARCH_THREAD_STACK_LEN(size) #endif /* CONFIG_USERSPACE && CONFIG_ARC_MPU_VER == 2 */ -#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \ +#define ARCH_THREAD_STACK_DEFINE(sym, size) \ struct _k_thread_stack_element __noinit \ __aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \ - sym[Z_ARCH_THREAD_STACK_LEN(size)] + sym[ARCH_THREAD_STACK_LEN(size)] -#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ +#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ struct _k_thread_stack_element __noinit \ __aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \ sym[nmemb][Z_ARC_THREAD_STACK_ARRAY_LEN(size)] -#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \ +#define ARCH_THREAD_STACK_MEMBER(sym, size) \ struct _k_thread_stack_element \ __aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \ - sym[Z_ARCH_THREAD_STACK_LEN(size)] + sym[ARCH_THREAD_STACK_LEN(size)] -#define Z_ARCH_THREAD_STACK_SIZEOF(sym) \ - (sizeof(sym) - Z_ARCH_THREAD_STACK_RESERVED) +#define ARCH_THREAD_STACK_SIZEOF(sym) \ + (sizeof(sym) - ARCH_THREAD_STACK_RESERVED) -#define Z_ARCH_THREAD_STACK_BUFFER(sym) \ +#define ARCH_THREAD_STACK_BUFFER(sym) \ ((char *)(sym)) #ifdef CONFIG_ARC_MPU @@ -227,7 +227,7 @@ extern "C" { /* Typedef for the k_mem_partition attribute*/ typedef u32_t k_mem_partition_attr_t; -static ALWAYS_INLINE void z_arch_nop(void) +static ALWAYS_INLINE void arch_nop(void) { __asm__ volatile("nop"); } diff --git a/include/arch/arc/arch_inlines.h b/include/arch/arc/arch_inlines.h index 316507ca9db..68f85c72297 100644 --- a/include/arch/arc/arch_inlines.h +++ b/include/arch/arc/arch_inlines.h @@ -16,7 +16,7 @@ #include #endif -static ALWAYS_INLINE _cpu_t *z_arch_curr_cpu(void) +static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void) { #ifdef CONFIG_SMP u32_t core; diff --git a/include/arch/arc/syscall.h b/include/arch/arc/syscall.h index a3b7cf28e0b..b9e449c16d9 100644 --- a/include/arch/arc/syscall.h +++ b/include/arch/arc/syscall.h @@ -38,10 +38,10 @@ extern "C" { * just for enabling CONFIG_USERSPACE on arc w/o errors. 
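The invoke shims that follow mirror the generated-wrapper pattern shown in the syscalls.rst hunk earlier: each wrapper marshals its arguments into registers and traps. A sketch modeled on that k_sem_init() body; the k_sem_give case and its K_SYSCALL_* id stand in for what the generator emits:

static inline void k_sem_give_sketch(struct k_sem *sem)
{
	if (z_syscall_trap()) {
		arch_syscall_invoke1(*(uintptr_t *)&sem,
				     K_SYSCALL_K_SEM_GIVE);
		return;
	}
	z_impl_k_sem_give(sem);
}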
*/ -static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t arg5, uintptr_t arg6, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t arg5, uintptr_t arg6, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r1 __asm__("r1") = arg2; @@ -63,10 +63,10 @@ static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t arg5, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t arg5, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r1 __asm__("r1") = arg2; @@ -87,9 +87,9 @@ static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r1 __asm__("r1") = arg2; @@ -109,9 +109,9 @@ static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r1 __asm__("r1") = arg2; @@ -129,8 +129,8 @@ static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r1 __asm__("r1") = arg2; @@ -147,7 +147,7 @@ static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r6 __asm__("r6") = call_id; @@ -163,7 +163,7 @@ static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id return ret; } -static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id) { register u32_t ret __asm__("r0"); register u32_t r6 __asm__("r6") = call_id; @@ -179,7 +179,7 @@ static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id) return ret; } -static inline bool z_arch_is_user_context(void) +static inline bool arch_is_user_context(void) { u32_t status; diff --git a/include/arch/arc/v2/error.h b/include/arch/arc/v2/error.h index 4943f9258f9..3c584266cc3 100644 --- a/include/arch/arc/v2/error.h +++ b/include/arch/arc/v2/error.h @@ -25,7 +25,7 @@ extern "C" { /* * use trap_s to raise a SW exception */ -#define Z_ARCH_EXCEPT(reason_p) do { \ +#define ARCH_EXCEPT(reason_p) do { \ __asm__ volatile ( \ "mov r0, %[reason]\n\t" \ "trap_s %[id]\n\t" \ diff --git a/include/arch/arc/v2/irq.h 
b/include/arch/arc/v2/irq.h index 9b9aa412c80..afbf0f42713 100644 --- a/include/arch/arc/v2/irq.h +++ b/include/arch/arc/v2/irq.h @@ -26,15 +26,15 @@ extern "C" { #ifdef _ASMLANGUAGE GTEXT(_irq_exit); -GTEXT(z_arch_irq_enable) -GTEXT(z_arch_irq_disable) +GTEXT(arch_irq_enable) +GTEXT(arch_irq_disable) GTEXT(z_arc_firq_stack_set) #else extern void z_arc_firq_stack_set(void); -extern void z_arch_irq_enable(unsigned int irq); -extern void z_arch_irq_disable(unsigned int irq); -extern int z_arch_irq_is_enabled(unsigned int irq); +extern void arch_irq_enable(unsigned int irq); +extern void arch_irq_disable(unsigned int irq); +extern int arch_irq_is_enabled(unsigned int irq); extern void _irq_exit(void); extern void z_irq_priority_set(unsigned int irq, unsigned int prio, @@ -50,7 +50,7 @@ extern void z_irq_spurious(void *unused); * We additionally set the priority in the interrupt controller at * runtime. */ -#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ +#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ ({ \ Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \ z_irq_priority_set(irq_p, priority_p, flags_p); \ @@ -78,7 +78,7 @@ extern void z_irq_spurious(void *unused); * See include/irq.h for details. * All arguments must be computable at build time. */ -#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ +#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ ({ \ Z_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \ BUILD_ASSERT_MSG(priority_p || !IS_ENABLED(CONFIG_ARC_FIRQ) || \ @@ -92,14 +92,14 @@ extern void z_irq_spurious(void *unused); }) -static inline void z_arch_isr_direct_header(void) +static inline void arch_isr_direct_header(void) { #ifdef CONFIG_TRACING z_sys_trace_isr_enter(); #endif } -static inline void z_arch_isr_direct_footer(int maybe_swap) +static inline void arch_isr_direct_footer(int maybe_swap) { /* clear SW generated interrupt */ if (z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE) == @@ -111,16 +111,16 @@ static inline void z_arch_isr_direct_footer(int maybe_swap) #endif } -#define Z_ARCH_ISR_DIRECT_HEADER() z_arch_isr_direct_header() -extern void z_arch_isr_direct_header(void); +#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header() +extern void arch_isr_direct_header(void); -#define Z_ARCH_ISR_DIRECT_FOOTER(swap) z_arch_isr_direct_footer(swap) +#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap) /* * Scheduling can not be done in direct isr. If required, please use kernel * aware interrupt handling */ -#define Z_ARCH_ISR_DIRECT_DECLARE(name) \ +#define ARCH_ISR_DIRECT_DECLARE(name) \ static inline int name##_body(void); \ __attribute__ ((interrupt("ilink")))void name(void) \ { \ @@ -163,7 +163,7 @@ extern void z_arch_isr_direct_header(void); * "interrupt disable state" prior to the call. 
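The canonical use of the lock primitive documented here appears in the arcv2_timer0.c hunk earlier in this patch; in general it brackets a short critical section, as sketched below (the body is hypothetical):

void critical_section_sketch(void)
{
	unsigned int key = arch_irq_lock();

	program_timer_registers();	/* hypothetical critical work */

	arch_irq_unlock(key);	/* restores the saved disable state */
}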
*/ -static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) +static ALWAYS_INLINE unsigned int arch_irq_lock(void) { unsigned int key; @@ -171,12 +171,12 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) return key; } -static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) +static ALWAYS_INLINE void arch_irq_unlock(unsigned int key) { __asm__ volatile("seti %0" : : "ir"(key) : "memory"); } -static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key) +static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key) { /* ARC irq lock uses instruction "clri r0", * r0 == {26’d0, 1’b1, STATUS32.IE, STATUS32.E[3:0] } diff --git a/include/arch/arc/v2/misc.h b/include/arch/arc/v2/misc.h index 7c5014648e5..9124a8d6f18 100644 --- a/include/arch/arc/v2/misc.h +++ b/include/arch/arc/v2/misc.h @@ -23,7 +23,7 @@ extern unsigned int z_arc_cpu_sleep_mode; extern u32_t z_timer_cycle_get_32(void); -static inline u32_t z_arch_k_cycle_get_32(void) +static inline u32_t arch_k_cycle_get_32(void) { return z_timer_cycle_get_32(); } diff --git a/include/arch/arm/arch.h b/include/arch/arm/arch.h index 04bd26f144a..cce1c8399bd 100644 --- a/include/arch/arm/arch.h +++ b/include/arch/arm/arch.h @@ -189,56 +189,56 @@ extern "C" { /* Guard is 'carved-out' of the thread stack region, and the supervisor * mode stack is allocated elsewhere by gen_priv_stack.py */ -#define Z_ARCH_THREAD_STACK_RESERVED 0 +#define ARCH_THREAD_STACK_RESERVED 0 #else -#define Z_ARCH_THREAD_STACK_RESERVED MPU_GUARD_ALIGN_AND_SIZE +#define ARCH_THREAD_STACK_RESERVED MPU_GUARD_ALIGN_AND_SIZE #endif #if defined(CONFIG_USERSPACE) && \ defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) -#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \ +#define ARCH_THREAD_STACK_DEFINE(sym, size) \ struct _k_thread_stack_element __noinit \ __aligned(POW2_CEIL(size)) sym[POW2_CEIL(size)] #else -#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \ +#define ARCH_THREAD_STACK_DEFINE(sym, size) \ struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \ sym[size+MPU_GUARD_ALIGN_AND_SIZE] #endif #if defined(CONFIG_USERSPACE) && \ defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) -#define Z_ARCH_THREAD_STACK_LEN(size) (POW2_CEIL(size)) +#define ARCH_THREAD_STACK_LEN(size) (POW2_CEIL(size)) #else -#define Z_ARCH_THREAD_STACK_LEN(size) ((size)+MPU_GUARD_ALIGN_AND_SIZE) +#define ARCH_THREAD_STACK_LEN(size) ((size)+MPU_GUARD_ALIGN_AND_SIZE) #endif #if defined(CONFIG_USERSPACE) && \ defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) -#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ +#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ struct _k_thread_stack_element __noinit \ __aligned(POW2_CEIL(size)) \ - sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)] + sym[nmemb][ARCH_THREAD_STACK_LEN(size)] #else -#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ +#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ struct _k_thread_stack_element __noinit \ __aligned(STACK_ALIGN) \ - sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)] + sym[nmemb][ARCH_THREAD_STACK_LEN(size)] #endif #if defined(CONFIG_USERSPACE) && \ defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) -#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \ +#define ARCH_THREAD_STACK_MEMBER(sym, size) \ struct _k_thread_stack_element __aligned(POW2_CEIL(size)) \ sym[POW2_CEIL(size)] #else -#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \ +#define ARCH_THREAD_STACK_MEMBER(sym, size) \ struct _k_thread_stack_element __aligned(STACK_ALIGN) \ sym[size+MPU_GUARD_ALIGN_AND_SIZE] #endif -#define 
Z_ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - MPU_GUARD_ALIGN_AND_SIZE) +#define ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - MPU_GUARD_ALIGN_AND_SIZE) -#define Z_ARCH_THREAD_STACK_BUFFER(sym) \ +#define ARCH_THREAD_STACK_BUFFER(sym) \ ((char *)(sym) + MPU_GUARD_ALIGN_AND_SIZE) /* Legacy case: retain containing extern "C" with C++ */ diff --git a/include/arch/arm/asm_inline_gcc.h b/include/arch/arm/asm_inline_gcc.h index 460849fd78d..4873c5776bf 100644 --- a/include/arch/arm/asm_inline_gcc.h +++ b/include/arch/arm/asm_inline_gcc.h @@ -36,7 +36,7 @@ extern "C" { * except NMI. */ -static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) +static ALWAYS_INLINE unsigned int arch_irq_lock(void) { unsigned int key; @@ -75,7 +75,7 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) * previously disabled. */ -static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) +static ALWAYS_INLINE void arch_irq_unlock(unsigned int key) { #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) if (key) { @@ -100,7 +100,7 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ } -static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key) +static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key) { /* This convention works for both PRIMASK and BASEPRI */ return key == 0; diff --git a/include/arch/arm/error.h b/include/arch/arm/error.h index 4017369524a..fb4a2b23d9e 100644 --- a/include/arch/arm/error.h +++ b/include/arch/arm/error.h @@ -31,7 +31,7 @@ extern "C" { * schedule a new thread until they are unlocked which is not what we want. * Force them unlocked as well. */ -#define Z_ARCH_EXCEPT(reason_p) \ +#define ARCH_EXCEPT(reason_p) \ register u32_t r0 __asm__("r0") = reason_p; \ do { \ __asm__ volatile ( \ @@ -42,7 +42,7 @@ do { \ : "memory"); \ } while (false) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) -#define Z_ARCH_EXCEPT(reason_p) do { \ +#define ARCH_EXCEPT(reason_p) do { \ __asm__ volatile ( \ "eors.n r0, r0\n\t" \ "msr BASEPRI, r0\n\t" \ diff --git a/include/arch/arm/irq.h b/include/arch/arm/irq.h index c698b222d32..eb5afd35d4e 100644 --- a/include/arch/arm/irq.h +++ b/include/arch/arm/irq.h @@ -24,13 +24,13 @@ extern "C" { #ifdef _ASMLANGUAGE GTEXT(z_arm_int_exit); -GTEXT(z_arch_irq_enable) -GTEXT(z_arch_irq_disable) -GTEXT(z_arch_irq_is_enabled) +GTEXT(arch_irq_enable) +GTEXT(arch_irq_disable) +GTEXT(arch_irq_is_enabled) #else -extern void z_arch_irq_enable(unsigned int irq); -extern void z_arch_irq_disable(unsigned int irq); -extern int z_arch_irq_is_enabled(unsigned int irq); +extern void arch_irq_enable(unsigned int irq); +extern void arch_irq_disable(unsigned int irq); +extern int arch_irq_is_enabled(unsigned int irq); extern void z_arm_int_exit(void); @@ -76,14 +76,14 @@ extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, * We additionally set the priority in the interrupt controller at * runtime. 
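 *
 * Illustrative use through the public wrapper (IRQ number, priority and
 * handler names are hypothetical):
 *
 *     IRQ_CONNECT(TIMER0_IRQ, 2, timer0_isr, &timer0_data, 0);
 *     irq_enable(TIMER0_IRQ);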
*/ -#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ +#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ ({ \ Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \ z_arm_irq_priority_set(irq_p, priority_p, flags_p); \ irq_p; \ }) -#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ +#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ ({ \ Z_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \ z_arm_irq_priority_set(irq_p, priority_p, flags_p); \ @@ -93,15 +93,15 @@ extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, /* FIXME prefer these inline, but see GH-3056 */ #ifdef CONFIG_SYS_POWER_MANAGEMENT extern void _arch_isr_direct_pm(void); -#define Z_ARCH_ISR_DIRECT_PM() _arch_isr_direct_pm() +#define ARCH_ISR_DIRECT_PM() _arch_isr_direct_pm() #else -#define Z_ARCH_ISR_DIRECT_PM() do { } while (false) +#define ARCH_ISR_DIRECT_PM() do { } while (false) #endif -#define Z_ARCH_ISR_DIRECT_HEADER() z_arch_isr_direct_header() -extern void z_arch_isr_direct_header(void); +#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header() +extern void arch_isr_direct_header(void); -#define Z_ARCH_ISR_DIRECT_FOOTER(swap) z_arch_isr_direct_footer(swap) +#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap) /* arch/arm/core/exc_exit.S */ extern void z_arm_int_exit(void); @@ -110,7 +110,7 @@ extern void z_arm_int_exit(void); extern void sys_trace_isr_exit(void); #endif -static inline void z_arch_isr_direct_footer(int maybe_swap) +static inline void arch_isr_direct_footer(int maybe_swap) { #ifdef CONFIG_TRACING @@ -121,7 +121,7 @@ static inline void z_arch_isr_direct_footer(int maybe_swap) } } -#define Z_ARCH_ISR_DIRECT_DECLARE(name) \ +#define ARCH_ISR_DIRECT_DECLARE(name) \ static inline int name##_body(void); \ __attribute__ ((interrupt ("IRQ"))) void name(void) \ { \ diff --git a/include/arch/arm/misc.h b/include/arch/arm/misc.h index 47f051cf4c2..35f5886bc8b 100644 --- a/include/arch/arm/misc.h +++ b/include/arch/arm/misc.h @@ -21,12 +21,12 @@ extern "C" { #ifndef _ASMLANGUAGE extern u32_t z_timer_cycle_get_32(void); -static inline u32_t z_arch_k_cycle_get_32(void) +static inline u32_t arch_k_cycle_get_32(void) { return z_timer_cycle_get_32(); } -static ALWAYS_INLINE void z_arch_nop(void) +static ALWAYS_INLINE void arch_nop(void) { __asm__ volatile("nop"); } diff --git a/include/arch/arm/syscall.h b/include/arch/arm/syscall.h index f727fff2b99..bf2ad9a34dd 100644 --- a/include/arch/arm/syscall.h +++ b/include/arch/arm/syscall.h @@ -36,10 +36,10 @@ extern "C" { /* Syscall invocation macros. arm-specific machine constraints used to ensure * args land in the proper registers. 
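 *
 * Sketch of the resulting convention, per the inline asm below: arguments
 * land in r0-r5, the call ID in r6, and the result comes back in r0, so a
 * generated stub for a hypothetical two-argument syscall reduces to:
 *
 *     return arch_syscall_invoke2(arg1, arg2, K_SYSCALL_MY_CALL);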
*/ -static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t arg5, uintptr_t arg6, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t arg5, uintptr_t arg6, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r1 __asm__("r1") = arg2; @@ -59,10 +59,10 @@ static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t arg5, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t arg5, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r1 __asm__("r1") = arg2; @@ -81,9 +81,9 @@ static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r1 __asm__("r1") = arg2; @@ -101,9 +101,9 @@ static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r1 __asm__("r1") = arg2; @@ -119,8 +119,8 @@ static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r1 __asm__("r1") = arg2; @@ -135,8 +135,8 @@ static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, + uintptr_t call_id) { register u32_t ret __asm__("r0") = arg1; register u32_t r6 __asm__("r6") = call_id; @@ -149,7 +149,7 @@ static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, return ret; } -static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id) { register u32_t ret __asm__("r0"); register u32_t r6 __asm__("r6") = call_id; @@ -163,7 +163,7 @@ static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id) return ret; } -static inline bool z_arch_is_user_context(void) +static inline bool arch_is_user_context(void) { u32_t value; diff --git a/include/arch/nios2/arch.h b/include/arch/nios2/arch.h index f5fdaf4442a..55d4939343e 100644 --- a/include/arch/nios2/arch.h +++ b/include/arch/nios2/arch.h @@ -38,7 +38,7 @@ extern "C" { /* There is no notion of priority with the Nios II internal interrupt * controller and no flags are currently supported. 
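 *
 * For illustration (IRQ number hypothetical), the priority argument is
 * still required by the portable API but is simply ignored here:
 *
 *     IRQ_CONNECT(5, 0, my_isr, NULL, 0);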
*/ -#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ +#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ ({ \ Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \ irq_p; \ @@ -46,7 +46,7 @@ extern "C" { extern void z_irq_spurious(void *unused); -static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) +static ALWAYS_INLINE unsigned int arch_irq_lock(void) { unsigned int key, tmp; @@ -61,7 +61,7 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) return key; } -static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) +static ALWAYS_INLINE void arch_irq_unlock(unsigned int key) { /* If the CPU is built without certain features, then * the only writable bit in the status register is PIE @@ -93,13 +93,13 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) #endif } -static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key) +static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key) { return key & 1; } -void z_arch_irq_enable(unsigned int irq); -void z_arch_irq_disable(unsigned int irq); +void arch_irq_enable(unsigned int irq); +void arch_irq_disable(unsigned int irq); struct __esf { u32_t ra; /* return address r31 */ @@ -173,12 +173,12 @@ enum nios2_exception_cause { extern u32_t z_timer_cycle_get_32(void); -static inline u32_t z_arch_k_cycle_get_32(void) +static inline u32_t arch_k_cycle_get_32(void) { return z_timer_cycle_get_32(); } -static ALWAYS_INLINE void z_arch_nop(void) +static ALWAYS_INLINE void arch_nop(void) { __asm__ volatile("nop"); } diff --git a/include/arch/posix/arch.h b/include/arch/posix/arch.h index 245e0490329..bb62b95d190 100644 --- a/include/arch/posix/arch.h +++ b/include/arch/posix/arch.h @@ -48,28 +48,28 @@ typedef struct __esf z_arch_esf_t; extern u32_t z_timer_cycle_get_32(void); -static inline u32_t z_arch_k_cycle_get_32(void) +static inline u32_t arch_k_cycle_get_32(void) { return z_timer_cycle_get_32(); } -static ALWAYS_INLINE void z_arch_nop(void) +static ALWAYS_INLINE void arch_nop(void) { __asm__ volatile("nop"); } -static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key) +static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key) { return key == false; } -static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) +static ALWAYS_INLINE unsigned int arch_irq_lock(void) { return posix_irq_lock(); } -static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) +static ALWAYS_INLINE void arch_irq_unlock(unsigned int key) { posix_irq_unlock(key); } diff --git a/include/arch/riscv/arch.h b/include/arch/riscv/arch.h index b841a85d6c5..0c1af8e81a9 100644 --- a/include/arch/riscv/arch.h +++ b/include/arch/riscv/arch.h @@ -64,21 +64,21 @@ extern "C" { */ extern u32_t __soc_get_irq(void); -void z_arch_irq_enable(unsigned int irq); -void z_arch_irq_disable(unsigned int irq); -int z_arch_irq_is_enabled(unsigned int irq); -void z_arch_irq_priority_set(unsigned int irq, unsigned int prio); +void arch_irq_enable(unsigned int irq); +void arch_irq_disable(unsigned int irq); +int arch_irq_is_enabled(unsigned int irq); +void arch_irq_priority_set(unsigned int irq, unsigned int prio); void z_irq_spurious(void *unused); #if defined(CONFIG_RISCV_HAS_PLIC) -#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ +#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ ({ \ Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \ - z_arch_irq_priority_set(irq_p, priority_p); \ + arch_irq_priority_set(irq_p, priority_p); \ irq_p; \ }) #else -#define 
Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ +#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ ({ \ Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \ irq_p; \ @@ -89,7 +89,7 @@ void z_irq_spurious(void *unused); * use atomic instruction csrrc to lock global irq * csrrc: atomic read and clear bits in CSR register */ -static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) +static ALWAYS_INLINE unsigned int arch_irq_lock(void) { unsigned int key; ulong_t mstatus; @@ -107,7 +107,7 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) * use atomic instruction csrrs to unlock global irq * csrrs: atomic read and set bits in CSR register */ -static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) +static ALWAYS_INLINE void arch_irq_unlock(unsigned int key) { ulong_t mstatus; @@ -117,26 +117,26 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) : "memory"); } -static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key) +static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key) { - /* FIXME: looking at z_arch_irq_lock, this should be reducable + /* FIXME: looking at arch_irq_lock, this should be reducible * to just testing that key is nonzero (because it should only * have the single bit set). But there is a mask applied to - * the argument in z_arch_irq_unlock() that has me worried + * the argument in arch_irq_unlock() that has me worried * that something elsewhere might try to set a bit? Do it * the safe way for now. */ return (key & SOC_MSTATUS_IEN) == SOC_MSTATUS_IEN; } -static ALWAYS_INLINE void z_arch_nop(void) +static ALWAYS_INLINE void arch_nop(void) { __asm__ volatile("nop"); } extern u32_t z_timer_cycle_get_32(void); -static inline u32_t z_arch_k_cycle_get_32(void) +static inline u32_t arch_k_cycle_get_32(void) { return z_timer_cycle_get_32(); } diff --git a/include/arch/x86/arch.h b/include/arch/x86/arch.h index 74698e7f0ac..57ae8875af2 100644 --- a/include/arch/x86/arch.h +++ b/include/arch/x86/arch.h @@ -18,7 +18,7 @@ #include #include -static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) +static ALWAYS_INLINE void arch_irq_unlock(unsigned int key) { if ((key & 0x00000200U) != 0U) { /* 'IF' bit */ __asm__ volatile ("sti" ::: "memory"); @@ -209,17 +209,17 @@ extern unsigned char _irq_to_interrupt_vector[]; #ifndef _ASMLANGUAGE -extern void z_arch_irq_enable(unsigned int irq); -extern void z_arch_irq_disable(unsigned int irq); +extern void arch_irq_enable(unsigned int irq); +extern void arch_irq_disable(unsigned int irq); extern u32_t z_timer_cycle_get_32(void); -static inline u32_t z_arch_k_cycle_get_32(void) +static inline u32_t arch_k_cycle_get_32(void) { return z_timer_cycle_get_32(); } -static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key) +static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key) { return (key & 0x200) != 0; } @@ -268,7 +268,7 @@ static inline u64_t z_tsc_read(void) return rv.value; } -static ALWAYS_INLINE void z_arch_nop(void) +static ALWAYS_INLINE void arch_nop(void) { __asm__ volatile("nop"); } diff --git a/include/arch/x86/arch_inlines.h b/include/arch/x86/arch_inlines.h index 3b60dd35733..1ab73da9b80 100644 --- a/include/arch/x86/arch_inlines.h +++ b/include/arch/x86/arch_inlines.h @@ -15,7 +15,7 @@ #include #include -static inline struct _cpu *z_arch_curr_cpu(void) +static inline struct _cpu *arch_curr_cpu(void) { struct _cpu *cpu; diff --git a/include/arch/x86/ia32/arch.h b/include/arch/x86/ia32/arch.h index b0237a5d673..d8401c1fea1 100644 ---
a/include/arch/x86/ia32/arch.h +++ b/include/arch/x86/ia32/arch.h @@ -176,7 +176,7 @@ typedef struct s_isrList { * 4. z_irq_controller_irq_config() is called at runtime to set the mapping * between the vector and the IRQ line as well as triggering flags */ -#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ +#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ ({ \ __asm__ __volatile__( \ ".pushsection .intList\n\t" \ @@ -205,7 +205,7 @@ typedef struct s_isrList { Z_IRQ_TO_INTERRUPT_VECTOR(irq_p); \ }) -#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ +#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ ({ \ NANO_CPU_INT_REGISTER(isr_p, irq_p, priority_p, -1, 0); \ z_irq_controller_irq_config(Z_IRQ_TO_INTERRUPT_VECTOR(irq_p), (irq_p), \ @@ -215,20 +215,20 @@ typedef struct s_isrList { #ifdef CONFIG_SYS_POWER_MANAGEMENT -extern void z_arch_irq_direct_pm(void); -#define Z_ARCH_ISR_DIRECT_PM() z_arch_irq_direct_pm() +extern void arch_irq_direct_pm(void); +#define ARCH_ISR_DIRECT_PM() arch_irq_direct_pm() #else -#define Z_ARCH_ISR_DIRECT_PM() do { } while (false) +#define ARCH_ISR_DIRECT_PM() do { } while (false) #endif -#define Z_ARCH_ISR_DIRECT_HEADER() z_arch_isr_direct_header() -#define Z_ARCH_ISR_DIRECT_FOOTER(swap) z_arch_isr_direct_footer(swap) +#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header() +#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap) /* FIXME prefer these inline, but see GH-3056 */ -extern void z_arch_isr_direct_header(void); -extern void z_arch_isr_direct_footer(int maybe_swap); +extern void arch_isr_direct_header(void); +extern void arch_isr_direct_footer(int maybe_swap); -#define Z_ARCH_ISR_DIRECT_DECLARE(name) \ +#define ARCH_ISR_DIRECT_DECLARE(name) \ static inline int name##_body(void); \ __attribute__ ((interrupt)) void name(void *stack_frame) \ { \ @@ -279,7 +279,7 @@ struct _x86_syscall_stack_frame { u32_t ss; }; -static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) +static ALWAYS_INLINE unsigned int arch_irq_lock(void) { unsigned int key; @@ -344,7 +344,7 @@ extern struct task_state_segment _main_tss; #endif #if CONFIG_X86_KERNEL_OOPS -#define Z_ARCH_EXCEPT(reason_p) do { \ +#define ARCH_EXCEPT(reason_p) do { \ __asm__ volatile( \ "push %[reason]\n\t" \ "int %[vector]\n\t" \ diff --git a/include/arch/x86/ia32/syscall.h b/include/arch/x86/ia32/syscall.h index 6ed00a59597..ba4f9c40b9b 100644 --- a/include/arch/x86/ia32/syscall.h +++ b/include/arch/x86/ia32/syscall.h @@ -34,10 +34,10 @@ extern "C" { * z_x86_syscall_entry_stub in userspace.S */ -static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t arg5, uintptr_t arg6, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t arg5, uintptr_t arg6, + uintptr_t call_id) { u32_t ret; @@ -53,10 +53,10 @@ static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t arg5, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t arg5, + uintptr_t call_id) { u32_t ret; @@ -68,9 +68,9 @@ static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t 
z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t call_id) { u32_t ret; @@ -82,9 +82,9 @@ static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, + uintptr_t call_id) { u32_t ret; @@ -95,8 +95,8 @@ static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, + uintptr_t call_id) { u32_t ret; @@ -108,8 +108,8 @@ static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, return ret; } -static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, - uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, + uintptr_t call_id) { u32_t ret; @@ -121,7 +121,7 @@ static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, return ret; } -static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id) +static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id) { u32_t ret; @@ -133,7 +133,7 @@ static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id) return ret; } -static inline bool z_arch_is_user_context(void) +static inline bool arch_is_user_context(void) { int cs; diff --git a/include/arch/x86/ia32/thread.h b/include/arch/x86/ia32/thread.h index 23e74070a79..c914bf4331e 100644 --- a/include/arch/x86/ia32/thread.h +++ b/include/arch/x86/ia32/thread.h @@ -202,7 +202,7 @@ typedef struct s_preempFloatReg { * The thread control structure definition. It contains the * various fields to manage a _single_ thread. The TCS will be aligned * to the appropriate architecture specific boundary via the - * z_arch_new_thread() call. + * arch_new_thread() call. */ struct _thread_arch { diff --git a/include/arch/x86/intel64/arch.h b/include/arch/x86/intel64/arch.h index fc8b19d1806..ccaf020cf1b 100644 --- a/include/arch/x86/intel64/arch.h +++ b/include/arch/x86/intel64/arch.h @@ -18,7 +18,7 @@ #endif #ifndef _ASMLANGUAGE -static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) +static ALWAYS_INLINE unsigned int arch_irq_lock(void) { unsigned long key; @@ -64,6 +64,6 @@ typedef struct x86_esf z_arch_esf_t; * All Intel64 interrupts are dynamically connected. 
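 *
 * Sketch of the consequence: IRQ_CONNECT() on this target becomes a
 * runtime call rather than a build-time table entry, roughly (arguments
 * hypothetical):
 *
 *     arch_irq_connect_dynamic(irq, prio, my_isr, my_arg, 0);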
*/ -#define Z_ARCH_IRQ_CONNECT z_arch_irq_connect_dynamic +#define ARCH_IRQ_CONNECT arch_irq_connect_dynamic #endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL64_ARCH_H_ */ diff --git a/include/arch/x86/thread_stack.h b/include/arch/x86/thread_stack.h index 585937beafb..203a999a727 100644 --- a/include/arch/x86/thread_stack.h +++ b/include/arch/x86/thread_stack.h @@ -195,36 +195,36 @@ struct z_x86_thread_stack_header { #endif /* CONFIG_USERSPACE */ } __packed __aligned(Z_X86_STACK_BASE_ALIGN); -#define Z_ARCH_THREAD_STACK_RESERVED \ +#define ARCH_THREAD_STACK_RESERVED \ ((u32_t)sizeof(struct z_x86_thread_stack_header)) -#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \ +#define ARCH_THREAD_STACK_DEFINE(sym, size) \ struct _k_thread_stack_element __noinit \ __aligned(Z_X86_STACK_BASE_ALIGN) \ sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \ - Z_ARCH_THREAD_STACK_RESERVED] + ARCH_THREAD_STACK_RESERVED] -#define Z_ARCH_THREAD_STACK_LEN(size) \ +#define ARCH_THREAD_STACK_LEN(size) \ (ROUND_UP((size), \ MAX(Z_X86_STACK_BASE_ALIGN, \ Z_X86_STACK_SIZE_ALIGN)) + \ - Z_ARCH_THREAD_STACK_RESERVED) + ARCH_THREAD_STACK_RESERVED) -#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ +#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ struct _k_thread_stack_element __noinit \ __aligned(Z_X86_STACK_BASE_ALIGN) \ - sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)] + sym[nmemb][ARCH_THREAD_STACK_LEN(size)] -#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \ +#define ARCH_THREAD_STACK_MEMBER(sym, size) \ struct _k_thread_stack_element __aligned(Z_X86_STACK_BASE_ALIGN) \ sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \ - Z_ARCH_THREAD_STACK_RESERVED] + ARCH_THREAD_STACK_RESERVED] -#define Z_ARCH_THREAD_STACK_SIZEOF(sym) \ - (sizeof(sym) - Z_ARCH_THREAD_STACK_RESERVED) +#define ARCH_THREAD_STACK_SIZEOF(sym) \ + (sizeof(sym) - ARCH_THREAD_STACK_RESERVED) -#define Z_ARCH_THREAD_STACK_BUFFER(sym) \ - ((char *)((sym) + Z_ARCH_THREAD_STACK_RESERVED)) +#define ARCH_THREAD_STACK_BUFFER(sym) \ + ((char *)((sym) + ARCH_THREAD_STACK_RESERVED)) #endif /* !_ASMLANGUAGE */ #endif /* ZEPHYR_INCLUDE_ARCH_X86_THREAD_STACK_H */ diff --git a/include/arch/xtensa/arch.h b/include/arch/xtensa/arch.h index 4b2896fafe5..647c217fd77 100644 --- a/include/arch/xtensa/arch.h +++ b/include/arch/xtensa/arch.h @@ -40,7 +40,7 @@ extern "C" { /* internal routine documented in C file, needed by IRQ_CONNECT() macro */ extern void z_irq_priority_set(u32_t irq, u32_t prio, u32_t flags); -#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ +#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ ({ \ Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \ irq_p; \ @@ -53,12 +53,12 @@ extern void z_irq_spurious(void *unused); extern u32_t z_timer_cycle_get_32(void); -static inline u32_t z_arch_k_cycle_get_32(void) +static inline u32_t arch_k_cycle_get_32(void) { return z_timer_cycle_get_32(); } -static ALWAYS_INLINE void z_arch_nop(void) +static ALWAYS_INLINE void arch_nop(void) { __asm__ volatile("nop"); } diff --git a/include/arch/xtensa/arch_inlines.h b/include/arch/xtensa/arch_inlines.h index d7bb0374a35..0251fb1c7f7 100644 --- a/include/arch/xtensa/arch_inlines.h +++ b/include/arch/xtensa/arch_inlines.h @@ -22,7 +22,7 @@ __asm__ volatile ("wsr." 
sr " %0" : : "r"(v)); \ } while (false) -static ALWAYS_INLINE _cpu_t *z_arch_curr_cpu(void) +static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void) { _cpu_t *cpu; diff --git a/include/arch/xtensa/irq.h b/include/arch/xtensa/irq.h index 264eaf6b1f2..5fb611e42c7 100644 --- a/include/arch/xtensa/irq.h +++ b/include/arch/xtensa/irq.h @@ -31,19 +31,19 @@ #define CONFIG_NUM_IRQS XCHAL_NUM_INTERRUPTS #endif -#define z_arch_irq_enable(irq) z_soc_irq_enable(irq) -#define z_arch_irq_disable(irq) z_soc_irq_disable(irq) +#define arch_irq_enable(irq) z_soc_irq_enable(irq) +#define arch_irq_disable(irq) z_soc_irq_disable(irq) -#define z_arch_irq_is_enabled(irq) z_soc_irq_is_enabled(irq) +#define arch_irq_is_enabled(irq) z_soc_irq_is_enabled(irq) #else #define CONFIG_NUM_IRQS XCHAL_NUM_INTERRUPTS -#define z_arch_irq_enable(irq) z_xtensa_irq_enable(irq) -#define z_arch_irq_disable(irq) z_xtensa_irq_disable(irq) +#define arch_irq_enable(irq) z_xtensa_irq_enable(irq) +#define arch_irq_disable(irq) z_xtensa_irq_disable(irq) -#define z_arch_irq_is_enabled(irq) z_xtensa_irq_is_enabled(irq) +#define arch_irq_is_enabled(irq) z_xtensa_irq_is_enabled(irq) #endif @@ -57,18 +57,18 @@ static ALWAYS_INLINE void z_xtensa_irq_disable(u32_t irq) z_xt_ints_off(1 << irq); } -static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) +static ALWAYS_INLINE unsigned int arch_irq_lock(void) { unsigned int key = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL); return key; } -static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) +static ALWAYS_INLINE void arch_irq_unlock(unsigned int key) { XTOS_RESTORE_INTLEVEL(key); } -static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key) +static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key) { return (key & 0xf) == 0; /* INTLEVEL field */ } diff --git a/include/exc_handle.h b/include/exc_handle.h index c47db016d17..2bc2fe9ce88 100644 --- a/include/exc_handle.h +++ b/include/exc_handle.h @@ -15,7 +15,7 @@ * * For example, in the case where user mode passes in a C string via * system call, the length of that string needs to be measured. A specially - * written assembly language version of strlen (z_arch_user_string_len) + * written assembly language version of strlen (arch_user_string_len) * defines start and end symbols where the memory in the string is examined; * if this generates a fault, jumping to the fixup symbol within the same * function will return an error result to the caller. diff --git a/include/irq.h b/include/irq.h index 0c899a0de5e..c53a33cbff7 100644 --- a/include/irq.h +++ b/include/irq.h @@ -48,7 +48,7 @@ extern "C" { * @return Interrupt vector assigned to this interrupt. */ #define IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ - Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) + ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) /** * Configure a dynamic interrupt. @@ -68,7 +68,8 @@ irq_connect_dynamic(unsigned int irq, unsigned int priority, void (*routine)(void *parameter), void *parameter, u32_t flags) { - return z_arch_irq_connect_dynamic(irq, priority, routine, parameter, flags); + return arch_irq_connect_dynamic(irq, priority, routine, parameter, + flags); } /** @@ -112,7 +113,7 @@ irq_connect_dynamic(unsigned int irq, unsigned int priority, * @return Interrupt vector assigned to this interrupt. 
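 *
 * Typical pairing with a direct ISR, as a sketch (names hypothetical;
 * returning 1 requests the reschedule check on exit):
 *
 *     ISR_DIRECT_DECLARE(my_direct_isr)
 *     {
 *             do_work();
 *             return 1;
 *     }
 *     IRQ_DIRECT_CONNECT(MY_DEV_IRQ, 0, my_direct_isr, 0);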
*/ #define IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ - Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) + ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) /** * @brief Common tasks before executing the body of an ISR @@ -121,7 +122,7 @@ irq_connect_dynamic(unsigned int irq, unsigned int priority, * minimal architecture-specific tasks before the ISR itself can run. It takes * no arguments and has no return value. */ -#define ISR_DIRECT_HEADER() Z_ARCH_ISR_DIRECT_HEADER() +#define ISR_DIRECT_HEADER() ARCH_ISR_DIRECT_HEADER() /** * @brief Common tasks before exiting the body of an ISR @@ -139,7 +140,7 @@ irq_connect_dynamic(unsigned int irq, unsigned int priority, * @param check_reschedule If nonzero, additionally invoke scheduling logic */ #define ISR_DIRECT_FOOTER(check_reschedule) \ - Z_ARCH_ISR_DIRECT_FOOTER(check_reschedule) + ARCH_ISR_DIRECT_FOOTER(check_reschedule) /** * @brief Perform power management idle exit logic @@ -149,7 +150,7 @@ irq_connect_dynamic(unsigned int irq, unsigned int priority, * exit power management idle state. It takes no parameters and returns no * arguments. It may be omitted, but be careful! */ -#define ISR_DIRECT_PM() Z_ARCH_ISR_DIRECT_PM() +#define ISR_DIRECT_PM() ARCH_ISR_DIRECT_PM() /** * @brief Helper macro to declare a direct interrupt service routine. @@ -179,7 +180,7 @@ irq_connect_dynamic(unsigned int irq, unsigned int priority, * * @param name symbol name of the ISR */ -#define ISR_DIRECT_DECLARE(name) Z_ARCH_ISR_DIRECT_DECLARE(name) +#define ISR_DIRECT_DECLARE(name) ARCH_ISR_DIRECT_DECLARE(name) /** * @brief Lock interrupts. @@ -218,7 +219,7 @@ irq_connect_dynamic(unsigned int irq, unsigned int priority, unsigned int z_smp_global_lock(void); #define irq_lock() z_smp_global_lock() #else -#define irq_lock() z_arch_irq_lock() +#define irq_lock() arch_irq_lock() #endif /** @@ -240,7 +241,7 @@ unsigned int z_smp_global_lock(void); void z_smp_global_unlock(unsigned int key); #define irq_unlock(key) z_smp_global_unlock(key) #else -#define irq_unlock(key) z_arch_irq_unlock(key) +#define irq_unlock(key) arch_irq_unlock(key) #endif /** @@ -252,7 +253,7 @@ void z_smp_global_unlock(unsigned int key); * * @return N/A */ -#define irq_enable(irq) z_arch_irq_enable(irq) +#define irq_enable(irq) arch_irq_enable(irq) /** * @brief Disable an IRQ. @@ -263,7 +264,7 @@ void z_smp_global_unlock(unsigned int key); * * @return N/A */ -#define irq_disable(irq) z_arch_irq_disable(irq) +#define irq_disable(irq) arch_irq_disable(irq) /** * @brief Get IRQ enable state. 
@@ -274,7 +275,7 @@ void z_smp_global_unlock(unsigned int key); * * @return interrupt enable state, true or false */ -#define irq_is_enabled(irq) z_arch_irq_is_enabled(irq) +#define irq_is_enabled(irq) arch_irq_is_enabled(irq) /** * @} diff --git a/include/irq_offload.h b/include/irq_offload.h index 30d9bf6a2ae..d308e3e44c0 100644 --- a/include/irq_offload.h +++ b/include/irq_offload.h @@ -32,7 +32,7 @@ extern "C" { */ static inline void irq_offload(irq_offload_routine_t routine, void *parameter) { - z_arch_irq_offload(routine, parameter); + arch_irq_offload(routine, parameter); } #endif diff --git a/include/kernel.h b/include/kernel.h index 959a05cb232..d2258306e76 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -579,7 +579,7 @@ struct k_thread { /** z_swap() return value */ int swap_retval; - /** Context handle returned via z_arch_switch() */ + /** Context handle returned via arch_switch() */ void *switch_handle; #endif /** resource pool */ @@ -1816,7 +1816,7 @@ static inline u32_t k_uptime_delta_32(s64_t *reftime) */ static inline u32_t k_cycle_get_32(void) { - return z_arch_k_cycle_get_32(); + return arch_k_cycle_get_32(); } /** @@ -4765,7 +4765,7 @@ extern void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state); */ static inline void k_cpu_idle(void) { - z_arch_cpu_idle(); + arch_cpu_idle(); } /** @@ -4781,7 +4781,7 @@ static inline void k_cpu_idle(void) */ static inline void k_cpu_atomic_idle(unsigned int key) { - z_arch_cpu_atomic_idle(key); + arch_cpu_atomic_idle(key); } /** @@ -4793,13 +4793,13 @@ static inline void k_cpu_atomic_idle(unsigned int key) */ extern void z_sys_power_save_idle_exit(s32_t ticks); -#ifdef Z_ARCH_EXCEPT +#ifdef ARCH_EXCEPT /* This architecture has direct support for triggering a CPU exception */ -#define z_except_reason(reason) Z_ARCH_EXCEPT(reason) +#define z_except_reason(reason) ARCH_EXCEPT(reason) #else /* NOTE: This is the implementation for arches that do not implement - * Z_ARCH_EXCEPT() to generate a real CPU exception. + * ARCH_EXCEPT() to generate a real CPU exception. 
* * We won't have a real exception frame to determine the PC value when * the oops occurred, so print file and line number before we jump into @@ -4885,17 +4885,17 @@ extern void z_timer_expiration_handler(struct _timeout *t); */ #define K_THREAD_STACK_EXTERN(sym) extern k_thread_stack_t sym[] -#ifdef Z_ARCH_THREAD_STACK_DEFINE -#define K_THREAD_STACK_DEFINE(sym, size) Z_ARCH_THREAD_STACK_DEFINE(sym, size) +#ifdef ARCH_THREAD_STACK_DEFINE +#define K_THREAD_STACK_DEFINE(sym, size) ARCH_THREAD_STACK_DEFINE(sym, size) #define K_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ - Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) -#define K_THREAD_STACK_LEN(size) Z_ARCH_THREAD_STACK_LEN(size) -#define K_THREAD_STACK_MEMBER(sym, size) Z_ARCH_THREAD_STACK_MEMBER(sym, size) -#define K_THREAD_STACK_SIZEOF(sym) Z_ARCH_THREAD_STACK_SIZEOF(sym) -#define K_THREAD_STACK_RESERVED Z_ARCH_THREAD_STACK_RESERVED + ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) +#define K_THREAD_STACK_LEN(size) ARCH_THREAD_STACK_LEN(size) +#define K_THREAD_STACK_MEMBER(sym, size) ARCH_THREAD_STACK_MEMBER(sym, size) +#define K_THREAD_STACK_SIZEOF(sym) ARCH_THREAD_STACK_SIZEOF(sym) +#define K_THREAD_STACK_RESERVED ARCH_THREAD_STACK_RESERVED static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym) { - return Z_ARCH_THREAD_STACK_BUFFER(sym); + return ARCH_THREAD_STACK_BUFFER(sym); } #else /** @@ -5176,7 +5176,7 @@ __syscall void k_str_out(char *c, size_t n); * * @warning * Some architectures apply restrictions on how the disabling of floating - * point preservation may be requested, see z_arch_float_disable. + * point preservation may be requested, see arch_float_disable. * * @warning * This routine should only be used to disable floating point support for diff --git a/include/kernel_structs.h b/include/kernel_structs.h index 59519723714..28422231812 100644 --- a/include/kernel_structs.h +++ b/include/kernel_structs.h @@ -187,8 +187,8 @@ typedef struct z_kernel _kernel_t; extern struct z_kernel _kernel; #ifdef CONFIG_SMP -#define _current_cpu (z_arch_curr_cpu()) -#define _current (z_arch_curr_cpu()->current) +#define _current_cpu (arch_curr_cpu()) +#define _current (arch_curr_cpu()->current) #else #define _current_cpu (&_kernel.cpus[0]) #define _current _kernel.current diff --git a/include/spinlock.h b/include/spinlock.h index f55ae9efc82..cb69f0e7255 100644 --- a/include/spinlock.h +++ b/include/spinlock.h @@ -73,7 +73,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l) * implementation. The "irq_lock()" API in SMP context is * actually a wrapper for a global spinlock! 
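 *
 * For reference, the caller-side pattern this implements (lock object
 * name hypothetical):
 *
 *     k_spinlock_key_t key = k_spin_lock(&my_lock);
 *     ... access the shared state ...
 *     k_spin_unlock(&my_lock, key);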
*/ - k.key = z_arch_irq_lock(); + k.key = arch_irq_lock(); #ifdef SPIN_VALIDATE __ASSERT(z_spin_lock_valid(l), "Recursive spinlock"); @@ -108,7 +108,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l, */ atomic_clear(&l->locked); #endif - z_arch_irq_unlock(key.key); + arch_irq_unlock(key.key); } /* Internal function: releases the lock, but leaves local interrupts diff --git a/include/sys/arch_interface.h b/include/sys/arch_interface.h index 2b023869380..6fe2c56bc91 100644 --- a/include/sys/arch_interface.h +++ b/include/sys/arch_interface.h @@ -55,7 +55,7 @@ typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3); * * @see k_cycle_get_32() */ -static inline u32_t z_arch_k_cycle_get_32(void); +static inline u32_t arch_k_cycle_get_32(void); /** @} */ @@ -66,43 +66,43 @@ static inline u32_t z_arch_k_cycle_get_32(void); */ /** - * @def Z_ARCH_THREAD_STACK_DEFINE(sym, size) + * @def ARCH_THREAD_STACK_DEFINE(sym, size) * * @see K_THREAD_STACK_DEFINE() */ /** - * @def Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, size) + * @def ARCH_THREAD_STACK_ARRAY_DEFINE(sym, size) * * @see K_THREAD_STACK_ARRAY_DEFINE() */ /** - * @def Z_ARCH_THREAD_STACK_LEN(size) + * @def ARCH_THREAD_STACK_LEN(size) * * @see K_THREAD_STACK_LEN() */ /** - * @def Z_ARCH_THREAD_STACK_MEMBER(sym, size) + * @def ARCH_THREAD_STACK_MEMBER(sym, size) * * @see K_THREAD_STACK_MEMBER() */ /* - * @def Z_ARCH_THREAD_STACK_SIZEOF(sym) + * @def ARCH_THREAD_STACK_SIZEOF(sym) * * @see K_THREAD_STACK_SIZEOF() */ /** - * @def Z_ARCH_THREAD_STACK_RESERVED + * @def ARCH_THREAD_STACK_RESERVED * * @see K_THREAD_STACK_RESERVED */ /** - * @def Z_ARCH_THREAD_STACK_BUFFER(sym) + * @def ARCH_THREAD_STACK_BUFFER(sym) * * @see K_THREAD_STACK_RESERVED */ @@ -128,12 +128,12 @@ static inline u32_t z_arch_k_cycle_get_32(void); * * @see k_cpu_idle() */ -void z_arch_cpu_idle(void); +void arch_cpu_idle(void); /** * @brief Atomically re-enable interrupts and enter low power mode * - * The requirements for z_arch_cpu_atomic_idle() are as follows: + * The requirements for arch_cpu_atomic_idle() are as follows: * * 1) Enabling interrupts and entering a low-power mode needs to be * atomic, i.e. there should be no period of time where interrupts are @@ -146,9 +146,9 @@ void z_arch_cpu_idle(void); * * @see k_cpu_atomic_idle() * - * @param key Lockout key returned by previous invocation of z_arch_irq_lock() + * @param key Lockout key returned by previous invocation of arch_irq_lock() */ -void z_arch_cpu_atomic_idle(unsigned int key); +void arch_cpu_atomic_idle(unsigned int key); /** @} */ @@ -179,8 +179,8 @@ void z_arch_cpu_atomic_idle(unsigned int key); * an irq_unlock() key. 
* @param arg Untyped argument to be passed to "fn" */ -void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz, - void (*fn)(int key, void *data), void *arg); +void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz, + void (*fn)(int key, void *data), void *arg); /** @} */ @@ -194,44 +194,44 @@ void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz, * * @see irq_lock() */ -static inline unsigned int z_arch_irq_lock(void); +static inline unsigned int arch_irq_lock(void); /** * Unlock interrupts on the current CPU * * @see irq_unlock() */ -static inline void z_arch_irq_unlock(unsigned int key); +static inline void arch_irq_unlock(unsigned int key); /** - * Test if calling z_arch_irq_unlock() with this key would unlock irqs + * Test if calling arch_irq_unlock() with this key would unlock irqs * - * @param key value returned by z_arch_irq_lock() - * @return true if interrupts were unlocked prior to the z_arch_irq_lock() + * @param key value returned by arch_irq_lock() + * @return true if interrupts were unlocked prior to the arch_irq_lock() * call that produced the key argument. */ -static inline bool z_arch_irq_unlocked(unsigned int key); +static inline bool arch_irq_unlocked(unsigned int key); /** * Disable the specified interrupt line * * @see irq_disable() */ -void z_arch_irq_disable(unsigned int irq); +void arch_irq_disable(unsigned int irq); /** * Enable the specified interrupt line * * @see irq_enable() */ -void z_arch_irq_enable(unsigned int irq); +void arch_irq_enable(unsigned int irq); /** * Test if an interrupt line is enabled * * @see irq_is_enabled() */ -int z_arch_irq_is_enabled(unsigned int irq); +int arch_irq_is_enabled(unsigned int irq); /** * Arch-specific hook to install a dynamic interrupt. @@ -244,48 +244,48 @@ int z_arch_irq_is_enabled(unsigned int irq); * * @return The vector assigned to this interrupt */ -int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, - void (*routine)(void *parameter), - void *parameter, u32_t flags); +int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, + void (*routine)(void *parameter), + void *parameter, u32_t flags); /** - * @def Z_ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags) + * @def ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags) * * @see IRQ_CONNECT() */ /** - * @def Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) + * @def ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) * * @see IRQ_DIRECT_CONNECT() */ /** - * @def Z_ARCH_ISR_DIRECT_PM() + * @def ARCH_ISR_DIRECT_PM() * * @see ISR_DIRECT_PM() */ /** - * @def Z_ARCH_ISR_DIRECT_HEADER() + * @def ARCH_ISR_DIRECT_HEADER() * * @see ISR_DIRECT_HEADER() */ /** - * @def Z_ARCH_ISR_DIRECT_FOOTER(swap) + * @def ARCH_ISR_DIRECT_FOOTER(swap) * * @see ISR_DIRECT_FOOTER() */ /** - * @def Z_ARCH_ISR_DIRECT_DECLARE(name) + * @def ARCH_ISR_DIRECT_DECLARE(name) * * @see ISR_DIRECT_DECLARE() */ /** - * @def Z_ARCH_EXCEPT(reason_p) + * @def ARCH_EXCEPT(reason_p) * * Generate a software induced fatal error. 
* @@ -318,7 +318,7 @@ typedef void (*irq_offload_routine_t)(void *parameter); * @param routine Function to run in interrupt context * @param parameter Value to pass to the function when invoked */ -void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter); +void arch_irq_offload(irq_offload_routine_t routine, void *parameter); #endif /* CONFIG_IRQ_OFFLOAD */ /** @} */ @@ -331,14 +331,14 @@ void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter); */ #ifdef CONFIG_SMP /** Return the CPU struct for the currently executing CPU */ -static inline struct _cpu *z_arch_curr_cpu(void); +static inline struct _cpu *arch_curr_cpu(void); /** * Broadcast an interrupt to all CPUs * * This will invoke z_sched_ipi() on other CPUs in the system. */ -void z_arch_sched_ipi(void); +void arch_sched_ipi(void); #endif /* CONFIG_SMP */ /** @} */ @@ -372,25 +372,25 @@ void z_arch_sched_ipi(void); * @param call_id System call ID * @return Return value of the system call. Void system calls return 0 here. */ -static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id); +static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id); /** * Invoke a system call with 1 argument. * - * @see z_arch_syscall_invoke0() + * @see arch_syscall_invoke0() * * @param arg1 First argument to the system call. * @param call_id System call ID, will be bounds-checked and used to reference * kernel-side dispatch table * @return Return value of the system call. Void system calls return 0 here. */ -static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, - uintptr_t call_id); +static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, + uintptr_t call_id); /** * Invoke a system call with 2 arguments. * - * @see z_arch_syscall_invoke0() + * @see arch_syscall_invoke0() * * @param arg1 First argument to the system call. * @param arg2 Second argument to the system call. @@ -398,13 +398,13 @@ static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, * kernel-side dispatch table * @return Return value of the system call. Void system calls return 0 here. */ -static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, - uintptr_t call_id); +static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, + uintptr_t call_id); /** * Invoke a system call with 3 arguments. * - * @see z_arch_syscall_invoke0() + * @see arch_syscall_invoke0() * * @param arg1 First argument to the system call. * @param arg2 Second argument to the system call. @@ -413,14 +413,14 @@ static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2, * kernel-side dispatch table * @return Return value of the system call. Void system calls return 0 here. */ -static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, - uintptr_t call_id); +static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, + uintptr_t call_id); /** * Invoke a system call with 4 arguments. * - * @see z_arch_syscall_invoke0() + * @see arch_syscall_invoke0() * * @param arg1 First argument to the system call. * @param arg2 Second argument to the system call. @@ -430,14 +430,14 @@ static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2, * kernel-side dispatch table * @return Return value of the system call. Void system calls return 0 here. 
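 *
 * As with the other arities, callers are normally generated user-mode
 * stubs; a sketch with a hypothetical syscall ID:
 *
 *     ret = arch_syscall_invoke4(arg1, arg2, arg3, arg4, K_SYSCALL_EXAMPLE);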
*/ -static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t call_id); +static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t call_id); /** * Invoke a system call with 5 arguments. * - * @see z_arch_syscall_invoke0() + * @see arch_syscall_invoke0() * * @param arg1 First argument to the system call. * @param arg2 Second argument to the system call. @@ -448,15 +448,15 @@ static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2, * kernel-side dispatch table * @return Return value of the system call. Void system calls return 0 here. */ -static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t arg5, - uintptr_t call_id); +static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t arg5, + uintptr_t call_id); /** * Invoke a system call with 6 arguments. * - * @see z_arch_syscall_invoke0() + * @see arch_syscall_invoke0() * * @param arg1 First argument to the system call. * @param arg2 Second argument to the system call. @@ -468,24 +468,24 @@ static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2, * kernel-side dispatch table * @return Return value of the system call. Void system calls return 0 here. */ -static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, - uintptr_t arg3, uintptr_t arg4, - uintptr_t arg5, uintptr_t arg6, - uintptr_t call_id); +static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2, + uintptr_t arg3, uintptr_t arg4, + uintptr_t arg5, uintptr_t arg6, + uintptr_t call_id); /** * Indicate whether we are currently running in user mode * * @return true if the CPU is currently running with user permissions */ -static inline bool z_arch_is_user_context(void); +static inline bool arch_is_user_context(void); /** * @brief Get the maximum number of partitions for a memory domain * * @return Max number of partitions, or -1 if there is no limit */ -int z_arch_mem_domain_max_partitions_get(void); +int arch_mem_domain_max_partitions_get(void); /** * @brief Add a thread to a memory domain (arch-specific) @@ -498,7 +498,7 @@ int z_arch_mem_domain_max_partitions_get(void); * * @param thread Thread which needs to be configured. 
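 *
 * Reached from the public API, for illustration (domain and thread
 * objects hypothetical):
 *
 *     k_mem_domain_add_thread(&app_domain, tid);
 *
 * at which point arch code typically reprograms the MPU/MMU for the
 * thread's new partition set.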
*/ -void z_arch_mem_domain_thread_add(struct k_thread *thread); +void arch_mem_domain_thread_add(struct k_thread *thread); /** * @brief Remove a thread from a memory domain (arch-specific) @@ -511,7 +511,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread); * * @param thread Thread being removed from its memory domain */ -void z_arch_mem_domain_thread_remove(struct k_thread *thread); +void arch_mem_domain_thread_remove(struct k_thread *thread); /** * @brief Remove a partition from the memory domain (arch-specific) @@ -526,8 +526,8 @@ void z_arch_mem_domain_thread_remove(struct k_thread *thread); * @param domain The memory domain structure * @param partition_id The partition index that needs to be deleted */ -void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain, - u32_t partition_id); +void arch_mem_domain_partition_remove(struct k_mem_domain *domain, + u32_t partition_id); /** * @brief Add a partition to the memory domain @@ -538,8 +538,8 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain, * @param domain The memory domain structure * @param partition_id The partition that needs to be added */ -void z_arch_mem_domain_partition_add(struct k_mem_domain *domain, - u32_t partition_id); +void arch_mem_domain_partition_add(struct k_mem_domain *domain, + u32_t partition_id); /** * @brief Remove the memory domain @@ -552,7 +552,7 @@ void z_arch_mem_domain_partition_add(struct k_mem_domain *domain, * * @param domain The memory domain structure which needs to be deleted. */ -void z_arch_mem_domain_destroy(struct k_mem_domain *domain); +void arch_mem_domain_destroy(struct k_mem_domain *domain); /** * @brief Check memory region permissions @@ -580,7 +580,7 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain); * * @return nonzero if the permissions don't match. */ -int z_arch_buffer_validate(void *addr, size_t size, int write); +int arch_buffer_validate(void *addr, size_t size, int write); /** * Perform a one-way transition from supervisor to user mode. @@ -592,7 +592,7 @@ int z_arch_buffer_validate(void *addr, size_t size, int write); * - Set up any kernel stack region for the CPU to use during privilege * elevation * - Put the CPU in whatever its equivalent of user mode is - * - Transfer execution to z_arch_new_thread() passing along all the supplied + * - Transfer execution to arch_new_thread() passing along all the supplied * arguments, in user mode. * * @param user_entry Entry point to start executing as a user thread * @param p1 1st parameter to user thread * @param p2 2nd parameter to user thread * @param p3 3rd parameter to user thread */ -FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry, - void *p1, void *p2, void *p3); +FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, + void *p1, void *p2, void *p3); /** * @brief Induce a kernel oops that appears to come from a specific location @@ -617,7 +617,7 @@ FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry, * to _k_syscall_handler_t functions and its contents are completely * architecture specific.
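 *
 * For illustration, the syscall verification macros funnel here through
 * Z_OOPS(), e.g.:
 *
 *     Z_OOPS(Z_SYSCALL_MEMORY_READ(buf, size));
 *
 * which lands in this function with the frame the syscall entry code
 * stashed in _current_cpu->syscall_frame.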
*/ -FUNC_NORETURN void z_arch_syscall_oops(void *ssf); +FUNC_NORETURN void arch_syscall_oops(void *ssf); /** * @brief Safely take the length of a potentially bad string * @@ -631,7 +631,7 @@ FUNC_NORETURN void z_arch_syscall_oops(void *ssf); * @param err Error value to write * @return Length of the string, not counting NULL byte, up to maxsize */ -size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err); +size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err); #endif /* CONFIG_USERSPACE */ /** @} */ diff --git a/include/syscall.h b/include/syscall.h index 28b6227ce3b..6f73f5f4218 100644 --- a/include/syscall.h +++ b/include/syscall.h @@ -100,7 +100,7 @@ static ALWAYS_INLINE bool z_syscall_trap(void) #elif defined(__ZEPHYR_USER__) ret = true; #else - ret = z_arch_is_user_context(); + ret = arch_is_user_context(); #endif #endif return ret; @@ -114,7 +114,7 @@ static ALWAYS_INLINE bool z_syscall_trap(void) static inline bool _is_user_context(void) { #ifdef CONFIG_USERSPACE - return z_arch_is_user_context(); + return arch_is_user_context(); #else return false; #endif } diff --git a/include/syscall_handler.h b/include/syscall_handler.h index e8beaa54467..cec1cd463a0 100644 --- a/include/syscall_handler.h +++ b/include/syscall_handler.h @@ -172,7 +172,7 @@ void z_object_recycle(void *obj); static inline size_t z_user_string_nlen(const char *src, size_t maxlen, int *err) { - return z_arch_user_string_nlen(src, maxlen, err); + return arch_user_string_nlen(src, maxlen, err); } /** @@ -259,7 +259,7 @@ extern int z_user_string_copy(char *dst, const char *src, size_t maxlen); #define Z_OOPS(expr) \ do { \ if (expr) { \ - z_arch_syscall_oops(_current_cpu->syscall_frame); \ + arch_syscall_oops(_current_cpu->syscall_frame); \ } \ } while (false) @@ -314,7 +314,7 @@ extern int z_user_string_copy(char *dst, const char *src, size_t maxlen); * @return 0 on success, nonzero on failure */ #define Z_SYSCALL_MEMORY(ptr, size, write) \ - Z_SYSCALL_VERIFY_MSG(z_arch_buffer_validate((void *)ptr, size, write) \ + Z_SYSCALL_VERIFY_MSG(arch_buffer_validate((void *)ptr, size, write) \ == 0, \ "Memory region %p (size %u) %s access denied", \ (void *)(ptr), (u32_t)(size), \ diff --git a/kernel/Kconfig b/kernel/Kconfig index 19289261f8d..fcc67613234 100644 --- a/kernel/Kconfig +++ b/kernel/Kconfig @@ -691,13 +691,13 @@ config MAX_DOMAIN_PARTITIONS menu "SMP Options" config USE_SWITCH - bool "Use new-style _arch_switch instead of z_arch_swap" + bool "Use new-style _arch_switch instead of arch_swap" depends on USE_SWITCH_SUPPORTED help The _arch_switch() API is a lower level context switching - primitive than the original z_arch_swap mechanism. It is required + primitive than the original arch_swap mechanism. It is required for an SMP-aware scheduler, or if the architecture does not - provide z_arch_swap. In uniprocessor situations where the + provide arch_swap. In uniprocessor situations where the architecture provides both, _arch_switch incurs somewhat more overhead and may be slower. @@ -727,7 +727,7 @@ config SCHED_IPI_SUPPORTED bool "Architecture supports broadcast interprocessor interrupts" help True if the architecture supports a call to - z_arch_sched_ipi() to broadcast an interrupt that will call + arch_sched_ipi() to broadcast an interrupt that will call z_sched_ipi() on other CPUs in the system.
Required for k_thread_abort() to operate with reasonable latency (otherwise we might have to wait for the other thread to diff --git a/kernel/fatal.c b/kernel/fatal.c index ca44f7b652b..cb6441abf30 100644 --- a/kernel/fatal.c +++ b/kernel/fatal.c @@ -17,7 +17,7 @@ LOG_MODULE_DECLARE(os); /* LCOV_EXCL_START */ -FUNC_NORETURN __weak void z_arch_system_halt(unsigned int reason) +FUNC_NORETURN __weak void arch_system_halt(unsigned int reason) { ARG_UNUSED(reason); @@ -25,7 +25,7 @@ FUNC_NORETURN __weak void z_arch_system_halt(unsigned int reason) * is enabled? */ - (void)z_arch_irq_lock(); + (void)arch_irq_lock(); for (;;) { /* Spin endlessly */ } @@ -40,7 +40,7 @@ __weak void k_sys_fatal_error_handler(unsigned int reason, LOG_PANIC(); LOG_ERR("Halting system"); - z_arch_system_halt(reason); + arch_system_halt(reason); CODE_UNREACHABLE; } /* LCOV_EXCL_STOP */ @@ -77,14 +77,14 @@ static const char *reason_to_str(unsigned int reason) /* LCOV_EXCL_START */ FUNC_NORETURN void k_fatal_halt(unsigned int reason) { - z_arch_system_halt(reason); + arch_system_halt(reason); } /* LCOV_EXCL_STOP */ static inline int get_cpu(void) { #if defined(CONFIG_SMP) - return z_arch_curr_cpu()->id; + return arch_curr_cpu()->id; #else return 0; #endif @@ -107,7 +107,7 @@ void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf) * See #17656 */ #if defined(CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION) - if (z_arch_is_in_nested_exception(esf)) { + if (arch_is_in_nested_exception(esf)) { LOG_ERR("Fault during interrupt handling\n"); } #endif @@ -135,7 +135,7 @@ void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf) "Attempted to recover from a kernel panic condition"); /* FIXME: #17656 */ #if defined(CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION) - if (z_arch_is_in_nested_exception(esf)) { + if (arch_is_in_nested_exception(esf)) { #if defined(CONFIG_STACK_SENTINEL) if (reason != K_ERR_STACK_CHK_FAIL) { __ASSERT(0, @@ -147,7 +147,7 @@ void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf) } else { /* Test mode */ #if defined(CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION) - if (z_arch_is_in_nested_exception(esf)) { + if (arch_is_in_nested_exception(esf)) { /* Abort the thread only on STACK Sentinel check fail. 
*/ #if defined(CONFIG_STACK_SENTINEL) if (reason != K_ERR_STACK_CHK_FAIL) { diff --git a/kernel/futex.c b/kernel/futex.c index 23336192f62..c31fb0892ac 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -42,7 +42,7 @@ int z_impl_k_futex_wake(struct k_futex *futex, bool wake_all) thread = z_unpend_first_thread(&futex_data->wait_q); if (thread) { z_ready_thread(thread); - z_arch_thread_return_value_set(thread, 0); + arch_thread_return_value_set(thread, 0); woken++; } } while (thread && wake_all); diff --git a/kernel/idle.c b/kernel/idle.c index 0bfe35dc6c9..701b4b79ea6 100644 --- a/kernel/idle.c +++ b/kernel/idle.c @@ -156,7 +156,7 @@ void idle(void *unused1, void *unused2, void *unused3) k_busy_wait(100); k_yield(); #else - (void)z_arch_irq_lock(); + (void)arch_irq_lock(); sys_power_save_idle(); IDLE_YIELD_IF_COOP(); #endif diff --git a/kernel/include/kernel_arch_interface.h b/kernel/include/kernel_arch_interface.h index 47d75a6d127..0c760a5b125 100644 --- a/kernel/include/kernel_arch_interface.h +++ b/kernel/include/kernel_arch_interface.h @@ -36,7 +36,7 @@ extern "C" { * * @param usec_to_wait Wait period, in microseconds */ -void z_arch_busy_wait(u32_t usec_to_wait); +void arch_busy_wait(u32_t usec_to_wait); #endif /** @} */ @@ -68,7 +68,7 @@ void z_arch_busy_wait(u32_t usec_to_wait); * @param prio Thread priority. * @param options Thread options. */ -void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *pStack, +void arch_new_thread(struct k_thread *thread, k_thread_stack_t *pStack, size_t stackSize, k_thread_entry_t entry, void *p1, void *p2, void *p3, int prio, unsigned int options); @@ -100,7 +100,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *pStack, * @param switched_from Pointer to outgoing thread's switch handle storage * location, which may be updated. */ -static inline void z_arch_switch(void *switch_to, void **switched_from); +static inline void arch_switch(void *switch_to, void **switched_from); #else /** * Cooperatively context switch * * Must be called with interrupts locked with the provided key. * This is the older-style context switching method, which is incompatible * with SMP. New arch ports, either SMP or UP, are encouraged to implement - * z_arch_switch() instead. + * arch_switch() instead. * * @param key Interrupt locking key * @return If woken from blocking on some kernel object, the result of that * blocking operation. */ -int z_arch_swap(unsigned int key); +int arch_swap(unsigned int key); /** * Set the return value for the specified thread. * @@ -125,7 +125,7 @@ int z_arch_swap(unsigned int key); * @param value value to set as return value */ static ALWAYS_INLINE void -z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value); +arch_thread_return_value_set(struct k_thread *thread, unsigned int value); #endif /* CONFIG_USE_SWITCH */ #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN @@ -140,7 +140,7 @@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value); * @param main_stack_size Size of the stack object's buffer * @param _main Entry point for application main function. */ -void z_arch_switch_to_main_thread(struct k_thread *main_thread, +void arch_switch_to_main_thread(struct k_thread *main_thread, k_thread_stack_t *main_stack, size_t main_stack_size, k_thread_entry_t _main); @@ -159,7 +159,7 @@ void z_arch_switch_to_main_thread(struct k_thread *main_thread, * @retval 0 On success.
* @retval -EINVAL If the floating point disabling could not be performed. */ -int z_arch_float_disable(struct k_thread *thread); +int arch_float_disable(struct k_thread *thread); #endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */ /** @} */ @@ -170,7 +170,7 @@ int z_arch_float_disable(struct k_thread *thread); * @{ */ /** Halt the system, optionally propagating a reason code */ -FUNC_NORETURN void z_arch_system_halt(unsigned int reason); +FUNC_NORETURN void arch_system_halt(unsigned int reason); /** @} */ @@ -189,7 +189,7 @@ FUNC_NORETURN void z_arch_system_halt(unsigned int reason); * * @return true if we are in interrupt context */ -static inline bool z_arch_is_in_isr(void); +static inline bool arch_is_in_isr(void); /** @} */ @@ -200,16 +200,16 @@ static inline bool z_arch_is_in_isr(void); */ #ifdef CONFIG_EXECUTION_BENCHMARKING -extern u64_t z_arch_timing_swap_start; -extern u64_t z_arch_timing_swap_end; -extern u64_t z_arch_timing_irq_start; -extern u64_t z_arch_timing_irq_end; -extern u64_t z_arch_timing_tick_start; -extern u64_t z_arch_timing_tick_end; -extern u64_t z_arch_timing_user_mode_end; -extern u32_t z_arch_timing_value_swap_end; -extern u64_t z_arch_timing_value_swap_common; -extern u64_t z_arch_timing_value_swap_temp; +extern u64_t arch_timing_swap_start; +extern u64_t arch_timing_swap_end; +extern u64_t arch_timing_irq_start; +extern u64_t arch_timing_irq_end; +extern u64_t arch_timing_tick_start; +extern u64_t arch_timing_tick_end; +extern u64_t arch_timing_user_mode_end; +extern u32_t arch_timing_value_swap_end; +extern u64_t arch_timing_value_swap_common; +extern u64_t arch_timing_value_swap_temp; #endif /* CONFIG_EXECUTION_BENCHMARKING */ /** @} */ @@ -229,10 +229,10 @@ extern u64_t z_arch_timing_value_swap_temp; * TODO: Deprecate, most arches are using a prep_c() function to do the same * thing in a simpler way */ -static inline void z_arch_kernel_init(void); +static inline void arch_kernel_init(void); /** Do nothing and return. Yawn. */ -static inline void z_arch_nop(void); +static inline void arch_nop(void); /** @} */ diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h index 147be7585f4..05946d5c3c0 100644 --- a/kernel/include/kernel_internal.h +++ b/kernel/include/kernel_internal.h @@ -93,7 +93,7 @@ extern void z_thread_monitor_exit(struct k_thread *thread); * z_swap() is in use it's a simple inline provided by the kernel. 
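 *
 * For orientation, a sketch of the typical caller pattern (mirroring the
 * wait-queue wakeups elsewhere in this patch; illustrative only):
 *
 *     z_ready_thread(thread);
 *     arch_thread_return_value_set(thread, 0);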
*/ static ALWAYS_INLINE void -z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) +arch_thread_return_value_set(struct k_thread *thread, unsigned int value) { thread->swap_retval = value; } @@ -104,7 +104,7 @@ z_thread_return_value_set_with_data(struct k_thread *thread, unsigned int value, void *data) { - z_arch_thread_return_value_set(thread, value); + arch_thread_return_value_set(thread, value); thread->base.swap_data = data; } diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h index 48eb70cd229..e6bd3cfe6df 100644 --- a/kernel/include/ksched.h +++ b/kernel/include/ksched.h @@ -64,12 +64,12 @@ void z_sched_ipi(void); static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout) { - (void) z_pend_curr_irqlock(z_arch_irq_lock(), wait_q, timeout); + (void) z_pend_curr_irqlock(arch_irq_lock(), wait_q, timeout); } static inline void z_reschedule_unlocked(void) { - (void) z_reschedule_irqlock(z_arch_irq_lock()); + (void) z_reschedule_irqlock(arch_irq_lock()); } /* find which one is the next thread to run */ @@ -265,7 +265,7 @@ static inline void _ready_one_thread(_wait_q_t *wq) static inline void z_sched_lock(void) { #ifdef CONFIG_PREEMPT_ENABLED - __ASSERT(!z_arch_is_in_isr(), ""); + __ASSERT(!arch_is_in_isr(), ""); __ASSERT(_current->base.sched_locked != 1, ""); --_current->base.sched_locked; @@ -280,7 +280,7 @@ static inline void z_sched_lock(void) static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void) { #ifdef CONFIG_PREEMPT_ENABLED - __ASSERT(!z_arch_is_in_isr(), ""); + __ASSERT(!arch_is_in_isr(), ""); __ASSERT(_current->base.sched_locked != 0, ""); compiler_barrier(); diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h index d98fa446186..53ad0cfe340 100644 --- a/kernel/include/kswap.h +++ b/kernel/include/kswap.h @@ -29,7 +29,7 @@ void z_smp_release_global_lock(struct k_thread *thread); /* context switching and scheduling-related routines */ #ifdef CONFIG_USE_SWITCH -/* New style context switching. z_arch_switch() is a lower level +/* New style context switching. arch_switch() is a lower level * primitive that doesn't know about the scheduler or return value. * Needed for SMP, where the scheduler requires spinlocking that we * don't want to have to do in per-architecture assembly. 
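 *
 * Conceptually, the primitive behaves like the sketch below. This is a
 * hedged illustration only: real ports implement it in assembly, and
 * save_context()/restore_context() are hypothetical stand-ins for the
 * architecture's register save/restore:
 *
 *     *switched_from = save_context();
 *     restore_context(switch_to);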
@@ -71,21 +71,21 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key, #ifdef CONFIG_SMP _current_cpu->swap_ok = 0; - new_thread->base.cpu = z_arch_curr_cpu()->id; + new_thread->base.cpu = arch_curr_cpu()->id; if (!is_spinlock) { z_smp_release_global_lock(new_thread); } #endif _current = new_thread; - z_arch_switch(new_thread->switch_handle, + arch_switch(new_thread->switch_handle, &old_thread->switch_handle); } sys_trace_thread_switched_in(); if (is_spinlock) { - z_arch_irq_unlock(key); + arch_irq_unlock(key); } else { irq_unlock(key); } @@ -113,7 +113,7 @@ static inline void z_swap_unlocked(void) #else /* !CONFIG_USE_SWITCH */ -extern int z_arch_swap(unsigned int key); +extern int arch_swap(unsigned int key); static inline int z_swap_irqlock(unsigned int key) { @@ -123,7 +123,7 @@ static inline int z_swap_irqlock(unsigned int key) #ifndef CONFIG_ARM sys_trace_thread_switched_out(); #endif - ret = z_arch_swap(key); + ret = arch_swap(key); #ifndef CONFIG_ARM sys_trace_thread_switched_in(); #endif @@ -143,7 +143,7 @@ static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key) static inline void z_swap_unlocked(void) { - (void) z_swap_irqlock(z_arch_irq_lock()); + (void) z_swap_irqlock(arch_irq_lock()); } #endif diff --git a/kernel/init.c b/kernel/init.c index d6e0b9b24cf..71ade3d8d00 100644 --- a/kernel/init.c +++ b/kernel/init.c @@ -295,7 +295,7 @@ static void bg_thread_main(void *unused1, void *unused2, void *unused3) void __weak main(void) { /* NOP default main() if the application does not provide one. */ - z_arch_nop(); + arch_nop(); } /* LCOV_EXCL_STOP */ @@ -411,9 +411,9 @@ static void prepare_multithreading(struct k_thread *dummy_thread) static FUNC_NORETURN void switch_to_main_thread(void) { #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN - z_arch_switch_to_main_thread(&z_main_thread, z_main_stack, - K_THREAD_STACK_SIZEOF(z_main_stack), - bg_thread_main); + arch_switch_to_main_thread(&z_main_thread, z_main_stack, + K_THREAD_STACK_SIZEOF(z_main_stack), + bg_thread_main); #else /* * Context switch to main task (entry function is _main()): the @@ -513,7 +513,7 @@ FUNC_NORETURN void z_cstart(void) LOG_CORE_INIT(); /* perform any architecture-specific initialization */ - z_arch_kernel_init(); + arch_kernel_init(); #ifdef CONFIG_MULTITHREADING struct k_thread dummy_thread = { diff --git a/kernel/mailbox.c b/kernel/mailbox.c index 308d90bd36d..b0740af6a91 100644 --- a/kernel/mailbox.c +++ b/kernel/mailbox.c @@ -212,7 +212,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg) #endif /* synchronous send: wake up sending thread */ - z_arch_thread_return_value_set(sending_thread, 0); + arch_thread_return_value_set(sending_thread, 0); z_mark_thread_as_not_pending(sending_thread); z_ready_thread(sending_thread); z_reschedule_unlocked(); @@ -258,7 +258,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, z_unpend_thread(receiving_thread); /* ready receiver for execution */ - z_arch_thread_return_value_set(receiving_thread, 0); + arch_thread_return_value_set(receiving_thread, 0); z_ready_thread(receiving_thread); #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) diff --git a/kernel/mem_domain.c b/kernel/mem_domain.c index c91d9df9f1f..4eb24d7719c 100644 --- a/kernel/mem_domain.c +++ b/kernel/mem_domain.c @@ -129,7 +129,7 @@ void k_mem_domain_destroy(struct k_mem_domain *domain) key = k_spin_lock(&lock); - z_arch_mem_domain_destroy(domain); + arch_mem_domain_destroy(domain); SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) { 
struct k_thread *thread = @@ -176,7 +176,7 @@ void k_mem_domain_add_partition(struct k_mem_domain *domain, domain->num_partitions++; - z_arch_mem_domain_partition_add(domain, p_idx); + arch_mem_domain_partition_add(domain, p_idx); k_spin_unlock(&lock, key); } @@ -202,7 +202,7 @@ void k_mem_domain_remove_partition(struct k_mem_domain *domain, /* Assert if not found */ __ASSERT(p_idx < max_partitions, "no matching partition found"); - z_arch_mem_domain_partition_remove(domain, p_idx); + arch_mem_domain_partition_remove(domain, p_idx); /* A zero-sized partition denotes it's a free partition */ domain->partitions[p_idx].size = 0U; @@ -227,7 +227,7 @@ void k_mem_domain_add_thread(struct k_mem_domain *domain, k_tid_t thread) &thread->mem_domain_info.mem_domain_q_node); thread->mem_domain_info.mem_domain = domain; - z_arch_mem_domain_thread_add(thread); + arch_mem_domain_thread_add(thread); k_spin_unlock(&lock, key); } @@ -240,7 +240,7 @@ void k_mem_domain_remove_thread(k_tid_t thread) __ASSERT(thread->mem_domain_info.mem_domain != NULL, "mem domain set"); key = k_spin_lock(&lock); - z_arch_mem_domain_thread_remove(thread); + arch_mem_domain_thread_remove(thread); sys_dlist_remove(&thread->mem_domain_info.mem_domain_q_node); thread->mem_domain_info.mem_domain = NULL; @@ -251,7 +251,7 @@ static int init_mem_domain_module(struct device *arg) { ARG_UNUSED(arg); - max_partitions = z_arch_mem_domain_max_partitions_get(); + max_partitions = arch_mem_domain_max_partitions_get(); /* * max_partitions must be less than or equal to * CONFIG_MAX_DOMAIN_PARTITIONS, or would encounter array index diff --git a/kernel/mempool.c b/kernel/mempool.c index ae6fade2182..33eff00e713 100644 --- a/kernel/mempool.c +++ b/kernel/mempool.c @@ -52,7 +52,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block, int ret; s64_t end = 0; - __ASSERT(!(z_arch_is_in_isr() && timeout != K_NO_WAIT), ""); + __ASSERT(!(arch_is_in_isr() && timeout != K_NO_WAIT), ""); if (timeout > 0) { end = k_uptime_get() + timeout; diff --git a/kernel/msg_q.c b/kernel/msg_q.c index 7c12440ba91..84a21deb5ba 100644 --- a/kernel/msg_q.c +++ b/kernel/msg_q.c @@ -111,7 +111,7 @@ void k_msgq_cleanup(struct k_msgq *msgq) int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout) { - __ASSERT(!z_arch_is_in_isr() || timeout == K_NO_WAIT, ""); + __ASSERT(!arch_is_in_isr() || timeout == K_NO_WAIT, ""); struct k_thread *pending_thread; k_spinlock_key_t key; @@ -127,7 +127,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout) (void)memcpy(pending_thread->base.swap_data, data, msgq->msg_size); /* wake up waiting thread */ - z_arch_thread_return_value_set(pending_thread, 0); + arch_thread_return_value_set(pending_thread, 0); z_ready_thread(pending_thread); z_reschedule(&msgq->lock, key); return 0; @@ -186,7 +186,7 @@ static inline void z_vrfy_k_msgq_get_attrs(struct k_msgq *q, int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout) { - __ASSERT(!z_arch_is_in_isr() || timeout == K_NO_WAIT, ""); + __ASSERT(!arch_is_in_isr() || timeout == K_NO_WAIT, ""); k_spinlock_key_t key; struct k_thread *pending_thread; @@ -216,7 +216,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout) msgq->used_msgs++; /* wake up waiting thread */ - z_arch_thread_return_value_set(pending_thread, 0); + arch_thread_return_value_set(pending_thread, 0); z_ready_thread(pending_thread); z_reschedule(&msgq->lock, key); return 0; @@ -288,7 +288,7 @@ void z_impl_k_msgq_purge(struct k_msgq *msgq) /* wake up any threads 
that are waiting to write */ while ((pending_thread = z_unpend_first_thread(&msgq->wait_q)) != NULL) { - z_arch_thread_return_value_set(pending_thread, -ENOMSG); + arch_thread_return_value_set(pending_thread, -ENOMSG); z_ready_thread(pending_thread); } diff --git a/kernel/mutex.c b/kernel/mutex.c index ba3aa4de992..5f47b364433 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -237,7 +237,7 @@ void z_impl_k_mutex_unlock(struct k_mutex *mutex) k_spin_unlock(&lock, key); - z_arch_thread_return_value_set(new_owner, 0); + arch_thread_return_value_set(new_owner, 0); /* * new owner is already of higher or equal prio than first diff --git a/kernel/poll.c b/kernel/poll.c index 608887a3ace..7fba360ebde 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -232,7 +232,7 @@ static int k_poll_poller_cb(struct k_poll_event *event, u32_t state) } z_unpend_thread(thread); - z_arch_thread_return_value_set(thread, + arch_thread_return_value_set(thread, state == K_POLL_STATE_CANCELLED ? -EINTR : 0); if (!z_is_thread_ready(thread)) { @@ -252,7 +252,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout) .thread = _current, .cb = k_poll_poller_cb }; - __ASSERT(!z_arch_is_in_isr(), ""); + __ASSERT(!arch_is_in_isr(), ""); __ASSERT(events != NULL, "NULL events\n"); __ASSERT(num_events > 0, "zero events\n"); diff --git a/kernel/sched.c b/kernel/sched.c index a783990fa84..e3f4e32bab4 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -329,7 +329,7 @@ void z_add_thread_to_ready_q(struct k_thread *thread) z_mark_thread_as_queued(thread); update_cache(0); #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED) - z_arch_sched_ipi(); + arch_sched_ipi(); #endif } } @@ -523,7 +523,7 @@ static inline int resched(u32_t key) _current_cpu->swap_ok = 0; #endif - return z_arch_irq_unlocked(key) && !z_arch_is_in_isr(); + return arch_irq_unlocked(key) && !arch_is_in_isr(); } void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key) @@ -555,7 +555,7 @@ void k_sched_unlock(void) { #ifdef CONFIG_PREEMPT_ENABLED __ASSERT(_current->base.sched_locked != 0, ""); - __ASSERT(!z_arch_is_in_isr(), ""); + __ASSERT(!arch_is_in_isr(), ""); LOCKED(&sched_spinlock) { ++_current->base.sched_locked; @@ -847,7 +847,7 @@ void z_impl_k_thread_priority_set(k_tid_t tid, int prio) * keep track of it) and idle cannot change its priority. 
*/ Z_ASSERT_VALID_PRIO(prio, NULL); - __ASSERT(!z_arch_is_in_isr(), ""); + __ASSERT(!arch_is_in_isr(), ""); struct k_thread *thread = (struct k_thread *)tid; @@ -901,7 +901,7 @@ static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline) void z_impl_k_yield(void) { - __ASSERT(!z_arch_is_in_isr(), ""); + __ASSERT(!arch_is_in_isr(), ""); if (!z_is_idle_thread_object(_current)) { LOCKED(&sched_spinlock) { @@ -931,7 +931,7 @@ static s32_t z_tick_sleep(s32_t ticks) #ifdef CONFIG_MULTITHREADING u32_t expected_wakeup_time; - __ASSERT(!z_arch_is_in_isr(), ""); + __ASSERT(!arch_is_in_isr(), ""); K_DEBUG("thread %p for %d ticks\n", _current, ticks); @@ -1018,7 +1018,7 @@ void z_impl_k_wakeup(k_tid_t thread) z_mark_thread_as_not_suspended(thread); z_ready_thread(thread); - if (!z_arch_is_in_isr()) { + if (!arch_is_in_isr()) { z_reschedule_unlocked(); } @@ -1060,7 +1060,7 @@ void z_sched_abort(struct k_thread *thread) */ thread->base.thread_state |= _THREAD_ABORTING; #ifdef CONFIG_SCHED_IPI_SUPPORTED - z_arch_sched_ipi(); + arch_sched_ipi(); #endif /* Wait for it to be flagged dead either by the CPU it was @@ -1109,7 +1109,7 @@ static inline k_tid_t z_vrfy_k_current_get(void) int z_impl_k_is_preempt_thread(void) { - return !z_arch_is_in_isr() && is_preempt(_current); + return !arch_is_in_isr() && is_preempt(_current); } #ifdef CONFIG_USERSPACE diff --git a/kernel/sem.c b/kernel/sem.c index 249b6e83527..ef895d3c437 100644 --- a/kernel/sem.c +++ b/kernel/sem.c @@ -110,7 +110,7 @@ static void do_sem_give(struct k_sem *sem) if (thread != NULL) { z_ready_thread(thread); - z_arch_thread_return_value_set(thread, 0); + arch_thread_return_value_set(thread, 0); } else { increment_count_up_to_limit(sem); handle_poll_events(sem); @@ -138,7 +138,7 @@ static inline void z_vrfy_k_sem_give(struct k_sem *sem) int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout) { - __ASSERT(((z_arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), ""); + __ASSERT(((arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), ""); sys_trace_void(SYS_TRACE_ID_SEMA_TAKE); k_spinlock_key_t key = k_spin_lock(&lock); diff --git a/kernel/smp.c b/kernel/smp.c index 5f66c6ed849..8692d19c232 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -16,7 +16,7 @@ static atomic_t start_flag; unsigned int z_smp_global_lock(void) { - unsigned int key = z_arch_irq_lock(); + unsigned int key = arch_irq_lock(); if (!_current->base.global_lock_count) { while (!atomic_cas(&global_lock, 0, 1)) { @@ -38,13 +38,13 @@ void z_smp_global_unlock(unsigned int key) } } - z_arch_irq_unlock(key); + arch_irq_unlock(key); } void z_smp_reacquire_global_lock(struct k_thread *thread) { if (thread->base.global_lock_count) { - z_arch_irq_lock(); + arch_irq_lock(); while (!atomic_cas(&global_lock, 0, 1)) { } @@ -81,7 +81,7 @@ static void smp_init_top(int key, void *arg) .base.thread_state = _THREAD_DUMMY, }; - z_arch_curr_cpu()->current = &dummy_thread; + arch_curr_cpu()->current = &dummy_thread; smp_timer_init(); z_swap_unlocked(); @@ -94,17 +94,17 @@ void z_smp_init(void) (void)atomic_clear(&start_flag); #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1 - z_arch_start_cpu(1, _interrupt_stack1, CONFIG_ISR_STACK_SIZE, + arch_start_cpu(1, _interrupt_stack1, CONFIG_ISR_STACK_SIZE, smp_init_top, &start_flag); #endif #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2 - z_arch_start_cpu(2, _interrupt_stack2, CONFIG_ISR_STACK_SIZE, + arch_start_cpu(2, _interrupt_stack2, CONFIG_ISR_STACK_SIZE, smp_init_top, &start_flag); #endif #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 
3 - z_arch_start_cpu(3, _interrupt_stack3, CONFIG_ISR_STACK_SIZE, + arch_start_cpu(3, _interrupt_stack3, CONFIG_ISR_STACK_SIZE, smp_init_top, &start_flag); #endif diff --git a/kernel/thread.c b/kernel/thread.c index 01266e2a933..f2ad1c953e0 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -56,7 +56,7 @@ void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data) bool k_is_in_isr(void) { - return z_arch_is_in_isr(); + return arch_is_in_isr(); } /* @@ -109,7 +109,7 @@ void z_impl_k_busy_wait(u32_t usec_to_wait) } } #else - z_arch_busy_wait(usec_to_wait); + arch_busy_wait(usec_to_wait); #endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */ } @@ -408,7 +408,7 @@ static inline size_t adjust_stack_size(size_t stack_size) } /* Don't need to worry about alignment of the size here, - * z_arch_new_thread() is required to do it. + * arch_new_thread() is required to do it. * * FIXME: Not the best way to get a random number in a range. * See #6493 @@ -502,12 +502,12 @@ void z_setup_new_thread(struct k_thread *new_thread, #endif #endif - z_arch_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3, + arch_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3, prio, options); #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA #ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP - /* don't set again if the arch's own code in z_arch_new_thread() has + /* don't set again if the arch's own code in arch_new_thread() has * already set the pointer. */ new_thread->userspace_local_data = @@ -571,7 +571,7 @@ k_tid_t z_impl_k_thread_create(struct k_thread *new_thread, void *p1, void *p2, void *p3, int prio, u32_t options, s32_t delay) { - __ASSERT(!z_arch_is_in_isr(), "Threads may not be created in ISRs"); + __ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs"); /* Special case, only for unit tests */ #if defined(CONFIG_TEST) && defined(CONFIG_ARCH_HAS_USERSPACE) && !defined(CONFIG_USERSPACE) @@ -836,7 +836,7 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry, _current->entry.parameter3 = p3; #endif #ifdef CONFIG_USERSPACE - z_arch_user_mode_enter(entry, p1, p2, p3); + arch_user_mode_enter(entry, p1, p2, p3); #else /* XXX In this case we do not reset the stack */ z_thread_entry(entry, p1, p2, p3); @@ -878,7 +878,7 @@ void z_spin_lock_set_owner(struct k_spinlock *l) int z_impl_k_float_disable(struct k_thread *thread) { #if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING) - return z_arch_float_disable(thread); + return arch_float_disable(thread); #else return -ENOSYS; #endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */ diff --git a/kernel/thread_abort.c b/kernel/thread_abort.c index d675dd2eb8b..502986a5f7b 100644 --- a/kernel/thread_abort.c +++ b/kernel/thread_abort.c @@ -43,7 +43,7 @@ void z_impl_k_thread_abort(k_tid_t thread) z_thread_single_abort(thread); z_thread_monitor_exit(thread); - if (thread == _current && !z_arch_is_in_isr()) { + if (thread == _current && !arch_is_in_isr()) { z_swap(&lock, key); } else { /* Really, there's no good reason for this to be a diff --git a/kernel/timer.c b/kernel/timer.c index 3c4c4caacf7..f767d78526e 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -83,7 +83,7 @@ void z_timer_expiration_handler(struct _timeout *t) z_ready_thread(thread); - z_arch_thread_return_value_set(thread, 0); + arch_thread_return_value_set(thread, 0); } @@ -185,7 +185,7 @@ static inline u32_t z_vrfy_k_timer_status_get(struct k_timer *timer) u32_t z_impl_k_timer_status_sync(struct k_timer *timer) { - __ASSERT(!z_arch_is_in_isr(), ""); + 
__ASSERT(!arch_is_in_isr(), ""); k_spinlock_key_t key = k_spin_lock(&lock); u32_t result = timer->status; diff --git a/kernel/userspace.c b/kernel/userspace.c index 3dce3395876..1f5acf9e66c 100644 --- a/kernel/userspace.c +++ b/kernel/userspace.c @@ -760,7 +760,7 @@ static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2, void *ssf) { LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id); - z_arch_syscall_oops(_current_cpu->syscall_frame); + arch_syscall_oops(_current_cpu->syscall_frame); CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ } @@ -769,7 +769,7 @@ static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2, uintptr_t arg5, uintptr_t arg6, void *ssf) { LOG_ERR("Unimplemented system call"); - z_arch_syscall_oops(_current_cpu->syscall_frame); + arch_syscall_oops(_current_cpu->syscall_frame); CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ } diff --git a/lib/os/printk.c b/lib/os/printk.c index 76ac0c71828..c0d8116cc32 100644 --- a/lib/os/printk.c +++ b/lib/os/printk.c @@ -47,7 +47,7 @@ static void _printk_hex_ulong(out_func_t out, void *ctx, * @return 0 */ /* LCOV_EXCL_START */ - __attribute__((weak)) int z_arch_printk_char_out(int c) +__attribute__((weak)) int arch_printk_char_out(int c) { ARG_UNUSED(c); @@ -56,7 +56,7 @@ static void _printk_hex_ulong(out_func_t out, void *ctx, } /* LCOV_EXCL_STOP */ -int (*_char_out)(int) = z_arch_printk_char_out; +int (*_char_out)(int) = arch_printk_char_out; /** * @brief Install the character output routine for printk diff --git a/lib/posix/pthread_mutex.c b/lib/posix/pthread_mutex.c index 25070fe560f..1f0b374f22b 100644 --- a/lib/posix/pthread_mutex.c +++ b/lib/posix/pthread_mutex.c @@ -143,7 +143,7 @@ int pthread_mutex_unlock(pthread_mutex_t *m) m->owner = (pthread_t)thread; m->lock_count++; z_ready_thread(thread); - z_arch_thread_return_value_set(thread, 0); + arch_thread_return_value_set(thread, 0); z_reschedule_irqlock(key); return 0; } diff --git a/scripts/gen_syscalls.py b/scripts/gen_syscalls.py index ef410dba2e8..d70befb2dc3 100755 --- a/scripts/gen_syscalls.py +++ b/scripts/gen_syscalls.py @@ -189,7 +189,7 @@ def wrapper_defs(func_name, func_type, args): mrsh_args[5:] = ["(uintptr_t) &more"] syscall_id = "K_SYSCALL_" + func_name.upper() - invoke = ("z_arch_syscall_invoke%d(%s)" + invoke = ("arch_syscall_invoke%d(%s)" % (len(mrsh_args), ", ".join(mrsh_args + [syscall_id]))) diff --git a/soc/arm/nordic_nrf/nrf51/soc.c b/soc/arm/nordic_nrf/nrf51/soc.c index 1e5842d3b8e..a21fc32c786 100644 --- a/soc/arm/nordic_nrf/nrf51/soc.c +++ b/soc/arm/nordic_nrf/nrf51/soc.c @@ -58,7 +58,7 @@ static int nordicsemi_nrf51_init(struct device *arg) #define DELAY_CALL_OVERHEAD_US 2 -void z_arch_busy_wait(u32_t time_us) +void arch_busy_wait(u32_t time_us) { if (time_us <= DELAY_CALL_OVERHEAD_US) { return; diff --git a/soc/arm/nordic_nrf/nrf52/soc.c b/soc/arm/nordic_nrf/nrf52/soc.c index 0dba393da8e..7fd6c5d5aac 100644 --- a/soc/arm/nordic_nrf/nrf52/soc.c +++ b/soc/arm/nordic_nrf/nrf52/soc.c @@ -76,7 +76,7 @@ static int nordicsemi_nrf52_init(struct device *arg) return 0; } -void z_arch_busy_wait(u32_t time_us) +void arch_busy_wait(u32_t time_us) { nrfx_coredep_delay_us(time_us); } diff --git a/soc/arm/nordic_nrf/nrf91/soc.c b/soc/arm/nordic_nrf/nrf91/soc.c index 86cad4c635c..82838ae844b 100644 --- a/soc/arm/nordic_nrf/nrf91/soc.c +++ b/soc/arm/nordic_nrf/nrf91/soc.c @@ -57,7 +57,7 @@ static int nordicsemi_nrf91_init(struct device *arg) return 0; } -void z_arch_busy_wait(u32_t time_us) +void arch_busy_wait(u32_t time_us) { 
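	/* The whole microsecond wait is delegated to the nrfx
	 * core-dependent software delay loop below; unlike the nRF51
	 * variant above, no call-overhead compensation is applied.
	 */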
nrfx_coredep_delay_us(time_us); } diff --git a/soc/posix/inf_clock/soc.c b/soc/posix/inf_clock/soc.c index 93451016e8c..20c17370167 100644 --- a/soc/posix/inf_clock/soc.c +++ b/soc/posix/inf_clock/soc.c @@ -9,15 +9,14 @@ * clock. * * Therefore, the code will always run until completion after each interrupt, - * after which z_arch_cpu_idle() will be called releasing the execution back to the - * HW models. + * after which arch_cpu_idle() will be called releasing the execution back to + * the HW models. * * The HW models raising an interrupt will "wake the CPU" by calling * posix_interrupt_raised() which will transfer control to the irq handler, - * which will run inside SW/Zephyr contenxt. After which a z_arch_swap() to whatever - * Zephyr thread may follow. - * Again, once Zephyr is done, control is given back to the HW models. - * + * which will run inside the SW/Zephyr context, after which an arch_swap() to + * whatever Zephyr thread may follow. Again, once Zephyr is done, control is + * given back to the HW models. * * The Zephyr OS+APP code and the HW models are gated by a mutex + * condition as there is no reason to let the zephyr threads run while the @@ -125,7 +124,7 @@ void posix_interrupt_raised(void) /** - * Normally called from z_arch_cpu_idle(): + * Normally called from arch_cpu_idle(): * the idle loop will call this function to set the CPU to "sleep". * Others may also call this function with care. The CPU will be set to sleep * until some interrupt wakes it. @@ -143,8 +142,8 @@ void posix_halt_cpu(void) * => let the "irq handler" check if/what interrupt was raised * and call the appropriate irq handler. * - * Note that, the interrupt handling may trigger a z_arch_swap() to another - * Zephyr thread. When posix_irq_handler() returns, the Zephyr + * Note that the interrupt handling may trigger an arch_swap() to + * another Zephyr thread.
When posix_irq_handler() returns, the Zephyr * kernel has swapped back to this thread again */ posix_irq_handler(); @@ -156,7 +155,7 @@ void posix_halt_cpu(void) /** - * Implementation of z_arch_cpu_atomic_idle() for this SOC + * Implementation of arch_cpu_atomic_idle() for this SOC */ void posix_atomic_halt_cpu(unsigned int imask) { diff --git a/soc/riscv/openisa_rv32m1/soc.c b/soc/riscv/openisa_rv32m1/soc.c index 68d7b5aebc5..1ddf9cbf61a 100644 --- a/soc/riscv/openisa_rv32m1/soc.c +++ b/soc/riscv/openisa_rv32m1/soc.c @@ -66,7 +66,7 @@ void sys_arch_reboot(int type) EVENT_UNIT->SLPCTRL |= EVENT_SLPCTRL_SYSRSTREQST_MASK; } -void z_arch_irq_enable(unsigned int irq) +void arch_irq_enable(unsigned int irq) { if (IS_ENABLED(CONFIG_MULTI_LEVEL_INTERRUPTS)) { unsigned int level = rv32m1_irq_level(irq); @@ -84,7 +84,7 @@ void z_arch_irq_enable(unsigned int irq) } } -void z_arch_irq_disable(unsigned int irq) +void arch_irq_disable(unsigned int irq) { if (IS_ENABLED(CONFIG_MULTI_LEVEL_INTERRUPTS)) { unsigned int level = rv32m1_irq_level(irq); @@ -102,7 +102,7 @@ void z_arch_irq_disable(unsigned int irq) } } -int z_arch_irq_is_enabled(unsigned int irq) +int arch_irq_is_enabled(unsigned int irq) { if (IS_ENABLED(CONFIG_MULTI_LEVEL_INTERRUPTS)) { unsigned int level = rv32m1_irq_level(irq); diff --git a/soc/riscv/riscv-privilege/common/idle.c b/soc/riscv/riscv-privilege/common/idle.c index 592f2725d8f..196fb096228 100644 --- a/soc/riscv/riscv-privilege/common/idle.c +++ b/soc/riscv/riscv-privilege/common/idle.c @@ -31,7 +31,7 @@ static ALWAYS_INLINE void riscv_idle(unsigned int key) * * @return N/A */ -void z_arch_cpu_idle(void) +void arch_cpu_idle(void) { riscv_idle(SOC_MSTATUS_IEN); } @@ -41,7 +41,7 @@ void z_arch_cpu_idle(void) * @brief Atomically re-enable interrupts and enter low power mode * * INTERNAL - * The requirements for z_arch_cpu_atomic_idle() are as follows: + * The requirements for arch_cpu_atomic_idle() are as follows: * 1) The enablement of interrupts and entering a low-power mode needs to be * atomic, i.e. there should be no period of time where interrupts are * enabled before the processor enters a low-power mode. 
See the comments @@ -53,7 +53,7 @@ void z_arch_cpu_idle(void) * * @return N/A */ -void z_arch_cpu_atomic_idle(unsigned int key) +void arch_cpu_atomic_idle(unsigned int key) { riscv_idle(key); } diff --git a/soc/riscv/riscv-privilege/common/soc_common_irq.c b/soc/riscv/riscv-privilege/common/soc_common_irq.c index 7d8a462e065..ba33a25cc7d 100644 --- a/soc/riscv/riscv-privilege/common/soc_common_irq.c +++ b/soc/riscv/riscv-privilege/common/soc_common_irq.c @@ -26,7 +26,7 @@ static inline unsigned int _level2_irq(unsigned int irq) return (irq >> 8) - 1; } -void z_arch_irq_enable(unsigned int irq) +void arch_irq_enable(unsigned int irq) { u32_t mie; @@ -49,7 +49,7 @@ void z_arch_irq_enable(unsigned int irq) : "r" (1 << irq)); } -void z_arch_irq_disable(unsigned int irq) +void arch_irq_disable(unsigned int irq) { u32_t mie; @@ -72,7 +72,7 @@ void z_arch_irq_disable(unsigned int irq) : "r" (1 << irq)); }; -void z_arch_irq_priority_set(unsigned int irq, unsigned int prio) +void arch_irq_priority_set(unsigned int irq, unsigned int prio) { #if defined(CONFIG_RISCV_HAS_PLIC) unsigned int level = _irq_level(irq); @@ -86,7 +86,7 @@ void z_arch_irq_priority_set(unsigned int irq, unsigned int prio) return ; } -int z_arch_irq_is_enabled(unsigned int irq) +int arch_irq_is_enabled(unsigned int irq) { u32_t mie; diff --git a/soc/xtensa/esp32/esp32-mp.c b/soc/xtensa/esp32/esp32-mp.c index e67babc19a0..1892b612de7 100644 --- a/soc/xtensa/esp32/esp32-mp.c +++ b/soc/xtensa/esp32/esp32-mp.c @@ -190,8 +190,8 @@ static void appcpu_start(void) smp_log("ESP32: APPCPU start sequence complete"); } -void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz, - void (*fn)(int, void *), void *arg) +void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz, + void (*fn)(int, void *), void *arg) { volatile struct cpustart_rec sr; int vb; diff --git a/soc/xtensa/esp32/soc.c b/soc/xtensa/esp32/soc.c index e6e18c114b9..a0a0d11dbb0 100644 --- a/soc/xtensa/esp32/soc.c +++ b/soc/xtensa/esp32/soc.c @@ -64,7 +64,7 @@ void __attribute__((section(".iram1"))) __start(void) /* Initialize the architecture CPU pointer. Some of the * initialization code wants a valid _current before - * z_arch_kernel_init() is invoked. + * arch_kernel_init() is invoked. */ __asm__ volatile("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0])); @@ -76,7 +76,7 @@ void __attribute__((section(".iram1"))) __start(void) } /* Boot-time static default printk handler, possibly to be overridden later. */ -int z_arch_printk_char_out(int c) +int arch_printk_char_out(int c) { if (c == '\n') { esp32_rom_uart_tx_one_char('\r'); diff --git a/subsys/logging/log_msg.c b/subsys/logging/log_msg.c index 541a976f544..844b6900054 100644 --- a/subsys/logging/log_msg.c +++ b/subsys/logging/log_msg.c @@ -52,10 +52,10 @@ void log_msg_pool_init(void) /* Return true if interrupts were locked in the context of this call. 
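 * It probes the state rather than reading a flag: arch_irq_lock() returns
 * a key encoding the previous lock state, arch_irq_unlocked() decodes that
 * key, and arch_irq_unlock() then restores whatever state the caller had.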
*/ static bool is_irq_locked(void) { - unsigned int key = z_arch_irq_lock(); - bool ret = z_arch_irq_unlocked(key); + unsigned int key = arch_irq_lock(); + bool ret = arch_irq_unlocked(key); - z_arch_irq_unlock(key); + arch_irq_unlock(key); return ret; } diff --git a/subsys/testsuite/ztest/include/arch/cpu.h b/subsys/testsuite/ztest/include/arch/cpu.h index 56235b85d0b..5857b1d53ea 100644 --- a/subsys/testsuite/ztest/include/arch/cpu.h +++ b/subsys/testsuite/ztest/include/arch/cpu.h @@ -18,22 +18,22 @@ struct _thread_arch { typedef struct _thread_arch _thread_arch_t; /* Architecture functions */ -static inline u32_t z_arch_k_cycle_get_32(void) +static inline u32_t arch_k_cycle_get_32(void) { return 0; } -static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) +static ALWAYS_INLINE unsigned int arch_irq_lock(void) { return 0; } -static inline void z_arch_irq_unlock(unsigned int key) +static inline void arch_irq_unlock(unsigned int key) { ARG_UNUSED(key); } -static inline bool z_arch_irq_unlocked(unsigned int key) +static inline bool arch_irq_unlocked(unsigned int key) { return 0; } diff --git a/subsys/testsuite/ztest/src/ztest.c b/subsys/testsuite/ztest/src/ztest.c index e7775350cf5..4411206dc3b 100644 --- a/subsys/testsuite/ztest/src/ztest.c +++ b/subsys/testsuite/ztest/src/ztest.c @@ -78,7 +78,7 @@ static void cpu_hold(void *arg1, void *arg2, void *arg3) ARG_UNUSED(arg1); ARG_UNUSED(arg2); ARG_UNUSED(arg3); - unsigned int key = z_arch_irq_lock(); + unsigned int key = arch_irq_lock(); u32_t dt, start_ms = k_uptime_get_32(); k_sem_give(&cpuhold_sem); @@ -95,7 +95,7 @@ static void cpu_hold(void *arg1, void *arg2, void *arg3) dt = k_uptime_get_32() - start_ms; zassert_true(dt < 3000, "1cpu test took too long (%d ms)", dt); - z_arch_irq_unlock(key); + arch_irq_unlock(key); } void z_test_1cpu_start(void) diff --git a/tests/arch/arm/arm_interrupt/README.txt b/tests/arch/arm/arm_interrupt/README.txt index aec6307d4f7..577376f1fab 100644 --- a/tests/arch/arm/arm_interrupt/README.txt +++ b/tests/arch/arm/arm_interrupt/README.txt @@ -37,7 +37,7 @@ Sample Output: starting test - test_arm_interrupt Available IRQ line: 25 E: ***** HARD FAULT ***** - E: Z_ARCH_EXCEPT with reason 3 + E: ARCH_EXCEPT with reason 3 E: r0/a1: 0x00000003 r1/a2: 0x20001240 r2/a3: 0x00000003 E: r3/a4: 0x20001098 r12/ip: 0x00000000 r14/lr: 0x000012c9 @@ -49,7 +49,7 @@ Sample Output: E: Fault during interrupt handling E: ***** HARD FAULT ***** - E: Z_ARCH_EXCEPT with reason 4 + E: ARCH_EXCEPT with reason 4 E: r0/a1: 0x00000004 r1/a2: 0x20001240 r2/a3: 0x00000004 E: r3/a4: 0x20001098 r12/ip: 0x00000000 r14/lr: 0x000012c9 diff --git a/tests/arch/arm/arm_interrupt/src/arm_interrupt.c b/tests/arch/arm/arm_interrupt/src/arm_interrupt.c index 6c3f3934994..b6b73ca7569 100644 --- a/tests/arch/arm/arm_interrupt/src/arm_interrupt.c +++ b/tests/arch/arm/arm_interrupt/src/arm_interrupt.c @@ -87,7 +87,7 @@ void test_arm_interrupt(void) TC_PRINT("Available IRQ line: %u\n", i); - z_arch_irq_connect_dynamic(i, 0 /* highest priority */, + arch_irq_connect_dynamic(i, 0 /* highest priority */, arm_isr_handler, NULL, 0); diff --git a/tests/arch/arm/arm_ramfunc/src/arm_ramfunc.c b/tests/arch/arm/arm_ramfunc/src/arm_ramfunc.c index e3c56ff5669..51d74e2f33f 100644 --- a/tests/arch/arm/arm_ramfunc/src/arm_ramfunc.c +++ b/tests/arch/arm/arm_ramfunc/src/arm_ramfunc.c @@ -40,7 +40,7 @@ void test_arm_ramfunc(void) * arm_ram_function(.) is user (read) accessible. 
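 * (Recall the contract documented in this patch: arch_buffer_validate()
 * returns 0 when the requested access is permitted and nonzero when the
 * permissions don't match, hence the == 0 comparison below.)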
*/ #if defined(CONFIG_USERSPACE) - zassert_true(z_arch_buffer_validate((void *)&_ramfunc_ram_start, + zassert_true(arch_buffer_validate((void *)&_ramfunc_ram_start, (size_t)&_ramfunc_ram_size, 0) == 0 /* Success */, ".ramfunc section not user accessible"); #endif /* CONFIG_USERSPACE */ diff --git a/tests/arch/arm/arm_thread_swap/README.txt b/tests/arch/arm/arm_thread_swap/README.txt index 13fd8ebf3fc..863fa4dcc87 100644 --- a/tests/arch/arm/arm_thread_swap/README.txt +++ b/tests/arch/arm/arm_thread_swap/README.txt @@ -20,7 +20,7 @@ Notes: The test verifies the correct behavior of the thread context-switch, when it is triggered indirectly (by setting the PendSV interrupt to pending state), as well as when the thread itself triggers its - swap-out (by calling z_arch_swap(.)). + swap-out (by calling arch_swap(.)). The test is currently supported in ARM Cortex-M Baseline and Mainline targets. diff --git a/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c b/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c index e8c0507db2b..49d4ea2a85a 100644 --- a/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c +++ b/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c @@ -221,7 +221,7 @@ static void alt_thread_entry(void) zassert_true(p_ztest_thread->arch.basepri == BASEPRI_MODIFIED_1, "ztest thread basepri not preserved in swap-out\n"); - /* Verify original swap return value (set by z_arch_swap() */ + /* Verify original swap return value (set by arch_swap()) */ zassert_true(p_ztest_thread->arch.swap_return_value == -EAGAIN, "ztest thread swap-return-value not preserved in swap-out\n"); #endif @@ -370,7 +370,7 @@ void test_arm_thread_swap(void) #if defined(CONFIG_USERSPACE) /* The main test thread is set to run in privilege mode */ - zassert_false((z_arch_is_user_context()), + zassert_false((arch_is_user_context()), "Main test thread does not start in privilege mode\n"); /* Assert that the mode status variable indicates privilege mode */ @@ -482,7 +482,7 @@ void test_arm_thread_swap(void) * This will be verified by the alternative test thread. */ register int swap_return_val __asm__("r0") = - z_arch_swap(BASEPRI_MODIFIED_1); + arch_swap(BASEPRI_MODIFIED_1); #endif /* CONFIG_NO_OPTIMIZATIONS */ diff --git a/tests/arch/arm/arm_zero_latency_irqs/src/arm_zero_latency_irqs.c b/tests/arch/arm/arm_zero_latency_irqs/src/arm_zero_latency_irqs.c index d0b7e85a867..52166e28a09 100644 --- a/tests/arch/arm/arm_zero_latency_irqs/src/arm_zero_latency_irqs.c +++ b/tests/arch/arm/arm_zero_latency_irqs/src/arm_zero_latency_irqs.c @@ -56,7 +56,7 @@ void test_arm_zero_latency_irqs(void) /* Configure the available IRQ line as zero-latency.
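 * Passing the IRQ_ZERO_LATENCY flag below exempts this line from kernel
 * interrupt locking on Cortex-M, which is why the priority argument is
 * marked as unused in the call.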
*/ - z_arch_irq_connect_dynamic(i, 0 /* Unused */, + arch_irq_connect_dynamic(i, 0 /* Unused */, arm_zero_latency_isr_handler, NULL, IRQ_ZERO_LATENCY); diff --git a/tests/benchmarks/timing_info/src/msg_passing_bench.c b/tests/benchmarks/timing_info/src/msg_passing_bench.c index 16cbd521701..bc87c95fb1c 100644 --- a/tests/benchmarks/timing_info/src/msg_passing_bench.c +++ b/tests/benchmarks/timing_info/src/msg_passing_bench.c @@ -23,8 +23,8 @@ K_MBOX_DEFINE(benchmark_mbox); K_SEM_DEFINE(mbox_sem, 1, 1); /* common location for the swap to write the tsc data*/ -extern u32_t z_arch_timing_value_swap_end; -extern u64_t z_arch_timing_value_swap_common; +extern u32_t arch_timing_value_swap_end; +extern u64_t arch_timing_value_swap_common; /* location of the time stamps*/ u64_t __msg_q_put_state; @@ -142,7 +142,7 @@ void msg_passing_bench(void) k_thread_abort(producer_w_cxt_switch_tid); k_thread_abort(producer_wo_cxt_switch_tid); - __msg_q_put_w_cxt_end_time = ((u32_t)z_arch_timing_value_swap_common); + __msg_q_put_w_cxt_end_time = ((u32_t)arch_timing_value_swap_common); ARG_UNUSED(msg_status); /*******************************************************************/ @@ -162,7 +162,7 @@ void msg_passing_bench(void) 2 /*priority*/, 0, K_MSEC(50)); k_sleep(K_MSEC(2000)); /* make the main thread sleep */ k_thread_abort(producer_get_w_cxt_switch_tid); - __msg_q_get_w_cxt_end_time = (z_arch_timing_value_swap_common); + __msg_q_get_w_cxt_end_time = (arch_timing_value_swap_common); /*******************************************************************/ @@ -196,7 +196,7 @@ void msg_passing_bench(void) NULL, NULL, NULL, 1 /*priority*/, 0, K_NO_WAIT); k_sleep(K_MSEC(1000)); /* make the main thread sleep */ - mbox_sync_put_end_time = (z_arch_timing_value_swap_common); + mbox_sync_put_end_time = (arch_timing_value_swap_common); /*******************************************************************/ @@ -214,7 +214,7 @@ void msg_passing_bench(void) thread_mbox_sync_get_receive, NULL, NULL, NULL, 2 /*priority*/, 0, K_NO_WAIT); k_sleep(K_MSEC(1000)); /* make the main thread sleep */ - mbox_sync_get_end_time = (z_arch_timing_value_swap_common); + mbox_sync_get_end_time = (arch_timing_value_swap_common); /*******************************************************************/ @@ -330,7 +330,7 @@ void thread_producer_msgq_w_cxt_switch(void *p1, void *p2, void *p3) { int data_to_send = 5050; - z_arch_timing_value_swap_end = 1U; + arch_timing_value_swap_end = 1U; TIMING_INFO_PRE_READ(); __msg_q_put_w_cxt_start_time = (u32_t) TIMING_INFO_OS_GET_TIME(); k_msgq_put(&benchmark_q, &data_to_send, K_NO_WAIT); @@ -367,7 +367,7 @@ void thread_producer_get_msgq_w_cxt_switch(void *p1, void *p2, void *p3) void thread_consumer_get_msgq_w_cxt_switch(void *p1, void *p2, void *p3) { producer_get_w_cxt_switch_tid->base.timeout.dticks = _EXPIRED; - z_arch_timing_value_swap_end = 1U; + arch_timing_value_swap_end = 1U; TIMING_INFO_PRE_READ(); __msg_q_get_w_cxt_start_time = TIMING_INFO_OS_GET_TIME(); received_data_get = k_msgq_get(&benchmark_q_get, @@ -391,7 +391,7 @@ void thread_mbox_sync_put_send(void *p1, void *p2, void *p3) TIMING_INFO_PRE_READ(); mbox_sync_put_start_time = TIMING_INFO_OS_GET_TIME(); - z_arch_timing_value_swap_end = 1U; + arch_timing_value_swap_end = 1U; status = k_mbox_put(&benchmark_mbox, &tx_msg, K_MSEC(300)); MBOX_CHECK(status); @@ -438,7 +438,7 @@ void thread_mbox_sync_get_receive(void *p1, void *p2, void *p3) .tx_target_thread = K_ANY }; - z_arch_timing_value_swap_end = 1U; + arch_timing_value_swap_end = 1U; 
TIMING_INFO_PRE_READ(); mbox_sync_get_start_time = TIMING_INFO_OS_GET_TIME(); diff --git a/tests/benchmarks/timing_info/src/semaphore_bench.c b/tests/benchmarks/timing_info/src/semaphore_bench.c index bd108bca4b8..243b11302b3 100644 --- a/tests/benchmarks/timing_info/src/semaphore_bench.c +++ b/tests/benchmarks/timing_info/src/semaphore_bench.c @@ -43,8 +43,8 @@ void thread_sem1_give_test(void *p1, void *p2, void *p3); k_tid_t sem0_tid; k_tid_t sem1_tid; -extern u64_t z_arch_timing_value_swap_common; -extern u32_t z_arch_timing_value_swap_end; +extern u64_t arch_timing_value_swap_common; +extern u32_t arch_timing_value_swap_end; void semaphore_bench(void) { @@ -64,7 +64,7 @@ void semaphore_bench(void) /* u64_t test_time1 = z_tsc_read(); */ - sem_end_time = (z_arch_timing_value_swap_common); + sem_end_time = (arch_timing_value_swap_common); u32_t sem_cycles = sem_end_time - sem_start_time; sem0_tid = k_thread_create(&my_thread, my_stack_area, @@ -77,7 +77,7 @@ void semaphore_bench(void) 2 /*priority*/, 0, K_NO_WAIT); k_sleep(K_MSEC(1000)); - sem_give_end_time = (z_arch_timing_value_swap_common); + sem_give_end_time = (arch_timing_value_swap_common); u32_t sem_give_cycles = sem_give_end_time - sem_give_start_time; @@ -177,7 +177,7 @@ void thread_sem1_test(void *p1, void *p2, void *p3) k_sem_give(&sem_bench); /* sync the 2 threads*/ - z_arch_timing_value_swap_end = 1U; + arch_timing_value_swap_end = 1U; TIMING_INFO_PRE_READ(); sem_start_time = TIMING_INFO_OS_GET_TIME(); k_sem_take(&sem_bench, K_MSEC(10)); @@ -207,7 +207,7 @@ void thread_sem0_give_test(void *p1, void *p2, void *p3) /* To make sure that the sem give will cause a swap to occur */ k_thread_priority_set(sem1_tid, 1); - z_arch_timing_value_swap_end = 1U; + arch_timing_value_swap_end = 1U; TIMING_INFO_PRE_READ(); sem_give_start_time = TIMING_INFO_OS_GET_TIME(); k_sem_give(&sem_bench_1); diff --git a/tests/benchmarks/timing_info/src/thread_bench.c b/tests/benchmarks/timing_info/src/thread_bench.c index d9254796af9..59bfed5fc96 100644 --- a/tests/benchmarks/timing_info/src/thread_bench.c +++ b/tests/benchmarks/timing_info/src/thread_bench.c @@ -19,9 +19,9 @@ char sline[256]; /* FILE *output_file = stdout; */ /* location of the time stamps*/ -extern u32_t z_arch_timing_value_swap_end; -extern u64_t z_arch_timing_value_swap_temp; -extern u64_t z_arch_timing_value_swap_common; +extern u32_t arch_timing_value_swap_end; +extern u64_t arch_timing_value_swap_temp; +extern u64_t arch_timing_value_swap_common; volatile u64_t thread_abort_end_time; volatile u64_t thread_abort_start_time; @@ -48,7 +48,7 @@ K_THREAD_STACK_DEFINE(my_stack_area_0, STACK_SIZE); struct k_thread my_thread; struct k_thread my_thread_0; -u32_t z_arch_timing_value_swap_end_test = 1U; +u32_t arch_timing_value_swap_end_test = 1U; u64_t dummy_time; u64_t start_time; u64_t test_end_time; @@ -68,9 +68,9 @@ u32_t benchmarking_overhead_swap(void) "rdtsc\n\t" "mov %eax,start_time\n\t" "mov %edx,start_time+4\n\t" - "cmp $0x1,z_arch_timing_value_swap_end_test\n\t" + "cmp $0x1,arch_timing_value_swap_end_test\n\t" "jne time_read_not_needed_test\n\t" - "movw $0x2,z_arch_timing_value_swap_end\n\t" + "movw $0x2,arch_timing_value_swap_end\n\t" "pushl %eax\n\t" "pushl %edx\n\t" "rdtsc\n\t" @@ -99,7 +99,7 @@ void test_thread_entry(void *p, void *p1, void *p2) void thread_swap_test(void *p1, void *p2, void *p3) { - z_arch_timing_value_swap_end = 1U; + arch_timing_value_swap_end = 1U; TIMING_INFO_PRE_READ(); thread_abort_start_time = TIMING_INFO_OS_GET_TIME(); k_thread_abort(_current); @@ 
-142,20 +142,20 @@ void system_thread_bench(void) -1 /*priority*/, 0, K_NO_WAIT); k_sleep(K_MSEC(1)); - thread_abort_end_time = (z_arch_timing_value_swap_common); - z_arch_timing_swap_end = z_arch_timing_value_swap_common; + thread_abort_end_time = (arch_timing_value_swap_common); + arch_timing_swap_end = arch_timing_value_swap_common; #if defined(CONFIG_X86) - z_arch_timing_swap_start = z_arch_timing_value_swap_temp; + arch_timing_swap_start = arch_timing_value_swap_temp; /* In the rest of ARCHes read_timer_start_of_swap() has already * registered the time-stamp of the start of context-switch in - * z_arch_timing_swap_start. + * arch_timing_swap_start. */ #endif - u32_t total_swap_cycles = z_arch_timing_swap_end - z_arch_timing_swap_start; + u32_t total_swap_cycles = arch_timing_swap_end - arch_timing_swap_start; /* Interrupt latency*/ - u64_t local_end_intr_time = z_arch_timing_irq_end; - u64_t local_start_intr_time = z_arch_timing_irq_start; + u64_t local_end_intr_time = arch_timing_irq_end; + u64_t local_start_intr_time = arch_timing_irq_start; /*******************************************************************/ /* thread create*/ @@ -224,8 +224,10 @@ void system_thread_bench(void) (u32_t) (CYCLES_TO_NS(intr_latency_cycles))); /*tick overhead*/ - u32_t tick_overhead_cycles = SUBTRACT_CLOCK_CYCLES(z_arch_timing_tick_end) - - SUBTRACT_CLOCK_CYCLES(z_arch_timing_tick_start); + u32_t tick_overhead_cycles = + SUBTRACT_CLOCK_CYCLES(arch_timing_tick_end) - + SUBTRACT_CLOCK_CYCLES(arch_timing_tick_start); + PRINT_STATS("Tick overhead", (u32_t)(tick_overhead_cycles), (u32_t) (CYCLES_TO_NS(tick_overhead_cycles))); diff --git a/tests/benchmarks/timing_info/src/userspace_bench.c b/tests/benchmarks/timing_info/src/userspace_bench.c index 8431678b545..b1d2caf5b16 100644 --- a/tests/benchmarks/timing_info/src/userspace_bench.c +++ b/tests/benchmarks/timing_info/src/userspace_bench.c @@ -20,7 +20,7 @@ K_APPMEM_PARTITION_DEFINE(bench_ptn); struct k_mem_domain bench_domain; extern char sline[256]; -extern u64_t z_arch_timing_enter_user_mode_end; +extern u64_t arch_timing_enter_user_mode_end; u32_t drop_to_user_mode_end_time, drop_to_user_mode_start_time; u32_t user_thread_creation_end_time, user_thread_creation_start_time; @@ -105,7 +105,7 @@ void drop_to_user_mode(void) k_yield(); drop_to_user_mode_end_time = (u32_t) - SUBTRACT_CLOCK_CYCLES(z_arch_timing_enter_user_mode_end); + SUBTRACT_CLOCK_CYCLES(arch_timing_enter_user_mode_end); u32_t tmp_start_time = SUBTRACT_CLOCK_CYCLES(drop_to_user_mode_start_time); diff --git a/tests/benchmarks/timing_info/src/yield_bench.c b/tests/benchmarks/timing_info/src/yield_bench.c index 127a5c64014..a8b79ff6781 100644 --- a/tests/benchmarks/timing_info/src/yield_bench.c +++ b/tests/benchmarks/timing_info/src/yield_bench.c @@ -48,12 +48,12 @@ void yield_bench(void) 0 /*priority*/, 0, K_NO_WAIT); /*read the time of start of the sleep till the swap happens */ - z_arch_timing_value_swap_end = 1U; + arch_timing_value_swap_end = 1U; TIMING_INFO_PRE_READ(); thread_sleep_start_time = TIMING_INFO_OS_GET_TIME(); k_sleep(K_MSEC(1000)); - thread_sleep_end_time = ((u32_t)z_arch_timing_value_swap_common); + thread_sleep_end_time = ((u32_t)arch_timing_value_swap_common); u32_t yield_cycles = (thread_end_time - thread_start_time) / 2000U; u32_t sleep_cycles = thread_sleep_end_time - thread_sleep_start_time; diff --git a/tests/kernel/common/src/irq_offload.c b/tests/kernel/common/src/irq_offload.c index 32eb0386a56..7618a0f118f 100644 --- a/tests/kernel/common/src/irq_offload.c +++ 
b/tests/kernel/common/src/irq_offload.c @@ -43,16 +43,16 @@ void test_irq_offload(void) /* Simple validation of nested locking. */ unsigned int key1, key2; - key1 = z_arch_irq_lock(); - zassert_true(z_arch_irq_unlocked(key1), + key1 = arch_irq_lock(); + zassert_true(arch_irq_unlocked(key1), "IRQs should have been unlocked, but key is 0x%x\n", key1); - key2 = z_arch_irq_lock(); - zassert_false(z_arch_irq_unlocked(key2), + key2 = arch_irq_lock(); + zassert_false(arch_irq_unlocked(key2), "IRQs should have been locked, but key is 0x%x\n", key2); - z_arch_irq_unlock(key2); - z_arch_irq_unlock(key1); + arch_irq_unlock(key2); + arch_irq_unlock(key1); /**TESTPOINT: Offload to IRQ context*/ irq_offload(offload_function, (void *)SENTINEL_VALUE); diff --git a/tests/kernel/fp_sharing/float_disable/src/k_float_disable.c b/tests/kernel/fp_sharing/float_disable/src/k_float_disable.c index 53d865d41af..2a489add4dd 100644 --- a/tests/kernel/fp_sharing/float_disable/src/k_float_disable.c +++ b/tests/kernel/fp_sharing/float_disable/src/k_float_disable.c @@ -197,7 +197,7 @@ static void sup_fp_thread_entry(void) TC_PRINT("Available IRQ line: %u\n", i); - z_arch_irq_connect_dynamic(i, + arch_irq_connect_dynamic(i, 0, arm_test_isr_handler, NULL, diff --git a/tests/kernel/gen_isr_table/src/main.c b/tests/kernel/gen_isr_table/src/main.c index 098f4925da4..57715f0c48c 100644 --- a/tests/kernel/gen_isr_table/src/main.c +++ b/tests/kernel/gen_isr_table/src/main.c @@ -11,7 +11,7 @@ extern u32_t _irq_vector_table[]; -#if defined(Z_ARCH_IRQ_DIRECT_CONNECT) && defined(CONFIG_GEN_IRQ_VECTOR_TABLE) +#if defined(ARCH_IRQ_DIRECT_CONNECT) && defined(CONFIG_GEN_IRQ_VECTOR_TABLE) #define HAS_DIRECT_IRQS #endif diff --git a/tests/kernel/interrupt/src/main.c b/tests/kernel/interrupt/src/main.c index 1ed124bd547..cd56d626764 100644 --- a/tests/kernel/interrupt/src/main.c +++ b/tests/kernel/interrupt/src/main.c @@ -42,7 +42,7 @@ static void do_isr_dynamic(void) "could not find slot for dynamic isr"); argval = &i; - z_arch_irq_connect_dynamic(i + CONFIG_GEN_IRQ_START_VECTOR, 0, dyn_isr, + arch_irq_connect_dynamic(i + CONFIG_GEN_IRQ_START_VECTOR, 0, dyn_isr, argval, 0); zassert_true(_sw_isr_table[i].isr == dyn_isr && diff --git a/tests/kernel/interrupt/src/nested_irq.c b/tests/kernel/interrupt/src/nested_irq.c index 433545e1b65..46be21faf05 100644 --- a/tests/kernel/interrupt/src/nested_irq.c +++ b/tests/kernel/interrupt/src/nested_irq.c @@ -104,8 +104,8 @@ void test_nested_isr(void) #if defined(CONFIG_CPU_CORTEX_M) irq_line_0 = get_available_nvic_line(CONFIG_NUM_IRQS); irq_line_1 = get_available_nvic_line(irq_line_0); - z_arch_irq_connect_dynamic(irq_line_0, ISR0_PRIO, isr0, NULL, 0); - z_arch_irq_connect_dynamic(irq_line_1, ISR1_PRIO, isr1, NULL, 0); + arch_irq_connect_dynamic(irq_line_0, ISR0_PRIO, isr0, NULL, 0); + arch_irq_connect_dynamic(irq_line_1, ISR1_PRIO, isr1, NULL, 0); #else IRQ_CONNECT(IRQ_LINE(ISR0_OFFSET), ISR0_PRIO, isr0, NULL, 0); IRQ_CONNECT(IRQ_LINE(ISR1_OFFSET), ISR1_PRIO, isr1, NULL, 0); diff --git a/tests/kernel/mem_protect/mem_protect/src/mem_domain.c b/tests/kernel/mem_protect/mem_protect/src/mem_domain.c index 0bec2058448..8b93c3dbf34 100644 --- a/tests/kernel/mem_protect/mem_protect/src/mem_domain.c +++ b/tests/kernel/mem_protect/mem_protect/src/mem_domain.c @@ -356,7 +356,7 @@ void test_mem_domain_add_partitions_invalid(void *p1, void *p2, void *p3) /* Subtract one since the domain is initialized with one partition * already present. 
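 * Purely as an illustration: if arch_mem_domain_max_partitions_get()
 * reported 8, max_partitions below would be 7, i.e. at most seven further
 * partitions could be added to the already-initialized domain.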
*/ - u8_t max_partitions = (u8_t)z_arch_mem_domain_max_partitions_get() - 1; + u8_t max_partitions = (u8_t)arch_mem_domain_max_partitions_get() - 1; u8_t index; k_mem_domain_remove_thread(k_current_get()); @@ -420,7 +420,7 @@ void mem_domain_for_user_tc4(void *max_partitions, void *p2, void *p3) void test_mem_domain_add_partitions_simple(void *p1, void *p2, void *p3) { - u8_t max_partitions = (u8_t)z_arch_mem_domain_max_partitions_get(); + u8_t max_partitions = (u8_t)arch_mem_domain_max_partitions_get(); u8_t index; k_mem_domain_init(&mem_domain_tc3_mem_domain, diff --git a/tests/kernel/mem_protect/syscalls/src/main.c b/tests/kernel/mem_protect/syscalls/src/main.c index 831bb4bed95..9282e4cc608 100644 --- a/tests/kernel/mem_protect/syscalls/src/main.c +++ b/tests/kernel/mem_protect/syscalls/src/main.c @@ -155,7 +155,7 @@ void test_string_nlen(void) size_t ret; ret = string_nlen(kernel_string, BUF_SIZE, &err); - if (z_arch_is_user_context()) { + if (arch_is_user_context()) { zassert_equal(err, -1, "kernel string did not fault on user access"); } else { diff --git a/tests/kernel/mem_protect/userspace/src/main.c b/tests/kernel/mem_protect/userspace/src/main.c index 058451acdf3..7d7ba606112 100644 --- a/tests/kernel/mem_protect/userspace/src/main.c +++ b/tests/kernel/mem_protect/userspace/src/main.c @@ -909,7 +909,7 @@ static inline void z_vrfy_stack_info_get(u32_t *start_addr, u32_t *size) int z_impl_check_perms(void *addr, size_t size, int write) { - return z_arch_buffer_validate(addr, size, write); + return arch_buffer_validate(addr, size, write); } static inline int z_vrfy_check_perms(void *addr, size_t size, int write) @@ -971,7 +971,7 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size) *pos = val; } - if (z_arch_is_user_context()) { + if (arch_is_user_context()) { /* If we're in user mode, check every byte in the stack buffer * to ensure that the thread has permissions on it. 
*/ @@ -999,7 +999,7 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size) Z_THREAD_STACK_BUFFER(stack_obj)); } - if (z_arch_is_user_context()) { + if (arch_is_user_context()) { zassert_true(stack_size <= obj_size - K_THREAD_STACK_RESERVED, "bad stack size in thread struct"); } @@ -1081,7 +1081,7 @@ void test_bad_syscall(void) expect_fault = true; expected_reason = K_ERR_KERNEL_OOPS; - z_arch_syscall_invoke0(INT_MAX); + arch_syscall_invoke0(INT_MAX); } diff --git a/tests/kernel/mp/src/main.c b/tests/kernel/mp/src/main.c index e6886b8de9a..96710f2c865 100644 --- a/tests/kernel/mp/src/main.c +++ b/tests/kernel/mp/src/main.c @@ -50,13 +50,13 @@ void cpu1_fn(int key, void *arg) * * @ingroup kernel_mp_tests * - * @see z_arch_start_cpu() + * @see arch_start_cpu() */ void test_mp_start(void) { cpu_arg = 12345; - z_arch_start_cpu(1, cpu1_stack, CPU1_STACK_SIZE, cpu1_fn, &cpu_arg); + arch_start_cpu(1, cpu1_stack, CPU1_STACK_SIZE, cpu1_fn, &cpu_arg); while (!cpu_running) { } diff --git a/tests/kernel/smp/src/main.c b/tests/kernel/smp/src/main.c index 0018ae23586..629b97ef041 100644 --- a/tests/kernel/smp/src/main.c +++ b/tests/kernel/smp/src/main.c @@ -120,7 +120,7 @@ static void child_fn(void *p1, void *p2, void *p3) ARG_UNUSED(p3); int parent_cpu_id = POINTER_TO_INT(p1); - zassert_true(parent_cpu_id != z_arch_curr_cpu()->id, + zassert_true(parent_cpu_id != arch_curr_cpu()->id, "Parent isn't on other core"); sync_count++; @@ -140,7 +140,7 @@ void test_cpu_id_threads(void) /* Make sure idle thread runs on each core */ k_sleep(K_MSEC(1000)); - int parent_cpu_id = z_arch_curr_cpu()->id; + int parent_cpu_id = arch_curr_cpu()->id; k_tid_t tid = k_thread_create(&t2, t2_stack, T2_STACK_SIZE, child_fn, INT_TO_POINTER(parent_cpu_id), NULL, @@ -161,7 +161,7 @@ static void thread_entry(void *p1, void *p2, void *p3) int count = 0; tinfo[thread_num].executed = 1; - tinfo[thread_num].cpu_id = z_arch_curr_cpu()->id; + tinfo[thread_num].cpu_id = arch_curr_cpu()->id; while (count++ < 5) { k_busy_wait(DELAY_US); diff --git a/tests/kernel/spinlock/src/main.c b/tests/kernel/spinlock/src/main.c index 0d5b1fbd9de..161822d082f 100644 --- a/tests/kernel/spinlock/src/main.c +++ b/tests/kernel/spinlock/src/main.c @@ -110,7 +110,7 @@ void cpu1_fn(void *p1, void *p2, void *p3) * * @ingroup kernel_spinlock_tests * - * @see z_arch_start_cpu() + * @see arch_start_cpu() */ void test_spinlock_bounce(void) {