kernel: rename z_arch_ to arch_

Promote the private z_arch_* namespace, which specifies
the interface between the core kernel and the
architecture code, to a new top-level namespace named
arch_*.

This allows our documentation generation to create
online documentation for this set of interfaces,
and this set of interfaces is worth treating in a
more formal way anyway.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit 4f77c2ad53
Andrew Boie, 2019-11-07 12:43:29 -08:00 (committed by Andrew Boie)
178 changed files with 912 additions and 910 deletions
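For illustration, a minimal sketch of what the rename means for a caller. arch_irq_lock()/arch_irq_unlock() (formerly z_arch_irq_lock()/z_arch_irq_unlock()) are real interfaces touched below; the helper function itself is hypothetical:

    /* Hypothetical helper built on the renamed interface. */
    static u64_t read_counter_atomically(volatile u64_t *counter)
    {
            unsigned int key = arch_irq_lock(); /* was z_arch_irq_lock() */
            u64_t value = *counter;             /* critical section */

            arch_irq_unlock(key);               /* was z_arch_irq_unlock(key) */
            return value;
    }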

View file

@@ -223,7 +223,7 @@ u64_t z_arc_connect_gfrc_read(void)
 * sub-components. For GFRC, HW allows simultaneously accessing to
 * counters. So an irq lock is enough.
 */
-key = z_arch_irq_lock();
+key = arch_irq_lock();
 z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_LO, 0);
 low = z_arc_connect_cmd_readback();
@@ -231,7 +231,7 @@ u64_t z_arc_connect_gfrc_read(void)
 z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_HI, 0);
 high = z_arc_connect_cmd_readback();
-z_arch_irq_unlock(key);
+arch_irq_unlock(key);
 return (((u64_t)high) << 32) | low;
 }

View file

@@ -55,7 +55,7 @@ u64_t z_arc_smp_switch_in_isr(void)
 if (new_thread != old_thread) {
 _current_cpu->swap_ok = 0;
 ((struct k_thread *)new_thread)->base.cpu =
-z_arch_curr_cpu()->id;
+arch_curr_cpu()->id;
 _current = (struct k_thread *) new_thread;
 ret = new_thread | ((u64_t)(old_thread) << 32);
 }
@@ -83,8 +83,8 @@ volatile u32_t arc_cpu_wake_flag;
 volatile _cpu_t *_curr_cpu[CONFIG_MP_NUM_CPUS];
 /* Called from Zephyr initialization */
-void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
+void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 void (*fn)(int, void *), void *arg)
 {
 _curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]);
 arc_cpu_init[cpu_num].fn = fn;
@@ -109,14 +109,14 @@ void z_arc_slave_start(int cpu_num)
 z_irq_priority_set(IRQ_ICI, ARCV2_ICI_IRQ_PRIORITY, 0);
 irq_enable(IRQ_ICI);
-/* call the function set by z_arch_start_cpu */
+/* call the function set by arch_start_cpu */
 fn = arc_cpu_init[cpu_num].fn;
 fn(cpu_num, arc_cpu_init[cpu_num].arg);
 }
 /* arch implementation of sched_ipi */
-void z_arch_sched_ipi(void)
+void arch_sched_ipi(void)
 {
 u32_t i;

View file

@@ -17,8 +17,8 @@
 #include <linker/sections.h>
 #include <arch/cpu.h>
-GTEXT(z_arch_cpu_idle)
-GTEXT(z_arch_cpu_atomic_idle)
+GTEXT(arch_cpu_idle)
+GTEXT(arch_cpu_atomic_idle)
 GDATA(z_arc_cpu_sleep_mode)
 SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
@@ -33,7 +33,7 @@ SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
 * void nanCpuIdle(void)
 */
-SECTION_FUNC(TEXT, z_arch_cpu_idle)
+SECTION_FUNC(TEXT, arch_cpu_idle)
 #ifdef CONFIG_TRACING
 push_s blink
@@ -52,9 +52,9 @@ SECTION_FUNC(TEXT, z_arch_cpu_idle)
 *
 * This function exits with interrupts restored to <key>.
 *
-* void z_arch_cpu_atomic_idle(unsigned int key)
+* void arch_cpu_atomic_idle(unsigned int key)
 */
-SECTION_FUNC(TEXT, z_arch_cpu_atomic_idle)
+SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
 push_s blink

View file

@@ -28,13 +28,13 @@ void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
 z_fatal_error(reason, esf);
 }
-FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
+FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
 {
 z_arc_fatal_error(K_ERR_KERNEL_OOPS, ssf_ptr);
 CODE_UNREACHABLE;
 }
-FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
+FUNC_NORETURN void arch_system_halt(unsigned int reason)
 {
 ARG_UNUSED(reason);

View file

@@ -93,7 +93,7 @@ void z_arc_firq_stack_set(void)
 * @return N/A
 */
-void z_arch_irq_enable(unsigned int irq)
+void arch_irq_enable(unsigned int irq)
 {
 unsigned int key = irq_lock();
@@ -110,7 +110,7 @@ void z_arch_irq_enable(unsigned int irq)
 * @return N/A
 */
-void z_arch_irq_disable(unsigned int irq)
+void arch_irq_disable(unsigned int irq)
 {
 unsigned int key = irq_lock();
@@ -124,7 +124,7 @@ void z_arch_irq_disable(unsigned int irq)
 * @param irq IRQ line
 * @return interrupt enable state, true or false
 */
-int z_arch_irq_is_enabled(unsigned int irq)
+int arch_irq_is_enabled(unsigned int irq)
 {
 return z_arc_v2_irq_unit_int_enabled(irq);
 }
@@ -181,9 +181,9 @@ void z_irq_spurious(void *unused)
 }
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 void (*routine)(void *parameter), void *parameter,
 u32_t flags)
 {
 z_isr_install(irq, routine, parameter);
 z_irq_priority_set(irq, priority, flags);

View file

@@ -20,7 +20,7 @@ void z_irq_do_offload(void)
 offload_routine(offload_param);
 }
-void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
+void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
 {
 unsigned int key;

View file

@@ -68,7 +68,7 @@ The context switch code adopts this standard so that it is easier to follow:
 transition from outgoing thread to incoming thread
 Not loading _kernel into r0 allows loading _kernel without stomping on
-the parameter in r0 in z_arch_switch().
+the parameter in r0 in arch_switch().
 ARCv2 processors have two kinds of interrupts: fast (FIRQ) and regular. The
@@ -168,7 +168,7 @@ From FIRQ:
 o to coop
-The address of the returning instruction from z_arch_switch() is loaded
+The address of the returning instruction from arch_switch() is loaded
 in ilink and the saved status32 in status32_p0.
 o to any irq

View file

@@ -27,7 +27,7 @@ void configure_mpu_thread(struct k_thread *thread)
 #if defined(CONFIG_USERSPACE)
-int z_arch_mem_domain_max_partitions_get(void)
+int arch_mem_domain_max_partitions_get(void)
 {
 return arc_core_mpu_get_max_domain_partition_regions();
 }
@@ -35,8 +35,8 @@ int z_arch_mem_domain_max_partitions_get(void)
 /*
 * Reset MPU region for a single memory partition
 */
-void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
+void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 u32_t partition_id)
 {
 if (_current->mem_domain_info.mem_domain != domain) {
 return;
@@ -50,7 +50,7 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 /*
 * Configure MPU memory domain
 */
-void z_arch_mem_domain_thread_add(struct k_thread *thread)
+void arch_mem_domain_thread_add(struct k_thread *thread)
 {
 if (_current != thread) {
 return;
@@ -64,7 +64,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread)
 /*
 * Destroy MPU regions for the mem domain
 */
-void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
+void arch_mem_domain_destroy(struct k_mem_domain *domain)
 {
 if (_current->mem_domain_info.mem_domain != domain) {
 return;
@@ -75,25 +75,25 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
 arc_core_mpu_enable();
 }
-void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
+void arch_mem_domain_partition_add(struct k_mem_domain *domain,
 u32_t partition_id)
 {
 /* No-op on this architecture */
 }
-void z_arch_mem_domain_thread_remove(struct k_thread *thread)
+void arch_mem_domain_thread_remove(struct k_thread *thread)
 {
 if (_current != thread) {
 return;
 }
-z_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
+arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
 }
 /*
 * Validate the given buffer is user accessible or not
 */
-int z_arch_buffer_validate(void *addr, size_t size, int write)
+int arch_buffer_validate(void *addr, size_t size, int write)
 {
 return arc_core_mpu_buffer_validate(addr, size, write);
 }

View file

@@ -64,7 +64,7 @@ PRE-CONTEXT-SWITCH STACK
 --------------------------------------
 SP -> | Return address; PC (Program Counter), in fact value taken from
-| BLINK register in z_arch_switch()
+| BLINK register in arch_switch()
 --------------------------------------
 | STATUS32 value, we explicitly save it here for later usage, read-on
 --------------------------------------

View file

@@ -22,37 +22,37 @@
 #include <v2/irq.h>
 #include <swap_macros.h>
-GTEXT(z_arch_switch)
+GTEXT(arch_switch)
 /**
 *
 * @brief Initiate a cooperative context switch
 *
-* The z_arch_switch routine is invoked by various kernel services to effect
-* a cooperative context switch. Prior to invoking z_arch_switch, the caller
+* The arch_switch routine is invoked by various kernel services to effect
+* a cooperative context switch. Prior to invoking arch_switch, the caller
 * disables interrupts via irq_lock()
-* Given that z_arch_switch() is called to effect a cooperative context switch,
+* Given that arch_switch() is called to effect a cooperative context switch,
 * the caller-saved integer registers are saved on the stack by the function
-* call preamble to z_arch_switch. This creates a custom stack frame that will
-* be popped when returning from z_arch_switch, but is not suitable for handling
+* call preamble to arch_switch. This creates a custom stack frame that will
+* be popped when returning from arch_switch, but is not suitable for handling
 * a return from an exception. Thus, the fact that the thread is pending because
-* of a cooperative call to z_arch_switch() has to be recorded via the
+* of a cooperative call to arch_switch() has to be recorded via the
 * _CAUSE_COOP code in the relinquish_cause of the thread's k_thread structure.
 * The _rirq_exit()/_firq_exit() code will take care of doing the right thing
 * to restore the thread status.
 *
-* When z_arch_switch() is invoked, we know the decision to perform a context
+* When arch_switch() is invoked, we know the decision to perform a context
 * switch or not has already been taken and a context switch must happen.
 *
 *
 * C function prototype:
 *
-* void z_arch_switch(void *switch_to, void **switched_from);
+* void arch_switch(void *switch_to, void **switched_from);
 *
 */
-SECTION_FUNC(TEXT, z_arch_switch)
+SECTION_FUNC(TEXT, arch_switch)
 #ifdef CONFIG_EXECUTION_BENCHMARKING
 push_s r0
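A hedged C-level sketch of the call contract documented above; the switch_handle field and the exact home of the _CAUSE_COOP bookkeeping are assumptions for illustration, not code from this commit:

    /* Cooperative switch per the comment above (sketch only). */
    static void coop_switch(struct k_thread *old, struct k_thread *new)
    {
            unsigned int key = arch_irq_lock(); /* caller locks interrupts */

            old->relinquish_cause = _CAUSE_COOP; /* record cooperative exit */
            arch_switch(new->switch_handle,      /* switch_to */
                        &old->switch_handle);    /* switched_from */
            arch_irq_unlock(key); /* resumes here once rescheduled back */
    }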

View file

@@ -58,10 +58,10 @@ struct init_stack_frame {
 *
 * @return N/A
 */
-void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 size_t stackSize, k_thread_entry_t pEntry,
 void *parameter1, void *parameter2, void *parameter3,
 int priority, unsigned int options)
 {
 char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
 Z_ASSERT_VALID_PRIO(priority, pEntry);
@@ -92,7 +92,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 (u32_t)(stackEnd + STACK_GUARD_SIZE);
 stackAdjEnd = (char *)STACK_ROUND_DOWN(stackEnd +
-Z_ARCH_THREAD_STACK_RESERVED);
+ARCH_THREAD_STACK_RESERVED);
 /* reserve 4 bytes for the start of user sp */
 stackAdjEnd -= 4;
@@ -122,7 +122,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 */
 pStackMem += STACK_GUARD_SIZE;
 stackAdjSize = stackAdjSize + CONFIG_PRIVILEGED_STACK_SIZE;
-stackEnd += Z_ARCH_THREAD_STACK_RESERVED;
+stackEnd += ARCH_THREAD_STACK_RESERVED;
 thread->arch.priv_stack_start = 0;
@@ -161,7 +161,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 */
 pInitCtx->status32 |= _ARC_V2_STATUS32_US;
 #else /* For no USERSPACE feature */
-pStackMem += Z_ARCH_THREAD_STACK_RESERVED;
+pStackMem += ARCH_THREAD_STACK_RESERVED;
 stackEnd = pStackMem + stackSize;
 z_new_thread_init(thread, pStackMem, stackSize, priority, options);
@@ -199,7 +199,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 thread->arch.k_stack_top =
 (u32_t)(stackEnd + STACK_GUARD_SIZE);
 thread->arch.k_stack_base = (u32_t)
-(stackEnd + Z_ARCH_THREAD_STACK_RESERVED);
+(stackEnd + ARCH_THREAD_STACK_RESERVED);
 } else {
 thread->arch.k_stack_top = (u32_t)pStackMem;
 thread->arch.k_stack_base = (u32_t)stackEnd;
@@ -227,8 +227,8 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #ifdef CONFIG_USERSPACE
-FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
+FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 void *p1, void *p2, void *p3)
 {
 /*
@@ -270,7 +270,7 @@ FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
 #endif
 #if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
-int z_arch_float_disable(struct k_thread *thread)
+int arch_float_disable(struct k_thread *thread)
 {
 unsigned int key;
@@ -287,7 +287,7 @@ int z_arch_float_disable(struct k_thread *thread)
 }
-int z_arch_float_enable(struct k_thread *thread)
+int arch_float_enable(struct k_thread *thread)
 {
 unsigned int key;

View file

@@ -22,7 +22,7 @@ GTEXT(z_thread_entry_wrapper1)
 * @brief Wrapper for z_thread_entry
 *
 * The routine pops parameters for the z_thread_entry from stack frame, prepared
-* by the z_arch_new_thread() routine.
+* by the arch_new_thread() routine.
 *
 * @return N/A
 */

View file

@@ -48,7 +48,7 @@
 GTEXT(z_arc_userspace_enter)
 GTEXT(_arc_do_syscall)
 GTEXT(z_user_thread_entry_wrapper)
-GTEXT(z_arch_user_string_nlen)
+GTEXT(arch_user_string_nlen)
 GTEXT(z_arc_user_string_nlen_fault_start)
 GTEXT(z_arc_user_string_nlen_fault_end)
 GTEXT(z_arc_user_string_nlen_fixup)
@@ -248,9 +248,9 @@ SECTION_FUNC(TEXT, _arc_do_syscall)
 rtie
 /*
-* size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
+* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
 */
-SECTION_FUNC(TEXT, z_arch_user_string_nlen)
+SECTION_FUNC(TEXT, arch_user_string_nlen)
 /* int err; */
 sub_s sp,sp,0x4

View file

@@ -33,7 +33,7 @@
 extern "C" {
 #endif
-static ALWAYS_INLINE void z_arch_kernel_init(void)
+static ALWAYS_INLINE void arch_kernel_init(void)
 {
 z_irq_setup();
 _current_cpu->irq_stack =
@@ -55,7 +55,7 @@ static ALWAYS_INLINE int Z_INTERRUPT_CAUSE(void)
 return irq_num;
 }
-static inline bool z_arch_is_in_isr(void)
+static inline bool arch_is_in_isr(void)
 {
 return z_arc_v2_irq_unit_is_in_isr();
 }
@@ -67,10 +67,10 @@ extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
 void *p2, void *p3, u32_t stack, u32_t size);
-extern void z_arch_switch(void *switch_to, void **switched_from);
+extern void arch_switch(void *switch_to, void **switched_from);
 extern void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
-extern void z_arch_sched_ipi(void);
+extern void arch_sched_ipi(void);
 #ifdef __cplusplus
 }

View file

@@ -258,7 +258,7 @@
 * The pc and status32 values will still be on the stack. We cannot
 * pop them yet because the callers of _pop_irq_stack_frame must reload
 * status32 differently depending on the execution context they are
-* running in (z_arch_switch(), firq or exception).
+* running in (arch_switch(), firq or exception).
 */
 add_s sp, sp, ___isf_t_SIZEOF

View file

@@ -607,7 +607,7 @@ static u32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
 /* Workaround for #18712:
 * HardFault may be due to escalation, as a result of
 * an SVC instruction that could not be executed; this
-* can occur if Z_ARCH_EXCEPT() is called by an ISR,
+* can occur if ARCH_EXCEPT() is called by an ISR,
 * which executes at priority equal to the SVC handler
 * priority. We handle the case of Kernel OOPS and Stack
 * Fail here.
@@ -623,7 +623,7 @@ static u32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
 if (((fault_insn & 0xff00) == _SVC_OPCODE) &&
 ((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) {
-PR_EXC("Z_ARCH_EXCEPT with reason %x\n", esf->basic.r0);
+PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
 reason = esf->basic.r0;
 }
 #undef _SVC_OPCODE
@@ -935,7 +935,7 @@ void z_arm_fault(u32_t msp, u32_t psp, u32_t exc_return)
 z_arch_esf_t esf_copy;
 /* Force unlock interrupts */
-z_arch_irq_unlock(0);
+arch_irq_unlock(0);
 /* Retrieve the Exception Stack Frame (ESF) to be supplied
 * as argument to the remainder of the fault handling process.

View file

@@ -259,7 +259,7 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
 }
 #if defined(CONFIG_USERSPACE)
-int z_arch_mem_domain_max_partitions_get(void)
+int arch_mem_domain_max_partitions_get(void)
 {
 int available_regions = arm_core_mpu_get_max_available_dyn_regions();
@@ -274,7 +274,7 @@ int z_arch_mem_domain_max_partitions_get(void)
 return ARM_CORE_MPU_MAX_DOMAIN_PARTITIONS_GET(available_regions);
 }
-void z_arch_mem_domain_thread_add(struct k_thread *thread)
+void arch_mem_domain_thread_add(struct k_thread *thread)
 {
 if (_current != thread) {
 return;
@@ -287,7 +287,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread)
 z_arm_configure_dynamic_mpu_regions(thread);
 }
-void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
+void arch_mem_domain_destroy(struct k_mem_domain *domain)
 {
 /* This function will reset the access permission configuration
 * of the active partitions of the memory domain.
@@ -317,8 +317,8 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
 }
 }
-void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
+void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 u32_t partition_id)
 {
 /* Request to remove a partition from a memory domain.
 * This resets the access permissions of the partition
@@ -334,22 +334,22 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 &domain->partitions[partition_id], &reset_attr);
 }
-void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
+void arch_mem_domain_partition_add(struct k_mem_domain *domain,
 u32_t partition_id)
 {
 /* No-op on this architecture */
 }
-void z_arch_mem_domain_thread_remove(struct k_thread *thread)
+void arch_mem_domain_thread_remove(struct k_thread *thread)
 {
 if (_current != thread) {
 return;
 }
-z_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
+arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
 }
-int z_arch_buffer_validate(void *addr, size_t size, int write)
+int arch_buffer_validate(void *addr, size_t size, int write)
 {
 return arm_core_mpu_buffer_validate(addr, size, write);
 }

View file

@@ -16,8 +16,8 @@
 _ASM_FILE_PROLOGUE
 GTEXT(z_arm_cpu_idle_init)
-GTEXT(z_arch_cpu_idle)
-GTEXT(z_arch_cpu_atomic_idle)
+GTEXT(arch_cpu_idle)
+GTEXT(arch_cpu_atomic_idle)
 #if defined(CONFIG_CPU_CORTEX_M)
 #define _SCB_SCR 0xE000ED10
@@ -32,7 +32,7 @@ GTEXT(z_arch_cpu_atomic_idle)
 *
 * @brief Initialization of CPU idle
 *
-* Only called by z_arch_kernel_init(). Sets SEVONPEND bit once for the system's
+* Only called by arch_kernel_init(). Sets SEVONPEND bit once for the system's
 * duration.
 *
 * @return N/A
@@ -50,7 +50,7 @@ SECTION_FUNC(TEXT, z_arm_cpu_idle_init)
 #endif
 bx lr
-SECTION_FUNC(TEXT, z_arch_cpu_idle)
+SECTION_FUNC(TEXT, arch_cpu_idle)
 #ifdef CONFIG_TRACING
 push {r0, lr}
 bl sys_trace_idle
@@ -77,7 +77,7 @@ SECTION_FUNC(TEXT, z_arch_cpu_idle)
 bx lr
-SECTION_FUNC(TEXT, z_arch_cpu_atomic_idle)
+SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
 push {r0, lr}
 bl sys_trace_idle

View file

@@ -86,7 +86,7 @@ void z_do_kernel_oops(const z_arch_esf_t *esf)
 z_arm_fatal_error(reason, esf);
 }
-FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
+FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
 {
 u32_t *ssf_contents = ssf_ptr;
 z_arch_esf_t oops_esf = { 0 };

View file

@@ -36,17 +36,17 @@ extern void z_arm_reserved(void);
 #define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG)
 #define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG)
-void z_arch_irq_enable(unsigned int irq)
+void arch_irq_enable(unsigned int irq)
 {
 NVIC_EnableIRQ((IRQn_Type)irq);
 }
-void z_arch_irq_disable(unsigned int irq)
+void arch_irq_disable(unsigned int irq)
 {
 NVIC_DisableIRQ((IRQn_Type)irq);
 }
-int z_arch_irq_is_enabled(unsigned int irq)
+int arch_irq_is_enabled(unsigned int irq)
 {
 return NVIC->ISER[REG_FROM_IRQ(irq)] & BIT(BIT_FROM_IRQ(irq));
 }
@@ -97,21 +97,21 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
 }
 #elif defined(CONFIG_CPU_CORTEX_R)
-void z_arch_irq_enable(unsigned int irq)
+void arch_irq_enable(unsigned int irq)
 {
 struct device *dev = _sw_isr_table[0].arg;
 irq_enable_next_level(dev, (irq >> 8) - 1);
 }
-void z_arch_irq_disable(unsigned int irq)
+void arch_irq_disable(unsigned int irq)
 {
 struct device *dev = _sw_isr_table[0].arg;
 irq_disable_next_level(dev, (irq >> 8) - 1);
 }
-int z_arch_irq_is_enabled(unsigned int irq)
+int arch_irq_is_enabled(unsigned int irq)
 {
 struct device *dev = _sw_isr_table[0].arg;
@@ -206,7 +206,7 @@ void _arch_isr_direct_pm(void)
 }
 #endif
-void z_arch_isr_direct_header(void)
+void arch_isr_direct_header(void)
 {
 sys_trace_isr_enter();
 }
@@ -268,9 +268,9 @@ int irq_target_state_is_secure(unsigned int irq)
 #endif /* CONFIG_ARM_SECURE_FIRMWARE */
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 void (*routine)(void *parameter), void *parameter,
 u32_t flags)
 {
 z_isr_install(irq, routine, parameter);
 z_arm_irq_priority_set(irq, priority, flags);

View file

@@ -20,7 +20,7 @@ void z_irq_do_offload(void)
 offload_routine(offload_param);
 }
-void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
+void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
 {
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && defined(CONFIG_ASSERT)
 /* ARMv6-M/ARMv8-M Baseline HardFault if you make a SVC call with

View file

@@ -15,7 +15,7 @@ extern const int _k_neg_eagain;
 /* The 'key' actually represents the BASEPRI register
 * prior to disabling interrupts via the BASEPRI mechanism.
 *
-* z_arch_swap() itself does not do much.
+* arch_swap() itself does not do much.
 *
 * It simply stores the intlock key (the BASEPRI value) parameter into
 * current->basepri, and then triggers a PendSV exception, which does
@@ -25,7 +25,7 @@ extern const int _k_neg_eagain;
 * z_arm_pendsv all come from handling an interrupt, which means we know the
 * interrupts were not locked: in that case the BASEPRI value is 0.
 *
-* Given that z_arch_swap() is called to effect a cooperative context switch,
+* Given that arch_swap() is called to effect a cooperative context switch,
 * only the caller-saved integer registers need to be saved in the thread of the
 * outgoing thread. This is all performed by the hardware, which stores it in
 * its exception stack frame, created when handling the z_arm_pendsv exception.
@@ -33,7 +33,7 @@ extern const int _k_neg_eagain;
 * On ARMv6-M, the intlock key is represented by the PRIMASK register,
 * as BASEPRI is not available.
 */
-int z_arch_swap(unsigned int key)
+int arch_swap(unsigned int key)
 {
 #ifdef CONFIG_EXECUTION_BENCHMARKING
 read_timer_start_of_swap();

View file

@@ -125,7 +125,7 @@ out_fp_endif:
 isb /* Make the effect of disabling interrupts be realized immediately */
 #elif defined(CONFIG_ARMV7_R)
 /*
-* Interrupts are still disabled from z_arch_swap so empty clause
+* Interrupts are still disabled from arch_swap so empty clause
 * here to avoid the preprocessor error below
 */
 #else

View file

@@ -32,10 +32,10 @@ extern u8_t *z_priv_stack_find(void *obj);
 * addresses, we have to unset it manually before storing it in the 'pc' field
 * of the ESF.
 */
-void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 size_t stackSize, k_thread_entry_t pEntry,
 void *parameter1, void *parameter2, void *parameter3,
 int priority, unsigned int options)
 {
 char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
 char *stackEnd;
@@ -112,7 +112,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #if defined(CONFIG_USERSPACE)
 if ((options & K_USER) != 0) {
-pInitCtx->basic.pc = (u32_t)z_arch_user_mode_enter;
+pInitCtx->basic.pc = (u32_t)arch_user_mode_enter;
 } else {
 pInitCtx->basic.pc = (u32_t)z_thread_entry;
 }
@@ -157,8 +157,8 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #ifdef CONFIG_USERSPACE
-FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
+FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 void *p1, void *p2, void *p3)
 {
 /* Set up privileged stack before entering user mode */
@@ -328,13 +328,13 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr, const u32_t psp)
 #endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
 #if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
-int z_arch_float_disable(struct k_thread *thread)
+int arch_float_disable(struct k_thread *thread)
 {
 if (thread != _current) {
 return -EINVAL;
 }
-if (z_arch_is_in_isr()) {
+if (arch_is_in_isr()) {
 return -EINVAL;
 }
@@ -345,26 +345,26 @@ int z_arch_float_disable(struct k_thread *thread)
 * fault to take an outdated thread user_options flag into
 * account.
 */
-int key = z_arch_irq_lock();
+int key = arch_irq_lock();
 thread->base.user_options &= ~K_FP_REGS;
 __set_CONTROL(__get_CONTROL() & (~CONTROL_FPCA_Msk));
 /* No need to add an ISB barrier after setting the CONTROL
-* register; z_arch_irq_unlock() already adds one.
+* register; arch_irq_unlock() already adds one.
 */
-z_arch_irq_unlock(key);
+arch_irq_unlock(key);
 return 0;
 }
 #endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */
-void z_arch_switch_to_main_thread(struct k_thread *main_thread,
+void arch_switch_to_main_thread(struct k_thread *main_thread,
 k_thread_stack_t *main_stack,
 size_t main_stack_size,
 k_thread_entry_t _main)
 {
 #if defined(CONFIG_FLOAT)
 /* Initialize the Floating Point Status and Control Register when in

View file

@@ -16,7 +16,7 @@ _ASM_FILE_PROLOGUE
 GTEXT(z_arm_userspace_enter)
 GTEXT(z_arm_do_syscall)
-GTEXT(z_arch_user_string_nlen)
+GTEXT(arch_user_string_nlen)
 GTEXT(z_arm_user_string_nlen_fault_start)
 GTEXT(z_arm_user_string_nlen_fault_end)
 GTEXT(z_arm_user_string_nlen_fixup)
@@ -497,9 +497,9 @@ dispatch_syscall:
 /*
-* size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
+* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
 */
-SECTION_FUNC(TEXT, z_arch_user_string_nlen)
+SECTION_FUNC(TEXT, arch_user_string_nlen)
 push {r0, r1, r2, r4, r5, lr}
 /* sp+4 is error value, init to -1 */

View file

@@ -43,7 +43,7 @@ extern volatile irq_offload_routine_t offload_routine;
 * The current executing vector is found in the IPSR register. All
 * IRQs and system exceptions are considered as interrupt context.
 */
-static ALWAYS_INLINE bool z_arch_is_in_isr(void)
+static ALWAYS_INLINE bool arch_is_in_isr(void)
 {
 return (__get_IPSR()) ? (true) : (false);
 }
@@ -68,8 +68,7 @@ static ALWAYS_INLINE bool z_arch_is_in_isr(void)
 * @return true if execution state was in handler mode, before
 * the current exception occurred, otherwise false.
 */
-static ALWAYS_INLINE bool z_arch_is_in_nested_exception(
-const z_arch_esf_t *esf)
+static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
 {
 return (esf->basic.xpsr & IPSR_ISR_Msk) ? (true) : (false);
 }

View file

@@ -33,7 +33,7 @@ extern volatile irq_offload_routine_t offload_routine;
 #endif
 /* Check the CPSR mode bits to see if we are in IRQ or FIQ mode */
-static ALWAYS_INLINE bool z_arch_is_in_isr(void)
+static ALWAYS_INLINE bool arch_is_in_isr(void)
 {
 unsigned int status;

View file

@@ -34,7 +34,7 @@ extern void z_arm_configure_static_mpu_regions(void);
 extern void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread);
 #endif /* CONFIG_ARM_MPU */
-static ALWAYS_INLINE void z_arch_kernel_init(void)
+static ALWAYS_INLINE void arch_kernel_init(void)
 {
 z_arm_interrupt_stack_setup();
 z_arm_exc_setup();
@@ -44,7 +44,7 @@ static ALWAYS_INLINE void z_arch_kernel_init(void)
 }
 static ALWAYS_INLINE void
-z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
+arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
 thread->arch.swap_return_value = value;
 }

View file

@@ -28,11 +28,11 @@ void z_isr_install(unsigned int irq, void (*routine)(void *), void *param)
 /* Some architectures don't/can't interpret flags or priority and have
 * no more processing to do than this. Provide a generic fallback.
 */
-int __weak z_arch_irq_connect_dynamic(unsigned int irq,
+int __weak arch_irq_connect_dynamic(unsigned int irq,
 unsigned int priority,
 void (*routine)(void *),
 void *parameter,
 u32_t flags)
 {
 ARG_UNUSED(flags);
 ARG_UNUSED(priority);
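A hedged usage sketch for this fallback; the IRQ number, priority, flags, and handler are made-up values for illustration, while arch_irq_connect_dynamic() and arch_irq_enable() are interfaces renamed by this commit:

    /* Hypothetical driver init: connect and enable an IRQ at runtime. */
    static void my_isr(void *arg)
    {
            ARG_UNUSED(arg);
            /* acknowledge and handle the device interrupt here */
    }

    void my_driver_init(void)
    {
            arch_irq_connect_dynamic(5, 1, my_isr, NULL, 0);
            arch_irq_enable(5);
    }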

View file

@@ -6,18 +6,18 @@
 #include <kernel.h>
 #include <kernel_internal.h>
-u64_t z_arch_timing_swap_start;
-u64_t z_arch_timing_swap_end;
-u64_t z_arch_timing_irq_start;
-u64_t z_arch_timing_irq_end;
-u64_t z_arch_timing_tick_start;
-u64_t z_arch_timing_tick_end;
-u64_t z_arch_timing_enter_user_mode_end;
+u64_t arch_timing_swap_start;
+u64_t arch_timing_swap_end;
+u64_t arch_timing_irq_start;
+u64_t arch_timing_irq_end;
+u64_t arch_timing_tick_start;
+u64_t arch_timing_tick_end;
+u64_t arch_timing_enter_user_mode_end;
 /* location of the time stamps*/
-u32_t z_arch_timing_value_swap_end;
-u64_t z_arch_timing_value_swap_common;
-u64_t z_arch_timing_value_swap_temp;
+u32_t arch_timing_value_swap_end;
+u64_t arch_timing_value_swap_common;
+u64_t arch_timing_value_swap_temp;
 #ifdef CONFIG_NRF_RTC_TIMER
 #include <nrfx.h>
@@ -79,18 +79,19 @@ u64_t z_arch_timing_value_swap_temp;
 void read_timer_start_of_swap(void)
 {
-if (z_arch_timing_value_swap_end == 1U) {
+if (arch_timing_value_swap_end == 1U) {
 TIMING_INFO_PRE_READ();
-z_arch_timing_swap_start = (u32_t) TIMING_INFO_OS_GET_TIME();
+arch_timing_swap_start = (u32_t) TIMING_INFO_OS_GET_TIME();
 }
 }
 void read_timer_end_of_swap(void)
 {
-if (z_arch_timing_value_swap_end == 1U) {
+if (arch_timing_value_swap_end == 1U) {
 TIMING_INFO_PRE_READ();
-z_arch_timing_value_swap_end = 2U;
-z_arch_timing_value_swap_common = (u64_t)TIMING_INFO_OS_GET_TIME();
+arch_timing_value_swap_end = 2U;
+arch_timing_value_swap_common =
+(u64_t)TIMING_INFO_OS_GET_TIME();
 }
 }
@@ -100,29 +101,29 @@ void read_timer_end_of_swap(void)
 void read_timer_start_of_isr(void)
 {
 TIMING_INFO_PRE_READ();
-z_arch_timing_irq_start = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+arch_timing_irq_start = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
 }
 void read_timer_end_of_isr(void)
 {
 TIMING_INFO_PRE_READ();
-z_arch_timing_irq_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+arch_timing_irq_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
 }
 void read_timer_start_of_tick_handler(void)
 {
 TIMING_INFO_PRE_READ();
-z_arch_timing_tick_start = (u32_t)TIMING_INFO_GET_TIMER_VALUE();
+arch_timing_tick_start = (u32_t)TIMING_INFO_GET_TIMER_VALUE();
 }
 void read_timer_end_of_tick_handler(void)
 {
 TIMING_INFO_PRE_READ();
-z_arch_timing_tick_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+arch_timing_tick_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
 }
 void read_timer_end_of_userspace_enter(void)
 {
 TIMING_INFO_PRE_READ();
-z_arch_timing_enter_user_mode_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+arch_timing_enter_user_mode_end = (u32_t)TIMING_INFO_GET_TIMER_VALUE();
 }

View file

@@ -7,7 +7,7 @@
 #include <kernel.h>
 #include <kernel_structs.h>
-void z_arch_cpu_idle(void)
+void arch_cpu_idle(void)
 {
 /* Do nothing but unconditionally unlock interrupts and return to the
 * caller. This CPU does not have any kind of power saving instruction.
@@ -15,7 +15,7 @@ void z_arch_cpu_idle(void)
 irq_unlock(NIOS2_STATUS_PIE_MSK);
 }
-void z_arch_cpu_atomic_idle(unsigned int key)
+void arch_cpu_atomic_idle(unsigned int key)
 {
 /* Do nothing but restore IRQ state. This CPU does not have any
 * kind of power saving instruction.

View file

@@ -13,7 +13,7 @@ GTEXT(_exception)
 /* import */
 GTEXT(_Fault)
-GTEXT(z_arch_swap)
+GTEXT(arch_swap)
 #ifdef CONFIG_IRQ_OFFLOAD
 GTEXT(z_irq_do_offload)
 GTEXT(_offload_routine)
@@ -127,7 +127,7 @@ on_irq_stack:
 /*
 * A context reschedule is required: keep the volatile registers of
 * the interrupted thread on the context's stack. Utilize
-* the existing z_arch_swap() primitive to save the remaining
+* the existing arch_swap() primitive to save the remaining
 * thread's registers (including floating point) and perform
 * a switch to the new thread.
 */
@@ -144,7 +144,7 @@ on_irq_stack:
 */
 mov r4, et
-call z_arch_swap
+call arch_swap
 jmpi _exception_exit
 #else
 jmpi no_reschedule

View file

@@ -132,7 +132,7 @@ FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
 }
 #ifdef ALT_CPU_HAS_DEBUG_STUB
-FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
+FUNC_NORETURN void arch_system_halt(unsigned int reason)
 {
 ARG_UNUSED(reason);

View file

@@ -31,7 +31,7 @@ FUNC_NORETURN void z_irq_spurious(void *unused)
 }
-void z_arch_irq_enable(unsigned int irq)
+void arch_irq_enable(unsigned int irq)
 {
 u32_t ienable;
 unsigned int key;
@@ -47,7 +47,7 @@ void z_arch_irq_enable(unsigned int irq)
-void z_arch_irq_disable(unsigned int irq)
+void arch_irq_disable(unsigned int irq)
 {
 u32_t ienable;
 unsigned int key;
@@ -109,9 +109,9 @@ void _enter_irq(u32_t ipending)
 }
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 void (*routine)(void *parameter), void *parameter,
 u32_t flags)
 {
 ARG_UNUSED(flags);
 ARG_UNUSED(priority);

View file

@@ -29,7 +29,7 @@ void z_irq_do_offload(void)
 tmp((void *)offload_param);
 }
-void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
+void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
 {
 unsigned int key;

View file

@@ -9,18 +9,18 @@
 #include <offsets_short.h>
 /* exports */
-GTEXT(z_arch_swap)
+GTEXT(arch_swap)
 GTEXT(z_thread_entry_wrapper)
 /* imports */
 GTEXT(sys_trace_thread_switched_in)
 GTEXT(_k_neg_eagain)
-/* unsigned int z_arch_swap(unsigned int key)
+/* unsigned int arch_swap(unsigned int key)
 *
 * Always called with interrupts locked
 */
-SECTION_FUNC(exception.other, z_arch_swap)
+SECTION_FUNC(exception.other, arch_swap)
 #ifdef CONFIG_EXECUTION_BENCHMARKING
 /* Get a reference to _kernel in r10 */
@@ -57,7 +57,7 @@ SECTION_FUNC(exception.other, z_arch_swap)
 ldw r11, _kernel_offset_to_current(r10)
 /* Store all the callee saved registers. We either got here via
-* an exception or from a cooperative invocation of z_arch_swap() from C
+* an exception or from a cooperative invocation of arch_swap() from C
 * domain, so all the caller-saved registers have already been
 * saved by the exception asm or the calling C code already.
 */
@@ -115,14 +115,14 @@ SECTION_FUNC(exception.other, z_arch_swap)
 ldw sp, _thread_offset_to_sp(r2)
 /* We need to irq_unlock(current->coopReg.key);
-* key was supplied as argument to z_arch_swap(). Fetch it.
+* key was supplied as argument to arch_swap(). Fetch it.
 */
 ldw r3, _thread_offset_to_key(r2)
 /*
 * Load return value into r2 (return value register). -EAGAIN unless
-* someone previously called z_arch_thread_return_value_set(). Do this before
-* we potentially unlock interrupts.
+* someone previously called arch_thread_return_value_set(). Do this
+* before we potentially unlock interrupts.
 */
 ldw r2, _thread_offset_to_retval(r2)
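The comment above describes the swap return-value contract. A hedged C-level sketch of that contract (the field names follow the POSIX arch_swap() shown later in this commit; waiting_thread is hypothetical):

    /* Outgoing side: prime the default result before switching away. */
    _kernel.current->callee_saved.retval = -EAGAIN;

    /* Waker side: overwrite the result so the thread resumes with 0. */
    arch_thread_return_value_set(waiting_thread, 0);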

View file

@@ -28,10 +28,10 @@ struct init_stack_frame {
 };
-void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 size_t stack_size, k_thread_entry_t thread_func,
 void *arg1, void *arg2, void *arg3,
 int priority, unsigned int options)
 {
 char *stack_memory = Z_THREAD_STACK_BUFFER(stack);
 Z_ASSERT_VALID_PRIO(priority, thread_func);

View file

@@ -28,14 +28,14 @@ extern "C" {
 #ifndef _ASMLANGUAGE
-static ALWAYS_INLINE void z_arch_kernel_init(void)
+static ALWAYS_INLINE void arch_kernel_init(void)
 {
 _kernel.irq_stack =
 Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
 }
 static ALWAYS_INLINE void
-z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
+arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
 thread->callee_saved.retval = value;
 }
@@ -43,7 +43,7 @@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
 const z_arch_esf_t *esf);
-static inline bool z_arch_is_in_isr(void)
+static inline bool arch_is_in_isr(void)
 {
 return _kernel.nested != 0U;
 }

View file

@@ -10,11 +10,11 @@
 * This module provides:
 *
 * An implementation of the architecture-specific
-* z_arch_cpu_idle() primitive required by the kernel idle loop component.
+* arch_cpu_idle() primitive required by the kernel idle loop component.
 * It can be called within an implementation of _sys_power_save_idle(),
 * which is provided for the kernel by the platform.
 *
-* An implementation of z_arch_cpu_atomic_idle(), which
+* An implementation of arch_cpu_atomic_idle(), which
 * atomically re-enables interrupts and enters low power mode.
 *
 * A weak stub for sys_arch_reboot(), which does nothing
@@ -24,14 +24,14 @@
 #include <arch/posix/posix_soc_if.h>
 #include <debug/tracing.h>
-void z_arch_cpu_idle(void)
+void arch_cpu_idle(void)
 {
 sys_trace_idle();
 posix_irq_full_unlock();
 posix_halt_cpu();
 }
-void z_arch_cpu_atomic_idle(unsigned int key)
+void arch_cpu_atomic_idle(unsigned int key)
 {
 sys_trace_idle();
 posix_atomic_halt_cpu(key);

View file

@@ -13,7 +13,7 @@
 #include <logging/log_ctrl.h>
 #include <arch/posix/posix_soc_if.h>
-FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
+FUNC_NORETURN void arch_system_halt(unsigned int reason)
 {
 ARG_UNUSED(reason);

View file

@@ -10,23 +10,23 @@
 #ifdef CONFIG_IRQ_OFFLOAD
 #include "irq_offload.h"
-void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
+void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
 {
 posix_irq_offload(routine, parameter);
 }
 #endif
-void z_arch_irq_enable(unsigned int irq)
+void arch_irq_enable(unsigned int irq)
 {
 posix_irq_enable(irq);
 }
-void z_arch_irq_disable(unsigned int irq)
+void arch_irq_disable(unsigned int irq)
 {
 posix_irq_disable(irq);
 }
-int z_arch_irq_is_enabled(unsigned int irq)
+int arch_irq_is_enabled(unsigned int irq)
 {
 return posix_irq_is_enabled(irq);
 }
@@ -45,9 +45,9 @@ int z_arch_irq_is_enabled(unsigned int irq)
 *
 * @return The vector assigned to this interrupt
 */
-int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 void (*routine)(void *parameter),
 void *parameter, u32_t flags)
 {
 posix_isr_declare(irq, (int)flags, routine, parameter);
 posix_irq_priority_set(irq, priority, flags);

View file

@@ -187,7 +187,7 @@ static void posix_preexit_cleanup(void)
 /**
 * Let the ready thread run and block this thread until it is allowed again
 *
-* called from z_arch_swap() which does the picking from the kernel structures
+* called from arch_swap() which does the picking from the kernel structures
 */
 void posix_swap(int next_allowed_thread_nbr, int this_th_nbr)
 {
@@ -207,7 +207,7 @@ void posix_swap(int next_allowed_thread_nbr, int this_th_nbr)
 /**
 * Let the ready thread (main) run, and exit this thread (init)
 *
-* Called from z_arch_switch_to_main_thread() which does the picking from the
+* Called from arch_switch_to_main_thread() which does the picking from the
 * kernel structures
 *
 * Note that we could have just done a swap(), but that would have left the
@@ -256,7 +256,7 @@ static void posix_cleanup_handler(void *arg)
 /**
 * Helper function to start a Zephyr thread as a POSIX thread:
-* It will block the thread until a z_arch_swap() is called for it
+* It will block the thread until a arch_swap() is called for it
 *
 * Spawned from posix_new_thread() below
 */
@@ -361,9 +361,9 @@ static int ttable_get_empty_slot(void)
 }
 /**
-* Called from z_arch_new_thread(),
+* Called from arch_new_thread(),
 * Create a new POSIX thread for the new Zephyr thread.
-* z_arch_new_thread() picks from the kernel structures what it is that we need
+* arch_new_thread() picks from the kernel structures what it is that we need
 * to call with what parameters
 */
 void posix_new_thread(posix_thread_status_t *ptr)

View file

@@ -9,7 +9,7 @@
 * @file
 * @brief Kernel swapper code for POSIX
 *
-* This module implements the z_arch_swap() routine for the POSIX architecture.
+* This module implements the arch_swap() routine for the POSIX architecture.
 *
 */
@@ -19,7 +19,7 @@
 #include "irq.h"
 #include "kswap.h"
-int z_arch_swap(unsigned int key)
+int arch_swap(unsigned int key)
 {
 /*
 * struct k_thread * _kernel.current is the currently runnig thread
@@ -34,7 +34,7 @@ int z_arch_swap(unsigned int key)
 _kernel.current->callee_saved.retval = -EAGAIN;
 /* retval may be modified with a call to
-* z_arch_thread_return_value_set()
+* arch_thread_return_value_set()
 */
 posix_thread_status_t *ready_thread_ptr =
@@ -67,15 +67,15 @@ int z_arch_swap(unsigned int key)
 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
-/* This is just a version of z_arch_swap() in which we do not save anything
+/* This is just a version of arch_swap() in which we do not save anything
 * about the current thread.
 *
 * Note that we will never come back to this thread: posix_main_thread_start()
 * does never return.
 */
-void z_arch_switch_to_main_thread(struct k_thread *main_thread,
+void arch_switch_to_main_thread(struct k_thread *main_thread,
 k_thread_stack_t *main_stack,
 size_t main_stack_size, k_thread_entry_t _main)
 {
 posix_thread_status_t *ready_thread_ptr =
 (posix_thread_status_t *)

View file

@ -24,10 +24,10 @@
/* Note that in this arch we cheat quite a bit: we use a normal pthreads /* Note that in this arch we cheat quite a bit: we use a normal pthreads
* stack as the thread stack, and therefore we ignore the stack size * stack as the thread stack, and therefore we ignore the stack size
*/ */
void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t thread_func, size_t stack_size, k_thread_entry_t thread_func,
void *arg1, void *arg2, void *arg3, void *arg1, void *arg2, void *arg3,
int priority, unsigned int options) int priority, unsigned int options)
{ {
char *stack_memory = Z_THREAD_STACK_BUFFER(stack); char *stack_memory = Z_THREAD_STACK_BUFFER(stack);

View file

@ -19,18 +19,18 @@ extern "C" {
#endif #endif
#if defined(CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN) #if defined(CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN)
void z_arch_switch_to_main_thread(struct k_thread *main_thread, void arch_switch_to_main_thread(struct k_thread *main_thread,
k_thread_stack_t *main_stack, k_thread_stack_t *main_stack,
size_t main_stack_size, k_thread_entry_t _main); size_t main_stack_size, k_thread_entry_t _main);
#endif #endif
static inline void z_arch_kernel_init(void) static inline void arch_kernel_init(void)
{ {
/* Nothing to be done */ /* Nothing to be done */
} }
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{ {
thread->callee_saved.retval = value; thread->callee_saved.retval = value;
} }
@ -39,7 +39,7 @@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
} }
#endif #endif
static inline bool z_arch_is_in_isr(void) static inline bool arch_is_in_isr(void)
{ {
return _kernel.nested != 0U; return _kernel.nested != 0U;
} }

View file

@ -9,20 +9,20 @@
/* /*
* In RISC-V there is no conventional way to handle CPU power saving. * In RISC-V there is no conventional way to handle CPU power saving.
* Each RISC-V SOC handles it in its own way. * Each RISC-V SOC handles it in its own way.
* Hence, by default, the z_arch_cpu_idle and z_arch_cpu_atomic_idle functions just * Hence, by default, the arch_cpu_idle and arch_cpu_atomic_idle functions just
* unlock interrupts and return to the caller, without issuing any CPU power * unlock interrupts and return to the caller, without issuing any CPU power
* saving instruction. * saving instruction.
* *
* Nonetheless, define the default z_arch_cpu_idle and z_arch_cpu_atomic_idle * Nonetheless, define the default arch_cpu_idle and arch_cpu_atomic_idle
* functions as weak functions, so that they can be replaced at the SOC level. * functions as weak functions, so that they can be replaced at the SOC level.
*/ */
void __weak z_arch_cpu_idle(void) void __weak arch_cpu_idle(void)
{ {
irq_unlock(SOC_MSTATUS_IEN); irq_unlock(SOC_MSTATUS_IEN);
} }
void __weak z_arch_cpu_atomic_idle(unsigned int key) void __weak arch_cpu_atomic_idle(unsigned int key)
{ {
irq_unlock(key); irq_unlock(key);
} }

View file

@ -30,9 +30,9 @@ FUNC_NORETURN void z_irq_spurious(void *unused)
} }
#ifdef CONFIG_DYNAMIC_INTERRUPTS #ifdef CONFIG_DYNAMIC_INTERRUPTS
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter), void *parameter, void (*routine)(void *parameter), void *parameter,
u32_t flags) u32_t flags)
{ {
ARG_UNUSED(flags); ARG_UNUSED(flags);

View file

@ -31,7 +31,7 @@ void z_irq_do_offload(void)
tmp((void *)offload_param); tmp((void *)offload_param);
} }
void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
{ {
unsigned int key; unsigned int key;

View file

@ -10,18 +10,18 @@
#include <arch/cpu.h> #include <arch/cpu.h>
/* exports */ /* exports */
GTEXT(z_arch_swap) GTEXT(arch_swap)
GTEXT(z_thread_entry_wrapper) GTEXT(z_thread_entry_wrapper)
/* Use ABI name of registers for the sake of simplicity */ /* Use ABI name of registers for the sake of simplicity */
/* /*
* unsigned int z_arch_swap(unsigned int key) * unsigned int arch_swap(unsigned int key)
* *
* Always called with interrupts locked * Always called with interrupts locked
* key is stored in a0 register * key is stored in a0 register
*/ */
SECTION_FUNC(exception.other, z_arch_swap) SECTION_FUNC(exception.other, arch_swap)
/* Make a system call to perform context switch */ /* Make a system call to perform context switch */
#ifdef CONFIG_EXECUTION_BENCHMARKING #ifdef CONFIG_EXECUTION_BENCHMARKING
@ -77,16 +77,16 @@ SECTION_FUNC(exception.other, z_arch_swap)
* Restored register a0 contains IRQ lock state of thread. * Restored register a0 contains IRQ lock state of thread.
* *
* Prior to unlocking irq, load return value of * Prior to unlocking irq, load return value of
* z_arch_swap to temp register t2 (from * arch_swap to temp register t2 (from
* _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN, * _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN,
* unless someone has previously called z_arch_thread_return_value_set(..). * unless someone has previously called arch_thread_return_value_set(..).
*/ */
la t0, _kernel la t0, _kernel
/* Get pointer to _kernel.current */ /* Get pointer to _kernel.current */
RV_OP_LOADREG t1, _kernel_offset_to_current(t0) RV_OP_LOADREG t1, _kernel_offset_to_current(t0)
/* Load return value of z_arch_swap function in temp register t2 */ /* Load return value of arch_swap function in temp register t2 */
lw t2, _thread_offset_to_swap_return_value(t1) lw t2, _thread_offset_to_swap_return_value(t1)
/* /*
@ -109,7 +109,7 @@ SECTION_FUNC(exception.other, z_arch_swap)
SECTION_FUNC(TEXT, z_thread_entry_wrapper) SECTION_FUNC(TEXT, z_thread_entry_wrapper)
/* /*
* z_thread_entry_wrapper is called for every new thread upon the return * z_thread_entry_wrapper is called for every new thread upon the return
* of z_arch_swap or ISR. Its address, as well as its input function * of arch_swap or ISR. Its address, as well as its input function
* arguments thread_entry_t, void *, void *, void * are restored from * arguments thread_entry_t, void *, void *, void * are restored from
* the thread stack (initialized via function _thread). * the thread stack (initialized via function _thread).
* In this case, thread_entry_t, * void *, void * and void * are stored * In this case, thread_entry_t, * void *, void * and void * are stored

View file

@ -12,10 +12,10 @@ void z_thread_entry_wrapper(k_thread_entry_t thread,
void *arg2, void *arg2,
void *arg3); void *arg3);
void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t thread_func, size_t stack_size, k_thread_entry_t thread_func,
void *arg1, void *arg2, void *arg3, void *arg1, void *arg2, void *arg3,
int priority, unsigned int options) int priority, unsigned int options)
{ {
char *stack_memory = Z_THREAD_STACK_BUFFER(stack); char *stack_memory = Z_THREAD_STACK_BUFFER(stack);
Z_ASSERT_VALID_PRIO(priority, thread_func); Z_ASSERT_VALID_PRIO(priority, thread_func);

View file

@ -22,14 +22,14 @@ extern "C" {
#endif #endif
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
static ALWAYS_INLINE void z_arch_kernel_init(void) static ALWAYS_INLINE void arch_kernel_init(void)
{ {
_kernel.irq_stack = _kernel.irq_stack =
Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE; Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
} }
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{ {
thread->arch.swap_return_value = value; thread->arch.swap_return_value = value;
} }
@ -37,7 +37,7 @@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason, FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
const z_arch_esf_t *esf); const z_arch_esf_t *esf);
static inline bool z_arch_is_in_isr(void) static inline bool arch_is_in_isr(void)
{ {
return _kernel.nested != 0U; return _kernel.nested != 0U;
} }

View file

@ -162,7 +162,7 @@ config X86_VERY_EARLY_CONSOLE
Non-emulated X86 devices often require special hardware to attach Non-emulated X86 devices often require special hardware to attach
a debugger, which may not be easily available. This option adds a a debugger, which may not be easily available. This option adds a
very minimal serial driver which gets initialized at the very very minimal serial driver which gets initialized at the very
beginning of z_cstart(), via z_arch_kernel_init(). This driver enables beginning of z_cstart(), via arch_kernel_init(). This driver enables
printk to emit messages to the 16550 UART port 0 instance in device printk to emit messages to the 16550 UART port 0 instance in device
tree. This mini-driver assumes I/O to the UART is done via ports. tree. This mini-driver assumes I/O to the UART is done via ports.

View file

@ -7,7 +7,7 @@
#include <debug/tracing.h> #include <debug/tracing.h>
#include <arch/cpu.h> #include <arch/cpu.h>
void z_arch_cpu_idle(void) void arch_cpu_idle(void)
{ {
sys_trace_idle(); sys_trace_idle();
__asm__ volatile ( __asm__ volatile (
@ -15,7 +15,7 @@ void z_arch_cpu_idle(void)
"hlt\n\t"); "hlt\n\t");
} }
void z_arch_cpu_atomic_idle(unsigned int key) void arch_cpu_atomic_idle(unsigned int key)
{ {
sys_trace_idle(); sys_trace_idle();
@ -30,7 +30,7 @@ void z_arch_cpu_atomic_idle(unsigned int key)
* external, maskable interrupts after the next instruction is * external, maskable interrupts after the next instruction is
* executed." * executed."
* *
* Thus the IA-32 implementation of z_arch_cpu_atomic_idle() will * Thus the IA-32 implementation of arch_cpu_atomic_idle() will
* atomically re-enable interrupts and enter a low-power mode. * atomically re-enable interrupts and enter a low-power mode.
*/ */
"hlt\n\t"); "hlt\n\t");

View file

@ -40,9 +40,9 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, u16_t cs)
{ {
uintptr_t start, end; uintptr_t start, end;
if (z_arch_is_in_isr()) { if (arch_is_in_isr()) {
/* We were servicing an interrupt */ /* We were servicing an interrupt */
start = (uintptr_t)Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack); start = (uintptr_t)ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
end = start + CONFIG_ISR_STACK_SIZE; end = start + CONFIG_ISR_STACK_SIZE;
} else if ((cs & 0x3U) != 0U || } else if ((cs & 0x3U) != 0U ||
(_current->base.user_options & K_USER) == 0) { (_current->base.user_options & K_USER) == 0) {

View file

@ -23,7 +23,7 @@ __weak void z_debug_fatal_hook(const z_arch_esf_t *esf) { ARG_UNUSED(esf); }
#ifdef CONFIG_BOARD_QEMU_X86 #ifdef CONFIG_BOARD_QEMU_X86
FUNC_NORETURN void z_arch_system_halt(unsigned int reason) FUNC_NORETURN void arch_system_halt(unsigned int reason)
{ {
ARG_UNUSED(reason); ARG_UNUSED(reason);
@ -46,7 +46,7 @@ void z_x86_spurious_irq(const z_arch_esf_t *esf)
z_x86_fatal_error(K_ERR_SPURIOUS_IRQ, esf); z_x86_fatal_error(K_ERR_SPURIOUS_IRQ, esf);
} }
void z_arch_syscall_oops(void *ssf_ptr) void arch_syscall_oops(void *ssf_ptr)
{ {
struct _x86_syscall_stack_frame *ssf = struct _x86_syscall_stack_frame *ssf =
(struct _x86_syscall_stack_frame *)ssf_ptr; (struct _x86_syscall_stack_frame *)ssf_ptr;
@ -229,7 +229,7 @@ static FUNC_NORETURN __used void df_handler_top(void)
_df_esf.eflags = _main_tss.eflags; _df_esf.eflags = _main_tss.eflags;
/* Restore the main IA task to a runnable state */ /* Restore the main IA task to a runnable state */
_main_tss.esp = (u32_t)(Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack) + _main_tss.esp = (u32_t)(ARCH_THREAD_STACK_BUFFER(_interrupt_stack) +
CONFIG_ISR_STACK_SIZE); CONFIG_ISR_STACK_SIZE);
_main_tss.cs = CODE_SEG; _main_tss.cs = CODE_SEG;
_main_tss.ds = DATA_SEG; _main_tss.ds = DATA_SEG;

View file

@ -29,7 +29,7 @@
/* externs */ /* externs */
GTEXT(z_arch_swap) GTEXT(arch_swap)
#ifdef CONFIG_SYS_POWER_MANAGEMENT #ifdef CONFIG_SYS_POWER_MANAGEMENT
GTEXT(z_sys_power_save_idle_exit) GTEXT(z_sys_power_save_idle_exit)
@ -83,8 +83,8 @@ SECTION_FUNC(TEXT, _interrupt_enter)
pushl %eax pushl %eax
pushl %edx pushl %edx
rdtsc rdtsc
mov %eax, z_arch_timing_irq_start mov %eax, arch_timing_irq_start
mov %edx, z_arch_timing_irq_start+4 mov %edx, arch_timing_irq_start+4
pop %edx pop %edx
pop %eax pop %eax
#endif #endif
@ -125,7 +125,7 @@ SECTION_FUNC(TEXT, _interrupt_enter)
/* Push EDI as we will use it for scratch space. /* Push EDI as we will use it for scratch space.
* Rest of the callee-saved regs get saved by invocation of C * Rest of the callee-saved regs get saved by invocation of C
* functions (isr handler, z_arch_swap(), etc) * functions (isr handler, arch_swap(), etc)
*/ */
pushl %edi pushl %edi
@ -186,8 +186,8 @@ alreadyOnIntStack:
pushl %eax pushl %eax
pushl %edx pushl %edx
rdtsc rdtsc
mov %eax,z_arch_timing_irq_end mov %eax,arch_timing_irq_end
mov %edx,z_arch_timing_irq_end+4 mov %edx,arch_timing_irq_end+4
pop %edx pop %edx
pop %eax pop %eax
#endif #endif
@ -227,7 +227,7 @@ alreadyOnIntStack:
/* /*
* Set X86_THREAD_FLAG_INT bit in k_thread to allow the upcoming call * Set X86_THREAD_FLAG_INT bit in k_thread to allow the upcoming call
* to z_arch_swap() to determine whether non-floating registers need to be * to arch_swap() to determine whether non-floating registers need to be
* preserved using the lazy save/restore algorithm, or to indicate to * preserved using the lazy save/restore algorithm, or to indicate to
* debug tools that a preemptive context switch has occurred. * debug tools that a preemptive context switch has occurred.
*/ */
@ -239,7 +239,7 @@ alreadyOnIntStack:
/* /*
* A context reschedule is required: keep the volatile registers of * A context reschedule is required: keep the volatile registers of
* the interrupted thread on the context's stack. Utilize * the interrupted thread on the context's stack. Utilize
* the existing z_arch_swap() primitive to save the remaining * the existing arch_swap() primitive to save the remaining
* thread's registers (including floating point) and perform * thread's registers (including floating point) and perform
* a switch to the new thread. * a switch to the new thread.
*/ */
@ -250,12 +250,12 @@ alreadyOnIntStack:
call z_check_stack_sentinel call z_check_stack_sentinel
#endif #endif
pushfl /* push KERNEL_LOCK_KEY argument */ pushfl /* push KERNEL_LOCK_KEY argument */
call z_arch_swap call arch_swap
addl $4, %esp /* pop KERNEL_LOCK_KEY argument */ addl $4, %esp /* pop KERNEL_LOCK_KEY argument */
/* /*
* The interrupted thread has now been scheduled, * The interrupted thread has now been scheduled,
* as the result of a _later_ invocation of z_arch_swap(). * as the result of a _later_ invocation of arch_swap().
* *
* Now need to restore the interrupted thread's environment before * Now need to restore the interrupted thread's environment before
* returning control to it at the point where it was interrupted ... * returning control to it at the point where it was interrupted ...
@ -263,7 +263,7 @@ alreadyOnIntStack:
#if defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_LAZY_FP_SHARING)
/* /*
* z_arch_swap() has restored the floating point registers, if needed. * arch_swap() has restored the floating point registers, if needed.
* Clear X86_THREAD_FLAG_INT in the interrupted thread's state * Clear X86_THREAD_FLAG_INT in the interrupted thread's state
* since it has served its purpose. * since it has served its purpose.
*/ */

View file

@ -48,7 +48,7 @@ void *__attribute__((section(".spurNoErrIsr")))
*/ */
#ifdef CONFIG_SYS_POWER_MANAGEMENT #ifdef CONFIG_SYS_POWER_MANAGEMENT
void z_arch_irq_direct_pm(void) void arch_irq_direct_pm(void)
{ {
if (_kernel.idle) { if (_kernel.idle) {
s32_t idle_val = _kernel.idle; s32_t idle_val = _kernel.idle;
@ -59,17 +59,17 @@ void z_arch_irq_direct_pm(void)
} }
#endif #endif
void z_arch_isr_direct_header(void) void arch_isr_direct_header(void)
{ {
sys_trace_isr_enter(); sys_trace_isr_enter();
/* We're not going to unlock IRQs, but we still need to increment this /* We're not going to unlock IRQs, but we still need to increment this
* so that z_arch_is_in_isr() works * so that arch_is_in_isr() works
*/ */
++_kernel.nested; ++_kernel.nested;
} }
void z_arch_isr_direct_footer(int swap) void arch_isr_direct_footer(int swap)
{ {
z_irq_controller_eoi(); z_irq_controller_eoi();
sys_trace_isr_exit(); sys_trace_isr_exit();
@ -250,7 +250,7 @@ static void idt_vector_install(int vector, void *irq_handler)
irq_unlock(key); irq_unlock(key);
} }
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter), void *parameter, void (*routine)(void *parameter), void *parameter,
u32_t flags) u32_t flags)
{ {

View file

@ -25,7 +25,7 @@ void z_irq_do_offload(void)
offload_routine(offload_param); offload_routine(offload_param);
} }
void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
{ {
unsigned int key; unsigned int key;

View file

@ -8,7 +8,7 @@
* @file * @file
* @brief Kernel swapper code for IA-32 * @brief Kernel swapper code for IA-32
* *
* This module implements the z_arch_swap() routine for the IA-32 architecture. * This module implements the arch_swap() routine for the IA-32 architecture.
*/ */
#include <arch/x86/ia32/asm.h> #include <arch/x86/ia32/asm.h>
@ -19,7 +19,7 @@
/* exports (internal APIs) */ /* exports (internal APIs) */
GTEXT(z_arch_swap) GTEXT(arch_swap)
GTEXT(z_x86_thread_entry_wrapper) GTEXT(z_x86_thread_entry_wrapper)
GTEXT(_x86_user_thread_entry_wrapper) GTEXT(_x86_user_thread_entry_wrapper)
@ -30,7 +30,7 @@
GDATA(_k_neg_eagain) GDATA(_k_neg_eagain)
/* /*
* Given that z_arch_swap() is called to effect a cooperative context switch, * Given that arch_swap() is called to effect a cooperative context switch,
* only the non-volatile integer registers need to be saved in the TCS of the * only the non-volatile integer registers need to be saved in the TCS of the
* outgoing thread. The restoration of the integer registers of the incoming * outgoing thread. The restoration of the integer registers of the incoming
* thread depends on whether that thread was preemptively context switched out. * thread depends on whether that thread was preemptively context switched out.
@ -62,7 +62,7 @@
* *
* C function prototype: * C function prototype:
* *
* unsigned int z_arch_swap (unsigned int eflags); * unsigned int arch_swap (unsigned int eflags);
*/ */
.macro read_tsc var_name .macro read_tsc var_name
@ -74,7 +74,7 @@
pop %edx pop %edx
pop %eax pop %eax
.endm .endm
SECTION_FUNC(TEXT, z_arch_swap) SECTION_FUNC(TEXT, arch_swap)
#ifdef CONFIG_EXECUTION_BENCHMARKING #ifdef CONFIG_EXECUTION_BENCHMARKING
/* Save the eax and edx registers before reading the time stamp /* Save the eax and edx registers before reading the time stamp
* once done pop the values. * once done pop the values.
@ -82,8 +82,8 @@ SECTION_FUNC(TEXT, z_arch_swap)
push %eax push %eax
push %edx push %edx
rdtsc rdtsc
mov %eax,z_arch_timing_swap_start mov %eax,arch_timing_swap_start
mov %edx,z_arch_timing_swap_start+4 mov %edx,arch_timing_swap_start+4
pop %edx pop %edx
pop %eax pop %eax
#endif #endif
@ -106,7 +106,7 @@ SECTION_FUNC(TEXT, z_arch_swap)
* Carve space for the return value. Setting it to a default of * Carve space for the return value. Setting it to a default of
* -EAGAIN eliminates the need for the timeout code to set it. * -EAGAIN eliminates the need for the timeout code to set it.
* If another value is ever needed, it can be modified with * If another value is ever needed, it can be modified with
* z_arch_thread_return_value_set(). * arch_thread_return_value_set().
*/ */
pushl _k_neg_eagain pushl _k_neg_eagain
@ -331,7 +331,7 @@ CROHandlingDone:
movl _thread_offset_to_esp(%eax), %esp movl _thread_offset_to_esp(%eax), %esp
/* load return value from a possible z_arch_thread_return_value_set() */ /* load return value from a possible arch_thread_return_value_set() */
popl %eax popl %eax
@ -345,23 +345,23 @@ CROHandlingDone:
/* /*
* %eax may contain one of these values: * %eax may contain one of these values:
* *
* - the return value for z_arch_swap() that was set up by a call to * - the return value for arch_swap() that was set up by a call to
* z_arch_thread_return_value_set() * arch_thread_return_value_set()
* - -EINVAL * - -EINVAL
*/ */
/* Utilize the 'eflags' parameter to z_arch_swap() */ /* Utilize the 'eflags' parameter to arch_swap() */
pushl 4(%esp) pushl 4(%esp)
popfl popfl
#ifdef CONFIG_EXECUTION_BENCHMARKING #ifdef CONFIG_EXECUTION_BENCHMARKING
cmp $0x1,z_arch_timing_value_swap_end cmp $0x1,arch_timing_value_swap_end
jne time_read_not_needed jne time_read_not_needed
movw $0x2,z_arch_timing_value_swap_end movw $0x2,arch_timing_value_swap_end
read_tsc z_arch_timing_value_swap_common read_tsc arch_timing_value_swap_common
pushl z_arch_timing_swap_start pushl arch_timing_swap_start
popl z_arch_timing_value_swap_temp popl arch_timing_value_swap_temp
time_read_not_needed: time_read_not_needed:
#endif #endif
ret ret
@ -371,7 +371,7 @@ time_read_not_needed:
* *
* @brief Adjust stack/parameters before invoking thread entry function * @brief Adjust stack/parameters before invoking thread entry function
* *
* This function adjusts the initial stack frame created by z_arch_new_thread() * This function adjusts the initial stack frame created by arch_new_thread()
* such that the GDB stack frame unwinders recognize it as the outermost frame * such that the GDB stack frame unwinders recognize it as the outermost frame
* in the thread's stack. * in the thread's stack.
* *
@ -380,7 +380,7 @@ time_read_not_needed:
* a main() function, and there does not appear to be a simple way of stopping * a main() function, and there does not appear to be a simple way of stopping
* the unwinding of the stack. * the unwinding of the stack.
* *
* Given the initial thread created by z_arch_new_thread(), GDB expects to find * Given the initial thread created by arch_new_thread(), GDB expects to find
* a return address on the stack immediately above the thread entry routine * a return address on the stack immediately above the thread entry routine
* z_thread_entry, in the location occupied by the initial EFLAGS. GDB * z_thread_entry, in the location occupied by the initial EFLAGS. GDB
* attempts to examine the memory at this return address, which typically * attempts to examine the memory at this return address, which typically

View file

@ -109,8 +109,8 @@ static FUNC_NORETURN void drop_to_user(k_thread_entry_t user_entry,
CODE_UNREACHABLE; CODE_UNREACHABLE;
} }
FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry, FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3) void *p1, void *p2, void *p3)
{ {
struct z_x86_thread_stack_header *header = struct z_x86_thread_stack_header *header =
(struct z_x86_thread_stack_header *)_current->stack_obj; (struct z_x86_thread_stack_header *)_current->stack_obj;
@ -161,7 +161,7 @@ NANO_CPU_INT_REGISTER(z_x86_syscall_entry_stub, -1, -1, 0x80, 3);
extern int z_float_disable(struct k_thread *thread); extern int z_float_disable(struct k_thread *thread);
int z_arch_float_disable(struct k_thread *thread) int arch_float_disable(struct k_thread *thread)
{ {
#if defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_LAZY_FP_SHARING)
return z_float_disable(thread); return z_float_disable(thread);
@ -171,10 +171,10 @@ int z_arch_float_disable(struct k_thread *thread)
} }
#endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */ #endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */
void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry, size_t stack_size, k_thread_entry_t entry,
void *parameter1, void *parameter2, void *parameter3, void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned int options) int priority, unsigned int options)
{ {
char *stack_buf; char *stack_buf;
char *stack_high; char *stack_high;

View file

@ -12,7 +12,7 @@
/* Exports */ /* Exports */
GTEXT(z_x86_syscall_entry_stub) GTEXT(z_x86_syscall_entry_stub)
GTEXT(z_x86_userspace_enter) GTEXT(z_x86_userspace_enter)
GTEXT(z_arch_user_string_nlen) GTEXT(arch_user_string_nlen)
GTEXT(z_x86_user_string_nlen_fault_start) GTEXT(z_x86_user_string_nlen_fault_start)
GTEXT(z_x86_user_string_nlen_fault_end) GTEXT(z_x86_user_string_nlen_fault_end)
GTEXT(z_x86_user_string_nlen_fixup) GTEXT(z_x86_user_string_nlen_fixup)
@ -254,9 +254,9 @@ _bad_syscall:
/* /*
* size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg) * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
*/ */
SECTION_FUNC(TEXT, z_arch_user_string_nlen) SECTION_FUNC(TEXT, arch_user_string_nlen)
push %ebp push %ebp
mov %esp, %ebp mov %esp, %ebp
@ -393,8 +393,8 @@ SECTION_FUNC(TEXT, z_x86_userspace_enter)
push %eax push %eax
push %edx push %edx
rdtsc rdtsc
mov %eax,z_arch_timing_enter_user_mode_end mov %eax,arch_timing_enter_user_mode_end
mov %edx,z_arch_timing_enter_user_mode_end+4 mov %edx,arch_timing_enter_user_mode_end+4
pop %edx pop %edx
pop %eax pop %eax
#endif #endif

View file

@ -97,7 +97,7 @@ struct x86_cpuboot x86_cpuboot[] = {
* will enter the kernel at fn(---, arg), running on the specified stack. * will enter the kernel at fn(---, arg), running on the specified stack.
*/ */
void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz, void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
void (*fn)(int key, void *data), void *arg) void (*fn)(int key, void *data), void *arg)
{ {
u8_t vector = ((unsigned long) x86_ap_start) >> 12; u8_t vector = ((unsigned long) x86_ap_start) >> 12;

View file

@ -66,7 +66,7 @@ static int allocate_vector(unsigned int priority)
* allocated. Whether it should simply __ASSERT instead is up for debate. * allocated. Whether it should simply __ASSERT instead is up for debate.
*/ */
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*func)(void *arg), void *arg, u32_t flags) void (*func)(void *arg), void *arg, u32_t flags)
{ {
u32_t key; u32_t key;
@ -91,7 +91,7 @@ int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
#ifdef CONFIG_IRQ_OFFLOAD #ifdef CONFIG_IRQ_OFFLOAD
#include <irq_offload.h> #include <irq_offload.h>
void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
{ {
x86_irq_funcs[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = routine; x86_irq_funcs[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = routine;
x86_irq_args[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = parameter; x86_irq_args[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = parameter;
@ -119,7 +119,7 @@ void z_x86_ipi_setup(void)
* it is not clear exactly how/where/why to abstract this, as it * it is not clear exactly how/where/why to abstract this, as it
* assumes the use of a local APIC (but there's no other mechanism). * assumes the use of a local APIC (but there's no other mechanism).
*/ */
void z_arch_sched_ipi(void) void arch_sched_ipi(void)
{ {
z_loapic_ipi(0, LOAPIC_ICR_IPI_OTHERS, CONFIG_SCHED_IPI_VECTOR); z_loapic_ipi(0, LOAPIC_ICR_IPI_OTHERS, CONFIG_SCHED_IPI_VECTOR);
} }

View file

@ -10,10 +10,10 @@
extern void x86_sse_init(struct k_thread *); /* in locore.S */ extern void x86_sse_init(struct k_thread *); /* in locore.S */
void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry, size_t stack_size, k_thread_entry_t entry,
void *parameter1, void *parameter2, void *parameter3, void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned int options) int priority, unsigned int options)
{ {
#if defined(CONFIG_X86_USERSPACE) || defined(CONFIG_X86_STACK_PROTECTION) #if defined(CONFIG_X86_USERSPACE) || defined(CONFIG_X86_STACK_PROTECTION)
struct z_x86_thread_stack_header *header = struct z_x86_thread_stack_header *header =

View file

@ -746,7 +746,7 @@ static void add_mmu_region(struct x86_page_tables *ptables,
} }
} }
/* Called from x86's z_arch_kernel_init() */ /* Called from x86's arch_kernel_init() */
void z_x86_paging_init(void) void z_x86_paging_init(void)
{ {
size_t pages_free; size_t pages_free;
@ -777,7 +777,7 @@ void z_x86_paging_init(void)
} }
#ifdef CONFIG_X86_USERSPACE #ifdef CONFIG_X86_USERSPACE
int z_arch_buffer_validate(void *addr, size_t size, int write) int arch_buffer_validate(void *addr, size_t size, int write)
{ {
return z_x86_mmu_validate(z_x86_thread_page_tables_get(_current), addr, return z_x86_mmu_validate(z_x86_thread_page_tables_get(_current), addr,
size, write != 0); size, write != 0);
@ -1003,8 +1003,8 @@ void z_x86_thread_pt_init(struct k_thread *thread)
* mode the per-thread page tables will be generated and the memory domain * mode the per-thread page tables will be generated and the memory domain
* configuration applied. * configuration applied.
*/ */
void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain, void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id) u32_t partition_id)
{ {
sys_dnode_t *node, *next_node; sys_dnode_t *node, *next_node;
@ -1024,7 +1024,7 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
} }
} }
void z_arch_mem_domain_destroy(struct k_mem_domain *domain) void arch_mem_domain_destroy(struct k_mem_domain *domain)
{ {
for (int i = 0, pcount = 0; pcount < domain->num_partitions; i++) { for (int i = 0, pcount = 0; pcount < domain->num_partitions; i++) {
struct k_mem_partition *partition; struct k_mem_partition *partition;
@ -1035,11 +1035,11 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
} }
pcount++; pcount++;
z_arch_mem_domain_partition_remove(domain, i); arch_mem_domain_partition_remove(domain, i);
} }
} }
void z_arch_mem_domain_thread_remove(struct k_thread *thread) void arch_mem_domain_thread_remove(struct k_thread *thread)
{ {
struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;
@ -1062,8 +1062,8 @@ void z_arch_mem_domain_thread_remove(struct k_thread *thread)
} }
} }
void z_arch_mem_domain_partition_add(struct k_mem_domain *domain, void arch_mem_domain_partition_add(struct k_mem_domain *domain,
u32_t partition_id) u32_t partition_id)
{ {
sys_dnode_t *node, *next_node; sys_dnode_t *node, *next_node;
@ -1080,7 +1080,7 @@ void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
} }
} }
void z_arch_mem_domain_thread_add(struct k_thread *thread) void arch_mem_domain_thread_add(struct k_thread *thread)
{ {
if ((thread->base.user_options & K_USER) == 0) { if ((thread->base.user_options & K_USER) == 0) {
return; return;
@ -1090,7 +1090,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread)
thread->mem_domain_info.mem_domain); thread->mem_domain_info.mem_domain);
} }
int z_arch_mem_domain_max_partitions_get(void) int arch_mem_domain_max_partitions_get(void)
{ {
return CONFIG_MAX_DOMAIN_PARTITIONS; return CONFIG_MAX_DOMAIN_PARTITIONS;
} }

View file

@ -18,20 +18,20 @@
extern "C" { extern "C" {
#endif #endif
static inline void z_arch_kernel_init(void) static inline void arch_kernel_init(void)
{ {
/* No-op on this arch */ /* No-op on this arch */
} }
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value) arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{ {
/* write into 'eax' slot created in z_swap() entry */ /* write into 'eax' slot created in z_swap() entry */
*(unsigned int *)(thread->callee_saved.esp) = value; *(unsigned int *)(thread->callee_saved.esp) = value;
} }
extern void z_arch_cpu_atomic_idle(unsigned int key); extern void arch_cpu_atomic_idle(unsigned int key);
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry, extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,

View file

@ -12,7 +12,7 @@
extern void z_x86_switch(void *switch_to, void **switched_from); extern void z_x86_switch(void *switch_to, void **switched_from);
static inline void z_arch_switch(void *switch_to, void **switched_from) static inline void arch_switch(void *switch_to, void **switched_from)
{ {
z_x86_switch(switch_to, switched_from); z_x86_switch(switch_to, switched_from);
} }
@ -25,7 +25,7 @@ static inline void z_arch_switch(void *switch_to, void **switched_from)
extern void z_x86_ipi_setup(void); extern void z_x86_ipi_setup(void);
static inline void z_arch_kernel_init(void) static inline void arch_kernel_init(void)
{ {
/* nothing */; /* nothing */;
} }

View file

@ -15,10 +15,10 @@
#endif #endif
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
static inline bool z_arch_is_in_isr(void) static inline bool arch_is_in_isr(void)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
return z_arch_curr_cpu()->nested != 0; return arch_curr_cpu()->nested != 0;
#else #else
return _kernel.nested != 0U; return _kernel.nested != 0U;
#endif #endif

View file

@ -5,12 +5,12 @@
#include <debug/tracing.h> #include <debug/tracing.h>
void z_arch_cpu_idle(void) void arch_cpu_idle(void)
{ {
sys_trace_idle(); sys_trace_idle();
__asm__ volatile ("waiti 0"); __asm__ volatile ("waiti 0");
} }
void z_arch_cpu_atomic_idle(unsigned int key) void arch_cpu_atomic_idle(unsigned int key)
{ {
sys_trace_idle(); sys_trace_idle();
__asm__ volatile ("waiti 0\n\t" __asm__ volatile ("waiti 0\n\t"

View file

@ -23,11 +23,11 @@ void z_irq_do_offload(void *unused)
offload_routine(offload_param); offload_routine(offload_param);
} }
void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter) void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
{ {
IRQ_CONNECT(CONFIG_IRQ_OFFLOAD_INTNUM, XCHAL_EXCM_LEVEL, IRQ_CONNECT(CONFIG_IRQ_OFFLOAD_INTNUM, XCHAL_EXCM_LEVEL,
z_irq_do_offload, NULL, 0); z_irq_do_offload, NULL, 0);
z_arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM); arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM);
offload_routine = routine; offload_routine = routine;
offload_param = parameter; offload_param = parameter;
z_xt_set_intset(BIT(CONFIG_IRQ_OFFLOAD_INTNUM)); z_xt_set_intset(BIT(CONFIG_IRQ_OFFLOAD_INTNUM));
@ -35,5 +35,5 @@ void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
* Enable the software interrupt, in case it is disabled, so that IRQ * Enable the software interrupt, in case it is disabled, so that IRQ
* offload is serviced. * offload is serviced.
*/ */
z_arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM); arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM);
} }

View file

@ -56,10 +56,10 @@ void *xtensa_init_stack(int *stack_top,
return &bsa[-9]; return &bsa[-9];
} }
void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t sz, k_thread_entry_t entry, size_t sz, k_thread_entry_t entry,
void *p1, void *p2, void *p3, void *p1, void *p2, void *p3,
int prio, unsigned int opts) int prio, unsigned int opts)
{ {
char *base = Z_THREAD_STACK_BUFFER(stack); char *base = Z_THREAD_STACK_BUFFER(stack);
char *top = base + sz; char *top = base + sz;
@ -194,7 +194,7 @@ void *xtensa_excint1_c(int *interrupted_stack)
LOG_ERR(" ** FATAL EXCEPTION"); LOG_ERR(" ** FATAL EXCEPTION");
LOG_ERR(" ** CPU %d EXCCAUSE %d (%s)", LOG_ERR(" ** CPU %d EXCCAUSE %d (%s)",
z_arch_curr_cpu()->id, cause, arch_curr_cpu()->id, cause,
z_xtensa_exccause(cause)); z_xtensa_exccause(cause));
LOG_ERR(" ** PC %p VADDR %p", LOG_ERR(" ** PC %p VADDR %p",
(void *)bsa[BSA_PC_OFF/4], (void *)vaddr); (void *)bsa[BSA_PC_OFF/4], (void *)vaddr);

View file

@ -31,7 +31,7 @@ extern void z_xt_coproc_init(void);
extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE); extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
static ALWAYS_INLINE void z_arch_kernel_init(void) static ALWAYS_INLINE void arch_kernel_init(void)
{ {
_cpu_t *cpu0 = &_kernel.cpus[0]; _cpu_t *cpu0 = &_kernel.cpus[0];
@ -55,7 +55,7 @@ static ALWAYS_INLINE void z_arch_kernel_init(void)
void xtensa_switch(void *switch_to, void **switched_from); void xtensa_switch(void *switch_to, void **switched_from);
static inline void z_arch_switch(void *switch_to, void **switched_from) static inline void arch_switch(void *switch_to, void **switched_from)
{ {
return xtensa_switch(switch_to, switched_from); return xtensa_switch(switch_to, switched_from);
} }
@ -64,9 +64,9 @@ static inline void z_arch_switch(void *switch_to, void **switched_from)
} }
#endif #endif
static inline bool z_arch_is_in_isr(void) static inline bool arch_is_in_isr(void)
{ {
return z_arch_curr_cpu()->nested != 0U; return arch_curr_cpu()->nested != 0U;
} }
#endif /* _ASMLANGUAGE */ #endif /* _ASMLANGUAGE */

View file

@ -30,7 +30,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
* *
* @return The vector assigned to this interrupt * @return The vector assigned to this interrupt
*/ */
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ #define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \ ({ \
posix_isr_declare(irq_p, 0, isr_p, isr_param_p); \ posix_isr_declare(irq_p, 0, isr_p, isr_param_p); \
posix_irq_priority_set(irq_p, priority_p, flags_p); \ posix_irq_priority_set(irq_p, priority_p, flags_p); \
@ -43,7 +43,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
* *
* See include/irq.h for details. * See include/irq.h for details.
*/ */
#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ #define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
({ \ ({ \
posix_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, \ posix_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, \
NULL); \ NULL); \
@ -63,7 +63,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
* All pre/post irq work of the interrupt is handled in the board * All pre/post irq work of the interrupt is handled in the board
* posix_irq_handler() both for direct and normal interrupts together * posix_irq_handler() both for direct and normal interrupts together
*/ */
#define Z_ARCH_ISR_DIRECT_DECLARE(name) \ #define ARCH_ISR_DIRECT_DECLARE(name) \
static inline int name##_body(void); \ static inline int name##_body(void); \
int name(void) \ int name(void) \
{ \ { \
@ -73,14 +73,14 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
} \ } \
static inline int name##_body(void) static inline int name##_body(void)
#define Z_ARCH_ISR_DIRECT_HEADER() do { } while (0) #define ARCH_ISR_DIRECT_HEADER() do { } while (0)
#define Z_ARCH_ISR_DIRECT_FOOTER(a) do { } while (0) #define ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)
#ifdef CONFIG_SYS_POWER_MANAGEMENT #ifdef CONFIG_SYS_POWER_MANAGEMENT
extern void posix_irq_check_idle_exit(void); extern void posix_irq_check_idle_exit(void);
#define Z_ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit() #define ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
#else #else
#define Z_ARCH_ISR_DIRECT_PM() do { } while (0) #define ARCH_ISR_DIRECT_PM() do { } while (0)
#endif #endif
#ifdef __cplusplus #ifdef __cplusplus

View file

@ -30,7 +30,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
* *
* @return The vector assigned to this interrupt * @return The vector assigned to this interrupt
*/ */
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \ #define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \ ({ \
posix_isr_declare(irq_p, 0, isr_p, isr_param_p); \ posix_isr_declare(irq_p, 0, isr_p, isr_param_p); \
posix_irq_priority_set(irq_p, priority_p, flags_p); \ posix_irq_priority_set(irq_p, priority_p, flags_p); \
@ -43,7 +43,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
* *
* See include/irq.h for details. * See include/irq.h for details.
*/ */
#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \ #define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
({ \ ({ \
posix_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, \ posix_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, \
NULL); \ NULL); \
@ -63,7 +63,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
* All pre/post irq work of the interrupt is handled in the board * All pre/post irq work of the interrupt is handled in the board
* posix_irq_handler() both for direct and normal interrupts together * posix_irq_handler() both for direct and normal interrupts together
*/ */
#define Z_ARCH_ISR_DIRECT_DECLARE(name) \ #define ARCH_ISR_DIRECT_DECLARE(name) \
static inline int name##_body(void); \ static inline int name##_body(void); \
int name(void) \ int name(void) \
{ \ { \
@ -73,14 +73,14 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
} \ } \
static inline int name##_body(void) static inline int name##_body(void)
#define Z_ARCH_ISR_DIRECT_HEADER() do { } while (0) #define ARCH_ISR_DIRECT_HEADER() do { } while (0)
#define Z_ARCH_ISR_DIRECT_FOOTER(a) do { } while (0) #define ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)
#ifdef CONFIG_SYS_POWER_MANAGEMENT #ifdef CONFIG_SYS_POWER_MANAGEMENT
extern void posix_irq_check_idle_exit(void); extern void posix_irq_check_idle_exit(void);
#define Z_ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit() #define ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
#else #else
#define Z_ARCH_ISR_DIRECT_PM() do { } while (0) #define ARCH_ISR_DIRECT_PM() do { } while (0)
#endif #endif
#ifdef __cplusplus #ifdef __cplusplus

View file

@ -16,7 +16,7 @@
* Note that interrupts may be received in the meantime and that therefore this * Note that interrupts may be received in the meantime and that therefore this
* thread may lose context * thread may lose context
*/ */
void z_arch_busy_wait(u32_t usec_to_wait) void arch_busy_wait(u32_t usec_to_wait)
{ {
bs_time_t time_end = tm_get_hw_time() + usec_to_wait; bs_time_t time_end = tm_get_hw_time() + usec_to_wait;

View file

@ -407,9 +407,9 @@ CPU Idling/Power Management
*************************** ***************************
The kernel provides support for CPU power management with two functions: The kernel provides support for CPU power management with two functions:
:c:func:`z_arch_cpu_idle` and :c:func:`z_arch_cpu_atomic_idle`. :c:func:`arch_cpu_idle` and :c:func:`arch_cpu_atomic_idle`.
:c:func:`z_arch_cpu_idle` can be as simple as calling the power saving :c:func:`arch_cpu_idle` can be as simple as calling the power saving
instruction for the architecture with interrupts unlocked, for example instruction for the architecture with interrupts unlocked, for example
:code:`hlt` on x86, :code:`wfi` or :code:`wfe` on ARM, :code:`sleep` on ARC. :code:`hlt` on x86, :code:`wfi` or :code:`wfe` on ARM, :code:`sleep` on ARC.
This function can be called in a loop within a context that does not care if it This function can be called in a loop within a context that does not care if it
@ -422,7 +422,7 @@ basically two scenarios when it is correct to use this function:
* In the idle thread. * In the idle thread.
:c:func:`z_arch_cpu_atomic_idle`, on the other hand, must be able to atomically :c:func:`arch_cpu_atomic_idle`, on the other hand, must be able to atomically
re-enable interrupts and invoke the power saving instruction. It can thus be re-enable interrupts and invoke the power saving instruction. It can thus be
used in real application code, again in single-threaded systems. used in real application code, again in single-threaded systems.
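
For a port with no dedicated sleep instruction, both hooks can collapse to an
interrupt unlock. The weak RISC-V defaults earlier in this change do exactly
that; they are reproduced here as a reference sketch (``SOC_MSTATUS_IEN`` is
the RISC-V-specific unlock key)::

    void __weak arch_cpu_idle(void)
    {
        /* No power-saving instruction: just re-enable interrupts */
        irq_unlock(SOC_MSTATUS_IEN);
    }

    void __weak arch_cpu_atomic_idle(unsigned int key)
    {
        /* Atomically restore the interrupt state captured in 'key' */
        irq_unlock(key);
    }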
@ -511,32 +511,32 @@ implemented, and the system must enable the :option:`CONFIG_ARCH_HAS_USERSPACE`
option. Please see the documentation for each of these functions for more option. Please see the documentation for each of these functions for more
details: details:
* :cpp:func:`z_arch_buffer_validate()` to test whether the current thread has * :cpp:func:`arch_buffer_validate()` to test whether the current thread has
access permissions to a particular memory region access permissions to a particular memory region
* :cpp:func:`z_arch_user_mode_enter()` which will irreversibly drop a supervisor * :cpp:func:`arch_user_mode_enter()` which will irreversibly drop a supervisor
thread to user mode privileges. The stack must be wiped. thread to user mode privileges. The stack must be wiped.
* :cpp:func:`z_arch_syscall_oops()` which generates a kernel oops when system * :cpp:func:`arch_syscall_oops()` which generates a kernel oops when system
call parameters can't be validated, in such a way that the oops appears to be call parameters can't be validated, in such a way that the oops appears to be
generated from where the system call was invoked in the user thread generated from where the system call was invoked in the user thread
* :cpp:func:`z_arch_syscall_invoke0()` through * :cpp:func:`arch_syscall_invoke0()` through
:cpp:func:`z_arch_syscall_invoke6()` invoke a system call with the :cpp:func:`arch_syscall_invoke6()` invoke a system call with the
appropriate number of arguments which must all be passed in during the appropriate number of arguments which must all be passed in during the
privilege elevation via registers. privilege elevation via registers.
* :cpp:func:`z_arch_is_user_context()` returns nonzero if the CPU is currently * :cpp:func:`arch_is_user_context()` returns nonzero if the CPU is currently
running in user mode running in user mode
* :cpp:func:`z_arch_mem_domain_max_partitions_get()` which indicates the max * :cpp:func:`arch_mem_domain_max_partitions_get()` which indicates the max
number of regions for a memory domain. MMU systems have an unlimited number, number of regions for a memory domain. MMU systems have an unlimited number,
while MPU systems have constraints on this. while MPU systems have constraints on this.
* :cpp:func:`z_arch_mem_domain_partition_remove()` Remove a partition from * :cpp:func:`arch_mem_domain_partition_remove()` Remove a partition from
a memory domain if the currently executing thread was part of that domain. a memory domain if the currently executing thread was part of that domain.
* :cpp:func:`z_arch_mem_domain_destroy()` Reset the thread's memory domain * :cpp:func:`arch_mem_domain_destroy()` Reset the thread's memory domain
configuration configuration
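
Several of these hooks can be thin wrappers over facilities the architecture
already has. For instance, on an MMU-based target two of them reduce to the
following, taken from the x86 port in this change::

    int arch_mem_domain_max_partitions_get(void)
    {
        /* MMU-based: bounded only by the configured limit */
        return CONFIG_MAX_DOMAIN_PARTITIONS;
    }

    int arch_buffer_validate(void *addr, size_t size, int write)
    {
        /* Defer to the page-table walk for the current thread */
        return z_x86_mmu_validate(z_x86_thread_page_tables_get(_current),
                                  addr, size, write != 0);
    }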
In addition to implementing these APIs, there are some other tasks as well: In addition to implementing these APIs, there are some other tasks as well:

View file

@ -132,7 +132,7 @@ happens on a single CPU before other CPUs are brought online.
Just before entering the application ``main()`` function, the kernel Just before entering the application ``main()`` function, the kernel
calls ``z_smp_init()`` to launch the SMP initialization process. This calls ``z_smp_init()`` to launch the SMP initialization process. This
enumerates over the configured CPUs, calling into the architecture enumerates over the configured CPUs, calling into the architecture
layer using ``z_arch_start_cpu()`` for each one. This function is layer using ``arch_start_cpu()`` for each one. This function is
passed a memory region to use as a stack on the foreign CPU (in passed a memory region to use as a stack on the foreign CPU (in
practice it uses the area that will become that CPU's interrupt practice it uses the area that will become that CPU's interrupt
stack), the address of a local ``smp_init_top()`` callback function to stack), the address of a local ``smp_init_top()`` callback function to
@ -172,7 +172,7 @@ handle the newly-runnable load.
So where possible, Zephyr SMP architectures should implement an So where possible, Zephyr SMP architectures should implement an
interprocessor interrupt. The current framework is very simple: the interprocessor interrupt. The current framework is very simple: the
architecture provides a ``z_arch_sched_ipi()`` call, which when invoked architecture provides an ``arch_sched_ipi()`` call, which when invoked
will flag an interrupt on all CPUs (except the current one, though will flag an interrupt on all CPUs (except the current one, though
that is allowed behavior) which will then invoke the ``z_sched_ipi()`` that is allowed behavior) which will then invoke the ``z_sched_ipi()``
function implemented in the scheduler. The expectation is that these function implemented in the scheduler. The expectation is that these
@ -239,7 +239,7 @@ offsets.
Note that an important requirement on the architecture layer is that Note that an important requirement on the architecture layer is that
the pointer to this CPU struct be available rapidly when in kernel the pointer to this CPU struct be available rapidly when in kernel
context. The expectation is that ``z_arch_curr_cpu()`` will be context. The expectation is that ``arch_curr_cpu()`` will be
implemented using a CPU-provided register or addressing mode that can implemented using a CPU-provided register or addressing mode that can
store this value across arbitrary context switches or interrupts and store this value across arbitrary context switches or interrupts and
make it available to any kernel-mode code. make it available to any kernel-mode code.
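
On a uniprocessor build this requirement is trivially met, since there is only
one CPU struct; a sketch of the degenerate case (SMP ports would instead park
the pointer in a dedicated register or addressing mode, as described above)::

    static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
    {
        /* UP: the only CPU is always cpus[0]; no special register is
         * needed because the value never changes
         */
        return &_kernel.cpus[0];
    }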
@ -270,7 +270,7 @@ Instead, the SMP "switch to" decision needs to be made synchronously
with the swap call, and as we don't want per-architecture assembly with the swap call, and as we don't want per-architecture assembly
code to be handling scheduler internal state, Zephyr requires a code to be handling scheduler internal state, Zephyr requires a
somewhat lower-level context switch primitive for SMP systems: somewhat lower-level context switch primitive for SMP systems:
``z_arch_switch()`` is always called with interrupts masked, and takes ``arch_switch()`` is always called with interrupts masked, and takes
exactly two arguments. The first is an opaque (architecture defined) exactly two arguments. The first is an opaque (architecture defined)
handle to the context to which it should switch, and the second is a handle to the context to which it should switch, and the second is a
pointer to such a handle into which it should store the handle pointer to such a handle into which it should store the handle
@ -288,4 +288,4 @@ in the interrupted thread struct.
Note that while SMP requires :option:`CONFIG_USE_SWITCH`, the reverse is not Note that while SMP requires :option:`CONFIG_USE_SWITCH`, the reverse is not
true. A uniprocessor architecture built with :option:`CONFIG_SMP` = n might true. A uniprocessor architecture built with :option:`CONFIG_SMP` = n might
still decide to implement its context switching using still decide to implement its context switching using
``z_arch_switch()``. ``arch_switch()``.
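
In the ports touched by this rename, ``arch_switch()`` is a one-line inline
wrapper that forwards both handles to the architecture's assembly switch
routine, e.g. the x86 version from this change::

    extern void z_x86_switch(void *switch_to, void **switched_from);

    static inline void arch_switch(void *switch_to, void **switched_from)
    {
        /* Hand the incoming handle, plus a slot to store the outgoing
         * one, to the assembly context-switch primitive
         */
        z_x86_switch(switch_to, switched_from);
    }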

View file

@ -149,7 +149,7 @@ Inside this header is the body of :c:func:`k_sem_init()`::
{ {
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
if (z_syscall_trap()) { if (z_syscall_trap()) {
z_arch_syscall_invoke3(*(uintptr_t *)&sem, *(uintptr_t *)&initial_count, *(uintptr_t *)&limit, K_SYSCALL_K_SEM_INIT); arch_syscall_invoke3(*(uintptr_t *)&sem, *(uintptr_t *)&initial_count, *(uintptr_t *)&limit, K_SYSCALL_K_SEM_INIT);
return; return;
} }
compiler_barrier(); compiler_barrier();

View file

@ -33,7 +33,7 @@ static int save_irq;
* *
* This routine enables a RISCV PLIC-specific interrupt line. * This routine enables a RISCV PLIC-specific interrupt line.
* riscv_plic_irq_enable is called by the SOC_FAMILY_RISCV_PRIVILEGE * riscv_plic_irq_enable is called by the SOC_FAMILY_RISCV_PRIVILEGE
* z_arch_irq_enable function to enable external interrupts for * arch_irq_enable function to enable external interrupts for
* IRQs with level == 2, whenever the CONFIG_RISCV_HAS_PLIC option is set. * IRQs with level == 2, whenever the CONFIG_RISCV_HAS_PLIC option is set.
* @param irq IRQ number to enable * @param irq IRQ number to enable
* *
@ -57,7 +57,7 @@ void riscv_plic_irq_enable(u32_t irq)
* *
* This routine disables a RISCV PLIC-specific interrupt line. * This routine disables a RISCV PLIC-specific interrupt line.
* riscv_plic_irq_disable is called by the SOC_FAMILY_RISCV_PRIVILEGE * riscv_plic_irq_disable is called by the SOC_FAMILY_RISCV_PRIVILEGE
* z_arch_irq_disable function to disable external interrupts, for * arch_irq_disable function to disable external interrupts, for
* IRQs with level == 2, whenever the CONFIG_RISCV_HAS_PLIC option is set. * IRQs with level == 2, whenever the CONFIG_RISCV_HAS_PLIC option is set.
* @param irq IRQ number to disable * @param irq IRQ number to disable
* *
@ -98,7 +98,7 @@ int riscv_plic_irq_is_enabled(u32_t irq)
* @brief Set priority of a riscv PLIC-specific interrupt line * @brief Set priority of a riscv PLIC-specific interrupt line
* *
* This routine sets the priority of a RISCV PLIC-specific interrupt line. * This routine sets the priority of a RISCV PLIC-specific interrupt line.
* riscv_plic_irq_set_prio is called by riscv z_arch_irq_priority_set to set * riscv_plic_irq_set_prio is called by riscv arch_irq_priority_set to set
* the priority of an interrupt whenever the CONFIG_RISCV_HAS_PLIC option is set. * the priority of an interrupt whenever the CONFIG_RISCV_HAS_PLIC option is set.
* @param irq IRQ number for which to set priority * @param irq IRQ number for which to set priority
* *
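
Taken together, these PLIC helpers sit behind the generic IRQ API rather than
being called directly by drivers. A usage sketch, with a hypothetical IRQ
number and handler (on real hardware the IRQ number comes from devicetree)::

    #define MY_PLIC_IRQ 10 /* hypothetical level-2 external interrupt */

    static void my_isr(void *arg)
    {
        ARG_UNUSED(arg);
        /* acknowledge and service the device here */
    }

    void my_driver_init(void)
    {
        IRQ_CONNECT(MY_PLIC_IRQ, 1, my_isr, NULL, 0);
        /* irq_enable() resolves to arch_irq_enable(), which routes
         * level == 2 interrupts to riscv_plic_irq_enable()
         */
        irq_enable(MY_PLIC_IRQ);
    }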

View file

@ -72,7 +72,7 @@ void z_irq_controller_irq_config(unsigned int vector, unsigned int irq,
* *
* @return N/A * @return N/A
*/ */
void z_arch_irq_enable(unsigned int irq) void arch_irq_enable(unsigned int irq)
{ {
if (IS_IOAPIC_IRQ(irq)) { if (IS_IOAPIC_IRQ(irq)) {
z_ioapic_irq_enable(irq); z_ioapic_irq_enable(irq);
@ -92,7 +92,7 @@ void z_arch_irq_enable(unsigned int irq)
* *
* @return N/A * @return N/A
*/ */
void z_arch_irq_disable(unsigned int irq) void arch_irq_disable(unsigned int irq)
{ {
if (IS_IOAPIC_IRQ(irq)) { if (IS_IOAPIC_IRQ(irq)) {
z_ioapic_irq_disable(irq); z_ioapic_irq_disable(irq);

View file

@ -83,17 +83,17 @@ static void vexriscv_litex_irq_handler(void *device)
#endif #endif
} }
void z_arch_irq_enable(unsigned int irq) void arch_irq_enable(unsigned int irq)
{ {
vexriscv_litex_irq_setmask(vexriscv_litex_irq_getmask() | (1 << irq)); vexriscv_litex_irq_setmask(vexriscv_litex_irq_getmask() | (1 << irq));
} }
void z_arch_irq_disable(unsigned int irq) void arch_irq_disable(unsigned int irq)
{ {
vexriscv_litex_irq_setmask(vexriscv_litex_irq_getmask() & ~(1 << irq)); vexriscv_litex_irq_setmask(vexriscv_litex_irq_getmask() & ~(1 << irq));
} }
int z_arch_irq_is_enabled(unsigned int irq) int arch_irq_is_enabled(unsigned int irq)
{ {
return vexriscv_litex_irq_getmask() & (1 << irq); return vexriscv_litex_irq_getmask() & (1 << irq);
} }

View file

@ -251,14 +251,14 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
/* Desired delay in the future */ /* Desired delay in the future */
delay = (ticks == 0) ? CYC_PER_TICK : ticks * CYC_PER_TICK; delay = (ticks == 0) ? CYC_PER_TICK : ticks * CYC_PER_TICK;
key = z_arch_irq_lock(); key = arch_irq_lock();
timer0_limit_register_set(delay - 1); timer0_limit_register_set(delay - 1);
timer0_count_register_set(0); timer0_count_register_set(0);
timer0_control_register_set(_ARC_V2_TMR_CTRL_NH | timer0_control_register_set(_ARC_V2_TMR_CTRL_NH |
_ARC_V2_TMR_CTRL_IE); _ARC_V2_TMR_CTRL_IE);
z_arch_irq_unlock(key); arch_irq_unlock(key);
#endif #endif
#else #else
if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) { if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) {

View file

@@ -206,8 +206,8 @@ void timer_int_handler(void *unused /* parameter is not used */
                 "pushl %eax\n\t"
                 "pushl %edx\n\t"
                 "rdtsc\n\t"
-                "mov %eax, z_arch_timing_tick_start\n\t"
-                "mov %edx, z_arch_timing_tick_start+4\n\t"
+                "mov %eax, arch_timing_tick_start\n\t"
+                "mov %edx, arch_timing_tick_start+4\n\t"
                 "pop %edx\n\t"
                 "pop %eax\n\t");
 #endif
@@ -293,8 +293,8 @@ void timer_int_handler(void *unused /* parameter is not used */
                 "pushl %eax\n\t"
                 "pushl %edx\n\t"
                 "rdtsc\n\t"
-                "mov %eax, z_arch_timing_tick_end\n\t"
-                "mov %edx, z_arch_timing_tick_end+4\n\t"
+                "mov %eax, arch_timing_tick_end\n\t"
+                "mov %edx, arch_timing_tick_end+4\n\t"
                 "pop %edx\n\t"
                 "pop %eax\n\t");
 #endif /* CONFIG_EXECUTION_BENCHMARKING */
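
rdtsc leaves the 64-bit timestamp counter in EDX:EAX, which is why the hunks store %eax to the symbol and %edx to symbol+4. A standalone equivalent using GCC-style inline assembly (a sketch, x86 only):

#include <stdint.h>

static inline uint64_t read_tsc(void)
{
        uint32_t lo, hi;

        /* EDX:EAX <- timestamp counter */
        __asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
        return ((uint64_t)hi << 32) | lo;
}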

View file

@@ -269,7 +269,7 @@ u32_t z_clock_elapsed(void)
 /*
  * Warning RTOS timer resolution is 30.5 us.
  * This is called by two code paths:
- * 1. Kernel call to k_cycle_get_32() -> z_arch_k_cycle_get_32() -> here.
+ * 1. Kernel call to k_cycle_get_32() -> arch_k_cycle_get_32() -> here.
  * The kernel is casting return to (int) and using it uncasted in math
  * expressions with int types. Expression result is stored in an int.
  * 2. If CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT is not defined then
@@ -359,7 +359,7 @@ int z_clock_driver_init(struct device *device)
  * 32-bit basic timer 0 configured for 1MHz count up, auto-reload,
  * and no interrupt generation.
  */
-void z_arch_busy_wait(u32_t usec_to_wait)
+void arch_busy_wait(u32_t usec_to_wait)
 {
         if (usec_to_wait == 0) {
                 return;
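
Both code paths named in the comment are exercised through the ordinary timing APIs. A sketch of measuring a busy wait with the cycle counter (values are illustrative):

#include <kernel.h>
#include <sys/printk.h>

void measure_busy_wait(void)
{
        u32_t start = k_cycle_get_32();  /* -> arch_k_cycle_get_32() */

        k_busy_wait(100);  /* -> arch_busy_wait(100) when
                            * CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT is set
                            */

        printk("~100us took %u cycles\n", k_cycle_get_32() - start);
}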

View file

@@ -123,7 +123,7 @@ u32_t z_clock_elapsed(void)
  * Note that interrupts may be received in the meanwhile and that therefore this
  * thread may loose context
  */
-void z_arch_busy_wait(u32_t usec_to_wait)
+void arch_busy_wait(u32_t usec_to_wait)
 {
         u64_t time_end = hwm_get_time() + usec_to_wait;

View file

@@ -89,10 +89,10 @@ extern "C" {
 #if defined(CONFIG_USERSPACE)
-#define Z_ARCH_THREAD_STACK_RESERVED \
+#define ARCH_THREAD_STACK_RESERVED \
         (STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE)
 #else
-#define Z_ARCH_THREAD_STACK_RESERVED (STACK_GUARD_SIZE)
+#define ARCH_THREAD_STACK_RESERVED (STACK_GUARD_SIZE)
 #endif
@@ -107,8 +107,8 @@ extern "C" {
  * MPU start, size alignment
  */
 #define Z_ARC_THREAD_STACK_ALIGN(size) Z_ARC_MPUV2_SIZE_ALIGN(size)
-#define Z_ARCH_THREAD_STACK_LEN(size) \
-        (Z_ARC_MPUV2_SIZE_ALIGN(size) + Z_ARCH_THREAD_STACK_RESERVED)
+#define ARCH_THREAD_STACK_LEN(size) \
+        (Z_ARC_MPUV2_SIZE_ALIGN(size) + ARCH_THREAD_STACK_RESERVED)
 /*
  * for stack array, each array member should be aligned both in size
  * and start
@@ -116,7 +116,7 @@ extern "C" {
 #define Z_ARC_THREAD_STACK_ARRAY_LEN(size) \
         (Z_ARC_MPUV2_SIZE_ALIGN(size) + \
         MAX(Z_ARC_MPUV2_SIZE_ALIGN(size), \
-        POW2_CEIL(Z_ARCH_THREAD_STACK_RESERVED)))
+        POW2_CEIL(ARCH_THREAD_STACK_RESERVED)))
 #else
 /*
  * MPUv3, no-mpu and no USERSPACE share the same macro definitions.
@@ -130,33 +130,33 @@ extern "C" {
  * aligned
  */
 #define Z_ARC_THREAD_STACK_ALIGN(size) (STACK_ALIGN)
-#define Z_ARCH_THREAD_STACK_LEN(size) \
-        (STACK_SIZE_ALIGN(size) + Z_ARCH_THREAD_STACK_RESERVED)
+#define ARCH_THREAD_STACK_LEN(size) \
+        (STACK_SIZE_ALIGN(size) + ARCH_THREAD_STACK_RESERVED)
 #define Z_ARC_THREAD_STACK_ARRAY_LEN(size) \
-        Z_ARCH_THREAD_STACK_LEN(size)
+        ARCH_THREAD_STACK_LEN(size)
 #endif /* CONFIG_USERSPACE && CONFIG_ARC_MPU_VER == 2 */
-#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
+#define ARCH_THREAD_STACK_DEFINE(sym, size) \
         struct _k_thread_stack_element __noinit \
                 __aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \
-                sym[Z_ARCH_THREAD_STACK_LEN(size)]
+                sym[ARCH_THREAD_STACK_LEN(size)]
-#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
+#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
         struct _k_thread_stack_element __noinit \
                 __aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \
                 sym[nmemb][Z_ARC_THREAD_STACK_ARRAY_LEN(size)]
-#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
+#define ARCH_THREAD_STACK_MEMBER(sym, size) \
         struct _k_thread_stack_element \
                 __aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \
-                sym[Z_ARCH_THREAD_STACK_LEN(size)]
+                sym[ARCH_THREAD_STACK_LEN(size)]
-#define Z_ARCH_THREAD_STACK_SIZEOF(sym) \
-        (sizeof(sym) - Z_ARCH_THREAD_STACK_RESERVED)
+#define ARCH_THREAD_STACK_SIZEOF(sym) \
+        (sizeof(sym) - ARCH_THREAD_STACK_RESERVED)
-#define Z_ARCH_THREAD_STACK_BUFFER(sym) \
+#define ARCH_THREAD_STACK_BUFFER(sym) \
         ((char *)(sym))
 #ifdef CONFIG_ARC_MPU
@@ -227,7 +227,7 @@ extern "C" {
 /* Typedef for the k_mem_partition attribute*/
 typedef u32_t k_mem_partition_attr_t;
-static ALWAYS_INLINE void z_arch_nop(void)
+static ALWAYS_INLINE void arch_nop(void)
 {
         __asm__ volatile("nop");
 }
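
Applications do not use the ARCH_* stack macros directly; they declare stacks with K_THREAD_STACK_DEFINE(), which expands to the arch-layer definition above. A usage sketch (stack size and priority are arbitrary):

#include <kernel.h>

#define MY_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
static struct k_thread my_thread;

static void my_entry(void *p1, void *p2, void *p3)
{
        /* thread body */
}

void start_my_thread(void)
{
        k_thread_create(&my_thread, my_stack,
                        K_THREAD_STACK_SIZEOF(my_stack),
                        my_entry, NULL, NULL, NULL,
                        5 /* priority */, 0, K_NO_WAIT);
}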

View file

@@ -16,7 +16,7 @@
 #include <arch/arc/v2/aux_regs.h>
 #endif
-static ALWAYS_INLINE _cpu_t *z_arch_curr_cpu(void)
+static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
 {
 #ifdef CONFIG_SMP
         u32_t core;

View file

@@ -38,10 +38,10 @@ extern "C" {
  * just for enabling CONFIG_USERSPACE on arc w/o errors.
  */
-static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
+static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
                 uintptr_t arg3, uintptr_t arg4,
                 uintptr_t arg5, uintptr_t arg6,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r1 __asm__("r1") = arg2;
@@ -63,10 +63,10 @@ static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
+static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
                 uintptr_t arg3, uintptr_t arg4,
                 uintptr_t arg5,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r1 __asm__("r1") = arg2;
@@ -87,9 +87,9 @@ static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
+static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
                 uintptr_t arg3, uintptr_t arg4,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r1 __asm__("r1") = arg2;
@@ -109,9 +109,9 @@ static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
+static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
                 uintptr_t arg3,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r1 __asm__("r1") = arg2;
@@ -129,8 +129,8 @@ static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
+static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r1 __asm__("r1") = arg2;
@@ -147,7 +147,7 @@ static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
+static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r6 __asm__("r6") = call_id;
@@ -163,7 +163,7 @@ static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id)
+static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
 {
         register u32_t ret __asm__("r0");
         register u32_t r6 __asm__("r6") = call_id;
@@ -179,7 +179,7 @@ static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id)
         return ret;
 }
-static inline bool z_arch_is_user_context(void)
+static inline bool arch_is_user_context(void)
 {
         u32_t status;
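
Generated syscall stubs are the callers of these helpers: arguments travel in r0-r5 and the call id in r6, matching the register constraints above. A hypothetical two-argument stub might look like this (the function name and K_SYSCALL_MY_DRIVER_SET call id are made up for illustration):

static inline int my_driver_set(u32_t dev_id, u32_t value)
{
        /* K_SYSCALL_MY_DRIVER_SET stands in for a generated call id */
        return (int)arch_syscall_invoke2((uintptr_t)dev_id,
                                         (uintptr_t)value,
                                         K_SYSCALL_MY_DRIVER_SET);
}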

View file

@@ -25,7 +25,7 @@ extern "C" {
 /*
  * use trap_s to raise a SW exception
  */
-#define Z_ARCH_EXCEPT(reason_p) do { \
+#define ARCH_EXCEPT(reason_p) do { \
         __asm__ volatile ( \
                 "mov r0, %[reason]\n\t" \
                 "trap_s %[id]\n\t" \

View file

@@ -26,15 +26,15 @@ extern "C" {
 #ifdef _ASMLANGUAGE
 GTEXT(_irq_exit);
-GTEXT(z_arch_irq_enable)
-GTEXT(z_arch_irq_disable)
+GTEXT(arch_irq_enable)
+GTEXT(arch_irq_disable)
 GTEXT(z_arc_firq_stack_set)
 #else
 extern void z_arc_firq_stack_set(void);
-extern void z_arch_irq_enable(unsigned int irq);
-extern void z_arch_irq_disable(unsigned int irq);
-extern int z_arch_irq_is_enabled(unsigned int irq);
+extern void arch_irq_enable(unsigned int irq);
+extern void arch_irq_disable(unsigned int irq);
+extern int arch_irq_is_enabled(unsigned int irq);
 extern void _irq_exit(void);
 extern void z_irq_priority_set(unsigned int irq, unsigned int prio,
@@ -50,7 +50,7 @@ extern void z_irq_spurious(void *unused);
  * We additionally set the priority in the interrupt controller at
  * runtime.
  */
-#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
         Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
         z_irq_priority_set(irq_p, priority_p, flags_p); \
@@ -78,7 +78,7 @@ extern void z_irq_spurious(void *unused);
  * See include/irq.h for details.
  * All arguments must be computable at build time.
  */
-#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
+#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
 ({ \
         Z_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \
         BUILD_ASSERT_MSG(priority_p || !IS_ENABLED(CONFIG_ARC_FIRQ) || \
@@ -92,14 +92,14 @@ extern void z_irq_spurious(void *unused);
 })
-static inline void z_arch_isr_direct_header(void)
+static inline void arch_isr_direct_header(void)
 {
 #ifdef CONFIG_TRACING
         z_sys_trace_isr_enter();
 #endif
 }
-static inline void z_arch_isr_direct_footer(int maybe_swap)
+static inline void arch_isr_direct_footer(int maybe_swap)
 {
         /* clear SW generated interrupt */
         if (z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE) ==
@@ -111,16 +111,16 @@ static inline void z_arch_isr_direct_footer(int maybe_swap)
 #endif
 }
-#define Z_ARCH_ISR_DIRECT_HEADER() z_arch_isr_direct_header()
-extern void z_arch_isr_direct_header(void);
-#define Z_ARCH_ISR_DIRECT_FOOTER(swap) z_arch_isr_direct_footer(swap)
+#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header()
+extern void arch_isr_direct_header(void);
+#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap)
 /*
  * Scheduling can not be done in direct isr. If required, please use kernel
  * aware interrupt handling
  */
-#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
+#define ARCH_ISR_DIRECT_DECLARE(name) \
         static inline int name##_body(void); \
         __attribute__ ((interrupt("ilink")))void name(void) \
         { \
@@ -163,7 +163,7 @@ extern void z_arch_isr_direct_header(void);
  * "interrupt disable state" prior to the call.
  */
-static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int arch_irq_lock(void)
 {
         unsigned int key;
@@ -171,12 +171,12 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
         return key;
 }
-static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
 {
         __asm__ volatile("seti %0" : : "ir"(key) : "memory");
 }
-static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
+static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
 {
         /* ARC irq lock uses instruction "clri r0",
          * r0 == {26d0, 1b1, STATUS32.IE, STATUS32.E[3:0] }
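
The direct-ISR macros above are reached through the public IRQ_DIRECT_CONNECT()/ISR_DIRECT_DECLARE() wrappers, which splice in the header and footer shown here. A sketch (IRQ number, priority and handler body are placeholders):

#include <irq.h>

ISR_DIRECT_DECLARE(my_fast_isr)
{
        /* keep the body minimal: no kernel calls allowed here */
        ISR_DIRECT_PM();  /* power-management hook, when enabled */
        return 1;         /* request a scheduling check on exit */
}

void install_fast_isr(void)
{
        IRQ_DIRECT_CONNECT(25, 1, my_fast_isr, 0);
        irq_enable(25);
}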

View file

@@ -23,7 +23,7 @@ extern unsigned int z_arc_cpu_sleep_mode;
 extern u32_t z_timer_cycle_get_32(void);
-static inline u32_t z_arch_k_cycle_get_32(void)
+static inline u32_t arch_k_cycle_get_32(void)
 {
         return z_timer_cycle_get_32();
 }

View file

@@ -189,56 +189,56 @@ extern "C" {
 /* Guard is 'carved-out' of the thread stack region, and the supervisor
  * mode stack is allocated elsewhere by gen_priv_stack.py
  */
-#define Z_ARCH_THREAD_STACK_RESERVED 0
+#define ARCH_THREAD_STACK_RESERVED 0
 #else
-#define Z_ARCH_THREAD_STACK_RESERVED MPU_GUARD_ALIGN_AND_SIZE
+#define ARCH_THREAD_STACK_RESERVED MPU_GUARD_ALIGN_AND_SIZE
 #endif
 #if defined(CONFIG_USERSPACE) && \
         defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
+#define ARCH_THREAD_STACK_DEFINE(sym, size) \
         struct _k_thread_stack_element __noinit \
                 __aligned(POW2_CEIL(size)) sym[POW2_CEIL(size)]
 #else
-#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
+#define ARCH_THREAD_STACK_DEFINE(sym, size) \
         struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
                 sym[size+MPU_GUARD_ALIGN_AND_SIZE]
 #endif
 #if defined(CONFIG_USERSPACE) && \
         defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define Z_ARCH_THREAD_STACK_LEN(size) (POW2_CEIL(size))
+#define ARCH_THREAD_STACK_LEN(size) (POW2_CEIL(size))
 #else
-#define Z_ARCH_THREAD_STACK_LEN(size) ((size)+MPU_GUARD_ALIGN_AND_SIZE)
+#define ARCH_THREAD_STACK_LEN(size) ((size)+MPU_GUARD_ALIGN_AND_SIZE)
 #endif
 #if defined(CONFIG_USERSPACE) && \
         defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
+#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
         struct _k_thread_stack_element __noinit \
                 __aligned(POW2_CEIL(size)) \
-                sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
+                sym[nmemb][ARCH_THREAD_STACK_LEN(size)]
 #else
-#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
+#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
         struct _k_thread_stack_element __noinit \
                 __aligned(STACK_ALIGN) \
-                sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
+                sym[nmemb][ARCH_THREAD_STACK_LEN(size)]
 #endif
 #if defined(CONFIG_USERSPACE) && \
         defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
+#define ARCH_THREAD_STACK_MEMBER(sym, size) \
         struct _k_thread_stack_element __aligned(POW2_CEIL(size)) \
                 sym[POW2_CEIL(size)]
 #else
-#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
+#define ARCH_THREAD_STACK_MEMBER(sym, size) \
         struct _k_thread_stack_element __aligned(STACK_ALIGN) \
                 sym[size+MPU_GUARD_ALIGN_AND_SIZE]
 #endif
-#define Z_ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - MPU_GUARD_ALIGN_AND_SIZE)
-#define Z_ARCH_THREAD_STACK_BUFFER(sym) \
+#define ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - MPU_GUARD_ALIGN_AND_SIZE)
+#define ARCH_THREAD_STACK_BUFFER(sym) \
         ((char *)(sym) + MPU_GUARD_ALIGN_AND_SIZE)
 /* Legacy case: retain containing extern "C" with C++ */
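
On ARM the guard is carved out of the declared region, so the usable size is the declared size minus MPU_GUARD_ALIGN_AND_SIZE. A small consistency sketch, assuming the non-power-of-two MPU configuration where the declared array is size plus guard:

#include <kernel.h>

K_THREAD_STACK_DEFINE(demo_stack, 1024);

/* declared = 1024 + guard; usable = declared - guard = 1024 */
BUILD_ASSERT(K_THREAD_STACK_SIZEOF(demo_stack) == 1024);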

View file

@@ -36,7 +36,7 @@ extern "C" {
  * except NMI.
  */
-static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int arch_irq_lock(void)
 {
         unsigned int key;
@@ -75,7 +75,7 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
  * previously disabled.
  */
-static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
 {
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
         if (key) {
@@ -100,7 +100,7 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
 }
-static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
+static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
 {
         /* This convention works for both PRIMASK and BASEPRI */
         return key == 0;

View file

@@ -31,7 +31,7 @@ extern "C" {
  * schedule a new thread until they are unlocked which is not what we want.
  * Force them unlocked as well.
  */
-#define Z_ARCH_EXCEPT(reason_p) \
+#define ARCH_EXCEPT(reason_p) \
 register u32_t r0 __asm__("r0") = reason_p; \
 do { \
         __asm__ volatile ( \
@@ -42,7 +42,7 @@ do { \
                 : "memory"); \
 } while (false)
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
-#define Z_ARCH_EXCEPT(reason_p) do { \
+#define ARCH_EXCEPT(reason_p) do { \
         __asm__ volatile ( \
                 "eors.n r0, r0\n\t" \
                 "msr BASEPRI, r0\n\t" \

View file

@@ -24,13 +24,13 @@ extern "C" {
 #ifdef _ASMLANGUAGE
 GTEXT(z_arm_int_exit);
-GTEXT(z_arch_irq_enable)
-GTEXT(z_arch_irq_disable)
-GTEXT(z_arch_irq_is_enabled)
+GTEXT(arch_irq_enable)
+GTEXT(arch_irq_disable)
+GTEXT(arch_irq_is_enabled)
 #else
-extern void z_arch_irq_enable(unsigned int irq);
-extern void z_arch_irq_disable(unsigned int irq);
-extern int z_arch_irq_is_enabled(unsigned int irq);
+extern void arch_irq_enable(unsigned int irq);
+extern void arch_irq_disable(unsigned int irq);
+extern int arch_irq_is_enabled(unsigned int irq);
 extern void z_arm_int_exit(void);
@@ -76,14 +76,14 @@ extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio,
  * We additionally set the priority in the interrupt controller at
  * runtime.
  */
-#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
         Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
         z_arm_irq_priority_set(irq_p, priority_p, flags_p); \
         irq_p; \
 })
-#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
+#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
 ({ \
         Z_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \
         z_arm_irq_priority_set(irq_p, priority_p, flags_p); \
@@ -93,15 +93,15 @@ extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio,
 /* FIXME prefer these inline, but see GH-3056 */
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
 extern void _arch_isr_direct_pm(void);
-#define Z_ARCH_ISR_DIRECT_PM() _arch_isr_direct_pm()
+#define ARCH_ISR_DIRECT_PM() _arch_isr_direct_pm()
 #else
-#define Z_ARCH_ISR_DIRECT_PM() do { } while (false)
+#define ARCH_ISR_DIRECT_PM() do { } while (false)
 #endif
-#define Z_ARCH_ISR_DIRECT_HEADER() z_arch_isr_direct_header()
-extern void z_arch_isr_direct_header(void);
-#define Z_ARCH_ISR_DIRECT_FOOTER(swap) z_arch_isr_direct_footer(swap)
+#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header()
+extern void arch_isr_direct_header(void);
+#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap)
 /* arch/arm/core/exc_exit.S */
 extern void z_arm_int_exit(void);
@@ -110,7 +110,7 @@ extern void z_arm_int_exit(void);
 extern void sys_trace_isr_exit(void);
 #endif
-static inline void z_arch_isr_direct_footer(int maybe_swap)
+static inline void arch_isr_direct_footer(int maybe_swap)
 {
 #ifdef CONFIG_TRACING
@@ -121,7 +121,7 @@ static inline void z_arch_isr_direct_footer(int maybe_swap)
         }
 }
-#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
+#define ARCH_ISR_DIRECT_DECLARE(name) \
         static inline int name##_body(void); \
         __attribute__ ((interrupt ("IRQ"))) void name(void) \
         { \

View file

@@ -21,12 +21,12 @@ extern "C" {
 #ifndef _ASMLANGUAGE
 extern u32_t z_timer_cycle_get_32(void);
-static inline u32_t z_arch_k_cycle_get_32(void)
+static inline u32_t arch_k_cycle_get_32(void)
 {
         return z_timer_cycle_get_32();
 }
-static ALWAYS_INLINE void z_arch_nop(void)
+static ALWAYS_INLINE void arch_nop(void)
 {
         __asm__ volatile("nop");
 }

View file

@@ -36,10 +36,10 @@ extern "C" {
 /* Syscall invocation macros. arm-specific machine constraints used to ensure
  * args land in the proper registers.
  */
-static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
+static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
                 uintptr_t arg3, uintptr_t arg4,
                 uintptr_t arg5, uintptr_t arg6,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r1 __asm__("r1") = arg2;
@@ -59,10 +59,10 @@ static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
+static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
                 uintptr_t arg3, uintptr_t arg4,
                 uintptr_t arg5,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r1 __asm__("r1") = arg2;
@@ -81,9 +81,9 @@ static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
+static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
                 uintptr_t arg3, uintptr_t arg4,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r1 __asm__("r1") = arg2;
@@ -101,9 +101,9 @@ static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
+static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
                 uintptr_t arg3,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r1 __asm__("r1") = arg2;
@@ -119,8 +119,8 @@ static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
+static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r1 __asm__("r1") = arg2;
@@ -135,8 +135,8 @@ static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1,
+static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
                 uintptr_t call_id)
 {
         register u32_t ret __asm__("r0") = arg1;
         register u32_t r6 __asm__("r6") = call_id;
@@ -149,7 +149,7 @@ static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1,
         return ret;
 }
-static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id)
+static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
 {
         register u32_t ret __asm__("r0");
         register u32_t r6 __asm__("r6") = call_id;
@@ -163,7 +163,7 @@ static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id)
         return ret;
 }
-static inline bool z_arch_is_user_context(void)
+static inline bool arch_is_user_context(void)
 {
         u32_t value;

View file

@@ -38,7 +38,7 @@ extern "C" {
 /* There is no notion of priority with the Nios II internal interrupt
  * controller and no flags are currently supported.
  */
-#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
         Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
         irq_p; \
@@ -46,7 +46,7 @@ extern "C" {
 extern void z_irq_spurious(void *unused);
-static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int arch_irq_lock(void)
 {
         unsigned int key, tmp;
@@ -61,7 +61,7 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
         return key;
 }
-static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
 {
         /* If the CPU is built without certain features, then
          * the only writable bit in the status register is PIE
@@ -93,13 +93,13 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 #endif
 }
-static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
+static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
 {
         return key & 1;
 }
-void z_arch_irq_enable(unsigned int irq);
-void z_arch_irq_disable(unsigned int irq);
+void arch_irq_enable(unsigned int irq);
+void arch_irq_disable(unsigned int irq);
 struct __esf {
         u32_t ra; /* return address r31 */
@@ -173,12 +173,12 @@ enum nios2_exception_cause {
 extern u32_t z_timer_cycle_get_32(void);
-static inline u32_t z_arch_k_cycle_get_32(void)
+static inline u32_t arch_k_cycle_get_32(void)
 {
         return z_timer_cycle_get_32();
 }
-static ALWAYS_INLINE void z_arch_nop(void)
+static ALWAYS_INLINE void arch_nop(void)
 {
         __asm__ volatile("nop");
 }

View file

@@ -48,28 +48,28 @@ typedef struct __esf z_arch_esf_t;
 extern u32_t z_timer_cycle_get_32(void);
-static inline u32_t z_arch_k_cycle_get_32(void)
+static inline u32_t arch_k_cycle_get_32(void)
 {
         return z_timer_cycle_get_32();
 }
-static ALWAYS_INLINE void z_arch_nop(void)
+static ALWAYS_INLINE void arch_nop(void)
 {
         __asm__ volatile("nop");
 }
-static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
+static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
 {
         return key == false;
 }
-static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int arch_irq_lock(void)
 {
         return posix_irq_lock();
 }
-static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
 {
         posix_irq_unlock(key);
 }

View file

@@ -64,21 +64,21 @@ extern "C" {
  */
 extern u32_t __soc_get_irq(void);
-void z_arch_irq_enable(unsigned int irq);
-void z_arch_irq_disable(unsigned int irq);
-int z_arch_irq_is_enabled(unsigned int irq);
-void z_arch_irq_priority_set(unsigned int irq, unsigned int prio);
+void arch_irq_enable(unsigned int irq);
+void arch_irq_disable(unsigned int irq);
+int arch_irq_is_enabled(unsigned int irq);
+void arch_irq_priority_set(unsigned int irq, unsigned int prio);
 void z_irq_spurious(void *unused);
 #if defined(CONFIG_RISCV_HAS_PLIC)
-#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
         Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
-        z_arch_irq_priority_set(irq_p, priority_p); \
+        arch_irq_priority_set(irq_p, priority_p); \
         irq_p; \
 })
 #else
-#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
         Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
         irq_p; \
@@ -89,7 +89,7 @@ void z_irq_spurious(void *unused);
  * use atomic instruction csrrc to lock global irq
  * csrrc: atomic read and clear bits in CSR register
  */
-static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int arch_irq_lock(void)
 {
         unsigned int key;
         ulong_t mstatus;
@@ -107,7 +107,7 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
  * use atomic instruction csrrs to unlock global irq
  * csrrs: atomic read and set bits in CSR register
  */
-static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
 {
         ulong_t mstatus;
@@ -117,26 +117,26 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
                         : "memory");
 }
-static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
+static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
 {
-        /* FIXME: looking at z_arch_irq_lock, this should be reducable
+        /* FIXME: looking at arch_irq_lock, this should be reducable
          * to just testing that key is nonzero (because it should only
          * have the single bit set).  But there is a mask applied to
-         * the argument in z_arch_irq_unlock() that has me worried
+         * the argument in arch_irq_unlock() that has me worried
          * that something elseswhere might try to set a bit?  Do it
          * the safe way for now.
          */
         return (key & SOC_MSTATUS_IEN) == SOC_MSTATUS_IEN;
 }
-static ALWAYS_INLINE void z_arch_nop(void)
+static ALWAYS_INLINE void arch_nop(void)
 {
         __asm__ volatile("nop");
 }
 extern u32_t z_timer_cycle_get_32(void);
-static inline u32_t z_arch_k_cycle_get_32(void)
+static inline u32_t arch_k_cycle_get_32(void)
 {
         return z_timer_cycle_get_32();
 }
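
csrrc/csrrs make the lock/unlock pair a single atomic read-modify-write of mstatus, so no nesting counter is needed: the key simply preserves the caller's MIE bit. A standalone sketch of the same pattern (assumes machine mode, MIE as mstatus bit 3, and GCC inline assembly):

static inline unsigned long my_irq_lock(void)
{
        unsigned long prev;

        /* atomically clear mstatus.MIE and return its previous value */
        __asm__ volatile("csrrc %0, mstatus, %1"
                         : "=r"(prev)
                         : "r"(1UL << 3)
                         : "memory");
        return prev & (1UL << 3);
}

static inline void my_irq_unlock(unsigned long key)
{
        /* atomically restore the saved MIE bit (x0 discards the read) */
        __asm__ volatile("csrrs x0, mstatus, %0"
                         : : "r"(key) : "memory");
}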

Some files were not shown because too many files have changed in this diff.