kernel: rename z_arch_ to arch_
Promote the private z_arch_* namespace, which specifies the interface between the core kernel and the architecture code, to a new top-level namespace named arch_*. This allows our documentation generation to create online documentation for this set of interfaces, and this set of interfaces is worth treating in a more formal way anyway.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent a6de79b4af
commit 4f77c2ad53
178 changed files with 912 additions and 910 deletions
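Every hunk below follows the same mechanical pattern: the private z_ prefix is dropped from kernel/architecture interface symbols, while arch-internal helpers (z_arc_*, z_arm_*, z_irq_*, ...) keep it. As a hedged illustration of the before/after — the irq lock/unlock pair appears in the first hunk of this diff, but the counter and the wrapper functions are invented placeholders, not code from this commit:

    /* Hedged sketch of the rename; `counter` is a hypothetical shared variable. */
    static unsigned int counter;

    void increment_counter_old(void)
    {
            /* before this commit: private kernel/arch interface */
            unsigned int key = z_arch_irq_lock();   /* mask interrupts, save state */
            counter++;                              /* critical section */
            z_arch_irq_unlock(key);                 /* restore saved state */
    }

    void increment_counter_new(void)
    {
            /* after this commit: public arch_* namespace, identical semantics */
            unsigned int key = arch_irq_lock();
            counter++;
            arch_irq_unlock(key);
    }

Upper-case macros follow the same rule, as the hunks below show: Z_ARCH_THREAD_STACK_RESERVED becomes ARCH_THREAD_STACK_RESERVED and Z_ARCH_EXCEPT() becomes ARCH_EXCEPT().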
@@ -223,7 +223,7 @@ u64_t z_arc_connect_gfrc_read(void)
          * sub-components. For GFRC, HW allows simultaneously accessing to
          * counters. So an irq lock is enough.
          */
-        key = z_arch_irq_lock();
+        key = arch_irq_lock();

         z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_LO, 0);
         low = z_arc_connect_cmd_readback();
@@ -231,7 +231,7 @@ u64_t z_arc_connect_gfrc_read(void)
         z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_HI, 0);
         high = z_arc_connect_cmd_readback();

-        z_arch_irq_unlock(key);
+        arch_irq_unlock(key);

         return (((u64_t)high) << 32) | low;
 }

@@ -55,7 +55,7 @@ u64_t z_arc_smp_switch_in_isr(void)
         if (new_thread != old_thread) {
                 _current_cpu->swap_ok = 0;
                 ((struct k_thread *)new_thread)->base.cpu =
-                        z_arch_curr_cpu()->id;
+                        arch_curr_cpu()->id;
                 _current = (struct k_thread *) new_thread;
                 ret = new_thread | ((u64_t)(old_thread) << 32);
         }
@@ -83,8 +83,8 @@ volatile u32_t arc_cpu_wake_flag;
 volatile _cpu_t *_curr_cpu[CONFIG_MP_NUM_CPUS];

 /* Called from Zephyr initialization */
-void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
-                      void (*fn)(int, void *), void *arg)
+void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
+                    void (*fn)(int, void *), void *arg)
 {
         _curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]);
         arc_cpu_init[cpu_num].fn = fn;
@@ -109,14 +109,14 @@ void z_arc_slave_start(int cpu_num)
         z_irq_priority_set(IRQ_ICI, ARCV2_ICI_IRQ_PRIORITY, 0);
         irq_enable(IRQ_ICI);

-        /* call the function set by z_arch_start_cpu */
+        /* call the function set by arch_start_cpu */
         fn = arc_cpu_init[cpu_num].fn;

         fn(cpu_num, arc_cpu_init[cpu_num].arg);
 }

 /* arch implementation of sched_ipi */
-void z_arch_sched_ipi(void)
+void arch_sched_ipi(void)
 {
         u32_t i;

@@ -17,8 +17,8 @@
 #include <linker/sections.h>
 #include <arch/cpu.h>

-GTEXT(z_arch_cpu_idle)
-GTEXT(z_arch_cpu_atomic_idle)
+GTEXT(arch_cpu_idle)
+GTEXT(arch_cpu_atomic_idle)
 GDATA(z_arc_cpu_sleep_mode)

 SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
@@ -33,7 +33,7 @@ SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
 * void nanCpuIdle(void)
 */

-SECTION_FUNC(TEXT, z_arch_cpu_idle)
+SECTION_FUNC(TEXT, arch_cpu_idle)

 #ifdef CONFIG_TRACING
         push_s blink
@@ -52,9 +52,9 @@ SECTION_FUNC(TEXT, z_arch_cpu_idle)
 *
 * This function exits with interrupts restored to <key>.
 *
-* void z_arch_cpu_atomic_idle(unsigned int key)
+* void arch_cpu_atomic_idle(unsigned int key)
 */
-SECTION_FUNC(TEXT, z_arch_cpu_atomic_idle)
+SECTION_FUNC(TEXT, arch_cpu_atomic_idle)

 #ifdef CONFIG_TRACING
         push_s blink

@@ -28,13 +28,13 @@ void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
         z_fatal_error(reason, esf);
 }

-FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
+FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
 {
         z_arc_fatal_error(K_ERR_KERNEL_OOPS, ssf_ptr);
         CODE_UNREACHABLE;
 }

-FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
+FUNC_NORETURN void arch_system_halt(unsigned int reason)
 {
         ARG_UNUSED(reason);

@@ -93,7 +93,7 @@ void z_arc_firq_stack_set(void)
 * @return N/A
 */

-void z_arch_irq_enable(unsigned int irq)
+void arch_irq_enable(unsigned int irq)
 {
         unsigned int key = irq_lock();

@@ -110,7 +110,7 @@ void z_arch_irq_enable(unsigned int irq)
 * @return N/A
 */

-void z_arch_irq_disable(unsigned int irq)
+void arch_irq_disable(unsigned int irq)
 {
         unsigned int key = irq_lock();

@@ -124,7 +124,7 @@ void z_arch_irq_disable(unsigned int irq)
 * @param irq IRQ line
 * @return interrupt enable state, true or false
 */
-int z_arch_irq_is_enabled(unsigned int irq)
+int arch_irq_is_enabled(unsigned int irq)
 {
         return z_arc_v2_irq_unit_int_enabled(irq);
 }
@@ -181,9 +181,9 @@ void z_irq_spurious(void *unused)
 }

 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
-                               void (*routine)(void *parameter), void *parameter,
-                               u32_t flags)
+int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+                             void (*routine)(void *parameter), void *parameter,
+                             u32_t flags)
 {
         z_isr_install(irq, routine, parameter);
         z_irq_priority_set(irq, priority, flags);

@@ -20,7 +20,7 @@ void z_irq_do_offload(void)
         offload_routine(offload_param);
 }

-void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
+void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
 {
         unsigned int key;

@@ -68,7 +68,7 @@ The context switch code adopts this standard so that it is easier to follow:
   transition from outgoing thread to incoming thread

 Not loading _kernel into r0 allows loading _kernel without stomping on
-the parameter in r0 in z_arch_switch().
+the parameter in r0 in arch_switch().


 ARCv2 processors have two kinds of interrupts: fast (FIRQ) and regular. The
@@ -168,7 +168,7 @@ From FIRQ:

   o to coop

-    The address of the returning instruction from z_arch_switch() is loaded
+    The address of the returning instruction from arch_switch() is loaded
     in ilink and the saved status32 in status32_p0.

   o to any irq

@@ -27,7 +27,7 @@ void configure_mpu_thread(struct k_thread *thread)

 #if defined(CONFIG_USERSPACE)

-int z_arch_mem_domain_max_partitions_get(void)
+int arch_mem_domain_max_partitions_get(void)
 {
         return arc_core_mpu_get_max_domain_partition_regions();
 }
@@ -35,8 +35,8 @@ int z_arch_mem_domain_max_partitions_get(void)
 /*
  * Reset MPU region for a single memory partition
  */
-void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
-                                        u32_t partition_id)
+void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
+                                      u32_t partition_id)
 {
         if (_current->mem_domain_info.mem_domain != domain) {
                 return;
@@ -50,7 +50,7 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 /*
  * Configure MPU memory domain
  */
-void z_arch_mem_domain_thread_add(struct k_thread *thread)
+void arch_mem_domain_thread_add(struct k_thread *thread)
 {
         if (_current != thread) {
                 return;
@@ -64,7 +64,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread)
 /*
  * Destroy MPU regions for the mem domain
  */
-void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
+void arch_mem_domain_destroy(struct k_mem_domain *domain)
 {
         if (_current->mem_domain_info.mem_domain != domain) {
                 return;
@@ -75,25 +75,25 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
         arc_core_mpu_enable();
 }

-void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
-                                     u32_t partition_id)
+void arch_mem_domain_partition_add(struct k_mem_domain *domain,
+                                   u32_t partition_id)
 {
         /* No-op on this architecture */
 }

-void z_arch_mem_domain_thread_remove(struct k_thread *thread)
+void arch_mem_domain_thread_remove(struct k_thread *thread)
 {
         if (_current != thread) {
                 return;
         }

-        z_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
+        arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
 }

 /*
  * Validate the given buffer is user accessible or not
  */
-int z_arch_buffer_validate(void *addr, size_t size, int write)
+int arch_buffer_validate(void *addr, size_t size, int write)
 {
         return arc_core_mpu_buffer_validate(addr, size, write);
 }

@@ -64,7 +64,7 @@ PRE-CONTEXT-SWITCH STACK

       --------------------------------------
 SP -> | Return address; PC (Program Counter), in fact value taken from
-      | BLINK register in z_arch_switch()
+      | BLINK register in arch_switch()
       --------------------------------------
       | STATUS32 value, we explicitly save it here for later usage, read-on
       --------------------------------------
@@ -22,37 +22,37 @@
 #include <v2/irq.h>
 #include <swap_macros.h>

-GTEXT(z_arch_switch)
+GTEXT(arch_switch)

 /**
 *
 * @brief Initiate a cooperative context switch
 *
-* The z_arch_switch routine is invoked by various kernel services to effect
-* a cooperative context switch. Prior to invoking z_arch_switch, the caller
+* The arch_switch routine is invoked by various kernel services to effect
+* a cooperative context switch. Prior to invoking arch_switch, the caller
 * disables interrupts via irq_lock()

-* Given that z_arch_switch() is called to effect a cooperative context switch,
+* Given that arch_switch() is called to effect a cooperative context switch,
 * the caller-saved integer registers are saved on the stack by the function
-* call preamble to z_arch_switch. This creates a custom stack frame that will
-* be popped when returning from z_arch_switch, but is not suitable for handling
+* call preamble to arch_switch. This creates a custom stack frame that will
+* be popped when returning from arch_switch, but is not suitable for handling
 * a return from an exception. Thus, the fact that the thread is pending because
-* of a cooperative call to z_arch_switch() has to be recorded via the
+* of a cooperative call to arch_switch() has to be recorded via the
 * _CAUSE_COOP code in the relinquish_cause of the thread's k_thread structure.
 * The _rirq_exit()/_firq_exit() code will take care of doing the right thing
 * to restore the thread status.
 *
-* When z_arch_switch() is invoked, we know the decision to perform a context
+* When arch_switch() is invoked, we know the decision to perform a context
 * switch or not has already been taken and a context switch must happen.
 *
 *
 * C function prototype:
 *
-* void z_arch_switch(void *switch_to, void **switched_from);
+* void arch_switch(void *switch_to, void **switched_from);
 *
 */

-SECTION_FUNC(TEXT, z_arch_switch)
+SECTION_FUNC(TEXT, arch_switch)

 #ifdef CONFIG_EXECUTION_BENCHMARKING
         push_s r0

@@ -58,10 +58,10 @@ struct init_stack_frame {
 *
 * @return N/A
 */
-void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-                       size_t stackSize, k_thread_entry_t pEntry,
-                       void *parameter1, void *parameter2, void *parameter3,
-                       int priority, unsigned int options)
+void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+                     size_t stackSize, k_thread_entry_t pEntry,
+                     void *parameter1, void *parameter2, void *parameter3,
+                     int priority, unsigned int options)
 {
         char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
         Z_ASSERT_VALID_PRIO(priority, pEntry);

@@ -92,7 +92,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
                         (u32_t)(stackEnd + STACK_GUARD_SIZE);

                 stackAdjEnd = (char *)STACK_ROUND_DOWN(stackEnd +
-                        Z_ARCH_THREAD_STACK_RESERVED);
+                        ARCH_THREAD_STACK_RESERVED);

                 /* reserve 4 bytes for the start of user sp */
                 stackAdjEnd -= 4;

@@ -122,7 +122,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
                  */
                 pStackMem += STACK_GUARD_SIZE;
                 stackAdjSize = stackAdjSize + CONFIG_PRIVILEGED_STACK_SIZE;
-                stackEnd += Z_ARCH_THREAD_STACK_RESERVED;
+                stackEnd += ARCH_THREAD_STACK_RESERVED;

                 thread->arch.priv_stack_start = 0;

@@ -161,7 +161,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
          */
         pInitCtx->status32 |= _ARC_V2_STATUS32_US;
 #else /* For no USERSPACE feature */
-        pStackMem += Z_ARCH_THREAD_STACK_RESERVED;
+        pStackMem += ARCH_THREAD_STACK_RESERVED;
         stackEnd = pStackMem + stackSize;

         z_new_thread_init(thread, pStackMem, stackSize, priority, options);

@@ -199,7 +199,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
                 thread->arch.k_stack_top =
                         (u32_t)(stackEnd + STACK_GUARD_SIZE);
                 thread->arch.k_stack_base = (u32_t)
-                        (stackEnd + Z_ARCH_THREAD_STACK_RESERVED);
+                        (stackEnd + ARCH_THREAD_STACK_RESERVED);
         } else {
                 thread->arch.k_stack_top = (u32_t)pStackMem;
                 thread->arch.k_stack_base = (u32_t)stackEnd;

@@ -227,8 +227,8 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,

 #ifdef CONFIG_USERSPACE

-FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
-                                          void *p1, void *p2, void *p3)
+FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
+                                        void *p1, void *p2, void *p3)
 {

         /*

@@ -270,7 +270,7 @@ FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
 #endif

 #if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
-int z_arch_float_disable(struct k_thread *thread)
+int arch_float_disable(struct k_thread *thread)
 {
         unsigned int key;

@@ -287,7 +287,7 @@ int z_arch_float_disable(struct k_thread *thread)
 }


-int z_arch_float_enable(struct k_thread *thread)
+int arch_float_enable(struct k_thread *thread)
 {
         unsigned int key;

@@ -22,7 +22,7 @@ GTEXT(z_thread_entry_wrapper1)
 * @brief Wrapper for z_thread_entry
 *
 * The routine pops parameters for the z_thread_entry from stack frame, prepared
-* by the z_arch_new_thread() routine.
+* by the arch_new_thread() routine.
 *
 * @return N/A
 */

@@ -48,7 +48,7 @@
 GTEXT(z_arc_userspace_enter)
 GTEXT(_arc_do_syscall)
 GTEXT(z_user_thread_entry_wrapper)
-GTEXT(z_arch_user_string_nlen)
+GTEXT(arch_user_string_nlen)
 GTEXT(z_arc_user_string_nlen_fault_start)
 GTEXT(z_arc_user_string_nlen_fault_end)
 GTEXT(z_arc_user_string_nlen_fixup)
@@ -248,9 +248,9 @@ SECTION_FUNC(TEXT, _arc_do_syscall)
         rtie

 /*
-* size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
+* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
 */
-SECTION_FUNC(TEXT, z_arch_user_string_nlen)
+SECTION_FUNC(TEXT, arch_user_string_nlen)
         /* int err; */
         sub_s sp,sp,0x4

@@ -33,7 +33,7 @@
 extern "C" {
 #endif

-static ALWAYS_INLINE void z_arch_kernel_init(void)
+static ALWAYS_INLINE void arch_kernel_init(void)
 {
         z_irq_setup();
         _current_cpu->irq_stack =
@@ -55,7 +55,7 @@ static ALWAYS_INLINE int Z_INTERRUPT_CAUSE(void)
         return irq_num;
 }

-static inline bool z_arch_is_in_isr(void)
+static inline bool arch_is_in_isr(void)
 {
         return z_arc_v2_irq_unit_is_in_isr();
 }
@@ -67,10 +67,10 @@ extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
                                   void *p2, void *p3, u32_t stack, u32_t size);


-extern void z_arch_switch(void *switch_to, void **switched_from);
+extern void arch_switch(void *switch_to, void **switched_from);
 extern void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf);

-extern void z_arch_sched_ipi(void);
+extern void arch_sched_ipi(void);

 #ifdef __cplusplus
 }

@@ -258,7 +258,7 @@
          * The pc and status32 values will still be on the stack. We cannot
          * pop them yet because the callers of _pop_irq_stack_frame must reload
          * status32 differently depending on the execution context they are
-         * running in (z_arch_switch(), firq or exception).
+         * running in (arch_switch(), firq or exception).
          */
         add_s sp, sp, ___isf_t_SIZEOF

@@ -607,7 +607,7 @@ static u32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
                 /* Workaround for #18712:
                  * HardFault may be due to escalation, as a result of
                  * an SVC instruction that could not be executed; this
-                 * can occur if Z_ARCH_EXCEPT() is called by an ISR,
+                 * can occur if ARCH_EXCEPT() is called by an ISR,
                  * which executes at priority equal to the SVC handler
                  * priority. We handle the case of Kernel OOPS and Stack
                  * Fail here.
@@ -623,7 +623,7 @@ static u32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
                 if (((fault_insn & 0xff00) == _SVC_OPCODE) &&
                     ((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) {

-                        PR_EXC("Z_ARCH_EXCEPT with reason %x\n", esf->basic.r0);
+                        PR_EXC("ARCH_EXCEPT with reason %x\n", esf->basic.r0);
                         reason = esf->basic.r0;
                 }
 #undef _SVC_OPCODE
@@ -935,7 +935,7 @@ void z_arm_fault(u32_t msp, u32_t psp, u32_t exc_return)
         z_arch_esf_t esf_copy;

         /* Force unlock interrupts */
-        z_arch_irq_unlock(0);
+        arch_irq_unlock(0);

         /* Retrieve the Exception Stack Frame (ESF) to be supplied
          * as argument to the remainder of the fault handling process.

@@ -259,7 +259,7 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
 }

 #if defined(CONFIG_USERSPACE)
-int z_arch_mem_domain_max_partitions_get(void)
+int arch_mem_domain_max_partitions_get(void)
 {
         int available_regions = arm_core_mpu_get_max_available_dyn_regions();

@@ -274,7 +274,7 @@ int z_arch_mem_domain_max_partitions_get(void)
         return ARM_CORE_MPU_MAX_DOMAIN_PARTITIONS_GET(available_regions);
 }

-void z_arch_mem_domain_thread_add(struct k_thread *thread)
+void arch_mem_domain_thread_add(struct k_thread *thread)
 {
         if (_current != thread) {
                 return;
@@ -287,7 +287,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread)
         z_arm_configure_dynamic_mpu_regions(thread);
 }

-void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
+void arch_mem_domain_destroy(struct k_mem_domain *domain)
 {
         /* This function will reset the access permission configuration
          * of the active partitions of the memory domain.
@@ -317,8 +317,8 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
         }
 }

-void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
-                                        u32_t partition_id)
+void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
+                                      u32_t partition_id)
 {
         /* Request to remove a partition from a memory domain.
          * This resets the access permissions of the partition
@@ -334,22 +334,22 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
                 &domain->partitions[partition_id], &reset_attr);
 }

-void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
-                                     u32_t partition_id)
+void arch_mem_domain_partition_add(struct k_mem_domain *domain,
+                                   u32_t partition_id)
 {
         /* No-op on this architecture */
 }

-void z_arch_mem_domain_thread_remove(struct k_thread *thread)
+void arch_mem_domain_thread_remove(struct k_thread *thread)
 {
         if (_current != thread) {
                 return;
         }

-        z_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
+        arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
 }

-int z_arch_buffer_validate(void *addr, size_t size, int write)
+int arch_buffer_validate(void *addr, size_t size, int write)
 {
         return arm_core_mpu_buffer_validate(addr, size, write);
 }

@@ -16,8 +16,8 @@
 _ASM_FILE_PROLOGUE

 GTEXT(z_arm_cpu_idle_init)
-GTEXT(z_arch_cpu_idle)
-GTEXT(z_arch_cpu_atomic_idle)
+GTEXT(arch_cpu_idle)
+GTEXT(arch_cpu_atomic_idle)

 #if defined(CONFIG_CPU_CORTEX_M)
 #define _SCB_SCR 0xE000ED10
@@ -32,7 +32,7 @@ GTEXT(z_arch_cpu_atomic_idle)
 *
 * @brief Initialization of CPU idle
 *
-* Only called by z_arch_kernel_init(). Sets SEVONPEND bit once for the system's
+* Only called by arch_kernel_init(). Sets SEVONPEND bit once for the system's
 * duration.
 *
 * @return N/A
@@ -50,7 +50,7 @@ SECTION_FUNC(TEXT, z_arm_cpu_idle_init)
 #endif
         bx lr

-SECTION_FUNC(TEXT, z_arch_cpu_idle)
+SECTION_FUNC(TEXT, arch_cpu_idle)
 #ifdef CONFIG_TRACING
         push {r0, lr}
         bl sys_trace_idle
@@ -77,7 +77,7 @@ SECTION_FUNC(TEXT, z_arch_cpu_idle)

         bx lr

-SECTION_FUNC(TEXT, z_arch_cpu_atomic_idle)
+SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
         push {r0, lr}
         bl sys_trace_idle

@@ -86,7 +86,7 @@ void z_do_kernel_oops(const z_arch_esf_t *esf)
         z_arm_fatal_error(reason, esf);
 }

-FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
+FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
 {
         u32_t *ssf_contents = ssf_ptr;
         z_arch_esf_t oops_esf = { 0 };

@@ -36,17 +36,17 @@ extern void z_arm_reserved(void);
 #define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG)
 #define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG)

-void z_arch_irq_enable(unsigned int irq)
+void arch_irq_enable(unsigned int irq)
 {
         NVIC_EnableIRQ((IRQn_Type)irq);
 }

-void z_arch_irq_disable(unsigned int irq)
+void arch_irq_disable(unsigned int irq)
 {
         NVIC_DisableIRQ((IRQn_Type)irq);
 }

-int z_arch_irq_is_enabled(unsigned int irq)
+int arch_irq_is_enabled(unsigned int irq)
 {
         return NVIC->ISER[REG_FROM_IRQ(irq)] & BIT(BIT_FROM_IRQ(irq));
 }
@@ -97,21 +97,21 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
 }

 #elif defined(CONFIG_CPU_CORTEX_R)
-void z_arch_irq_enable(unsigned int irq)
+void arch_irq_enable(unsigned int irq)
 {
         struct device *dev = _sw_isr_table[0].arg;

         irq_enable_next_level(dev, (irq >> 8) - 1);
 }

-void z_arch_irq_disable(unsigned int irq)
+void arch_irq_disable(unsigned int irq)
 {
         struct device *dev = _sw_isr_table[0].arg;

         irq_disable_next_level(dev, (irq >> 8) - 1);
 }

-int z_arch_irq_is_enabled(unsigned int irq)
+int arch_irq_is_enabled(unsigned int irq)
 {
         struct device *dev = _sw_isr_table[0].arg;

@@ -206,7 +206,7 @@ void _arch_isr_direct_pm(void)
 }
 #endif

-void z_arch_isr_direct_header(void)
+void arch_isr_direct_header(void)
 {
         sys_trace_isr_enter();
 }
@@ -268,9 +268,9 @@ int irq_target_state_is_secure(unsigned int irq)
 #endif /* CONFIG_ARM_SECURE_FIRMWARE */

 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
-                               void (*routine)(void *parameter), void *parameter,
-                               u32_t flags)
+int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+                             void (*routine)(void *parameter), void *parameter,
+                             u32_t flags)
 {
         z_isr_install(irq, routine, parameter);
         z_arm_irq_priority_set(irq, priority, flags);

@@ -20,7 +20,7 @@ void z_irq_do_offload(void)
         offload_routine(offload_param);
 }

-void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
+void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
 {
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && defined(CONFIG_ASSERT)
         /* ARMv6-M/ARMv8-M Baseline HardFault if you make a SVC call with

@@ -15,7 +15,7 @@ extern const int _k_neg_eagain;
 /* The 'key' actually represents the BASEPRI register
 * prior to disabling interrupts via the BASEPRI mechanism.
 *
-* z_arch_swap() itself does not do much.
+* arch_swap() itself does not do much.
 *
 * It simply stores the intlock key (the BASEPRI value) parameter into
 * current->basepri, and then triggers a PendSV exception, which does
@@ -25,7 +25,7 @@ extern const int _k_neg_eagain;
 * z_arm_pendsv all come from handling an interrupt, which means we know the
 * interrupts were not locked: in that case the BASEPRI value is 0.
 *
-* Given that z_arch_swap() is called to effect a cooperative context switch,
+* Given that arch_swap() is called to effect a cooperative context switch,
 * only the caller-saved integer registers need to be saved in the thread of the
 * outgoing thread. This is all performed by the hardware, which stores it in
 * its exception stack frame, created when handling the z_arm_pendsv exception.
@@ -33,7 +33,7 @@ extern const int _k_neg_eagain;
 * On ARMv6-M, the intlock key is represented by the PRIMASK register,
 * as BASEPRI is not available.
 */
-int z_arch_swap(unsigned int key)
+int arch_swap(unsigned int key)
 {
 #ifdef CONFIG_EXECUTION_BENCHMARKING
         read_timer_start_of_swap();

@@ -125,7 +125,7 @@ out_fp_endif:
         isb /* Make the effect of disabling interrupts be realized immediately */
 #elif defined(CONFIG_ARMV7_R)
         /*
-         * Interrupts are still disabled from z_arch_swap so empty clause
+         * Interrupts are still disabled from arch_swap so empty clause
          * here to avoid the preprocessor error below
          */
 #else

@@ -32,10 +32,10 @@ extern u8_t *z_priv_stack_find(void *obj);
 * addresses, we have to unset it manually before storing it in the 'pc' field
 * of the ESF.
 */
-void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-                       size_t stackSize, k_thread_entry_t pEntry,
-                       void *parameter1, void *parameter2, void *parameter3,
-                       int priority, unsigned int options)
+void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+                     size_t stackSize, k_thread_entry_t pEntry,
+                     void *parameter1, void *parameter2, void *parameter3,
+                     int priority, unsigned int options)
 {
         char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
         char *stackEnd;
@@ -112,7 +112,7 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,

 #if defined(CONFIG_USERSPACE)
         if ((options & K_USER) != 0) {
-                pInitCtx->basic.pc = (u32_t)z_arch_user_mode_enter;
+                pInitCtx->basic.pc = (u32_t)arch_user_mode_enter;
         } else {
                 pInitCtx->basic.pc = (u32_t)z_thread_entry;
         }
@@ -157,8 +157,8 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,

 #ifdef CONFIG_USERSPACE

-FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
-                                          void *p1, void *p2, void *p3)
+FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
+                                        void *p1, void *p2, void *p3)
 {

         /* Set up privileged stack before entering user mode */
@@ -328,13 +328,13 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr, const u32_t psp)
 #endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */

 #if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
-int z_arch_float_disable(struct k_thread *thread)
+int arch_float_disable(struct k_thread *thread)
 {
         if (thread != _current) {
                 return -EINVAL;
         }

-        if (z_arch_is_in_isr()) {
+        if (arch_is_in_isr()) {
                 return -EINVAL;
         }

@@ -345,26 +345,26 @@ int z_arch_float_disable(struct k_thread *thread)
          * fault to take an outdated thread user_options flag into
          * account.
          */
-        int key = z_arch_irq_lock();
+        int key = arch_irq_lock();

         thread->base.user_options &= ~K_FP_REGS;

         __set_CONTROL(__get_CONTROL() & (~CONTROL_FPCA_Msk));

         /* No need to add an ISB barrier after setting the CONTROL
-         * register; z_arch_irq_unlock() already adds one.
+         * register; arch_irq_unlock() already adds one.
          */

-        z_arch_irq_unlock(key);
+        arch_irq_unlock(key);

         return 0;
 }
 #endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */

-void z_arch_switch_to_main_thread(struct k_thread *main_thread,
-                                  k_thread_stack_t *main_stack,
-                                  size_t main_stack_size,
-                                  k_thread_entry_t _main)
+void arch_switch_to_main_thread(struct k_thread *main_thread,
+                                k_thread_stack_t *main_stack,
+                                size_t main_stack_size,
+                                k_thread_entry_t _main)
 {
 #if defined(CONFIG_FLOAT)
         /* Initialize the Floating Point Status and Control Register when in

@@ -16,7 +16,7 @@ _ASM_FILE_PROLOGUE

 GTEXT(z_arm_userspace_enter)
 GTEXT(z_arm_do_syscall)
-GTEXT(z_arch_user_string_nlen)
+GTEXT(arch_user_string_nlen)
 GTEXT(z_arm_user_string_nlen_fault_start)
 GTEXT(z_arm_user_string_nlen_fault_end)
 GTEXT(z_arm_user_string_nlen_fixup)
@@ -497,9 +497,9 @@ dispatch_syscall:


 /*
-* size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
+* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
 */
-SECTION_FUNC(TEXT, z_arch_user_string_nlen)
+SECTION_FUNC(TEXT, arch_user_string_nlen)
         push {r0, r1, r2, r4, r5, lr}

         /* sp+4 is error value, init to -1 */

@@ -43,7 +43,7 @@ extern volatile irq_offload_routine_t offload_routine;
 * The current executing vector is found in the IPSR register. All
 * IRQs and system exceptions are considered as interrupt context.
 */
-static ALWAYS_INLINE bool z_arch_is_in_isr(void)
+static ALWAYS_INLINE bool arch_is_in_isr(void)
 {
         return (__get_IPSR()) ? (true) : (false);
 }
@@ -68,8 +68,7 @@ static ALWAYS_INLINE bool z_arch_is_in_isr(void)
 * @return true if execution state was in handler mode, before
 * the current exception occurred, otherwise false.
 */
-static ALWAYS_INLINE bool z_arch_is_in_nested_exception(
-        const z_arch_esf_t *esf)
+static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
 {
         return (esf->basic.xpsr & IPSR_ISR_Msk) ? (true) : (false);
 }

@@ -33,7 +33,7 @@ extern volatile irq_offload_routine_t offload_routine;
 #endif

 /* Check the CPSR mode bits to see if we are in IRQ or FIQ mode */
-static ALWAYS_INLINE bool z_arch_is_in_isr(void)
+static ALWAYS_INLINE bool arch_is_in_isr(void)
 {
         unsigned int status;

@@ -34,7 +34,7 @@ extern void z_arm_configure_static_mpu_regions(void);
 extern void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread);
 #endif /* CONFIG_ARM_MPU */

-static ALWAYS_INLINE void z_arch_kernel_init(void)
+static ALWAYS_INLINE void arch_kernel_init(void)
 {
         z_arm_interrupt_stack_setup();
         z_arm_exc_setup();
@@ -44,7 +44,7 @@ static ALWAYS_INLINE void z_arch_kernel_init(void)
 }

 static ALWAYS_INLINE void
-z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
+arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
         thread->arch.swap_return_value = value;
 }

@@ -28,11 +28,11 @@ void z_isr_install(unsigned int irq, void (*routine)(void *), void *param)
 /* Some architectures don't/can't interpret flags or priority and have
 * no more processing to do than this. Provide a generic fallback.
 */
-int __weak z_arch_irq_connect_dynamic(unsigned int irq,
-                                      unsigned int priority,
-                                      void (*routine)(void *),
-                                      void *parameter,
-                                      u32_t flags)
+int __weak arch_irq_connect_dynamic(unsigned int irq,
+                                    unsigned int priority,
+                                    void (*routine)(void *),
+                                    void *parameter,
+                                    u32_t flags)
 {
         ARG_UNUSED(flags);
         ARG_UNUSED(priority);

@@ -6,18 +6,18 @@
 #include <kernel.h>
 #include <kernel_internal.h>

-u64_t z_arch_timing_swap_start;
-u64_t z_arch_timing_swap_end;
-u64_t z_arch_timing_irq_start;
-u64_t z_arch_timing_irq_end;
-u64_t z_arch_timing_tick_start;
-u64_t z_arch_timing_tick_end;
-u64_t z_arch_timing_enter_user_mode_end;
+u64_t arch_timing_swap_start;
+u64_t arch_timing_swap_end;
+u64_t arch_timing_irq_start;
+u64_t arch_timing_irq_end;
+u64_t arch_timing_tick_start;
+u64_t arch_timing_tick_end;
+u64_t arch_timing_enter_user_mode_end;

 /* location of the time stamps*/
-u32_t z_arch_timing_value_swap_end;
-u64_t z_arch_timing_value_swap_common;
-u64_t z_arch_timing_value_swap_temp;
+u32_t arch_timing_value_swap_end;
+u64_t arch_timing_value_swap_common;
+u64_t arch_timing_value_swap_temp;

 #ifdef CONFIG_NRF_RTC_TIMER
 #include <nrfx.h>
@@ -79,18 +79,19 @@ u64_t z_arch_timing_value_swap_temp;

 void read_timer_start_of_swap(void)
 {
-        if (z_arch_timing_value_swap_end == 1U) {
+        if (arch_timing_value_swap_end == 1U) {
                 TIMING_INFO_PRE_READ();
-                z_arch_timing_swap_start = (u32_t) TIMING_INFO_OS_GET_TIME();
+                arch_timing_swap_start = (u32_t) TIMING_INFO_OS_GET_TIME();
         }
 }

 void read_timer_end_of_swap(void)
 {
-        if (z_arch_timing_value_swap_end == 1U) {
+        if (arch_timing_value_swap_end == 1U) {
                 TIMING_INFO_PRE_READ();
-                z_arch_timing_value_swap_end = 2U;
-                z_arch_timing_value_swap_common = (u64_t)TIMING_INFO_OS_GET_TIME();
+                arch_timing_value_swap_end = 2U;
+                arch_timing_value_swap_common =
+                        (u64_t)TIMING_INFO_OS_GET_TIME();
         }
 }

@@ -100,29 +101,29 @@ void read_timer_end_of_swap(void)
 void read_timer_start_of_isr(void)
 {
         TIMING_INFO_PRE_READ();
-        z_arch_timing_irq_start = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+        arch_timing_irq_start = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
 }

 void read_timer_end_of_isr(void)
 {
         TIMING_INFO_PRE_READ();
-        z_arch_timing_irq_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+        arch_timing_irq_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
 }

 void read_timer_start_of_tick_handler(void)
 {
         TIMING_INFO_PRE_READ();
-        z_arch_timing_tick_start = (u32_t)TIMING_INFO_GET_TIMER_VALUE();
+        arch_timing_tick_start = (u32_t)TIMING_INFO_GET_TIMER_VALUE();
 }

 void read_timer_end_of_tick_handler(void)
 {
         TIMING_INFO_PRE_READ();
-        z_arch_timing_tick_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+        arch_timing_tick_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
 }

 void read_timer_end_of_userspace_enter(void)
 {
         TIMING_INFO_PRE_READ();
-        z_arch_timing_enter_user_mode_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+        arch_timing_enter_user_mode_end = (u32_t)TIMING_INFO_GET_TIMER_VALUE();
 }

@@ -7,7 +7,7 @@
 #include <kernel.h>
 #include <kernel_structs.h>

-void z_arch_cpu_idle(void)
+void arch_cpu_idle(void)
 {
         /* Do nothing but unconditionally unlock interrupts and return to the
          * caller. This CPU does not have any kind of power saving instruction.
@@ -15,7 +15,7 @@ void z_arch_cpu_idle(void)
         irq_unlock(NIOS2_STATUS_PIE_MSK);
 }

-void z_arch_cpu_atomic_idle(unsigned int key)
+void arch_cpu_atomic_idle(unsigned int key)
 {
         /* Do nothing but restore IRQ state. This CPU does not have any
          * kind of power saving instruction.

@@ -13,7 +13,7 @@ GTEXT(_exception)

 /* import */
 GTEXT(_Fault)
-GTEXT(z_arch_swap)
+GTEXT(arch_swap)
 #ifdef CONFIG_IRQ_OFFLOAD
 GTEXT(z_irq_do_offload)
 GTEXT(_offload_routine)
@@ -127,7 +127,7 @@ on_irq_stack:
         /*
          * A context reschedule is required: keep the volatile registers of
          * the interrupted thread on the context's stack. Utilize
-         * the existing z_arch_swap() primitive to save the remaining
+         * the existing arch_swap() primitive to save the remaining
          * thread's registers (including floating point) and perform
          * a switch to the new thread.
          */
@@ -144,7 +144,7 @@ on_irq_stack:
          */
         mov r4, et

-        call z_arch_swap
+        call arch_swap
         jmpi _exception_exit
 #else
         jmpi no_reschedule

@@ -132,7 +132,7 @@ FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
 }

 #ifdef ALT_CPU_HAS_DEBUG_STUB
-FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
+FUNC_NORETURN void arch_system_halt(unsigned int reason)
 {
         ARG_UNUSED(reason);

@@ -31,7 +31,7 @@ FUNC_NORETURN void z_irq_spurious(void *unused)
 }


-void z_arch_irq_enable(unsigned int irq)
+void arch_irq_enable(unsigned int irq)
 {
         u32_t ienable;
         unsigned int key;
@@ -47,7 +47,7 @@ void z_arch_irq_enable(unsigned int irq)



-void z_arch_irq_disable(unsigned int irq)
+void arch_irq_disable(unsigned int irq)
 {
         u32_t ienable;
         unsigned int key;
@@ -109,9 +109,9 @@ void _enter_irq(u32_t ipending)
 }

 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
-                               void (*routine)(void *parameter), void *parameter,
-                               u32_t flags)
+int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+                             void (*routine)(void *parameter), void *parameter,
+                             u32_t flags)
 {
         ARG_UNUSED(flags);
         ARG_UNUSED(priority);

@@ -29,7 +29,7 @@ void z_irq_do_offload(void)
         tmp((void *)offload_param);
 }

-void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
+void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
 {
         unsigned int key;

@@ -9,18 +9,18 @@
 #include <offsets_short.h>

 /* exports */
-GTEXT(z_arch_swap)
+GTEXT(arch_swap)
 GTEXT(z_thread_entry_wrapper)

 /* imports */
 GTEXT(sys_trace_thread_switched_in)
 GTEXT(_k_neg_eagain)

-/* unsigned int z_arch_swap(unsigned int key)
+/* unsigned int arch_swap(unsigned int key)
 *
 * Always called with interrupts locked
 */
-SECTION_FUNC(exception.other, z_arch_swap)
+SECTION_FUNC(exception.other, arch_swap)

 #ifdef CONFIG_EXECUTION_BENCHMARKING
         /* Get a reference to _kernel in r10 */
@@ -57,7 +57,7 @@ SECTION_FUNC(exception.other, z_arch_swap)
         ldw r11, _kernel_offset_to_current(r10)

         /* Store all the callee saved registers. We either got here via
-         * an exception or from a cooperative invocation of z_arch_swap() from C
+         * an exception or from a cooperative invocation of arch_swap() from C
          * domain, so all the caller-saved registers have already been
          * saved by the exception asm or the calling C code already.
          */
@@ -115,14 +115,14 @@ SECTION_FUNC(exception.other, z_arch_swap)
         ldw sp, _thread_offset_to_sp(r2)

         /* We need to irq_unlock(current->coopReg.key);
-         * key was supplied as argument to z_arch_swap(). Fetch it.
+         * key was supplied as argument to arch_swap(). Fetch it.
          */
         ldw r3, _thread_offset_to_key(r2)

         /*
          * Load return value into r2 (return value register). -EAGAIN unless
-         * someone previously called z_arch_thread_return_value_set(). Do this before
-         * we potentially unlock interrupts.
+         * someone previously called arch_thread_return_value_set(). Do this
+         * before we potentially unlock interrupts.
          */
         ldw r2, _thread_offset_to_retval(r2)

@@ -28,10 +28,10 @@ struct init_stack_frame {
 };


-void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-                       size_t stack_size, k_thread_entry_t thread_func,
-                       void *arg1, void *arg2, void *arg3,
-                       int priority, unsigned int options)
+void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+                     size_t stack_size, k_thread_entry_t thread_func,
+                     void *arg1, void *arg2, void *arg3,
+                     int priority, unsigned int options)
 {
         char *stack_memory = Z_THREAD_STACK_BUFFER(stack);
         Z_ASSERT_VALID_PRIO(priority, thread_func);

@@ -28,14 +28,14 @@ extern "C" {

 #ifndef _ASMLANGUAGE

-static ALWAYS_INLINE void z_arch_kernel_init(void)
+static ALWAYS_INLINE void arch_kernel_init(void)
 {
         _kernel.irq_stack =
                 Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
 }

 static ALWAYS_INLINE void
-z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
+arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
         thread->callee_saved.retval = value;
 }
@@ -43,7 +43,7 @@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
                                        const z_arch_esf_t *esf);

-static inline bool z_arch_is_in_isr(void)
+static inline bool arch_is_in_isr(void)
 {
         return _kernel.nested != 0U;
 }

@@ -10,11 +10,11 @@
 * This module provides:
 *
 * An implementation of the architecture-specific
-* z_arch_cpu_idle() primitive required by the kernel idle loop component.
+* arch_cpu_idle() primitive required by the kernel idle loop component.
 * It can be called within an implementation of _sys_power_save_idle(),
 * which is provided for the kernel by the platform.
 *
-* An implementation of z_arch_cpu_atomic_idle(), which
+* An implementation of arch_cpu_atomic_idle(), which
 * atomically re-enables interrupts and enters low power mode.
 *
 * A weak stub for sys_arch_reboot(), which does nothing
@@ -24,14 +24,14 @@
 #include <arch/posix/posix_soc_if.h>
 #include <debug/tracing.h>

-void z_arch_cpu_idle(void)
+void arch_cpu_idle(void)
 {
         sys_trace_idle();
         posix_irq_full_unlock();
         posix_halt_cpu();
 }

-void z_arch_cpu_atomic_idle(unsigned int key)
+void arch_cpu_atomic_idle(unsigned int key)
 {
         sys_trace_idle();
         posix_atomic_halt_cpu(key);

@@ -13,7 +13,7 @@
 #include <logging/log_ctrl.h>
 #include <arch/posix/posix_soc_if.h>

-FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
+FUNC_NORETURN void arch_system_halt(unsigned int reason)
 {
         ARG_UNUSED(reason);

@@ -10,23 +10,23 @@
 #ifdef CONFIG_IRQ_OFFLOAD
 #include "irq_offload.h"

-void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
+void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
 {
         posix_irq_offload(routine, parameter);
 }
 #endif

-void z_arch_irq_enable(unsigned int irq)
+void arch_irq_enable(unsigned int irq)
 {
         posix_irq_enable(irq);
 }

-void z_arch_irq_disable(unsigned int irq)
+void arch_irq_disable(unsigned int irq)
 {
         posix_irq_disable(irq);
 }

-int z_arch_irq_is_enabled(unsigned int irq)
+int arch_irq_is_enabled(unsigned int irq)
 {
         return posix_irq_is_enabled(irq);
 }
@@ -45,9 +45,9 @@ int z_arch_irq_is_enabled(unsigned int irq)
 *
 * @return The vector assigned to this interrupt
 */
-int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
-                               void (*routine)(void *parameter),
-                               void *parameter, u32_t flags)
+int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+                             void (*routine)(void *parameter),
+                             void *parameter, u32_t flags)
 {
         posix_isr_declare(irq, (int)flags, routine, parameter);
         posix_irq_priority_set(irq, priority, flags);

@@ -187,7 +187,7 @@ static void posix_preexit_cleanup(void)
 /**
 * Let the ready thread run and block this thread until it is allowed again
 *
-* called from z_arch_swap() which does the picking from the kernel structures
+* called from arch_swap() which does the picking from the kernel structures
 */
 void posix_swap(int next_allowed_thread_nbr, int this_th_nbr)
 {
@@ -207,7 +207,7 @@ void posix_swap(int next_allowed_thread_nbr, int this_th_nbr)
 /**
 * Let the ready thread (main) run, and exit this thread (init)
 *
-* Called from z_arch_switch_to_main_thread() which does the picking from the
+* Called from arch_switch_to_main_thread() which does the picking from the
 * kernel structures
 *
 * Note that we could have just done a swap(), but that would have left the
@@ -256,7 +256,7 @@ static void posix_cleanup_handler(void *arg)

 /**
 * Helper function to start a Zephyr thread as a POSIX thread:
-* It will block the thread until a z_arch_swap() is called for it
+* It will block the thread until a arch_swap() is called for it
 *
 * Spawned from posix_new_thread() below
 */
@@ -361,9 +361,9 @@ static int ttable_get_empty_slot(void)
 }

 /**
-* Called from z_arch_new_thread(),
+* Called from arch_new_thread(),
 * Create a new POSIX thread for the new Zephyr thread.
-* z_arch_new_thread() picks from the kernel structures what it is that we need
+* arch_new_thread() picks from the kernel structures what it is that we need
 * to call with what parameters
 */
 void posix_new_thread(posix_thread_status_t *ptr)

@@ -9,7 +9,7 @@
 * @file
 * @brief Kernel swapper code for POSIX
 *
-* This module implements the z_arch_swap() routine for the POSIX architecture.
+* This module implements the arch_swap() routine for the POSIX architecture.
 *
 */

@@ -19,7 +19,7 @@
 #include "irq.h"
 #include "kswap.h"

-int z_arch_swap(unsigned int key)
+int arch_swap(unsigned int key)
 {
         /*
          * struct k_thread * _kernel.current is the currently runnig thread
@@ -34,7 +34,7 @@ int z_arch_swap(unsigned int key)
         _kernel.current->callee_saved.retval = -EAGAIN;

         /* retval may be modified with a call to
-         * z_arch_thread_return_value_set()
+         * arch_thread_return_value_set()
          */

         posix_thread_status_t *ready_thread_ptr =
@@ -67,15 +67,15 @@ int z_arch_swap(unsigned int key)


 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
-/* This is just a version of z_arch_swap() in which we do not save anything
+/* This is just a version of arch_swap() in which we do not save anything
 * about the current thread.
 *
 * Note that we will never come back to this thread: posix_main_thread_start()
 * does never return.
 */
-void z_arch_switch_to_main_thread(struct k_thread *main_thread,
-                                  k_thread_stack_t *main_stack,
-                                  size_t main_stack_size, k_thread_entry_t _main)
+void arch_switch_to_main_thread(struct k_thread *main_thread,
+                                k_thread_stack_t *main_stack,
+                                size_t main_stack_size, k_thread_entry_t _main)
 {
         posix_thread_status_t *ready_thread_ptr =
                 (posix_thread_status_t *)

@@ -24,10 +24,10 @@
 /* Note that in this arch we cheat quite a bit: we use as stack a normal
 * pthreads stack and therefore we ignore the stack size
 */
-void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-                       size_t stack_size, k_thread_entry_t thread_func,
-                       void *arg1, void *arg2, void *arg3,
-                       int priority, unsigned int options)
+void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+                     size_t stack_size, k_thread_entry_t thread_func,
+                     void *arg1, void *arg2, void *arg3,
+                     int priority, unsigned int options)
 {

         char *stack_memory = Z_THREAD_STACK_BUFFER(stack);

@@ -19,18 +19,18 @@ extern "C" {
 #endif

 #if defined(CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN)
-void z_arch_switch_to_main_thread(struct k_thread *main_thread,
-                                  k_thread_stack_t *main_stack,
-                                  size_t main_stack_size, k_thread_entry_t _main);
+void arch_switch_to_main_thread(struct k_thread *main_thread,
+                                k_thread_stack_t *main_stack,
+                                size_t main_stack_size, k_thread_entry_t _main);
 #endif

-static inline void z_arch_kernel_init(void)
+static inline void arch_kernel_init(void)
 {
         /* Nothing to be done */
 }

 static ALWAYS_INLINE void
-z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
+arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
         thread->callee_saved.retval = value;
 }
@@ -39,7 +39,7 @@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 }
 #endif

-static inline bool z_arch_is_in_isr(void)
+static inline bool arch_is_in_isr(void)
 {
         return _kernel.nested != 0U;
 }

@@ -9,20 +9,20 @@
 /*
 * In RISC-V there is no conventional way to handle CPU power save.
 * Each RISC-V SOC handles it in its own way.
-* Hence, by default, z_arch_cpu_idle and z_arch_cpu_atomic_idle functions just
+* Hence, by default, arch_cpu_idle and arch_cpu_atomic_idle functions just
 * unlock interrupts and return to the caller, without issuing any CPU power
 * saving instruction.
 *
-* Nonetheless, define the default z_arch_cpu_idle and z_arch_cpu_atomic_idle
+* Nonetheless, define the default arch_cpu_idle and arch_cpu_atomic_idle
 * functions as weak functions, so that they can be replaced at the SOC-level.
 */

-void __weak z_arch_cpu_idle(void)
+void __weak arch_cpu_idle(void)
 {
         irq_unlock(SOC_MSTATUS_IEN);
 }

-void __weak z_arch_cpu_atomic_idle(unsigned int key)
+void __weak arch_cpu_atomic_idle(unsigned int key)
 {
         irq_unlock(key);
 }

@@ -30,9 +30,9 @@ FUNC_NORETURN void z_irq_spurious(void *unused)
 }

 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
-                               void (*routine)(void *parameter), void *parameter,
-                               u32_t flags)
+int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+                             void (*routine)(void *parameter), void *parameter,
+                             u32_t flags)
 {
         ARG_UNUSED(flags);

@@ -31,7 +31,7 @@ void z_irq_do_offload(void)
         tmp((void *)offload_param);
 }

-void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
+void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
 {
         unsigned int key;

@@ -10,18 +10,18 @@
 #include <arch/cpu.h>

 /* exports */
-GTEXT(z_arch_swap)
+GTEXT(arch_swap)
 GTEXT(z_thread_entry_wrapper)

 /* Use ABI name of registers for the sake of simplicity */

 /*
-* unsigned int z_arch_swap(unsigned int key)
+* unsigned int arch_swap(unsigned int key)
 *
 * Always called with interrupts locked
 * key is stored in a0 register
 */
-SECTION_FUNC(exception.other, z_arch_swap)
+SECTION_FUNC(exception.other, arch_swap)

         /* Make a system call to perform context switch */
 #ifdef CONFIG_EXECUTION_BENCHMARKING

@@ -77,16 +77,16 @@ SECTION_FUNC(exception.other, z_arch_swap)
          * Restored register a0 contains IRQ lock state of thread.
          *
          * Prior to unlocking irq, load return value of
-         * z_arch_swap to temp register t2 (from
+         * arch_swap to temp register t2 (from
          * _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN,
-         * unless someone has previously called z_arch_thread_return_value_set(..).
+         * unless someone has previously called arch_thread_return_value_set(..).
          */
         la t0, _kernel

         /* Get pointer to _kernel.current */
         RV_OP_LOADREG t1, _kernel_offset_to_current(t0)

-        /* Load return value of z_arch_swap function in temp register t2 */
+        /* Load return value of arch_swap function in temp register t2 */
         lw t2, _thread_offset_to_swap_return_value(t1)

         /*
@@ -109,7 +109,7 @@ SECTION_FUNC(exception.other, z_arch_swap)
 SECTION_FUNC(TEXT, z_thread_entry_wrapper)
         /*
          * z_thread_entry_wrapper is called for every new thread upon the return
-         * of z_arch_swap or ISR. Its address, as well as its input function
+         * of arch_swap or ISR. Its address, as well as its input function
          * arguments thread_entry_t, void *, void *, void * are restored from
          * the thread stack (initialized via function _thread).
          * In this case, thread_entry_t, * void *, void * and void * are stored

@@ -12,10 +12,10 @@ void z_thread_entry_wrapper(k_thread_entry_t thread,
                             void *arg2,
                             void *arg3);

-void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-                       size_t stack_size, k_thread_entry_t thread_func,
-                       void *arg1, void *arg2, void *arg3,
-                       int priority, unsigned int options)
+void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+                     size_t stack_size, k_thread_entry_t thread_func,
+                     void *arg1, void *arg2, void *arg3,
+                     int priority, unsigned int options)
 {
         char *stack_memory = Z_THREAD_STACK_BUFFER(stack);
         Z_ASSERT_VALID_PRIO(priority, thread_func);

@@ -22,14 +22,14 @@ extern "C" {
 #endif

 #ifndef _ASMLANGUAGE
-static ALWAYS_INLINE void z_arch_kernel_init(void)
+static ALWAYS_INLINE void arch_kernel_init(void)
 {
         _kernel.irq_stack =
                 Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
 }

 static ALWAYS_INLINE void
-z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
+arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
         thread->arch.swap_return_value = value;
 }
@@ -37,7 +37,7 @@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
                                        const z_arch_esf_t *esf);

-static inline bool z_arch_is_in_isr(void)
+static inline bool arch_is_in_isr(void)
 {
         return _kernel.nested != 0U;
 }


@@ -162,7 +162,7 @@ config X86_VERY_EARLY_CONSOLE
Non-emulated X86 devices often require special hardware to attach
a debugger, which may not be easily available. This option adds a
very minimal serial driver which gets initialized at the very
beginning of z_cstart(), via z_arch_kernel_init(). This driver enables
beginning of z_cstart(), via arch_kernel_init(). This driver enables
printk to emit messages to the 16550 UART port 0 instance in device
tree. This mini-driver assumes I/O to the UART is done via ports.

@@ -7,7 +7,7 @@
#include <debug/tracing.h>
#include <arch/cpu.h>

void z_arch_cpu_idle(void)
void arch_cpu_idle(void)
{
sys_trace_idle();
__asm__ volatile (

@@ -15,7 +15,7 @@ void z_arch_cpu_idle(void)
"hlt\n\t");
}

void z_arch_cpu_atomic_idle(unsigned int key)
void arch_cpu_atomic_idle(unsigned int key)
{
sys_trace_idle();

@@ -30,7 +30,7 @@ void z_arch_cpu_atomic_idle(unsigned int key)
* external, maskable interrupts after the next instruction is
* executed."
*
* Thus the IA-32 implementation of z_arch_cpu_atomic_idle() will
* Thus the IA-32 implementation of arch_cpu_atomic_idle() will
* atomically re-enable interrupts and enter a low-power mode.
*/
"hlt\n\t");

@@ -40,9 +40,9 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, u16_t cs)
{
uintptr_t start, end;

if (z_arch_is_in_isr()) {
if (arch_is_in_isr()) {
/* We were servicing an interrupt */
start = (uintptr_t)Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
start = (uintptr_t)ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
end = start + CONFIG_ISR_STACK_SIZE;
} else if ((cs & 0x3U) != 0U ||
(_current->base.user_options & K_USER) == 0) {

@@ -23,7 +23,7 @@ __weak void z_debug_fatal_hook(const z_arch_esf_t *esf) { ARG_UNUSED(esf); }

#ifdef CONFIG_BOARD_QEMU_X86
FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
FUNC_NORETURN void arch_system_halt(unsigned int reason)
{
ARG_UNUSED(reason);

@@ -46,7 +46,7 @@ void z_x86_spurious_irq(const z_arch_esf_t *esf)
z_x86_fatal_error(K_ERR_SPURIOUS_IRQ, esf);
}

void z_arch_syscall_oops(void *ssf_ptr)
void arch_syscall_oops(void *ssf_ptr)
{
struct _x86_syscall_stack_frame *ssf =
(struct _x86_syscall_stack_frame *)ssf_ptr;

@@ -229,7 +229,7 @@ static FUNC_NORETURN __used void df_handler_top(void)
_df_esf.eflags = _main_tss.eflags;

/* Restore the main IA task to a runnable state */
_main_tss.esp = (u32_t)(Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack) +
_main_tss.esp = (u32_t)(ARCH_THREAD_STACK_BUFFER(_interrupt_stack) +
CONFIG_ISR_STACK_SIZE);
_main_tss.cs = CODE_SEG;
_main_tss.ds = DATA_SEG;

@@ -29,7 +29,7 @@

/* externs */

GTEXT(z_arch_swap)
GTEXT(arch_swap)

#ifdef CONFIG_SYS_POWER_MANAGEMENT
GTEXT(z_sys_power_save_idle_exit)

@@ -83,8 +83,8 @@ SECTION_FUNC(TEXT, _interrupt_enter)
pushl %eax
pushl %edx
rdtsc
mov %eax, z_arch_timing_irq_start
mov %edx, z_arch_timing_irq_start+4
mov %eax, arch_timing_irq_start
mov %edx, arch_timing_irq_start+4
pop %edx
pop %eax
#endif

@@ -125,7 +125,7 @@ SECTION_FUNC(TEXT, _interrupt_enter)

/* Push EDI as we will use it for scratch space.
* Rest of the callee-saved regs get saved by invocation of C
* functions (isr handler, z_arch_swap(), etc)
* functions (isr handler, arch_swap(), etc)
*/
pushl %edi

@@ -186,8 +186,8 @@ alreadyOnIntStack:
pushl %eax
pushl %edx
rdtsc
mov %eax,z_arch_timing_irq_end
mov %edx,z_arch_timing_irq_end+4
mov %eax,arch_timing_irq_end
mov %edx,arch_timing_irq_end+4
pop %edx
pop %eax
#endif

@@ -227,7 +227,7 @@ alreadyOnIntStack:

/*
* Set X86_THREAD_FLAG_INT bit in k_thread to allow the upcoming call
* to z_arch_swap() to determine whether non-floating registers need to be
* to arch_swap() to determine whether non-floating registers need to be
* preserved using the lazy save/restore algorithm, or to indicate to
* debug tools that a preemptive context switch has occurred.
*/

@@ -239,7 +239,7 @@ alreadyOnIntStack:
/*
* A context reschedule is required: keep the volatile registers of
* the interrupted thread on the context's stack. Utilize
* the existing z_arch_swap() primitive to save the remaining
* the existing arch_swap() primitive to save the remaining
* thread's registers (including floating point) and perform
* a switch to the new thread.
*/

@@ -250,12 +250,12 @@ alreadyOnIntStack:
call z_check_stack_sentinel
#endif
pushfl /* push KERNEL_LOCK_KEY argument */
call z_arch_swap
call arch_swap
addl $4, %esp /* pop KERNEL_LOCK_KEY argument */

/*
* The interrupted thread has now been scheduled,
* as the result of a _later_ invocation of z_arch_swap().
* as the result of a _later_ invocation of arch_swap().
*
* Now need to restore the interrupted thread's environment before
* returning control to it at the point where it was interrupted ...

@@ -263,7 +263,7 @@ alreadyOnIntStack:

#if defined(CONFIG_LAZY_FP_SHARING)
/*
* z_arch_swap() has restored the floating point registers, if needed.
* arch_swap() has restored the floating point registers, if needed.
* Clear X86_THREAD_FLAG_INT in the interrupted thread's state
* since it has served its purpose.
*/

@@ -48,7 +48,7 @@ void *__attribute__((section(".spurNoErrIsr")))
*/

#ifdef CONFIG_SYS_POWER_MANAGEMENT
void z_arch_irq_direct_pm(void)
void arch_irq_direct_pm(void)
{
if (_kernel.idle) {
s32_t idle_val = _kernel.idle;

@@ -59,17 +59,17 @@ void z_arch_irq_direct_pm(void)
}
#endif

void z_arch_isr_direct_header(void)
void arch_isr_direct_header(void)
{
sys_trace_isr_enter();

/* We're not going to unlock IRQs, but we still need to increment this
* so that z_arch_is_in_isr() works
* so that arch_is_in_isr() works
*/
++_kernel.nested;
}

void z_arch_isr_direct_footer(int swap)
void arch_isr_direct_footer(int swap)
{
z_irq_controller_eoi();
sys_trace_isr_exit();

@@ -250,7 +250,7 @@ static void idt_vector_install(int vector, void *irq_handler)
irq_unlock(key);
}

int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter), void *parameter,
u32_t flags)
{

@@ -25,7 +25,7 @@ void z_irq_do_offload(void)
offload_routine(offload_param);
}

void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
{
unsigned int key;

@@ -8,7 +8,7 @@
* @file
* @brief Kernel swapper code for IA-32
*
* This module implements the z_arch_swap() routine for the IA-32 architecture.
* This module implements the arch_swap() routine for the IA-32 architecture.
*/

#include <arch/x86/ia32/asm.h>

@@ -19,7 +19,7 @@

/* exports (internal APIs) */

GTEXT(z_arch_swap)
GTEXT(arch_swap)
GTEXT(z_x86_thread_entry_wrapper)
GTEXT(_x86_user_thread_entry_wrapper)

@@ -30,7 +30,7 @@
GDATA(_k_neg_eagain)

/*
* Given that z_arch_swap() is called to effect a cooperative context switch,
* Given that arch_swap() is called to effect a cooperative context switch,
* only the non-volatile integer registers need to be saved in the TCS of the
* outgoing thread. The restoration of the integer registers of the incoming
* thread depends on whether that thread was preemptively context switched out.

@@ -62,7 +62,7 @@
*
* C function prototype:
*
* unsigned int z_arch_swap (unsigned int eflags);
* unsigned int arch_swap (unsigned int eflags);
*/

.macro read_tsc var_name

@@ -74,7 +74,7 @@
pop %edx
pop %eax
.endm
SECTION_FUNC(TEXT, z_arch_swap)
SECTION_FUNC(TEXT, arch_swap)
#ifdef CONFIG_EXECUTION_BENCHMARKING
/* Save the eax and edx registers before reading the time stamp
* once done pop the values.

@@ -82,8 +82,8 @@ SECTION_FUNC(TEXT, z_arch_swap)
push %eax
push %edx
rdtsc
mov %eax,z_arch_timing_swap_start
mov %edx,z_arch_timing_swap_start+4
mov %eax,arch_timing_swap_start
mov %edx,arch_timing_swap_start+4
pop %edx
pop %eax
#endif

@@ -106,7 +106,7 @@ SECTION_FUNC(TEXT, z_arch_swap)
* Carve space for the return value. Setting it to a default of
* -EAGAIN eliminates the need for the timeout code to set it.
* If another value is ever needed, it can be modified with
* z_arch_thread_return_value_set().
* arch_thread_return_value_set().
*/

pushl _k_neg_eagain

@@ -331,7 +331,7 @@ CROHandlingDone:
movl _thread_offset_to_esp(%eax), %esp


/* load return value from a possible z_arch_thread_return_value_set() */
/* load return value from a possible arch_thread_return_value_set() */

popl %eax

@@ -345,23 +345,23 @@ CROHandlingDone:
/*
* %eax may contain one of these values:
*
* - the return value for z_arch_swap() that was set up by a call to
* z_arch_thread_return_value_set()
* - the return value for arch_swap() that was set up by a call to
* arch_thread_return_value_set()
* - -EINVAL
*/

/* Utilize the 'eflags' parameter to z_arch_swap() */
/* Utilize the 'eflags' parameter to arch_swap() */

pushl 4(%esp)
popfl

#ifdef CONFIG_EXECUTION_BENCHMARKING
cmp $0x1,z_arch_timing_value_swap_end
cmp $0x1,arch_timing_value_swap_end
jne time_read_not_needed
movw $0x2,z_arch_timing_value_swap_end
read_tsc z_arch_timing_value_swap_common
pushl z_arch_timing_swap_start
popl z_arch_timing_value_swap_temp
movw $0x2,arch_timing_value_swap_end
read_tsc arch_timing_value_swap_common
pushl arch_timing_swap_start
popl arch_timing_value_swap_temp
time_read_not_needed:
#endif
ret

@@ -371,7 +371,7 @@ time_read_not_needed:
*
* @brief Adjust stack/parameters before invoking thread entry function
*
* This function adjusts the initial stack frame created by z_arch_new_thread()
* This function adjusts the initial stack frame created by arch_new_thread()
* such that the GDB stack frame unwinders recognize it as the outermost frame
* in the thread's stack.
*

@@ -380,7 +380,7 @@ time_read_not_needed:
* a main() function, and there does not appear to be a simple way of stopping
* the unwinding of the stack.
*
* Given the initial thread created by z_arch_new_thread(), GDB expects to find
* Given the initial thread created by arch_new_thread(), GDB expects to find
* a return address on the stack immediately above the thread entry routine
* z_thread_entry, in the location occupied by the initial EFLAGS. GDB
* attempts to examine the memory at this return address, which typically

@@ -109,8 +109,8 @@ static FUNC_NORETURN void drop_to_user(k_thread_entry_t user_entry,
CODE_UNREACHABLE;
}

FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
struct z_x86_thread_stack_header *header =
(struct z_x86_thread_stack_header *)_current->stack_obj;

@@ -161,7 +161,7 @@ NANO_CPU_INT_REGISTER(z_x86_syscall_entry_stub, -1, -1, 0x80, 3);

extern int z_float_disable(struct k_thread *thread);

int z_arch_float_disable(struct k_thread *thread)
int arch_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_LAZY_FP_SHARING)
return z_float_disable(thread);

@@ -171,10 +171,10 @@ int z_arch_float_disable(struct k_thread *thread)
}
#endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */

void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned int options)
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned int options)
{
char *stack_buf;
char *stack_high;

@@ -12,7 +12,7 @@
/* Exports */
GTEXT(z_x86_syscall_entry_stub)
GTEXT(z_x86_userspace_enter)
GTEXT(z_arch_user_string_nlen)
GTEXT(arch_user_string_nlen)
GTEXT(z_x86_user_string_nlen_fault_start)
GTEXT(z_x86_user_string_nlen_fault_end)
GTEXT(z_x86_user_string_nlen_fixup)

@@ -254,9 +254,9 @@ _bad_syscall:

/*
* size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
*/
SECTION_FUNC(TEXT, z_arch_user_string_nlen)
SECTION_FUNC(TEXT, arch_user_string_nlen)
push %ebp
mov %esp, %ebp

@@ -393,8 +393,8 @@ SECTION_FUNC(TEXT, z_x86_userspace_enter)
push %eax
push %edx
rdtsc
mov %eax,z_arch_timing_enter_user_mode_end
mov %edx,z_arch_timing_enter_user_mode_end+4
mov %eax,arch_timing_enter_user_mode_end
mov %edx,arch_timing_enter_user_mode_end+4
pop %edx
pop %eax
#endif

@@ -97,7 +97,7 @@ struct x86_cpuboot x86_cpuboot[] = {
* will enter the kernel at fn(---, arg), running on the specified stack.
*/

void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
void (*fn)(int key, void *data), void *arg)
{
u8_t vector = ((unsigned long) x86_ap_start) >> 12;

@@ -66,7 +66,7 @@ static int allocate_vector(unsigned int priority)
* allocated. Whether it should simply __ASSERT instead is up for debate.
*/

int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*func)(void *arg), void *arg, u32_t flags)
{
u32_t key;

@@ -91,7 +91,7 @@ int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
#ifdef CONFIG_IRQ_OFFLOAD
#include <irq_offload.h>

void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
{
x86_irq_funcs[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = routine;
x86_irq_args[CONFIG_IRQ_OFFLOAD_VECTOR - IV_IRQS] = parameter;

@@ -119,7 +119,7 @@ void z_x86_ipi_setup(void)
* it is not clear exactly how/where/why to abstract this, as it
* assumes the use of a local APIC (but there's no other mechanism).
*/
void z_arch_sched_ipi(void)
void arch_sched_ipi(void)
{
z_loapic_ipi(0, LOAPIC_ICR_IPI_OTHERS, CONFIG_SCHED_IPI_VECTOR);
}

@@ -10,10 +10,10 @@

extern void x86_sse_init(struct k_thread *); /* in locore.S */

void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned int options)
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned int options)
{
#if defined(CONFIG_X86_USERSPACE) || defined(CONFIG_X86_STACK_PROTECTION)
struct z_x86_thread_stack_header *header =

@@ -746,7 +746,7 @@ static void add_mmu_region(struct x86_page_tables *ptables,
}
}

/* Called from x86's z_arch_kernel_init() */
/* Called from x86's arch_kernel_init() */
void z_x86_paging_init(void)
{
size_t pages_free;

@@ -777,7 +777,7 @@ void z_x86_paging_init(void)
}

#ifdef CONFIG_X86_USERSPACE
int z_arch_buffer_validate(void *addr, size_t size, int write)
int arch_buffer_validate(void *addr, size_t size, int write)
{
return z_x86_mmu_validate(z_x86_thread_page_tables_get(_current), addr,
size, write != 0);

@@ -1003,8 +1003,8 @@ void z_x86_thread_pt_init(struct k_thread *thread)
* mode the per-thread page tables will be generated and the memory domain
* configuration applied.
*/
void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id)
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id)
{
sys_dnode_t *node, *next_node;

@@ -1024,7 +1024,7 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
}
}

void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
void arch_mem_domain_destroy(struct k_mem_domain *domain)
{
for (int i = 0, pcount = 0; pcount < domain->num_partitions; i++) {
struct k_mem_partition *partition;

@@ -1035,11 +1035,11 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
}
pcount++;

z_arch_mem_domain_partition_remove(domain, i);
arch_mem_domain_partition_remove(domain, i);
}
}

void z_arch_mem_domain_thread_remove(struct k_thread *thread)
void arch_mem_domain_thread_remove(struct k_thread *thread)
{
struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;

@@ -1062,8 +1062,8 @@ void z_arch_mem_domain_thread_remove(struct k_thread *thread)
}
}

void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
u32_t partition_id)
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
u32_t partition_id)
{
sys_dnode_t *node, *next_node;

@@ -1080,7 +1080,7 @@ void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
}
}

void z_arch_mem_domain_thread_add(struct k_thread *thread)
void arch_mem_domain_thread_add(struct k_thread *thread)
{
if ((thread->base.user_options & K_USER) == 0) {
return;

@@ -1090,7 +1090,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread)
thread->mem_domain_info.mem_domain);
}

int z_arch_mem_domain_max_partitions_get(void)
int arch_mem_domain_max_partitions_get(void)
{
return CONFIG_MAX_DOMAIN_PARTITIONS;
}

@@ -18,20 +18,20 @@
extern "C" {
#endif

static inline void z_arch_kernel_init(void)
static inline void arch_kernel_init(void)
{
/* No-op on this arch */
}

static ALWAYS_INLINE void
z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
/* write into 'eax' slot created in z_swap() entry */

*(unsigned int *)(thread->callee_saved.esp) = value;
}

extern void z_arch_cpu_atomic_idle(unsigned int key);
extern void arch_cpu_atomic_idle(unsigned int key);

#ifdef CONFIG_USERSPACE
extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,

@@ -12,7 +12,7 @@

extern void z_x86_switch(void *switch_to, void **switched_from);

static inline void z_arch_switch(void *switch_to, void **switched_from)
static inline void arch_switch(void *switch_to, void **switched_from)
{
z_x86_switch(switch_to, switched_from);
}

@@ -25,7 +25,7 @@ static inline void z_arch_switch(void *switch_to, void **switched_from)

extern void z_x86_ipi_setup(void);

static inline void z_arch_kernel_init(void)
static inline void arch_kernel_init(void)
{
/* nothing */;
}

@@ -15,10 +15,10 @@
#endif

#ifndef _ASMLANGUAGE
static inline bool z_arch_is_in_isr(void)
static inline bool arch_is_in_isr(void)
{
#ifdef CONFIG_SMP
return z_arch_curr_cpu()->nested != 0;
return arch_curr_cpu()->nested != 0;
#else
return _kernel.nested != 0U;
#endif

@@ -5,12 +5,12 @@

#include <debug/tracing.h>

void z_arch_cpu_idle(void)
void arch_cpu_idle(void)
{
sys_trace_idle();
__asm__ volatile ("waiti 0");
}
void z_arch_cpu_atomic_idle(unsigned int key)
void arch_cpu_atomic_idle(unsigned int key)
{
sys_trace_idle();
__asm__ volatile ("waiti 0\n\t"

@@ -23,11 +23,11 @@ void z_irq_do_offload(void *unused)
offload_routine(offload_param);
}

void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
{
IRQ_CONNECT(CONFIG_IRQ_OFFLOAD_INTNUM, XCHAL_EXCM_LEVEL,
z_irq_do_offload, NULL, 0);
z_arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM);
arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM);
offload_routine = routine;
offload_param = parameter;
z_xt_set_intset(BIT(CONFIG_IRQ_OFFLOAD_INTNUM));

@@ -35,5 +35,5 @@ void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter)
* Enable the software interrupt, in case it is disabled, so that IRQ
* offload is serviced.
*/
z_arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM);
arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM);
}

@@ -56,10 +56,10 @@ void *xtensa_init_stack(int *stack_top,
return &bsa[-9];
}

void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t sz, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, unsigned int opts)
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t sz, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, unsigned int opts)
{
char *base = Z_THREAD_STACK_BUFFER(stack);
char *top = base + sz;

@@ -194,7 +194,7 @@ void *xtensa_excint1_c(int *interrupted_stack)

LOG_ERR(" ** FATAL EXCEPTION");
LOG_ERR(" ** CPU %d EXCCAUSE %d (%s)",
z_arch_curr_cpu()->id, cause,
arch_curr_cpu()->id, cause,
z_xtensa_exccause(cause));
LOG_ERR(" ** PC %p VADDR %p",
(void *)bsa[BSA_PC_OFF/4], (void *)vaddr);

@@ -31,7 +31,7 @@ extern void z_xt_coproc_init(void);

extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);

static ALWAYS_INLINE void z_arch_kernel_init(void)
static ALWAYS_INLINE void arch_kernel_init(void)
{
_cpu_t *cpu0 = &_kernel.cpus[0];

@@ -55,7 +55,7 @@ static ALWAYS_INLINE void z_arch_kernel_init(void)

void xtensa_switch(void *switch_to, void **switched_from);

static inline void z_arch_switch(void *switch_to, void **switched_from)
static inline void arch_switch(void *switch_to, void **switched_from)
{
return xtensa_switch(switch_to, switched_from);
}

@@ -64,9 +64,9 @@ static inline void z_arch_switch(void *switch_to, void **switched_from)
}
#endif

static inline bool z_arch_is_in_isr(void)
static inline bool arch_is_in_isr(void)
{
return z_arch_curr_cpu()->nested != 0U;
return arch_curr_cpu()->nested != 0U;
}
#endif /* _ASMLANGUAGE */

@@ -30,7 +30,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
*
* @return The vector assigned to this interrupt
*/
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
posix_isr_declare(irq_p, 0, isr_p, isr_param_p); \
posix_irq_priority_set(irq_p, priority_p, flags_p); \

@@ -43,7 +43,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
*
* See include/irq.h for details.
*/
#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
({ \
posix_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, \
NULL); \

@@ -63,7 +63,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
* All pre/post irq work of the interrupt is handled in the board
* posix_irq_handler() both for direct and normal interrupts together
*/
#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
#define ARCH_ISR_DIRECT_DECLARE(name) \
static inline int name##_body(void); \
int name(void) \
{ \

@@ -73,14 +73,14 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
} \
static inline int name##_body(void)

#define Z_ARCH_ISR_DIRECT_HEADER() do { } while (0)
#define Z_ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)
#define ARCH_ISR_DIRECT_HEADER() do { } while (0)
#define ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)

#ifdef CONFIG_SYS_POWER_MANAGEMENT
extern void posix_irq_check_idle_exit(void);
#define Z_ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
#define ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
#else
#define Z_ARCH_ISR_DIRECT_PM() do { } while (0)
#define ARCH_ISR_DIRECT_PM() do { } while (0)
#endif

#ifdef __cplusplus

@@ -30,7 +30,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
*
* @return The vector assigned to this interrupt
*/
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
posix_isr_declare(irq_p, 0, isr_p, isr_param_p); \
posix_irq_priority_set(irq_p, priority_p, flags_p); \

@@ -43,7 +43,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
*
* See include/irq.h for details.
*/
#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
({ \
posix_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, \
NULL); \

@@ -63,7 +63,7 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
* All pre/post irq work of the interrupt is handled in the board
* posix_irq_handler() both for direct and normal interrupts together
*/
#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
#define ARCH_ISR_DIRECT_DECLARE(name) \
static inline int name##_body(void); \
int name(void) \
{ \

@@ -73,14 +73,14 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
} \
static inline int name##_body(void)

#define Z_ARCH_ISR_DIRECT_HEADER() do { } while (0)
#define Z_ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)
#define ARCH_ISR_DIRECT_HEADER() do { } while (0)
#define ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)

#ifdef CONFIG_SYS_POWER_MANAGEMENT
extern void posix_irq_check_idle_exit(void);
#define Z_ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
#define ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
#else
#define Z_ARCH_ISR_DIRECT_PM() do { } while (0)
#define ARCH_ISR_DIRECT_PM() do { } while (0)
#endif

#ifdef __cplusplus

@@ -16,7 +16,7 @@
* Note that interrupts may be received in the meanwhile and that therefore this
* thread may lose context
*/
void z_arch_busy_wait(u32_t usec_to_wait)
void arch_busy_wait(u32_t usec_to_wait)
{
bs_time_t time_end = tm_get_hw_time() + usec_to_wait;

@@ -407,9 +407,9 @@ CPU Idling/Power Management
***************************

The kernel provides support for CPU power management with two functions:
:c:func:`z_arch_cpu_idle` and :c:func:`z_arch_cpu_atomic_idle`.
:c:func:`arch_cpu_idle` and :c:func:`arch_cpu_atomic_idle`.

:c:func:`z_arch_cpu_idle` can be as simple as calling the power saving
:c:func:`arch_cpu_idle` can be as simple as calling the power saving
instruction for the architecture with interrupts unlocked, for example
:code:`hlt` on x86, :code:`wfi` or :code:`wfe` on ARM, :code:`sleep` on ARC.
This function can be called in a loop within a context that does not care if it

@@ -422,7 +422,7 @@ basically two scenarios when it is correct to use this function:

* In the idle thread.

:c:func:`z_arch_cpu_atomic_idle`, on the other hand, must be able to atomically
:c:func:`arch_cpu_atomic_idle`, on the other hand, must be able to atomically
re-enable interrupts and invoke the power saving instruction. It can thus be
used in real application code, again in single-threaded systems.
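
As a minimal sketch of the atomic variant's contract (assuming a hypothetical
``data_ready`` flag that an ISR sets; per the function's documentation it
returns with interrupts restored to the state captured in ``key``)::

    extern volatile bool data_ready; /* hypothetical flag set by an ISR */

    unsigned int key = arch_irq_lock();

    while (!data_ready) {
        /* Sleeps with interrupts enabled; returns with them
         * restored to the locked state captured in key.
         */
        arch_cpu_atomic_idle(key);
    }
    arch_irq_unlock(key);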

@@ -511,32 +511,32 @@ implemented, and the system must enable the :option:`CONFIG_ARCH_HAS_USERSPACE`
option. Please see the documentation for each of these functions for more
details:

* :cpp:func:`z_arch_buffer_validate()` to test whether the current thread has
* :cpp:func:`arch_buffer_validate()` to test whether the current thread has
access permissions to a particular memory region

* :cpp:func:`z_arch_user_mode_enter()` which will irreversibly drop a supervisor
* :cpp:func:`arch_user_mode_enter()` which will irreversibly drop a supervisor
thread to user mode privileges. The stack must be wiped.

* :cpp:func:`z_arch_syscall_oops()` which generates a kernel oops when system
* :cpp:func:`arch_syscall_oops()` which generates a kernel oops when system
call parameters can't be validated, in such a way that the oops appears to be
generated from where the system call was invoked in the user thread

* :cpp:func:`z_arch_syscall_invoke0()` through
:cpp:func:`z_arch_syscall_invoke6()` invoke a system call with the
* :cpp:func:`arch_syscall_invoke0()` through
:cpp:func:`arch_syscall_invoke6()` invoke a system call with the
appropriate number of arguments which must all be passed in during the
privilege elevation via registers.

* :cpp:func:`z_arch_is_user_context()` return nonzero if the CPU is currently
* :cpp:func:`arch_is_user_context()` return nonzero if the CPU is currently
running in user mode

* :cpp:func:`z_arch_mem_domain_max_partitions_get()` which indicates the max
* :cpp:func:`arch_mem_domain_max_partitions_get()` which indicates the max
number of regions for a memory domain. MMU systems have an unlimited amount,
MPU systems have constraints on this.

* :cpp:func:`z_arch_mem_domain_partition_remove()` Remove a partition from
* :cpp:func:`arch_mem_domain_partition_remove()` Remove a partition from
a memory domain if the currently executing thread was part of that domain.

* :cpp:func:`z_arch_mem_domain_destroy()` Reset the thread's memory domain
* :cpp:func:`arch_mem_domain_destroy()` Reset the thread's memory domain
configuration

In addition to implementing these APIs, there are some other tasks as well:
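
As a sketch of the simplest of these, an MMU-based port can satisfy
``arch_mem_domain_max_partitions_get()`` by reporting its configured limit
directly; this mirrors the x86 implementation renamed earlier in this commit::

    int arch_mem_domain_max_partitions_get(void)
    {
        /* MMU-based: no hardware region limit beyond the Kconfig cap */
        return CONFIG_MAX_DOMAIN_PARTITIONS;
    }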

@@ -132,7 +132,7 @@ happens on a single CPU before other CPUs are brought online.
Just before entering the application ``main()`` function, the kernel
calls ``z_smp_init()`` to launch the SMP initialization process. This
enumerates over the configured CPUs, calling into the architecture
layer using ``z_arch_start_cpu()`` for each one. This function is
layer using ``arch_start_cpu()`` for each one. This function is
passed a memory region to use as a stack on the foreign CPU (in
practice it uses the area that will become that CPU's interrupt
stack), the address of a local ``smp_init_top()`` callback function to
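
A sketch of the enumeration described above, under the renamed signature
(``smp_stacks`` and ``start_flag`` are illustrative names, not the kernel's)::

    /* void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
     *                     void (*fn)(int key, void *data), void *arg);
     */
    for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
        arch_start_cpu(i, smp_stacks[i], CONFIG_ISR_STACK_SIZE,
                       smp_init_top, &start_flag);
    }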

@@ -172,7 +172,7 @@ handle the newly-runnable load.

So where possible, Zephyr SMP architectures should implement an
interprocessor interrupt. The current framework is very simple: the
architecture provides a ``z_arch_sched_ipi()`` call, which when invoked
architecture provides an ``arch_sched_ipi()`` call, which when invoked
will flag an interrupt on all CPUs (except the current one, though
that is allowed behavior) which will then invoke the ``z_sched_ipi()``
function implemented in the scheduler. The expectation is that these
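
On x86, for example, the implementation renamed earlier in this commit is a
one-line local APIC broadcast::

    void arch_sched_ipi(void)
    {
        /* IPI to all CPUs other than the current one */
        z_loapic_ipi(0, LOAPIC_ICR_IPI_OTHERS, CONFIG_SCHED_IPI_VECTOR);
    }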

@@ -239,7 +239,7 @@ offsets.

Note that an important requirement on the architecture layer is that
the pointer to this CPU struct be available rapidly when in kernel
context. The expectation is that ``z_arch_curr_cpu()`` will be
context. The expectation is that ``arch_curr_cpu()`` will be
implemented using a CPU-provided register or addressing mode that can
store this value across arbitrary context switches or interrupts and
make it available to any kernel-mode code.
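
A sketch of the usual shape of such an implementation (``hw_core_id()`` is an
illustrative stand-in for whatever register read the architecture provides,
e.g. ARC's identity aux register)::

    static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
    {
    #ifdef CONFIG_SMP
        /* index _kernel.cpus[] by a hardware-provided core number */
        return &_kernel.cpus[hw_core_id()];
    #else
        return &_kernel.cpus[0];
    #endif
    }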

@@ -270,7 +270,7 @@ Instead, the SMP "switch to" decision needs to be made synchronously
with the swap call, and as we don't want per-architecture assembly
code to be handling scheduler internal state, Zephyr requires a
somewhat lower-level context switch primitive for SMP systems:
``z_arch_switch()`` is always called with interrupts masked, and takes
``arch_switch()`` is always called with interrupts masked, and takes
exactly two arguments. The first is an opaque (architecture defined)
handle to the context to which it should switch, and the second is a
pointer to such a handle into which it should store the handle
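
The intel64 port in this same commit shows the minimal shape of this
primitive, a thin inline over an assembly routine::

    static inline void arch_switch(void *switch_to, void **switched_from)
    {
        /* saves the outgoing context into *switched_from and
         * resumes the context identified by switch_to
         */
        z_x86_switch(switch_to, switched_from);
    }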

@@ -288,4 +288,4 @@ in the interrupted thread struct.
Note that while SMP requires :option:`CONFIG_USE_SWITCH`, the reverse is not
true. A uniprocessor architecture built with :option:`CONFIG_SMP` = n might
still decide to implement its context switching using
``z_arch_switch()``.
``arch_switch()``.

@@ -149,7 +149,7 @@ Inside this header is the body of :c:func:`k_sem_init()`::
{
#ifdef CONFIG_USERSPACE
if (z_syscall_trap()) {
z_arch_syscall_invoke3(*(uintptr_t *)&sem, *(uintptr_t *)&initial_count, *(uintptr_t *)&limit, K_SYSCALL_K_SEM_INIT);
arch_syscall_invoke3(*(uintptr_t *)&sem, *(uintptr_t *)&initial_count, *(uintptr_t *)&limit, K_SYSCALL_K_SEM_INIT);
return;
}
compiler_barrier();

@@ -33,7 +33,7 @@ static int save_irq;
*
* This routine enables a RISCV PLIC-specific interrupt line.
* riscv_plic_irq_enable is called by SOC_FAMILY_RISCV_PRIVILEGE
* z_arch_irq_enable function to enable external interrupts for
* arch_irq_enable function to enable external interrupts for
* IRQS level == 2, whenever CONFIG_RISCV_HAS_PLIC variable is set.
* @param irq IRQ number to enable
*

@@ -57,7 +57,7 @@ void riscv_plic_irq_enable(u32_t irq)
*
* This routine disables a RISCV PLIC-specific interrupt line.
* riscv_plic_irq_disable is called by SOC_FAMILY_RISCV_PRIVILEGE
* z_arch_irq_disable function to disable external interrupts, for
* arch_irq_disable function to disable external interrupts, for
* IRQS level == 2, whenever CONFIG_RISCV_HAS_PLIC variable is set.
* @param irq IRQ number to disable
*

@@ -98,7 +98,7 @@ int riscv_plic_irq_is_enabled(u32_t irq)
* @brief Set priority of a riscv PLIC-specific interrupt line
*
* This routine sets the priority of a RISCV PLIC-specific interrupt line.
* riscv_plic_irq_set_prio is called by riscv z_arch_irq_priority_set to set
* riscv_plic_irq_set_prio is called by riscv arch_irq_priority_set to set
* the priority of an interrupt whenever CONFIG_RISCV_HAS_PLIC variable is set.
* @param irq IRQ number for which to set priority
*
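
A condensed sketch of the dispatch these comments describe (the cutoff macro
and CSR sequence are assumptions here, not taken from this diff; the real
logic lives in the RISCV arch layer)::

    void arch_irq_enable(unsigned int irq)
    {
        u32_t mie;

    #if defined(CONFIG_RISCV_HAS_PLIC)
        if (irq > RISCV_MAX_GENERIC_IRQ) {
            /* level == 2 interrupt: delegate to the PLIC driver */
            riscv_plic_irq_enable(irq);
            return;
        }
    #endif
        /* level == 1: atomically set the bit in the mie CSR */
        __asm__ volatile ("csrrs %0, mie, %1"
                          : "=r" (mie)
                          : "r" (1 << irq));
    }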

@@ -72,7 +72,7 @@ void z_irq_controller_irq_config(unsigned int vector, unsigned int irq,
*
* @return N/A
*/
void z_arch_irq_enable(unsigned int irq)
void arch_irq_enable(unsigned int irq)
{
if (IS_IOAPIC_IRQ(irq)) {
z_ioapic_irq_enable(irq);

@@ -92,7 +92,7 @@ void z_arch_irq_enable(unsigned int irq)
*
* @return N/A
*/
void z_arch_irq_disable(unsigned int irq)
void arch_irq_disable(unsigned int irq)
{
if (IS_IOAPIC_IRQ(irq)) {
z_ioapic_irq_disable(irq);

@@ -83,17 +83,17 @@ static void vexriscv_litex_irq_handler(void *device)
#endif
}

void z_arch_irq_enable(unsigned int irq)
void arch_irq_enable(unsigned int irq)
{
vexriscv_litex_irq_setmask(vexriscv_litex_irq_getmask() | (1 << irq));
}

void z_arch_irq_disable(unsigned int irq)
void arch_irq_disable(unsigned int irq)
{
vexriscv_litex_irq_setmask(vexriscv_litex_irq_getmask() & ~(1 << irq));
}

int z_arch_irq_is_enabled(unsigned int irq)
int arch_irq_is_enabled(unsigned int irq)
{
return vexriscv_litex_irq_getmask() & (1 << irq);
}

@@ -251,14 +251,14 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
/* Desired delay in the future */
delay = (ticks == 0) ? CYC_PER_TICK : ticks * CYC_PER_TICK;

key = z_arch_irq_lock();
key = arch_irq_lock();

timer0_limit_register_set(delay - 1);
timer0_count_register_set(0);
timer0_control_register_set(_ARC_V2_TMR_CTRL_NH |
_ARC_V2_TMR_CTRL_IE);

z_arch_irq_unlock(key);
arch_irq_unlock(key);
#endif
#else
if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) {

@@ -206,8 +206,8 @@ void timer_int_handler(void *unused /* parameter is not used */
"pushl %eax\n\t"
"pushl %edx\n\t"
"rdtsc\n\t"
"mov %eax, z_arch_timing_tick_start\n\t"
"mov %edx, z_arch_timing_tick_start+4\n\t"
"mov %eax, arch_timing_tick_start\n\t"
"mov %edx, arch_timing_tick_start+4\n\t"
"pop %edx\n\t"
"pop %eax\n\t");
#endif

@@ -293,8 +293,8 @@ void timer_int_handler(void *unused /* parameter is not used */
"pushl %eax\n\t"
"pushl %edx\n\t"
"rdtsc\n\t"
"mov %eax, z_arch_timing_tick_end\n\t"
"mov %edx, z_arch_timing_tick_end+4\n\t"
"mov %eax, arch_timing_tick_end\n\t"
"mov %edx, arch_timing_tick_end+4\n\t"
"pop %edx\n\t"
"pop %eax\n\t");
#endif /* CONFIG_EXECUTION_BENCHMARKING */

@@ -269,7 +269,7 @@ u32_t z_clock_elapsed(void)
/*
* Warning RTOS timer resolution is 30.5 us.
* This is called by two code paths:
* 1. Kernel call to k_cycle_get_32() -> z_arch_k_cycle_get_32() -> here.
* 1. Kernel call to k_cycle_get_32() -> arch_k_cycle_get_32() -> here.
* The kernel is casting return to (int) and using it uncasted in math
* expressions with int types. Expression result is stored in an int.
* 2. If CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT is not defined then

@@ -359,7 +359,7 @@ int z_clock_driver_init(struct device *device)
* 32-bit basic timer 0 configured for 1MHz count up, auto-reload,
* and no interrupt generation.
*/
void z_arch_busy_wait(u32_t usec_to_wait)
void arch_busy_wait(u32_t usec_to_wait)
{
if (usec_to_wait == 0) {
return;

@@ -123,7 +123,7 @@ u32_t z_clock_elapsed(void)
* Note that interrupts may be received in the meanwhile and that therefore this
* thread may lose context
*/
void z_arch_busy_wait(u32_t usec_to_wait)
void arch_busy_wait(u32_t usec_to_wait)
{
u64_t time_end = hwm_get_time() + usec_to_wait;

@@ -89,10 +89,10 @@ extern "C" {

#if defined(CONFIG_USERSPACE)
#define Z_ARCH_THREAD_STACK_RESERVED \
#define ARCH_THREAD_STACK_RESERVED \
(STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE)
#else
#define Z_ARCH_THREAD_STACK_RESERVED (STACK_GUARD_SIZE)
#define ARCH_THREAD_STACK_RESERVED (STACK_GUARD_SIZE)
#endif

@@ -107,8 +107,8 @@ extern "C" {
* MPU start, size alignment
*/
#define Z_ARC_THREAD_STACK_ALIGN(size) Z_ARC_MPUV2_SIZE_ALIGN(size)
#define Z_ARCH_THREAD_STACK_LEN(size) \
(Z_ARC_MPUV2_SIZE_ALIGN(size) + Z_ARCH_THREAD_STACK_RESERVED)
#define ARCH_THREAD_STACK_LEN(size) \
(Z_ARC_MPUV2_SIZE_ALIGN(size) + ARCH_THREAD_STACK_RESERVED)
/*
* for stack array, each array member should be aligned both in size
* and start

@@ -116,7 +116,7 @@ extern "C" {
#define Z_ARC_THREAD_STACK_ARRAY_LEN(size) \
(Z_ARC_MPUV2_SIZE_ALIGN(size) + \
MAX(Z_ARC_MPUV2_SIZE_ALIGN(size), \
POW2_CEIL(Z_ARCH_THREAD_STACK_RESERVED)))
POW2_CEIL(ARCH_THREAD_STACK_RESERVED)))
#else
/*
* MPUv3, no-mpu and no USERSPACE share the same macro definitions.

@@ -130,33 +130,33 @@ extern "C" {
* aligned
*/
#define Z_ARC_THREAD_STACK_ALIGN(size) (STACK_ALIGN)
#define Z_ARCH_THREAD_STACK_LEN(size) \
(STACK_SIZE_ALIGN(size) + Z_ARCH_THREAD_STACK_RESERVED)
#define ARCH_THREAD_STACK_LEN(size) \
(STACK_SIZE_ALIGN(size) + ARCH_THREAD_STACK_RESERVED)
#define Z_ARC_THREAD_STACK_ARRAY_LEN(size) \
Z_ARCH_THREAD_STACK_LEN(size)
ARCH_THREAD_STACK_LEN(size)

#endif /* CONFIG_USERSPACE && CONFIG_ARC_MPU_VER == 2 */


#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
#define ARCH_THREAD_STACK_DEFINE(sym, size) \
struct _k_thread_stack_element __noinit \
__aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \
sym[Z_ARCH_THREAD_STACK_LEN(size)]
sym[ARCH_THREAD_STACK_LEN(size)]

#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
struct _k_thread_stack_element __noinit \
__aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \
sym[nmemb][Z_ARC_THREAD_STACK_ARRAY_LEN(size)]

#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
#define ARCH_THREAD_STACK_MEMBER(sym, size) \
struct _k_thread_stack_element \
__aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \
sym[Z_ARCH_THREAD_STACK_LEN(size)]
sym[ARCH_THREAD_STACK_LEN(size)]

#define Z_ARCH_THREAD_STACK_SIZEOF(sym) \
(sizeof(sym) - Z_ARCH_THREAD_STACK_RESERVED)
#define ARCH_THREAD_STACK_SIZEOF(sym) \
(sizeof(sym) - ARCH_THREAD_STACK_RESERVED)

#define Z_ARCH_THREAD_STACK_BUFFER(sym) \
#define ARCH_THREAD_STACK_BUFFER(sym) \
((char *)(sym))

#ifdef CONFIG_ARC_MPU

@@ -227,7 +227,7 @@ extern "C" {
/* Typedef for the k_mem_partition attribute*/
typedef u32_t k_mem_partition_attr_t;

static ALWAYS_INLINE void z_arch_nop(void)
static ALWAYS_INLINE void arch_nop(void)
{
__asm__ volatile("nop");
}

@@ -16,7 +16,7 @@
#include <arch/arc/v2/aux_regs.h>
#endif

static ALWAYS_INLINE _cpu_t *z_arch_curr_cpu(void)
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
#ifdef CONFIG_SMP
u32_t core;

@@ -38,10 +38,10 @@ extern "C" {
* just for enabling CONFIG_USERSPACE on arc w/o errors.
*/

static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5, uintptr_t arg6,
uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5, uintptr_t arg6,
uintptr_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;

@@ -63,10 +63,10 @@ static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
return ret;
}

static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5,
uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5,
uintptr_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;

@@ -87,9 +87,9 @@ static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
return ret;
}

static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;

@@ -109,9 +109,9 @@ static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
return ret;
}

static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3,
uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3,
uintptr_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;

@@ -129,8 +129,8 @@ static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
return ret;
}

static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
uintptr_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;

@@ -147,7 +147,7 @@ static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
return ret;
}

static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r6 __asm__("r6") = call_id;

@@ -163,7 +163,7 @@ static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id
return ret;
}

static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
register u32_t ret __asm__("r0");
register u32_t r6 __asm__("r6") = call_id;

@@ -179,7 +179,7 @@ static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id)
return ret;
}

static inline bool z_arch_is_user_context(void)
static inline bool arch_is_user_context(void)
{
u32_t status;

@@ -25,7 +25,7 @@ extern "C" {
/*
* use trap_s to raise a SW exception
*/
#define Z_ARCH_EXCEPT(reason_p) do { \
#define ARCH_EXCEPT(reason_p) do { \
__asm__ volatile ( \
"mov r0, %[reason]\n\t" \
"trap_s %[id]\n\t" \

@@ -26,15 +26,15 @@ extern "C" {

#ifdef _ASMLANGUAGE
GTEXT(_irq_exit);
GTEXT(z_arch_irq_enable)
GTEXT(z_arch_irq_disable)
GTEXT(arch_irq_enable)
GTEXT(arch_irq_disable)
GTEXT(z_arc_firq_stack_set)
#else

extern void z_arc_firq_stack_set(void);
extern void z_arch_irq_enable(unsigned int irq);
extern void z_arch_irq_disable(unsigned int irq);
extern int z_arch_irq_is_enabled(unsigned int irq);
extern void arch_irq_enable(unsigned int irq);
extern void arch_irq_disable(unsigned int irq);
extern int arch_irq_is_enabled(unsigned int irq);

extern void _irq_exit(void);
extern void z_irq_priority_set(unsigned int irq, unsigned int prio,

@@ -50,7 +50,7 @@ extern void z_irq_spurious(void *unused);
* We additionally set the priority in the interrupt controller at
* runtime.
*/
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
z_irq_priority_set(irq_p, priority_p, flags_p); \

@@ -78,7 +78,7 @@ extern void z_irq_spurious(void *unused);
* See include/irq.h for details.
* All arguments must be computable at build time.
*/
#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
({ \
Z_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \
BUILD_ASSERT_MSG(priority_p || !IS_ENABLED(CONFIG_ARC_FIRQ) || \

@@ -92,14 +92,14 @@ extern void z_irq_spurious(void *unused);
})


static inline void z_arch_isr_direct_header(void)
static inline void arch_isr_direct_header(void)
{
#ifdef CONFIG_TRACING
z_sys_trace_isr_enter();
#endif
}

static inline void z_arch_isr_direct_footer(int maybe_swap)
static inline void arch_isr_direct_footer(int maybe_swap)
{
/* clear SW generated interrupt */
if (z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE) ==

@@ -111,16 +111,16 @@ static inline void z_arch_isr_direct_footer(int maybe_swap)
#endif
}

#define Z_ARCH_ISR_DIRECT_HEADER() z_arch_isr_direct_header()
extern void z_arch_isr_direct_header(void);
#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header()
extern void arch_isr_direct_header(void);

#define Z_ARCH_ISR_DIRECT_FOOTER(swap) z_arch_isr_direct_footer(swap)
#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap)

/*
* Scheduling can not be done in direct isr. If required, please use kernel
* aware interrupt handling
*/
#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
#define ARCH_ISR_DIRECT_DECLARE(name) \
static inline int name##_body(void); \
__attribute__ ((interrupt("ilink")))void name(void) \
{ \

@@ -163,7 +163,7 @@ extern void z_arch_isr_direct_header(void);
* "interrupt disable state" prior to the call.
*/

static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
unsigned int key;

@@ -171,12 +171,12 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
return key;
}

static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
__asm__ volatile("seti %0" : : "ir"(key) : "memory");
}

static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
/* ARC irq lock uses instruction "clri r0",
* r0 == {26'd0, 1'b1, STATUS32.IE, STATUS32.E[3:0] }

@@ -23,7 +23,7 @@ extern unsigned int z_arc_cpu_sleep_mode;

extern u32_t z_timer_cycle_get_32(void);

static inline u32_t z_arch_k_cycle_get_32(void)
static inline u32_t arch_k_cycle_get_32(void)
{
return z_timer_cycle_get_32();
}
@ -189,56 +189,56 @@ extern "C" {
/* Guard is 'carved-out' of the thread stack region, and the supervisor
 * mode stack is allocated elsewhere by gen_priv_stack.py
 */
#define Z_ARCH_THREAD_STACK_RESERVED 0
#define ARCH_THREAD_STACK_RESERVED 0
#else
#define Z_ARCH_THREAD_STACK_RESERVED MPU_GUARD_ALIGN_AND_SIZE
#define ARCH_THREAD_STACK_RESERVED MPU_GUARD_ALIGN_AND_SIZE
#endif

#if defined(CONFIG_USERSPACE) && \
	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
#define ARCH_THREAD_STACK_DEFINE(sym, size) \
	struct _k_thread_stack_element __noinit \
		__aligned(POW2_CEIL(size)) sym[POW2_CEIL(size)]
#else
#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
#define ARCH_THREAD_STACK_DEFINE(sym, size) \
	struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
		sym[size+MPU_GUARD_ALIGN_AND_SIZE]
#endif

#if defined(CONFIG_USERSPACE) && \
	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define Z_ARCH_THREAD_STACK_LEN(size) (POW2_CEIL(size))
#define ARCH_THREAD_STACK_LEN(size) (POW2_CEIL(size))
#else
#define Z_ARCH_THREAD_STACK_LEN(size) ((size)+MPU_GUARD_ALIGN_AND_SIZE)
#define ARCH_THREAD_STACK_LEN(size) ((size)+MPU_GUARD_ALIGN_AND_SIZE)
#endif

#if defined(CONFIG_USERSPACE) && \
	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
	struct _k_thread_stack_element __noinit \
		__aligned(POW2_CEIL(size)) \
		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
		sym[nmemb][ARCH_THREAD_STACK_LEN(size)]
#else
#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
	struct _k_thread_stack_element __noinit \
		__aligned(STACK_ALIGN) \
		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
		sym[nmemb][ARCH_THREAD_STACK_LEN(size)]
#endif

#if defined(CONFIG_USERSPACE) && \
	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
#define ARCH_THREAD_STACK_MEMBER(sym, size) \
	struct _k_thread_stack_element __aligned(POW2_CEIL(size)) \
		sym[POW2_CEIL(size)]
#else
#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
#define ARCH_THREAD_STACK_MEMBER(sym, size) \
	struct _k_thread_stack_element __aligned(STACK_ALIGN) \
		sym[size+MPU_GUARD_ALIGN_AND_SIZE]
#endif

#define Z_ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - MPU_GUARD_ALIGN_AND_SIZE)
#define ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - MPU_GUARD_ALIGN_AND_SIZE)

#define Z_ARCH_THREAD_STACK_BUFFER(sym) \
#define ARCH_THREAD_STACK_BUFFER(sym) \
	((char *)(sym) + MPU_GUARD_ALIGN_AND_SIZE)

/* Legacy case: retain containing extern "C" with C++ */
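For context, portable code never uses these ARCH_* stack macros directly; it goes through the K_THREAD_STACK_* wrappers, which select the definitions above. A usage sketch (the thread name, stack size, and priority are assumptions for illustration):

	K_THREAD_STACK_DEFINE(worker_stack, 1024); /* resolves to ARCH_THREAD_STACK_DEFINE */
	static struct k_thread worker; /* assumed worker thread object */

	void spawn_worker(k_thread_entry_t entry)
	{
		k_thread_create(&worker, worker_stack,
				K_THREAD_STACK_SIZEOF(worker_stack),
				entry, NULL, NULL, NULL,
				K_PRIO_PREEMPT(4), 0, K_NO_WAIT);
	}

On MPU targets requiring power-of-two alignment, the declared size is silently rounded up by POW2_CEIL(), which is why K_THREAD_STACK_SIZEOF() rather than the literal size must be passed to k_thread_create().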
@ -36,7 +36,7 @@ extern "C" {
 * except NMI.
 */

static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key;
@ -75,7 +75,7 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 * previously disabled.
 */

static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	if (key) {

@ -100,7 +100,7 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
}

static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	/* This convention works for both PRIMASK and BASEPRI */
	return key == 0;
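A small sketch (an assumed helper, not in this diff) showing how the key == 0 convention above can be used to sanity-check lock state:

	static inline void assert_irqs_unlocked_on_entry(void) /* assumed helper */
	{
		unsigned int key = arch_irq_lock();

		/* On ARM, a zero key means interrupts were fully enabled on entry */
		__ASSERT(arch_irq_unlocked(key), "caller holds an IRQ lock");
		arch_irq_unlock(key);
	}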
@ -31,7 +31,7 @@ extern "C" {
 * schedule a new thread until they are unlocked which is not what we want.
 * Force them unlocked as well.
 */
#define Z_ARCH_EXCEPT(reason_p) \
#define ARCH_EXCEPT(reason_p) \
register u32_t r0 __asm__("r0") = reason_p; \
do { \
	__asm__ volatile ( \

@ -42,7 +42,7 @@ do { \
		: "memory"); \
} while (false)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
#define Z_ARCH_EXCEPT(reason_p) do { \
#define ARCH_EXCEPT(reason_p) do { \
	__asm__ volatile ( \
		"eors.n r0, r0\n\t" \
		"msr BASEPRI, r0\n\t" \
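For orientation, ARCH_EXCEPT() is not invoked by applications directly; it is reached through k_oops()/k_panic(). A hedged sketch (fatal_condition() is an assumed predicate):

	void handle_request(void)
	{
		if (fatal_condition()) { /* assumed predicate */
			/* reaches ARCH_EXCEPT(K_ERR_KERNEL_OOPS) via the kernel */
			k_oops();
		}
	}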
@ -24,13 +24,13 @@ extern "C" {

#ifdef _ASMLANGUAGE
GTEXT(z_arm_int_exit);
GTEXT(z_arch_irq_enable)
GTEXT(z_arch_irq_disable)
GTEXT(z_arch_irq_is_enabled)
GTEXT(arch_irq_enable)
GTEXT(arch_irq_disable)
GTEXT(arch_irq_is_enabled)
#else
extern void z_arch_irq_enable(unsigned int irq);
extern void z_arch_irq_disable(unsigned int irq);
extern int z_arch_irq_is_enabled(unsigned int irq);
extern void arch_irq_enable(unsigned int irq);
extern void arch_irq_disable(unsigned int irq);
extern int arch_irq_is_enabled(unsigned int irq);

extern void z_arm_int_exit(void);

@ -76,14 +76,14 @@ extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio,
 * We additionally set the priority in the interrupt controller at
 * runtime.
 */
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
	z_arm_irq_priority_set(irq_p, priority_p, flags_p); \
	irq_p; \
})

#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
#define ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
({ \
	Z_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \
	z_arm_irq_priority_set(irq_p, priority_p, flags_p); \

@ -93,15 +93,15 @@ extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio,
/* FIXME prefer these inline, but see GH-3056 */
#ifdef CONFIG_SYS_POWER_MANAGEMENT
extern void _arch_isr_direct_pm(void);
#define Z_ARCH_ISR_DIRECT_PM() _arch_isr_direct_pm()
#define ARCH_ISR_DIRECT_PM() _arch_isr_direct_pm()
#else
#define Z_ARCH_ISR_DIRECT_PM() do { } while (false)
#define ARCH_ISR_DIRECT_PM() do { } while (false)
#endif

#define Z_ARCH_ISR_DIRECT_HEADER() z_arch_isr_direct_header()
extern void z_arch_isr_direct_header(void);
#define ARCH_ISR_DIRECT_HEADER() arch_isr_direct_header()
extern void arch_isr_direct_header(void);

#define Z_ARCH_ISR_DIRECT_FOOTER(swap) z_arch_isr_direct_footer(swap)
#define ARCH_ISR_DIRECT_FOOTER(swap) arch_isr_direct_footer(swap)

/* arch/arm/core/exc_exit.S */
extern void z_arm_int_exit(void);

@ -110,7 +110,7 @@ extern void z_arm_int_exit(void);
extern void sys_trace_isr_exit(void);
#endif

static inline void z_arch_isr_direct_footer(int maybe_swap)
static inline void arch_isr_direct_footer(int maybe_swap)
{

#ifdef CONFIG_TRACING

@ -121,7 +121,7 @@ static inline void z_arch_isr_direct_footer(int maybe_swap)
	}
}

#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
#define ARCH_ISR_DIRECT_DECLARE(name) \
	static inline int name##_body(void); \
	__attribute__ ((interrupt ("IRQ"))) void name(void) \
	{ \
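A usage sketch of the direct-ISR machinery above, via the portable wrappers that expand to these ARCH_* macros; TIMER_IRQ, its priority, and clear_timer_interrupt() are assumptions for illustration:

	ISR_DIRECT_DECLARE(timer_direct_isr)
	{
		ISR_DIRECT_PM();         /* leave low-power state if needed */
		clear_timer_interrupt(); /* assumed device register access */
		return 0;                /* 0: no rescheduling check requested */
	}

	void timer_setup(void)
	{
		IRQ_DIRECT_CONNECT(TIMER_IRQ, 0, timer_direct_isr, 0);
		irq_enable(TIMER_IRQ);
	}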
@ -21,12 +21,12 @@ extern "C" {
#ifndef _ASMLANGUAGE
extern u32_t z_timer_cycle_get_32(void);

static inline u32_t z_arch_k_cycle_get_32(void)
static inline u32_t arch_k_cycle_get_32(void)
{
	return z_timer_cycle_get_32();
}

static ALWAYS_INLINE void z_arch_nop(void)
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}
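As a brief illustration, arch_nop() exists so portable code can emit a true no-op instruction, e.g. in calibration or relaxation loops. An uncalibrated sketch (spin_relax() is an assumed helper, not part of this diff):

	static inline void spin_relax(unsigned int n) /* assumed helper */
	{
		while (n-- > 0U) {
			arch_nop(); /* one architectural no-op per iteration */
		}
	}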
@ -36,10 +36,10 @@ extern "C" {
/* Syscall invocation macros. arm-specific machine constraints used to ensure
 * args land in the proper registers.
 */
static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					       uintptr_t arg3, uintptr_t arg4,
					       uintptr_t arg5, uintptr_t arg6,
					       uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r1 __asm__("r1") = arg2;

@ -59,10 +59,10 @@ static inline uintptr_t z_arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
	return ret;
}

static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					       uintptr_t arg3, uintptr_t arg4,
					       uintptr_t arg5,
					       uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id)
{
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r1 __asm__("r1") = arg2;

@ -81,9 +81,9 @@ static inline uintptr_t z_arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
	return ret;
}

static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					       uintptr_t arg3, uintptr_t arg4,
					       uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r1 __asm__("r1") = arg2;

@ -101,9 +101,9 @@ static inline uintptr_t z_arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
	return ret;
}

static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					       uintptr_t arg3,
					       uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id)
{
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r1 __asm__("r1") = arg2;

@ -119,8 +119,8 @@ static inline uintptr_t z_arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
	return ret;
}

static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					       uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r1 __asm__("r1") = arg2;

@ -135,8 +135,8 @@ static inline uintptr_t z_arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
	return ret;
}

static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1,
					       uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id)
{
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r6 __asm__("r6") = call_id;

@ -149,7 +149,7 @@ static inline uintptr_t z_arch_syscall_invoke1(uintptr_t arg1,
	return ret;
}

static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id)
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	register u32_t ret __asm__("r0");
	register u32_t r6 __asm__("r6") = call_id;

@ -163,7 +163,7 @@ static inline uintptr_t z_arch_syscall_invoke0(uintptr_t call_id)
	return ret;
}

static inline bool z_arch_is_user_context(void)
static inline bool arch_is_user_context(void)
{
	u32_t value;
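For context, these invoke helpers are normally emitted by the syscall generator rather than written by hand. A hand-written sketch of a two-argument call (my_call and K_SYSCALL_MY_CALL are assumptions, not names from this tree):

	static inline int my_call(struct device *dev, u32_t flags)
	{
		/* r0/r1 carry the arguments, r6 the call ID, per the constraints above */
		return (int)arch_syscall_invoke2((uintptr_t)dev, (uintptr_t)flags,
						 K_SYSCALL_MY_CALL);
	}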
@ -38,7 +38,7 @@ extern "C" {
/* There is no notion of priority with the Nios II internal interrupt
 * controller and no flags are currently supported.
 */
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
	irq_p; \

@ -46,7 +46,7 @@ extern "C" {

extern void z_irq_spurious(void *unused);

static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key, tmp;

@ -61,7 +61,7 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
	return key;
}

static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	/* If the CPU is built without certain features, then
	 * the only writable bit in the status register is PIE

@ -93,13 +93,13 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
#endif
}

static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return key & 1;
}

void z_arch_irq_enable(unsigned int irq);
void z_arch_irq_disable(unsigned int irq);
void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq);

struct __esf {
	u32_t ra; /* return address r31 */

@ -173,12 +173,12 @@ enum nios2_exception_cause {

extern u32_t z_timer_cycle_get_32(void);

static inline u32_t z_arch_k_cycle_get_32(void)
static inline u32_t arch_k_cycle_get_32(void)
{
	return z_timer_cycle_get_32();
}

static ALWAYS_INLINE void z_arch_nop(void)
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}
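Usage reminder: drivers reach ARCH_IRQ_CONNECT through the portable IRQ_CONNECT macro; on Nios II the priority argument is accepted but has no effect, per the comment above. UART0_IRQ and uart_isr are assumptions for illustration:

	IRQ_CONNECT(UART0_IRQ, 1, uart_isr, NULL, 0); /* priority unused on Nios II */
	irq_enable(UART0_IRQ);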
@ -48,28 +48,28 @@ typedef struct __esf z_arch_esf_t;

extern u32_t z_timer_cycle_get_32(void);

static inline u32_t z_arch_k_cycle_get_32(void)
static inline u32_t arch_k_cycle_get_32(void)
{
	return z_timer_cycle_get_32();
}

static ALWAYS_INLINE void z_arch_nop(void)
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return key == false;
}

static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	return posix_irq_lock();
}

static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	posix_irq_unlock(key);
}
@ -64,21 +64,21 @@ extern "C" {
 */
extern u32_t __soc_get_irq(void);

void z_arch_irq_enable(unsigned int irq);
void z_arch_irq_disable(unsigned int irq);
int z_arch_irq_is_enabled(unsigned int irq);
void z_arch_irq_priority_set(unsigned int irq, unsigned int prio);
void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq);
int arch_irq_is_enabled(unsigned int irq);
void arch_irq_priority_set(unsigned int irq, unsigned int prio);
void z_irq_spurious(void *unused);

#if defined(CONFIG_RISCV_HAS_PLIC)
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
	z_arch_irq_priority_set(irq_p, priority_p); \
	arch_irq_priority_set(irq_p, priority_p); \
	irq_p; \
})
#else
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
	irq_p; \

@ -89,7 +89,7 @@ void z_irq_spurious(void *unused);
 * use atomic instruction csrrc to lock global irq
 * csrrc: atomic read and clear bits in CSR register
 */
static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key;
	ulong_t mstatus;

@ -107,7 +107,7 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 * use atomic instruction csrrs to unlock global irq
 * csrrs: atomic read and set bits in CSR register
 */
static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	ulong_t mstatus;

@ -117,26 +117,26 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
		: "memory");
}

static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	/* FIXME: looking at z_arch_irq_lock, this should be reducible
	/* FIXME: looking at arch_irq_lock, this should be reducible
	 * to just testing that key is nonzero (because it should only
	 * have the single bit set). But there is a mask applied to
	 * the argument in z_arch_irq_unlock() that has me worried
	 * the argument in arch_irq_unlock() that has me worried
	 * that something elsewhere might try to set a bit? Do it
	 * the safe way for now.
	 */
	return (key & SOC_MSTATUS_IEN) == SOC_MSTATUS_IEN;
}

static ALWAYS_INLINE void z_arch_nop(void)
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

extern u32_t z_timer_cycle_get_32(void);

static inline u32_t z_arch_k_cycle_get_32(void)
static inline u32_t arch_k_cycle_get_32(void)
{
	return z_timer_cycle_get_32();
}
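Closing sketch for the RISC-V variant: the key holds the previous mstatus.MIE state, so arch_irq_unlocked() reports whether interrupts were enabled before the lock was taken (SOC_MSTATUS_IEN being the MIE mask used above):

	unsigned int key = arch_irq_lock(); /* csrrc clears mstatus.MIE atomically */

	if (arch_irq_unlocked(key)) {
		/* interrupts were enabled before this lock: outermost holder */
	}
	arch_irq_unlock(key); /* csrrs restores MIE only if key had it set */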
Some files were not shown because too many files have changed in this diff.