all: Update reserved function names
Update reserved function names starting with one underscore, replacing
them as follows:

  '_k_' with 'z_'
  '_K_' with 'Z_'
  '_handler_' with 'z_handl_'
  '_Cstart' with 'z_cstart'
  '_Swap' with 'z_swap'

This renaming is done on both global and those static function names
in kernel/include and include/. Other static function names in kernel/
are renamed by removing the leading underscore.

Other function names not starting with any prefix listed above are
renamed starting with a 'z_' or 'Z_' prefix.

Function names starting with two or three leading underscores are not
automatically renamed since these names will collide with the variants
with two or three leading underscores.

Various generator scripts have also been updated as well as perf,
linker and usb files. These are:

  drivers/serial/uart_handlers.c
  include/linker/kobject-text.ld
  kernel/include/syscall_handler.h
  scripts/gen_kobject_list.py
  scripts/gen_syscall_header.py

Signed-off-by: Patrik Flykt <patrik.flykt@intel.com>
parent cf2d57952e
commit 4344e27c26
324 changed files with 2264 additions and 2263 deletions
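The renames listed in the message above are mechanical prefix substitutions. As a rough, illustrative sketch only (the commit was produced by the project's own tooling, which is not reproduced here; RENAMES and rename_identifiers() are hypothetical names invented for this example, and the kernel/ vs. include/ scoping rules are ignored), the substitution table could be expressed in Python as:

    # Hypothetical sketch of the rename rules described in the commit message;
    # this is not the script that produced the commit.
    import re

    RENAMES = [
        (r'\b_k_', 'z_'),              # '_k_'       -> 'z_'
        (r'\b_K_', 'Z_'),              # '_K_'       -> 'Z_'
        (r'\b_handler_', 'z_handl_'),  # '_handler_' -> 'z_handl_'
        (r'\b_Cstart\b', 'z_cstart'),  # '_Cstart'   -> 'z_cstart'
        (r'\b_Swap\b', 'z_swap'),      # '_Swap'     -> 'z_swap'
    ]

    def rename_identifiers(source):
        """Apply the single-underscore prefix renames to a chunk of source."""
        for pattern, replacement in RENAMES:
            source = re.sub(pattern, replacement, source)
        # Names with two or three leading underscores never match the patterns
        # above, mirroring the "not automatically renamed" rule in the message.
        return source

    print(rename_identifiers("_Swap(); _k_thread_single_abort();"))
    # -> "z_swap(); z_thread_single_abort();"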
@@ -51,7 +51,7 @@
static bool dcache_available(void)
{
- unsigned long val = _arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
+ unsigned long val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);

val &= 0xff; /* extract version */
return (val == 0) ? false : true;

@@ -60,7 +60,7 @@ static bool dcache_available(void)
static void dcache_dc_ctrl(u32_t dcache_en_mask)
{
if (dcache_available()) {
- _arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, dcache_en_mask);
+ z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, dcache_en_mask);
}
}

@@ -101,13 +101,13 @@ static void dcache_flush_mlines(u32_t start_addr, u32_t size)
key = irq_lock(); /* --enter critical section-- */

do {
- _arc_v2_aux_reg_write(_ARC_V2_DC_FLDL, start_addr);
+ z_arc_v2_aux_reg_write(_ARC_V2_DC_FLDL, start_addr);
__asm__ volatile("nop_s");
__asm__ volatile("nop_s");
__asm__ volatile("nop_s");
/* wait for flush completion */
do {
- if ((_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) &
+ if ((z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) &
DC_CTRL_FLUSH_STATUS) == 0) {
break;
}

@@ -149,7 +149,7 @@ static void init_dcache_line_size(void)
{
u32_t val;

- val = _arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
+ val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
__ASSERT((val&0xff) != 0, "d-cache is not present");
val = ((val>>16) & 0xf) + 1;
val *= 16;
@@ -145,7 +145,7 @@ SECTION_FUNC(TEXT, _firq_exit)
st r0, [r1]

#ifdef CONFIG_STACK_SENTINEL
- bl _check_stack_sentinel
+ bl z_check_stack_sentinel
#endif

#ifdef CONFIG_PREEMPT_ENABLED

@@ -284,7 +284,7 @@ _firq_return_from_coop:
pop_s r0 /* status32 into r0 */
/*
* There are only two interrupt lock states: locked and unlocked. When
- * entering _Swap(), they are always locked, so the IE bit is unset in
+ * entering z_swap(), they are always locked, so the IE bit is unset in
* status32. If the incoming thread had them locked recursively, it
* means that the IE bit should stay unset. The only time the bit
* has to change is if they were not locked recursively.
@@ -25,7 +25,7 @@
*
* This routine is called when fatal error conditions are detected by software
* and is responsible only for reporting the error. Once reported, it then
- * invokes the user provided routine _SysFatalErrorHandler() which is
+ * invokes the user provided routine z_SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* The caller is expected to always provide a usable ESF. In the event that the

@@ -34,7 +34,7 @@
*
* @return This function does not return.
*/
- void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
+ void z_NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
{
LOG_PANIC();

@@ -70,7 +70,7 @@ void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)

if (reason == _NANO_ERR_HW_EXCEPTION) {
printk("Faulting instruction address = 0x%lx\n",
- _arc_v2_aux_reg_read(_ARC_V2_ERET));
+ z_arc_v2_aux_reg_read(_ARC_V2_ERET));
}

/*

@@ -81,12 +81,12 @@ void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
* decide.
*/

- _SysFatalErrorHandler(reason, pEsf);
+ z_SysFatalErrorHandler(reason, pEsf);
}

- FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
+ FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
{
LOG_PANIC();
- _SysFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, ssf_ptr);
+ z_SysFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, ssf_ptr);
CODE_UNREACHABLE;
}
@@ -34,14 +34,14 @@ static const struct z_exc_handle exceptions[] = {
*
* This routine is called when fatal error conditions are detected by hardware
* and is responsible only for reporting the error. Once reported, it then
- * invokes the user provided routine _SysFatalErrorHandler() which is
+ * invokes the user provided routine z_SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*/
void _Fault(NANO_ESF *esf)
{
u32_t vector, code, parameter;
- u32_t exc_addr = _arc_v2_aux_reg_read(_ARC_V2_EFA);
- u32_t ecr = _arc_v2_aux_reg_read(_ARC_V2_ECR);
+ u32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);
+ u32_t ecr = z_arc_v2_aux_reg_read(_ARC_V2_ECR);

LOG_PANIC();

@@ -64,7 +64,7 @@ void _Fault(NANO_ESF *esf)

/* exception raised by kernel */
if (vector == 0x9 && parameter == _TRAP_S_CALL_RUNTIME_EXCEPT) {
- _NanoFatalErrorHandler(esf->r0, esf);
+ z_NanoFatalErrorHandler(esf->r0, esf);
return;
}

@@ -76,9 +76,9 @@ void _Fault(NANO_ESF *esf)
* check violation
*/
if (vector == 6 && parameter == 2) {
- _NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
+ z_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
return;
}
#endif
- _NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
+ z_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
}
@@ -36,7 +36,7 @@
* @return N/A
*/

- void _arch_irq_enable(unsigned int irq)
+ void z_arch_irq_enable(unsigned int irq)
{
unsigned int key = irq_lock();

@@ -53,7 +53,7 @@ void _arch_irq_enable(unsigned int irq)
* @return N/A
*/

- void _arch_irq_disable(unsigned int irq)
+ void z_arch_irq_disable(unsigned int irq)
{
unsigned int key = irq_lock();

@@ -75,7 +75,7 @@ void _arch_irq_disable(unsigned int irq)
* @return N/A
*/

- void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
+ void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
{
ARG_UNUSED(flags);

@@ -96,22 +96,22 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
* @return N/A
*/

- void _irq_spurious(void *unused)
+ void z_irq_spurious(void *unused)
{
ARG_UNUSED(unused);
- printk("_irq_spurious(). Spinning...\n");
+ printk("z_irq_spurious(). Spinning...\n");
for (;;) {
;
}
}

#ifdef CONFIG_DYNAMIC_INTERRUPTS
- int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+ int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter), void *parameter,
u32_t flags)
{
z_isr_install(irq, routine, parameter);
- _irq_priority_set(irq, priority, flags);
+ z_irq_priority_set(irq, priority, flags);
return irq;
}
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
@@ -45,7 +45,7 @@ SECTION_VAR(BSS, saved_sp)
#endif

#if defined(CONFIG_SYS_POWER_MANAGEMENT)
- GTEXT(_sys_power_save_idle_exit)
+ GTEXT(z_sys_power_save_idle_exit)
#endif

/*

@@ -95,7 +95,7 @@ The context switch code adopts this standard so that it is easier to follow:
transition from outgoing thread to incoming thread

Not loading _kernel into r0 allows loading _kernel without stomping on
- the parameter in r0 in _Swap().
+ the parameter in r0 in z_swap().

ARCv2 processors have two kinds of interrupts: fast (FIRQ) and regular. The

@@ -195,7 +195,7 @@ From FIRQ:

o to coop

- The address of the returning instruction from _Swap() is loaded in ilink and
+ The address of the returning instruction from z_swap() is loaded in ilink and
the saved status32 in status32_p0, taking care to adjust the interrupt lock
state desired in status32_p0. The return value is put in r0.

@@ -359,7 +359,7 @@ GTEXT(z_sys_trace_isr_enter)

st 0, [r1, _kernel_offset_to_idle] /* zero idle duration */
push_s blink
- jl _sys_power_save_idle_exit
+ jl z_sys_power_save_idle_exit
pop_s blink

_skip_sys_power_save_idle_exit:
@@ -104,7 +104,7 @@ void configure_mpu_mem_domain(struct k_thread *thread)
arc_core_mpu_configure_mem_domain(thread);
}

- int _arch_mem_domain_max_partitions_get(void)
+ int z_arch_mem_domain_max_partitions_get(void)
{
return arc_core_mpu_get_max_domain_partition_regions();
}

@@ -112,7 +112,7 @@ int _arch_mem_domain_max_partitions_get(void)
/*
* Reset MPU region for a single memory partition
*/
- void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
+ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id)
{
ARG_UNUSED(domain);

@@ -126,7 +126,7 @@ void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
/*
* Configure MPU memory domain
*/
- void _arch_mem_domain_configure(struct k_thread *thread)
+ void z_arch_mem_domain_configure(struct k_thread *thread)
{
configure_mpu_mem_domain(thread);
}

@@ -134,7 +134,7 @@ void _arch_mem_domain_configure(struct k_thread *thread)
/*
* Destroy MPU regions for the mem domain
*/
- void _arch_mem_domain_destroy(struct k_mem_domain *domain)
+ void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
{
ARG_UNUSED(domain);

@@ -152,7 +152,7 @@ void _arch_mem_domain_partition_add(struct k_mem_domain *domain,
/*
* Validate the given buffer is user accessible or not
*/
- int _arch_buffer_validate(void *addr, size_t size, int write)
+ int z_arch_buffer_validate(void *addr, size_t size, int write)
{
return arc_core_mpu_buffer_validate(addr, size, write);
}
@@ -55,7 +55,7 @@ LOG_MODULE_DECLARE(mpu);
*/
static inline u8_t _get_num_regions(void)
{
- u32_t num = _arc_v2_aux_reg_read(_ARC_V2_MPU_BUILD);
+ u32_t num = z_arc_v2_aux_reg_read(_ARC_V2_MPU_BUILD);

num = (num & 0xFF00) >> 8;

@@ -107,8 +107,8 @@ static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
region_addr = 0U;
}

- _arc_v2_aux_reg_write(_ARC_V2_MPU_RDP0 + index, region_attr);
- _arc_v2_aux_reg_write(_ARC_V2_MPU_RDB0 + index, region_addr);
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDP0 + index, region_attr);
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDB0 + index, region_addr);

#elif CONFIG_ARC_MPU_VER == 3
#define AUX_MPU_RPER_SID1 0x10000

@@ -122,11 +122,11 @@ static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
AUX_MPU_RPER_SID1);
}

- _arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
- _arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, region_addr);
- _arc_v2_aux_reg_write(_ARC_V2_MPU_REND,
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, region_addr);
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND,
CALC_REGION_END_ADDR(region_addr, size));
- _arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, region_attr);
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, region_attr);
#endif
}

@@ -135,8 +135,8 @@ static inline s32_t _mpu_probe(u32_t addr)
{
u32_t val;

- _arc_v2_aux_reg_write(_ARC_V2_MPU_PROBE, addr);
- val = _arc_v2_aux_reg_read(_ARC_V2_MPU_INDEX);
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_PROBE, addr);
+ val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_INDEX);

/* if no match or multiple regions match, return error */
if (val & 0xC0000000) {

@@ -215,11 +215,11 @@ static inline u32_t _get_region_index_by_type(u32_t type)
static inline int _is_enabled_region(u32_t r_index)
{
#if CONFIG_ARC_MPU_VER == 2
- return ((_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
+ return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
& AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
#elif CONFIG_ARC_MPU_VER == 3
- _arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
- return ((_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) &
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
+ return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) &
AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
#endif
}

@@ -234,9 +234,9 @@ static inline int _is_in_region(u32_t r_index, u32_t start, u32_t size)
u32_t r_addr_end;
u32_t r_size_lshift;

- r_addr_start = _arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
+ r_addr_start = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
& (~AUX_MPU_RDB_VALID_MASK);
- r_size_lshift = _arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index)
+ r_size_lshift = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index)
& AUX_MPU_RDP_ATTR_MASK;
r_size_lshift = (r_size_lshift & 0x3) | ((r_size_lshift >> 7) & 0x1C);
r_addr_end = r_addr_start + (1 << (r_size_lshift + 1));

@@ -264,10 +264,10 @@ static inline int _is_user_accessible_region(u32_t r_index, int write)
u32_t r_ap;

#if CONFIG_ARC_MPU_VER == 2
- r_ap = _arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index);
+ r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index);
#elif CONFIG_ARC_MPU_VER == 3
- _arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
- r_ap = _arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
+ r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
#endif
r_ap &= AUX_MPU_RDP_ATTR_MASK;

@@ -289,8 +289,8 @@ void arc_core_mpu_enable(void)
{
#if CONFIG_ARC_MPU_VER == 2
/* Enable MPU */
- _arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
- _arc_v2_aux_reg_read(_ARC_V2_MPU_EN) | AUX_MPU_EN_ENABLE);
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
+ z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) | AUX_MPU_EN_ENABLE);

/* MPU is always enabled, use default region to
* simulate MPU enable

@@ -308,8 +308,8 @@ void arc_core_mpu_disable(void)
{
#if CONFIG_ARC_MPU_VER == 2
/* Disable MPU */
- _arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
- _arc_v2_aux_reg_read(_ARC_V2_MPU_EN) & AUX_MPU_EN_DISABLE);
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
+ z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) & AUX_MPU_EN_DISABLE);
#elif CONFIG_ARC_MPU_VER == 3
/* MPU is always enabled, use default region to
* simulate MPU disable

@@ -411,12 +411,12 @@ void arc_core_mpu_configure(u8_t type, u32_t base, u32_t size)
*/
void arc_core_mpu_default(u32_t region_attr)
{
- u32_t val = _arc_v2_aux_reg_read(_ARC_V2_MPU_EN) &
+ u32_t val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) &
(~AUX_MPU_RDP_ATTR_MASK);

region_attr &= AUX_MPU_RDP_ATTR_MASK;

- _arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr | val);
+ z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr | val);
}

/**
@ -10,7 +10,7 @@
|
|||
*
|
||||
*
|
||||
* Initialization of full C support: zero the .bss, copy the .data if XIP,
|
||||
* call _Cstart().
|
||||
* call z_cstart().
|
||||
*
|
||||
* Stack is available in this module, but not the global data/bss until their
|
||||
* initialization is performed.
|
||||
|
@ -40,14 +40,14 @@ static void disable_icache(void)
|
|||
{
|
||||
unsigned int val;
|
||||
|
||||
val = _arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
|
||||
val = z_arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
|
||||
val &= 0xff; /* version field */
|
||||
if (val == 0) {
|
||||
return; /* skip if i-cache is not present */
|
||||
}
|
||||
_arc_v2_aux_reg_write(_ARC_V2_IC_IVIC, 0);
|
||||
z_arc_v2_aux_reg_write(_ARC_V2_IC_IVIC, 0);
|
||||
__asm__ __volatile__ ("nop");
|
||||
_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, 1);
|
||||
z_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -64,12 +64,12 @@ static void invalidate_dcache(void)
|
|||
{
|
||||
unsigned int val;
|
||||
|
||||
val = _arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
|
||||
val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
|
||||
val &= 0xff; /* version field */
|
||||
if (val == 0) {
|
||||
return; /* skip if d-cache is not present */
|
||||
}
|
||||
_arc_v2_aux_reg_write(_ARC_V2_DC_IVDC, 1);
|
||||
z_arc_v2_aux_reg_write(_ARC_V2_DC_IVDC, 1);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -97,15 +97,15 @@ static void adjust_vector_table_base(void)
|
|||
* from the base address known by the ARC CPU,
|
||||
* set the vector base to the compiled-in address.
|
||||
*/
|
||||
vbr = _arc_v2_aux_reg_read(_ARC_V2_IRQ_VECT_BASE);
|
||||
vbr = z_arc_v2_aux_reg_read(_ARC_V2_IRQ_VECT_BASE);
|
||||
vbr &= 0xfffffc00;
|
||||
if (vbr != (unsigned int)&_VectorTable) {
|
||||
_arc_v2_aux_reg_write(_ARC_V2_IRQ_VECT_BASE,
|
||||
z_arc_v2_aux_reg_write(_ARC_V2_IRQ_VECT_BASE,
|
||||
(unsigned int)&_VectorTable);
|
||||
}
|
||||
}
|
||||
|
||||
extern FUNC_NORETURN void _Cstart(void);
|
||||
extern FUNC_NORETURN void z_cstart(void);
|
||||
/**
|
||||
*
|
||||
* @brief Prepare to and run C code
|
||||
|
@ -119,8 +119,8 @@ void _PrepC(void)
|
|||
{
|
||||
_icache_setup();
|
||||
adjust_vector_table_base();
|
||||
_bss_zero();
|
||||
_data_copy();
|
||||
_Cstart();
|
||||
z_bss_zero();
|
||||
z_data_copy();
|
||||
z_cstart();
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
|
|
@ -104,7 +104,7 @@ SECTION_FUNC(TEXT, _rirq_exit)
|
|||
st r0, [r1]
|
||||
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
bl _check_stack_sentinel
|
||||
bl z_check_stack_sentinel
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PREEMPT_ENABLED
|
||||
|
|
|
@ -51,7 +51,7 @@ GDATA(_kernel)
|
|||
* not has already been taken and a context switch must happen.
|
||||
*
|
||||
* @return may contain a return value setup by a call to
|
||||
* _set_thread_return_value()
|
||||
* z_set_thread_return_value()
|
||||
*
|
||||
* C function prototype:
|
||||
*
|
||||
|
@ -89,7 +89,7 @@ SECTION_FUNC(TEXT, __swap)
|
|||
* Carve space for the return value. Setting it to a default of
|
||||
* -EAGAIN eliminates the need for the timeout code to set it.
|
||||
* If another value is ever needed, it can be modified with
|
||||
* _set_thread_return_value().
|
||||
* z_set_thread_return_value().
|
||||
*/
|
||||
ld r3, [_k_neg_eagain]
|
||||
st_s r3, [r2, _thread_offset_to_return_value]
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
* @file
|
||||
* @brief ARCv2 system fatal error handler
|
||||
*
|
||||
* This module provides the _SysFatalErrorHandler() routine for ARCv2 BSPs.
|
||||
* This module provides the z_SysFatalErrorHandler() routine for ARCv2 BSPs.
|
||||
*/
|
||||
|
||||
#include <kernel.h>
|
||||
|
@ -37,7 +37,7 @@
|
|||
*
|
||||
* @return N/A
|
||||
*/
|
||||
__weak void _SysFatalErrorHandler(unsigned int reason,
|
||||
__weak void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
ARG_UNUSED(pEsf);
|
||||
|
@ -52,7 +52,7 @@ __weak void _SysFatalErrorHandler(unsigned int reason,
|
|||
goto hang_system;
|
||||
}
|
||||
|
||||
if (_is_thread_essential()) {
|
||||
if (z_is_thread_essential()) {
|
||||
printk("Fatal fault in essential thread! Spinning...\n");
|
||||
goto hang_system;
|
||||
}
|
||||
|
|
|
@ -46,7 +46,7 @@ struct init_stack_frame {
|
|||
* needed anymore.
|
||||
*
|
||||
* The initial context is a basic stack frame that contains arguments for
|
||||
* _thread_entry() return address, that points at _thread_entry()
|
||||
* z_thread_entry() return address, that points at z_thread_entry()
|
||||
* and status register.
|
||||
*
|
||||
* <options> is currently unused.
|
||||
|
@ -62,13 +62,13 @@ struct init_stack_frame {
|
|||
*
|
||||
* @return N/A
|
||||
*/
|
||||
void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
size_t stackSize, k_thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned int options)
|
||||
{
|
||||
char *pStackMem = K_THREAD_STACK_BUFFER(stack);
|
||||
_ASSERT_VALID_PRIO(priority, pEntry);
|
||||
Z_ASSERT_VALID_PRIO(priority, pEntry);
|
||||
|
||||
char *stackEnd;
|
||||
char *stackAdjEnd;
|
||||
|
@ -171,7 +171,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_ARC_HAS_SECURE
|
||||
pInitCtx->sec_stat = _arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
|
||||
pInitCtx->sec_stat = z_arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
|
||||
#endif
|
||||
|
||||
pInitCtx->r0 = (u32_t)pEntry;
|
||||
|
@ -206,7 +206,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
#endif
|
||||
#endif
|
||||
/*
|
||||
* seti instruction in the end of the _Swap() will
|
||||
* seti instruction in the end of the z_swap() will
|
||||
* enable the interrupts based on intlock_key
|
||||
* value.
|
||||
*
|
||||
|
@ -226,7 +226,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
|
||||
FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
|
||||
FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
|
||||
void *p1, void *p2, void *p3)
|
||||
{
|
||||
|
||||
|
|
|
@ -6,9 +6,9 @@
|
|||
|
||||
/**
|
||||
* @file
|
||||
* @brief Wrapper for _thread_entry
|
||||
* @brief Wrapper for z_thread_entry
|
||||
*
|
||||
* Wrapper for _thread_entry routine when called from the initial context.
|
||||
* Wrapper for z_thread_entry routine when called from the initial context.
|
||||
*/
|
||||
|
||||
#include <toolchain.h>
|
||||
|
@ -17,10 +17,10 @@
|
|||
GTEXT(_thread_entry_wrapper)
|
||||
|
||||
/*
|
||||
* @brief Wrapper for _thread_entry
|
||||
* @brief Wrapper for z_thread_entry
|
||||
*
|
||||
* The routine pops parameters for the _thread_entry from stack frame, prepared
|
||||
* by the _new_thread() routine.
|
||||
* The routine pops parameters for the z_thread_entry from stack frame, prepared
|
||||
* by the z_new_thread() routine.
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
|
@ -31,5 +31,5 @@ SECTION_FUNC(TEXT, _thread_entry_wrapper)
|
|||
pop_s r2
|
||||
pop_s r1
|
||||
pop_s r0
|
||||
j _thread_entry
|
||||
j z_thread_entry
|
||||
nop
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
*
|
||||
* @return 64-bit time stamp value
|
||||
*/
|
||||
u64_t _tsc_read(void)
|
||||
u64_t z_tsc_read(void)
|
||||
{
|
||||
unsigned int key;
|
||||
u64_t t;
|
||||
|
@ -31,7 +31,7 @@ u64_t _tsc_read(void)
|
|||
|
||||
key = irq_lock();
|
||||
t = (u64_t)z_tick_get();
|
||||
count = _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
|
||||
count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
|
||||
irq_unlock(key);
|
||||
t *= (u64_t)sys_clock_hw_cycles_per_tick();
|
||||
t += (u64_t)count;
|
||||
|
|
|
@ -52,7 +52,7 @@ GTEXT(z_arch_user_string_nlen_fault_start)
|
|||
GTEXT(z_arch_user_string_nlen_fault_end)
|
||||
GTEXT(z_arch_user_string_nlen_fixup)
|
||||
/*
|
||||
* @brief Wrapper for _thread_entry in the case of user thread
|
||||
* @brief Wrapper for z_thread_entry in the case of user thread
|
||||
* The init parameters are in privileged stack
|
||||
*
|
||||
* @return N/A
|
||||
|
|
|
@ -46,7 +46,7 @@ struct vector_table {
|
|||
u32_t unused_2;
|
||||
};
|
||||
|
||||
struct vector_table _VectorTable _GENERIC_SECTION(.exc_vector_table) = {
|
||||
struct vector_table _VectorTable Z_GENERIC_SECTION(.exc_vector_table) = {
|
||||
(u32_t)__reset,
|
||||
(u32_t)__memory_error,
|
||||
(u32_t)__instruction_error,
|
||||
|
|
|
@ -37,7 +37,7 @@ static ALWAYS_INLINE void kernel_arch_init(void)
|
|||
}
|
||||
|
||||
static ALWAYS_INLINE void
|
||||
_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
z_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
{
|
||||
thread->arch.return_value = value;
|
||||
}
|
||||
|
@ -51,12 +51,12 @@ _set_thread_return_value(struct k_thread *thread, unsigned int value)
|
|||
*/
|
||||
static ALWAYS_INLINE int _INTERRUPT_CAUSE(void)
|
||||
{
|
||||
u32_t irq_num = _arc_v2_aux_reg_read(_ARC_V2_ICAUSE);
|
||||
u32_t irq_num = z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE);
|
||||
|
||||
return irq_num;
|
||||
}
|
||||
|
||||
#define _is_in_isr _arc_v2_irq_unit_is_in_isr
|
||||
#define z_is_in_isr z_arc_v2_irq_unit_is_in_isr
|
||||
|
||||
extern void _thread_entry_wrapper(void);
|
||||
extern void _user_thread_entry_wrapper(void);
|
||||
|
|
|
@ -54,7 +54,7 @@ struct _thread_arch {
|
|||
/* one of the _CAUSE_xxxx definitions above */
|
||||
int relinquish_cause;
|
||||
|
||||
/* return value from _Swap */
|
||||
/* return value from z_swap */
|
||||
unsigned int return_value;
|
||||
|
||||
#ifdef CONFIG_ARC_STACK_CHECKING
|
||||
|
|
|
@ -236,7 +236,7 @@ extern "C" {
|
|||
* The pc and status32 values will still be on the stack. We cannot
|
||||
* pop them yet because the callers of _pop_irq_stack_frame must reload
|
||||
* status32 differently depending on the execution context they are
|
||||
* running in (_Swap(), firq or exception).
|
||||
* running in (z_swap(), firq or exception).
|
||||
*/
|
||||
add_s sp, sp, ___isf_t_SIZEOF
|
||||
|
||||
|
|
|
@ -42,11 +42,11 @@ static ALWAYS_INLINE void _icache_setup(void)
|
|||
);
|
||||
u32_t val;
|
||||
|
||||
val = _arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
|
||||
val = z_arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
|
||||
val &= 0xff;
|
||||
if (val != 0) { /* is i-cache present? */
|
||||
/* configure i-cache */
|
||||
_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, icache_config);
|
||||
z_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, icache_config);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ static ALWAYS_INLINE void _irq_setup(void)
|
|||
);
|
||||
|
||||
k_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL;
|
||||
_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
|
||||
z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
|
||||
|
||||
_kernel.irq_stack =
|
||||
K_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
|
||||
|
|
|
@ -22,7 +22,7 @@ LOG_MODULE_REGISTER(mpu);
|
|||
* available MPU regions for dynamic programming depends on the number of the
|
||||
* static MPU regions currently being programmed, and the total number of HW-
|
||||
* available MPU regions. This macro is only used internally in function
|
||||
* _arch_configure_dynamic_mpu_regions(), to reserve sufficient area for the
|
||||
* z_arch_configure_dynamic_mpu_regions(), to reserve sufficient area for the
|
||||
* array of dynamic regions passed to the underlying driver.
|
||||
*/
|
||||
#if defined(CONFIG_USERSPACE)
|
||||
|
@ -58,7 +58,7 @@ LOG_MODULE_REGISTER(mpu);
|
|||
* For some MPU architectures, such as the unmodified ARMv8-M MPU,
|
||||
* the function must execute with MPU enabled.
|
||||
*/
|
||||
void _arch_configure_static_mpu_regions(void)
|
||||
void z_arch_configure_static_mpu_regions(void)
|
||||
{
|
||||
#if defined(CONFIG_COVERAGE_GCOV) && defined(CONFIG_USERSPACE)
|
||||
const struct k_mem_partition gcov_region =
|
||||
|
@ -141,7 +141,7 @@ void _arch_configure_static_mpu_regions(void)
|
|||
* For some MPU architectures, such as the unmodified ARMv8-M MPU,
|
||||
* the function must execute with MPU enabled.
|
||||
*/
|
||||
void _arch_configure_dynamic_mpu_regions(struct k_thread *thread)
|
||||
void z_arch_configure_dynamic_mpu_regions(struct k_thread *thread)
|
||||
{
|
||||
/* Define an array of k_mem_partition objects to hold the configuration
|
||||
* of the respective dynamic MPU regions to be programmed for
|
||||
|
@ -259,7 +259,7 @@ void _arch_configure_dynamic_mpu_regions(struct k_thread *thread)
|
|||
* that is supported by the MPU hardware, and with respect
|
||||
* to the current static memory region configuration.
|
||||
*/
|
||||
int _arch_mem_domain_max_partitions_get(void)
|
||||
int z_arch_mem_domain_max_partitions_get(void)
|
||||
{
|
||||
int available_regions = arm_core_mpu_get_max_available_dyn_regions();
|
||||
|
||||
|
@ -277,13 +277,13 @@ int _arch_mem_domain_max_partitions_get(void)
|
|||
/**
|
||||
* @brief Configure the memory domain of the thread.
|
||||
*/
|
||||
void _arch_mem_domain_configure(struct k_thread *thread)
|
||||
void z_arch_mem_domain_configure(struct k_thread *thread)
|
||||
{
|
||||
/* Request to configure memory domain for a thread.
|
||||
* This triggers re-programming of the entire dynamic
|
||||
* memory map.
|
||||
*/
|
||||
_arch_configure_dynamic_mpu_regions(thread);
|
||||
z_arch_configure_dynamic_mpu_regions(thread);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -292,7 +292,7 @@ void _arch_mem_domain_configure(struct k_thread *thread)
|
|||
*
|
||||
* @param domain pointer to the memory domain (must be valid)
|
||||
*/
|
||||
void _arch_mem_domain_destroy(struct k_mem_domain *domain)
|
||||
void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
|
||||
{
|
||||
/* This function will reset the access permission configuration
|
||||
* of the active partitions of the memory domain.
|
||||
|
@ -324,7 +324,7 @@ void _arch_mem_domain_destroy(struct k_mem_domain *domain)
|
|||
* @param partition_id the ID (sequence) number of the memory domain
|
||||
* partition (must be a valid partition).
|
||||
*/
|
||||
void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
|
||||
void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
|
||||
u32_t partition_id)
|
||||
{
|
||||
/* Request to remove a partition from a memory domain.
|
||||
|
@ -346,7 +346,7 @@ void _arch_mem_domain_partition_add(struct k_mem_domain *domain,
|
|||
/*
|
||||
* Validate the given buffer is user accessible or not
|
||||
*/
|
||||
int _arch_buffer_validate(void *addr, size_t size, int write)
|
||||
int z_arch_buffer_validate(void *addr, size_t size, int write)
|
||||
{
|
||||
return arm_core_mpu_buffer_validate(addr, size, write);
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ static _NmiHandler_t handler = _SysNmiOnReset;
|
|||
* @brief Default NMI handler installed when kernel is up
|
||||
*
|
||||
* The default handler outputs a error message and reboots the target. It is
|
||||
* installed by calling _NmiInit();
|
||||
* installed by calling z_NmiInit();
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
|
@ -57,7 +57,7 @@ static void _DefaultHandler(void)
|
|||
* @return N/A
|
||||
*/
|
||||
|
||||
void _NmiInit(void)
|
||||
void z_NmiInit(void)
|
||||
{
|
||||
handler = _DefaultHandler;
|
||||
}
|
||||
|
@ -91,5 +91,5 @@ void _NmiHandlerSet(void (*pHandler)(void))
|
|||
void __nmi(void)
|
||||
{
|
||||
handler();
|
||||
_ExcExit();
|
||||
z_ExcExit();
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
*
|
||||
*
|
||||
* Initialization of full C support: zero the .bss, copy the .data if XIP,
|
||||
* call _Cstart().
|
||||
* call z_cstart().
|
||||
*
|
||||
* Stack is available in this module, but not the global data/bss until their
|
||||
* initialization is performed.
|
||||
|
@ -92,7 +92,7 @@ static inline void relocate_vector_table(void)
|
|||
#else
|
||||
|
||||
#if defined(CONFIG_SW_VECTOR_RELAY)
|
||||
_GENERIC_SECTION(.vt_pointer_section) void *_vector_table_pointer;
|
||||
Z_GENERIC_SECTION(.vt_pointer_section) void *_vector_table_pointer;
|
||||
#endif
|
||||
|
||||
#define VECTOR_ADDRESS 0
|
||||
|
@ -151,7 +151,7 @@ static inline void enable_floating_point(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
extern FUNC_NORETURN void _Cstart(void);
|
||||
extern FUNC_NORETURN void z_cstart(void);
|
||||
/**
|
||||
*
|
||||
* @brief Prepare to and run C code
|
||||
|
@ -178,12 +178,12 @@ void _PrepC(void)
|
|||
set_and_switch_to_psp();
|
||||
relocate_vector_table();
|
||||
enable_floating_point();
|
||||
_bss_zero();
|
||||
_data_copy();
|
||||
z_bss_zero();
|
||||
z_data_copy();
|
||||
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
|
||||
__start_time_stamp = 0U;
|
||||
#endif
|
||||
_IntLibInit();
|
||||
_Cstart();
|
||||
z_cstart();
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
|
||||
_ASM_FILE_PROLOGUE
|
||||
|
||||
GTEXT(_ExcExit)
|
||||
GTEXT(z_ExcExit)
|
||||
GTEXT(_IntExit)
|
||||
GDATA(_kernel)
|
||||
|
||||
|
@ -53,7 +53,7 @@ GDATA(_kernel)
|
|||
|
||||
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
|
||||
|
||||
/* _IntExit falls through to _ExcExit (they are aliases of each other) */
|
||||
/* _IntExit falls through to z_ExcExit (they are aliases of each other) */
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -65,7 +65,7 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
|
|||
* @return N/A
|
||||
*/
|
||||
|
||||
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
|
||||
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit)
|
||||
|
||||
#ifdef CONFIG_PREEMPT_ENABLED
|
||||
ldr r0, =_kernel
|
||||
|
@ -88,7 +88,7 @@ _EXIT_EXC:
|
|||
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
push {r0, lr}
|
||||
bl _check_stack_sentinel
|
||||
bl z_check_stack_sentinel
|
||||
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
|
||||
pop {r0, r1}
|
||||
mov lr, r1
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
* @file
|
||||
* @brief Kernel fatal error handler for ARM Cortex-M
|
||||
*
|
||||
* This module provides the _NanoFatalErrorHandler() routine for ARM Cortex-M.
|
||||
* This module provides the z_NanoFatalErrorHandler() routine for ARM Cortex-M.
|
||||
*/
|
||||
|
||||
#include <toolchain.h>
|
||||
|
@ -26,14 +26,14 @@
|
|||
*
|
||||
* This routine is called when fatal error conditions are detected by software
|
||||
* and is responsible only for reporting the error. Once reported, it then
|
||||
* invokes the user provided routine _SysFatalErrorHandler() which is
|
||||
* invokes the user provided routine z_SysFatalErrorHandler() which is
|
||||
* responsible for implementing the error handling policy.
|
||||
*
|
||||
* The caller is expected to always provide a usable ESF. In the event that the
|
||||
* fatal error does not have a hardware generated ESF, the caller should either
|
||||
* create its own or use a pointer to the global default ESF <_default_esf>.
|
||||
*
|
||||
* Unlike other arches, this function may return if _SysFatalErrorHandler
|
||||
* Unlike other arches, this function may return if z_SysFatalErrorHandler
|
||||
* determines that only the current thread should be aborted and the CPU
|
||||
* was in handler mode. PendSV will be asserted in this case and the current
|
||||
* thread taken off the run queue. Leaving the exception will immediately
|
||||
|
@ -44,7 +44,7 @@
|
|||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
void _NanoFatalErrorHandler(unsigned int reason,
|
||||
void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
LOG_PANIC();
|
||||
|
@ -89,15 +89,15 @@ void _NanoFatalErrorHandler(unsigned int reason,
|
|||
* decide.
|
||||
*/
|
||||
|
||||
_SysFatalErrorHandler(reason, pEsf);
|
||||
z_SysFatalErrorHandler(reason, pEsf);
|
||||
}
|
||||
|
||||
void _do_kernel_oops(const NANO_ESF *esf)
|
||||
{
|
||||
_NanoFatalErrorHandler(esf->r0, esf);
|
||||
z_NanoFatalErrorHandler(esf->r0, esf);
|
||||
}
|
||||
|
||||
FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
|
||||
FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
|
||||
{
|
||||
u32_t *ssf_contents = ssf_ptr;
|
||||
NANO_ESF oops_esf = { 0 };
|
||||
|
|
|
@ -764,8 +764,8 @@ static void _SecureStackDump(const NANO_ESF *secure_esf)
|
|||
* error handling policy allows the system to recover from the error),
|
||||
* - reporting the error information,
|
||||
* - determining the error reason to be provided as input to the user-
|
||||
* provided routine, _NanoFatalErrorHandler().
|
||||
* The _NanoFatalErrorHandler() is invoked once the above operations are
|
||||
* provided routine, z_NanoFatalErrorHandler().
|
||||
* The z_NanoFatalErrorHandler() is invoked once the above operations are
|
||||
* completed, and is responsible for implementing the error handling policy.
|
||||
*
|
||||
* The provided ESF pointer points to the exception stack frame of the current
|
||||
|
@ -869,7 +869,7 @@ void _Fault(NANO_ESF *esf, u32_t exc_return)
|
|||
defined(CONFIG_ARM_NONSECURE_FIRMWARE)
|
||||
_exit_fatal:
|
||||
#endif
|
||||
_NanoFatalErrorHandler(reason, esf);
|
||||
z_NanoFatalErrorHandler(reason, esf);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
* The ARM Cortex-M architecture provides its own k_thread_abort() to deal with
|
||||
* different CPU modes (handler vs thread) when a thread aborts. When its entry
|
||||
* point returns or when it aborts itself, the CPU is in thread mode and must
|
||||
* call _Swap() (which triggers a service call), but when in handler mode, the
|
||||
* call z_swap() (which triggers a service call), but when in handler mode, the
|
||||
* CPU must exit handler mode to cause the context switch, and thus must queue
|
||||
* the PendSV exception.
|
||||
*/
|
||||
|
|
|
@ -40,7 +40,7 @@ extern void __reserved(void);
|
|||
*
|
||||
* @return N/A
|
||||
*/
|
||||
void _arch_irq_enable(unsigned int irq)
|
||||
void z_arch_irq_enable(unsigned int irq)
|
||||
{
|
||||
NVIC_EnableIRQ((IRQn_Type)irq);
|
||||
}
|
||||
|
@ -54,7 +54,7 @@ void _arch_irq_enable(unsigned int irq)
|
|||
*
|
||||
* @return N/A
|
||||
*/
|
||||
void _arch_irq_disable(unsigned int irq)
|
||||
void z_arch_irq_disable(unsigned int irq)
|
||||
{
|
||||
NVIC_DisableIRQ((IRQn_Type)irq);
|
||||
}
|
||||
|
@ -65,7 +65,7 @@ void _arch_irq_disable(unsigned int irq)
|
|||
* @param irq IRQ line
|
||||
* @return interrupt enable state, true or false
|
||||
*/
|
||||
int _arch_irq_is_enabled(unsigned int irq)
|
||||
int z_arch_irq_is_enabled(unsigned int irq)
|
||||
{
|
||||
return NVIC->ISER[REG_FROM_IRQ(irq)] & (1 << BIT_FROM_IRQ(irq));
|
||||
}
|
||||
|
@ -81,7 +81,7 @@ int _arch_irq_is_enabled(unsigned int irq)
|
|||
*
|
||||
* @return N/A
|
||||
*/
|
||||
void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
|
||||
void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
|
||||
{
|
||||
/* The kernel may reserve some of the highest priority levels.
|
||||
* So we offset the requested priority level with the number
|
||||
|
@ -126,7 +126,7 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
|
|||
*
|
||||
* @return N/A
|
||||
*/
|
||||
void _irq_spurious(void *unused)
|
||||
void z_irq_spurious(void *unused)
|
||||
{
|
||||
ARG_UNUSED(unused);
|
||||
__reserved();
|
||||
|
@ -163,7 +163,7 @@ void _arch_isr_direct_pm(void)
|
|||
s32_t idle_val = _kernel.idle;
|
||||
|
||||
_kernel.idle = 0;
|
||||
_sys_power_save_idle_exit(idle_val);
|
||||
z_sys_power_save_idle_exit(idle_val);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
|
||||
|
@ -177,7 +177,7 @@ void _arch_isr_direct_pm(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
void _arch_isr_direct_header(void)
|
||||
void z_arch_isr_direct_header(void)
|
||||
{
|
||||
z_sys_trace_isr_enter();
|
||||
}
|
||||
|
@ -239,12 +239,12 @@ int irq_target_state_is_secure(unsigned int irq)
|
|||
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_INTERRUPTS
|
||||
int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
||||
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
||||
void (*routine)(void *parameter), void *parameter,
|
||||
u32_t flags)
|
||||
{
|
||||
z_isr_install(irq, routine, parameter);
|
||||
_irq_priority_set(irq, priority, flags);
|
||||
z_irq_priority_set(irq, priority, flags);
|
||||
return irq;
|
||||
}
|
||||
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
|
||||
|
|
|
@ -57,7 +57,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
|
|||
* idle, this ensures that the calculation and programming of the device
|
||||
* for the next timer deadline is not interrupted. For non-tickless idle,
|
||||
* this ensures that the clearing of the kernel idle state is not
|
||||
* interrupted. In each case, _sys_power_save_idle_exit is called with
|
||||
* interrupted. In each case, z_sys_power_save_idle_exit is called with
|
||||
* interrupts disabled.
|
||||
*/
|
||||
cpsid i /* PRIMASK = 1 */
|
||||
|
@ -73,7 +73,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
|
|||
movs.n r1, #0
|
||||
/* clear kernel idle state */
|
||||
str r1, [r2, #_kernel_offset_to_idle]
|
||||
blx _sys_power_save_idle_exit
|
||||
blx z_sys_power_save_idle_exit
|
||||
_idle_state_cleared:
|
||||
|
||||
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
|
||||
|
@ -81,7 +81,7 @@ _idle_state_cleared:
|
|||
movne r1, #0
|
||||
/* clear kernel idle state */
|
||||
strne r1, [r2, #_kernel_offset_to_idle]
|
||||
blxne _sys_power_save_idle_exit
|
||||
blxne z_sys_power_save_idle_exit
|
||||
#else
|
||||
#error Unknown ARM architecture
|
||||
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
|
||||
|
|
|
@ -42,7 +42,7 @@ extern const int _k_neg_eagain;
|
|||
* as BASEPRI is not available.
|
||||
*
|
||||
* @return -EAGAIN, or a return value set by a call to
|
||||
* _set_thread_return_value()
|
||||
* z_set_thread_return_value()
|
||||
*
|
||||
*/
|
||||
int __swap(int key)
|
||||
|
|
|
@ -187,7 +187,7 @@ _thread_irq_disabled:
|
|||
push {r2,lr}
|
||||
ldr r0, =_kernel
|
||||
ldr r0, [r0, #_kernel_offset_to_current]
|
||||
bl _arch_configure_dynamic_mpu_regions
|
||||
bl z_arch_configure_dynamic_mpu_regions
|
||||
pop {r2,lr}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
* @file
|
||||
* @brief ARM Cortex-M system fatal error handler
|
||||
*
|
||||
* This module provides the _SysFatalErrorHandler() routine for Cortex-M
|
||||
* This module provides the z_SysFatalErrorHandler() routine for Cortex-M
|
||||
* platforms.
|
||||
*/
|
||||
|
||||
|
@ -38,7 +38,7 @@
|
|||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
void __weak _SysFatalErrorHandler(unsigned int reason,
|
||||
void __weak z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
ARG_UNUSED(pEsf);
|
||||
|
@ -52,7 +52,7 @@ void __weak _SysFatalErrorHandler(unsigned int reason,
|
|||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
if (k_is_in_isr() || _is_thread_essential()) {
|
||||
if (k_is_in_isr() || z_is_thread_essential()) {
|
||||
printk("Fatal fault in %s! Spinning...\n",
|
||||
k_is_in_isr() ? "ISR" : "essential thread");
|
||||
goto hang_system;
|
||||
|
|
|
@ -50,7 +50,7 @@ extern u8_t *_k_priv_stack_find(void *obj);
|
|||
* @return N/A
|
||||
*/
|
||||
|
||||
void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
size_t stackSize, k_thread_entry_t pEntry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned int options)
|
||||
|
@ -60,7 +60,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
/* Offset between the top of stack and the high end of stack area. */
|
||||
u32_t top_of_stack_offset = 0;
|
||||
|
||||
_ASSERT_VALID_PRIO(priority, pEntry);
|
||||
Z_ASSERT_VALID_PRIO(priority, pEntry);
|
||||
|
||||
#if defined(CONFIG_USERSPACE)
|
||||
/* Truncate the stack size to align with the MPU region granularity.
|
||||
|
@ -106,12 +106,12 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
|
||||
#if CONFIG_USERSPACE
|
||||
if ((options & K_USER) != 0) {
|
||||
pInitCtx->pc = (u32_t)_arch_user_mode_enter;
|
||||
pInitCtx->pc = (u32_t)z_arch_user_mode_enter;
|
||||
} else {
|
||||
pInitCtx->pc = (u32_t)_thread_entry;
|
||||
pInitCtx->pc = (u32_t)z_thread_entry;
|
||||
}
|
||||
#else
|
||||
pInitCtx->pc = (u32_t)_thread_entry;
|
||||
pInitCtx->pc = (u32_t)z_thread_entry;
|
||||
#endif
|
||||
|
||||
/* force ARM mode by clearing LSB of address */
|
||||
|
@ -142,7 +142,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
|
||||
FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
|
||||
FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
|
||||
void *p1, void *p2, void *p3)
|
||||
{
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
* The ARM Cortex-M architecture provides its own k_thread_abort() to deal
|
||||
* with different CPU modes (handler vs thread) when a thread aborts. When its
|
||||
* entry point returns or when it aborts itself, the CPU is in thread mode and
|
||||
* must call _Swap() (which triggers a service call), but when in handler
|
||||
* must call z_swap() (which triggers a service call), but when in handler
|
||||
* mode, the CPU must exit handler mode to cause the context switch, and thus
|
||||
* must queue the PendSV exception.
|
||||
*/
|
||||
|
@ -25,9 +25,9 @@
|
|||
#include <wait_q.h>
|
||||
#include <misc/__assert.h>
|
||||
|
||||
extern void _k_thread_single_abort(struct k_thread *thread);
|
||||
extern void z_thread_single_abort(struct k_thread *thread);
|
||||
|
||||
void _impl_k_thread_abort(k_tid_t thread)
|
||||
void z_impl_k_thread_abort(k_tid_t thread)
|
||||
{
|
||||
unsigned int key;
|
||||
|
||||
|
@ -36,12 +36,12 @@ void _impl_k_thread_abort(k_tid_t thread)
|
|||
__ASSERT(!(thread->base.user_options & K_ESSENTIAL),
|
||||
"essential thread aborted");
|
||||
|
||||
_k_thread_single_abort(thread);
|
||||
_thread_monitor_exit(thread);
|
||||
z_thread_single_abort(thread);
|
||||
z_thread_monitor_exit(thread);
|
||||
|
||||
if (_current == thread) {
|
||||
if ((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) == 0) {
|
||||
(void)_Swap_irqlock(key);
|
||||
(void)z_swap_irqlock(key);
|
||||
CODE_UNREACHABLE;
|
||||
} else {
|
||||
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
|
||||
|
@ -49,5 +49,5 @@ void _impl_k_thread_abort(k_tid_t thread)
|
|||
}
|
||||
|
||||
/* The abort handler might have altered the ready queue. */
|
||||
_reschedule_irqlock(key);
|
||||
z_reschedule_irqlock(key);
|
||||
}
|
||||
|
|
|
@ -68,7 +68,7 @@ SECTION_FUNC(TEXT,_arm_userspace_enter)
|
|||
/* Re-program dynamic memory map.
|
||||
*
|
||||
* Important note:
|
||||
* _arch_configure_dynamic_mpu_regions() may re-program the MPU Stack Guard
|
||||
* z_arch_configure_dynamic_mpu_regions() may re-program the MPU Stack Guard
|
||||
* to guard the privilege stack for overflows (if building with option
|
||||
* CONFIG_MPU_STACK_GUARD). There is a risk of actually overflowing the
|
||||
* stack while doing the re-programming. We minimize the risk by placing
|
||||
|
@ -82,7 +82,7 @@ SECTION_FUNC(TEXT,_arm_userspace_enter)
|
|||
push {r0,r1,r2,r3,ip,lr}
|
||||
ldr r0, =_kernel
|
||||
ldr r0, [r0, #_kernel_offset_to_current]
|
||||
bl _arch_configure_dynamic_mpu_regions
|
||||
bl z_arch_configure_dynamic_mpu_regions
|
||||
pop {r0,r1,r2,r3,ip,lr}
|
||||
#endif
|
||||
|
||||
|
@ -152,8 +152,8 @@ SECTION_FUNC(TEXT,_arm_userspace_enter)
|
|||
*/
|
||||
isb
|
||||
|
||||
/* jump to _thread_entry entry */
|
||||
ldr ip, =_thread_entry
|
||||
/* jump to z_thread_entry entry */
|
||||
ldr ip, =z_thread_entry
|
||||
bx ip
|
||||
|
||||
/**
|
||||
|
|
|
@ -30,8 +30,8 @@ extern "C" {
|
|||
extern void _FaultInit(void);
|
||||
extern void _CpuIdleInit(void);
|
||||
#ifdef CONFIG_ARM_MPU
|
||||
extern void _arch_configure_static_mpu_regions(void);
|
||||
extern void _arch_configure_dynamic_mpu_regions(struct k_thread *thread);
|
||||
extern void z_arch_configure_static_mpu_regions(void);
|
||||
extern void z_arch_configure_dynamic_mpu_regions(struct k_thread *thread);
|
||||
#endif /* CONFIG_ARM_MPU */
|
||||
|
||||
static ALWAYS_INLINE void kernel_arch_init(void)
|
||||
|
@ -56,7 +56,7 @@ static ALWAYS_INLINE void unlock_interrupts(void)
|
|||
}
|
||||
|
||||
static ALWAYS_INLINE void
|
||||
_arch_switch_to_main_thread(struct k_thread *main_thread,
|
||||
z_arch_switch_to_main_thread(struct k_thread *main_thread,
|
||||
k_thread_stack_t *main_stack,
|
||||
size_t main_stack_size, k_thread_entry_t _main)
|
||||
{
|
||||
|
@ -67,7 +67,7 @@ _arch_switch_to_main_thread(struct k_thread *main_thread,
|
|||
*
|
||||
* This function is invoked once, upon system initialization.
|
||||
*/
|
||||
_arch_configure_static_mpu_regions();
|
||||
z_arch_configure_static_mpu_regions();
|
||||
#endif
|
||||
|
||||
/* get high address of the stack, i.e. its start (stack grows down) */
|
||||
|
@ -110,21 +110,21 @@ _arch_switch_to_main_thread(struct k_thread *main_thread,
|
|||
* If stack protection is enabled, make sure to set it
|
||||
* before jumping to thread entry function
|
||||
*/
|
||||
_arch_configure_dynamic_mpu_regions(main_thread);
|
||||
z_arch_configure_dynamic_mpu_regions(main_thread);
|
||||
#endif
|
||||
_thread_entry(_main, 0, 0, 0);
|
||||
z_thread_entry(_main, 0, 0, 0);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void
|
||||
_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
z_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
{
|
||||
thread->arch.swap_return_value = value;
|
||||
}
|
||||
|
||||
extern void k_cpu_atomic_idle(unsigned int key);
|
||||
|
||||
#define _is_in_isr() _IsInIsr()
|
||||
#define z_is_in_isr() _IsInIsr()
|
||||
|
||||
extern FUNC_NORETURN void _arm_userspace_enter(k_thread_entry_t user_entry,
|
||||
void *p1, void *p2, void *p3,
|
||||
|
|
|
@ -225,7 +225,7 @@ def main():
|
|||
offset = intlist["offset"]
|
||||
prefix = endian_prefix()
|
||||
|
||||
spurious_handler = "&_irq_spurious"
|
||||
spurious_handler = "&z_irq_spurious"
|
||||
sw_irq_handler = "ISR_WRAPPER"
|
||||
|
||||
debug('offset is ' + str(offset))
|
||||
|
|
|
@ -21,7 +21,7 @@ struct int_list_header {
|
|||
* header of the initList section, which is used by gen_isr_tables.py to create
|
||||
* the vector and sw isr tables,
|
||||
*/
|
||||
_GENERIC_SECTION(.irq_info) struct int_list_header _iheader = {
|
||||
Z_GENERIC_SECTION(.irq_info) struct int_list_header _iheader = {
|
||||
.table_size = IRQ_TABLE_SIZE,
|
||||
.offset = CONFIG_GEN_IRQ_START_VECTOR,
|
||||
};
|
||||
|
@ -29,7 +29,7 @@ _GENERIC_SECTION(.irq_info) struct int_list_header _iheader = {
|
|||
/* These are placeholder tables. They will be replaced by the real tables
|
||||
* generated by gen_isr_tables.py.
|
||||
*
|
||||
* _irq_spurious and _isr_wrapper are used as placeholder values to
|
||||
* z_irq_spurious and _isr_wrapper are used as placeholder values to
|
||||
* ensure that they are not optimized out in the first link. The first
|
||||
* link must contain the same symbols as the second one for the code
|
||||
* generation to work.
|
||||
|
@ -49,6 +49,6 @@ u32_t __irq_vector_table _irq_vector_table[IRQ_TABLE_SIZE] = {
|
|||
*/
|
||||
#ifdef CONFIG_GEN_SW_ISR_TABLE
|
||||
struct _isr_table_entry __sw_isr_table _sw_isr_table[IRQ_TABLE_SIZE] = {
|
||||
[0 ...(IRQ_TABLE_SIZE - 1)] = {(void *)0x42, (void *)&_irq_spurious},
|
||||
[0 ...(IRQ_TABLE_SIZE - 1)] = {(void *)0x42, (void *)&z_irq_spurious},
|
||||
};
|
||||
#endif
|
||||
|
|
|
@ -35,7 +35,7 @@ u64_t __common_var_swap_end_time;
|
|||
|
||||
#elif CONFIG_X86
|
||||
#define TIMING_INFO_PRE_READ()
|
||||
#define TIMING_INFO_OS_GET_TIME() (_tsc_read())
|
||||
#define TIMING_INFO_OS_GET_TIME() (z_tsc_read())
|
||||
#define TIMING_INFO_GET_TIMER_VALUE() (TIMING_INFO_OS_GET_TIME())
|
||||
#define SUBTRACT_CLOCK_CYCLES(val) (val)
|
||||
|
||||
|
@ -48,7 +48,7 @@ u64_t __common_var_swap_end_time;
|
|||
#elif CONFIG_ARC
|
||||
#define TIMING_INFO_PRE_READ()
|
||||
#define TIMING_INFO_OS_GET_TIME() (k_cycle_get_32())
|
||||
#define TIMING_INFO_GET_TIMER_VALUE() (_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT))
|
||||
#define TIMING_INFO_GET_TIMER_VALUE() (z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT))
|
||||
#define SUBTRACT_CLOCK_CYCLES(val) ((u32_t)val)
|
||||
|
||||
#elif CONFIG_NIOS2
|
||||
|
|
|
@ -140,6 +140,6 @@ SECTION_FUNC(TEXT, __start)
|
|||
*/
|
||||
|
||||
/* Jump into C domain. _PrepC zeroes BSS, copies rw data into RAM,
|
||||
* and then enters _Cstart */
|
||||
* and then enters z_cstart */
|
||||
call _PrepC
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ const NANO_ESF _default_esf = {
|
|||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
|
||||
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *esf)
|
||||
{
|
||||
LOG_PANIC();
|
||||
|
@ -102,7 +102,7 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
|
|||
esf->estatus);
|
||||
#endif
|
||||
|
||||
_SysFatalErrorHandler(reason, esf);
|
||||
z_SysFatalErrorHandler(reason, esf);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_EXTRA_EXCEPTION_INFO) && defined(CONFIG_PRINTK) \
|
||||
|
@ -194,7 +194,7 @@ FUNC_NORETURN void _Fault(const NANO_ESF *esf)
|
|||
#endif /* ALT_CPU_HAS_EXTRA_EXCEPTION_INFO */
|
||||
#endif /* CONFIG_PRINTK */
|
||||
|
||||
_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
|
||||
}
|
||||
|
||||
|
||||
|
@ -218,7 +218,7 @@ FUNC_NORETURN void _Fault(const NANO_ESF *esf)
|
|||
*
|
||||
* @return N/A
|
||||
*/
|
||||
FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
|
||||
FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
ARG_UNUSED(pEsf);
|
||||
|
@ -232,7 +232,7 @@ FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
|
|||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
if (k_is_in_isr() || _is_thread_essential()) {
|
||||
if (k_is_in_isr() || z_is_thread_essential()) {
|
||||
printk("Fatal fault in %s! Spinning...\n",
|
||||
k_is_in_isr() ? "ISR" : "essential thread");
|
||||
goto hang_system;
|
||||
|
|
|
@ -21,16 +21,16 @@
|
|||
#include <kswap.h>
|
||||
#include <tracing.h>
|
||||
|
||||
void _irq_spurious(void *unused)
|
||||
void z_irq_spurious(void *unused)
|
||||
{
|
||||
ARG_UNUSED(unused);
|
||||
printk("Spurious interrupt detected! ipending: %x\n",
|
||||
_nios2_creg_read(NIOS2_CR_IPENDING));
|
||||
_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
|
||||
}
|
||||
|
||||
|
||||
void _arch_irq_enable(unsigned int irq)
|
||||
void z_arch_irq_enable(unsigned int irq)
|
||||
{
|
||||
u32_t ienable;
|
||||
unsigned int key;
|
||||
|
@ -46,7 +46,7 @@ void _arch_irq_enable(unsigned int irq)
|
|||
|
||||
|
||||
|
||||
void _arch_irq_disable(unsigned int irq)
|
||||
void z_arch_irq_disable(unsigned int irq)
|
||||
{
|
||||
u32_t ienable;
|
||||
unsigned int key;
|
||||
|
@ -103,12 +103,12 @@ void _enter_irq(u32_t ipending)
|
|||
|
||||
_kernel.nested--;
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
_check_stack_sentinel();
|
||||
z_check_stack_sentinel();
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_INTERRUPTS
|
||||
int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
||||
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
||||
void (*routine)(void *parameter), void *parameter,
|
||||
u32_t flags)
|
||||
{
|
||||
|
|
|
@@ -10,7 +10,7 @@
  *
  *
  * Initialization of full C support: zero the .bss, copy the .data if XIP,
- * call _Cstart().
+ * call z_cstart().
  *
  * Stack is available in this module, but not the global data/bss until their
  * initialization is performed.
@@ -33,9 +33,9 @@
 void _PrepC(void)
 {
-_bss_zero();
+z_bss_zero();
 #ifdef CONFIG_XIP
-_data_copy();
+z_data_copy();
 /* In most XIP scenarios we copy the exception code into RAM, so need
  * to flush instruction cache.
  */
@@ -48,6 +48,6 @@ void _PrepC(void)
 _nios2_dcache_flush_all();
 #endif
 #endif
-_Cstart();
+z_cstart();
 CODE_UNREACHABLE;
 }

@@ -121,7 +121,7 @@ SECTION_FUNC(exception.other, __swap)
 /*
  * Load return value into r2 (return value register). -EAGAIN unless
- * someone previously called _set_thread_return_value(). Do this before
+ * someone previously called z_set_thread_return_value(). Do this before
  * we potentially unlock interrupts.
  */
 ldw r2, _thread_offset_to_retval(r2)
@@ -191,5 +191,5 @@ SECTION_FUNC(TEXT, _thread_entry_wrapper)
 /* pop all the stuff that we just loaded into registers */
 addi sp, sp, 16
-call _thread_entry
+call z_thread_entry

@@ -11,7 +11,7 @@
 #include <string.h>
 /* forward declaration to asm function to adjust setup the arguments
- * to _thread_entry() since this arch puts the first four arguments
+ * to z_thread_entry() since this arch puts the first four arguments
  * in r4-r7 and not on the stack
  */
 void _thread_entry_wrapper(k_thread_entry_t, void *, void *, void *);
@@ -20,7 +20,7 @@ struct init_stack_frame {
 /* top of the stack / most recently pushed */
 /* Used by _thread_entry_wrapper. pulls these off the stack and
- * into argument registers before calling _thread_entry()
+ * into argument registers before calling z_thread_entry()
  */
 k_thread_entry_t entry_point;
 void *arg1;
@@ -31,13 +31,13 @@ struct init_stack_frame {
 };
-void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 size_t stack_size, k_thread_entry_t thread_func,
 void *arg1, void *arg2, void *arg3,
 int priority, unsigned int options)
 {
 char *stack_memory = K_THREAD_STACK_BUFFER(stack);
-_ASSERT_VALID_PRIO(priority, thread_func);
+Z_ASSERT_VALID_PRIO(priority, thread_func);
 struct init_stack_frame *iframe;

@@ -36,12 +36,12 @@ static ALWAYS_INLINE void kernel_arch_init(void)
 }
 static ALWAYS_INLINE void
-_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 {
 thread->callee_saved.retval = value;
 }
-#define _is_in_isr() (_kernel.nested != 0U)
+#define z_is_in_isr() (_kernel.nested != 0U)
 #ifdef CONFIG_IRQ_OFFLOAD
 void _irq_do_offload(void);

@@ -54,10 +54,10 @@ struct _callee_saved {
 /* Stack pointer */
 u32_t sp;
-/* IRQ status before irq_lock() and call to _Swap() */
+/* IRQ status before irq_lock() and call to z_swap() */
 u32_t key;
-/* Return value of _Swap() */
+/* Return value of z_swap() */
 u32_t retval;
 };

@@ -32,7 +32,7 @@ const NANO_ESF _default_esf = {
  *
  * @return This function does not return.
  */
-FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 const NANO_ESF *esf)
 {
 LOG_PANIC();
@@ -72,9 +72,9 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
 #endif
-void _SysFatalErrorHandler(unsigned int reason,
+void z_SysFatalErrorHandler(unsigned int reason,
 const NANO_ESF *pEsf);
-_SysFatalErrorHandler(reason, esf);
+z_SysFatalErrorHandler(reason, esf);
 }
@@ -98,7 +98,7 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
  *
  * @return N/A
  */
-FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
 const NANO_ESF *pEsf)
 {
 ARG_UNUSED(pEsf);
@@ -111,7 +111,7 @@ FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
 if (reason == _NANO_ERR_KERNEL_PANIC) {
 goto hang_system;
 }
-if (k_is_in_isr() || _is_thread_essential()) {
+if (k_is_in_isr() || z_is_thread_essential()) {
 posix_print_error_and_exit(
 "Fatal fault in %s! Stopping...\n",
 k_is_in_isr() ? "ISR" : "essential thread");
@@ -122,6 +122,6 @@ FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
 hang_system:
 posix_print_error_and_exit(
-"Stopped in _SysFatalErrorHandler()\n");
+"Stopped in z_SysFatalErrorHandler()\n");
 CODE_UNREACHABLE;
 }

@@ -205,7 +205,7 @@ void posix_swap(int next_allowed_thread_nbr, int this_th_nbr)
 /**
  * Let the ready thread (main) run, and exit this thread (init)
  *
- * Called from _arch_switch_to_main_thread() which does the picking from the
+ * Called from z_arch_switch_to_main_thread() which does the picking from the
  * kernel structures
  *
  * Note that we could have just done a swap(), but that would have left the
@@ -298,7 +298,7 @@ static void *posix_thread_starter(void *arg)
 posix_new_thread_pre_start();
-_thread_entry(ptr->entry_point, ptr->arg1, ptr->arg2, ptr->arg3);
+z_thread_entry(ptr->entry_point, ptr->arg1, ptr->arg2, ptr->arg3);
 /*
  * We only reach this point if the thread actually returns which should
@@ -357,9 +357,9 @@ static int ttable_get_empty_slot(void)
 }
 /**
- * Called from _new_thread(),
+ * Called from z_new_thread(),
  * Create a new POSIX thread for the new Zephyr thread.
- * _new_thread() picks from the kernel structures what it is that we need to
+ * z_new_thread() picks from the kernel structures what it is that we need to
  * call with what parameters
  */
 void posix_new_thread(posix_thread_status_t *ptr)
@@ -472,9 +472,9 @@ void posix_abort_thread(int thread_idx)
 #if defined(CONFIG_ARCH_HAS_THREAD_ABORT)
-extern void _k_thread_single_abort(struct k_thread *thread);
+extern void z_thread_single_abort(struct k_thread *thread);
-void _impl_k_thread_abort(k_tid_t thread)
+void z_impl_k_thread_abort(k_tid_t thread)
 {
 unsigned int key;
 int thread_idx;
@@ -490,8 +490,8 @@ void _impl_k_thread_abort(k_tid_t thread)
 __ASSERT(!(thread->base.user_options & K_ESSENTIAL),
 "essential thread aborted");
-_k_thread_single_abort(thread);
-_thread_monitor_exit(thread);
+z_thread_single_abort(thread);
+z_thread_monitor_exit(thread);
 if (_current == thread) {
 if (tstatus->aborted == 0) { /* LCOV_EXCL_BR_LINE */
@@ -510,7 +510,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 thread_idx,
 __func__);
-(void)_Swap_irqlock(key);
+(void)z_swap_irqlock(key);
 CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 }
@@ -531,7 +531,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 }
 /* The abort handler might have altered the ready queue. */
-_reschedule_irqlock(key);
+z_reschedule_irqlock(key);
 }
 #endif

@@ -30,7 +30,7 @@
  *
  *
  * @return -EAGAIN, or a return value set by a call to
- * _set_thread_return_value()
+ * z_set_thread_return_value()
  *
  */
@@ -48,7 +48,7 @@ int __swap(unsigned int key)
  */
 _kernel.current->callee_saved.key = key;
 _kernel.current->callee_saved.retval = -EAGAIN;
-/* retval may be modified with a call to _set_thread_return_value() */
+/* retval may be modified with a call to z_set_thread_return_value() */
 posix_thread_status_t *ready_thread_ptr =
 (posix_thread_status_t *)
@@ -86,7 +86,7 @@ int __swap(unsigned int key)
  * Note that we will never come back to this thread:
  * posix_core_main_thread_start() does never return
  */
-void _arch_switch_to_main_thread(struct k_thread *main_thread,
+void z_arch_switch_to_main_thread(struct k_thread *main_thread,
 k_thread_stack_t *main_stack,
 size_t main_stack_size, k_thread_entry_t _main)
 {
@@ -114,7 +114,7 @@ void posix_irq_check_idle_exit(void)
 s32_t idle_val = _kernel.idle;
 _kernel.idle = 0;
-_sys_power_save_idle_exit(idle_val);
+z_sys_power_save_idle_exit(idle_val);
 }
 }
 #endif

@@ -45,7 +45,7 @@
  * pthreads stack and therefore we ignore the stack size
  *
  */
-void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 size_t stack_size, k_thread_entry_t thread_func,
 void *arg1, void *arg2, void *arg3,
 int priority, unsigned int options)
@@ -53,7 +53,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 char *stack_memory = K_THREAD_STACK_BUFFER(stack);
-_ASSERT_VALID_PRIO(priority, thread_func);
+Z_ASSERT_VALID_PRIO(priority, thread_func);
 posix_thread_status_t *thread_status;
@@ -66,7 +66,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 STACK_ROUND_DOWN(stack_memory + stack_size
 - sizeof(*thread_status));
-/* _thread_entry() arguments */
+/* z_thread_entry() arguments */
 thread_status->entry_point = thread_func;
 thread_status->arg1 = arg1;
 thread_status->arg2 = arg2;

@@ -21,7 +21,7 @@ extern "C" {
 #endif
 #if defined(CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN)
-void _arch_switch_to_main_thread(struct k_thread *main_thread,
+void z_arch_switch_to_main_thread(struct k_thread *main_thread,
 k_thread_stack_t *main_stack,
 size_t main_stack_size, k_thread_entry_t _main);
 #endif
@@ -44,7 +44,7 @@ static inline void kernel_arch_init(void)
 static ALWAYS_INLINE void
-_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 {
 thread->callee_saved.retval = value;
 }
@@ -53,7 +53,7 @@ _set_thread_return_value(struct k_thread *thread, unsigned int value)
 }
 #endif
-#define _is_in_isr() (_kernel.nested != 0U)
+#define z_is_in_isr() (_kernel.nested != 0U)
 #endif /* _ASMLANGUAGE */

@@ -32,10 +32,10 @@ struct _caller_saved {
 struct _callee_saved {
-/* IRQ status before irq_lock() and call to _Swap() */
+/* IRQ status before irq_lock() and call to z_swap() */
 u32_t key;
-/* Return value of _Swap() */
+/* Return value of z_swap() */
 u32_t retval;
 /*

@@ -26,11 +26,11 @@ void posix_atomic_halt_cpu(unsigned int imask);
 #include "soc_irq.h" /* Must exist and define _ARCH_IRQ/ISR_* macros */
-unsigned int _arch_irq_lock(void);
-void _arch_irq_unlock(unsigned int key);
-void _arch_irq_enable(unsigned int irq);
-void _arch_irq_disable(unsigned int irq);
-int _arch_irq_is_enabled(unsigned int irq);
+unsigned int z_arch_irq_lock(void);
+void z_arch_irq_unlock(unsigned int key);
+void z_arch_irq_enable(unsigned int irq);
+void z_arch_irq_disable(unsigned int irq);
+int z_arch_irq_is_enabled(unsigned int irq);
 unsigned int posix_irq_lock(void);
 void posix_irq_unlock(unsigned int key);
 void posix_irq_full_unlock(void);

@ -55,7 +55,7 @@ const NANO_ESF _default_esf = {
|
|||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
|
||||
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *esf)
|
||||
{
|
||||
LOG_PANIC();
|
||||
|
@ -103,7 +103,7 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
|
|||
esf->a2, esf->a3, esf->a4, esf->a5,
|
||||
esf->a6, esf->a7);
|
||||
|
||||
_SysFatalErrorHandler(reason, esf);
|
||||
z_SysFatalErrorHandler(reason, esf);
|
||||
/* spin forever */
|
||||
for (;;)
|
||||
__asm__ volatile("nop");
|
||||
|
@ -130,7 +130,7 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
|
|||
*
|
||||
* @return N/A
|
||||
*/
|
||||
FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
|
||||
FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *esf)
|
||||
{
|
||||
ARG_UNUSED(esf);
|
||||
|
@ -146,7 +146,7 @@ FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
|
|||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
if (k_is_in_isr() || _is_thread_essential()) {
|
||||
if (k_is_in_isr() || z_is_thread_essential()) {
|
||||
printk("Fatal fault in %s! Spinning...\n",
|
||||
k_is_in_isr() ? "ISR" : "essential thread");
|
||||
goto hang_system;
|
||||
|
@ -196,5 +196,5 @@ FUNC_NORETURN void _Fault(const NANO_ESF *esf)
|
|||
mcause &= SOC_MCAUSE_EXP_MASK;
|
||||
printk("Exception cause %s (%d)\n", cause_str(mcause), (int)mcause);
|
||||
|
||||
_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <kernel_structs.h>
|
||||
#include <misc/printk.h>
|
||||
|
||||
void _irq_spurious(void *unused)
|
||||
void z_irq_spurious(void *unused)
|
||||
{
|
||||
u32_t mcause;
|
||||
|
||||
|
@ -26,11 +26,11 @@ void _irq_spurious(void *unused)
|
|||
}
|
||||
#endif
|
||||
|
||||
_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_INTERRUPTS
|
||||
int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
||||
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
||||
void (*routine)(void *parameter), void *parameter,
|
||||
u32_t flags)
|
||||
{
|
||||
|
|
|
@ -22,7 +22,7 @@ GTEXT(__soc_restore_context)
|
|||
|
||||
GTEXT(_k_neg_eagain)
|
||||
GTEXT(_is_next_thread_current)
|
||||
GTEXT(_get_next_ready_thread)
|
||||
GTEXT(z_get_next_ready_thread)
|
||||
|
||||
#ifdef CONFIG_TRACING
|
||||
GTEXT(z_sys_trace_thread_switched_in)
|
||||
|
@ -281,7 +281,7 @@ on_thread_stack:
|
|||
addi sp, t0, 0
|
||||
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
call _check_stack_sentinel
|
||||
call z_check_stack_sentinel
|
||||
la t1, _kernel
|
||||
#endif
|
||||
|
||||
|
@ -332,7 +332,7 @@ reschedule:
|
|||
|
||||
/*
|
||||
* Save stack pointer of current thread and set the default return value
|
||||
* of _Swap to _k_neg_eagain for the thread.
|
||||
* of z_swap to _k_neg_eagain for the thread.
|
||||
*/
|
||||
sw sp, _thread_offset_to_sp(t1)
|
||||
la t2, _k_neg_eagain
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
* @brief Full C support initialization
|
||||
*
|
||||
*
|
||||
* Initialization of full C support: zero the .bss and call _Cstart().
|
||||
* Initialization of full C support: zero the .bss and call z_cstart().
|
||||
*
|
||||
* Stack is available in this module, but not the global data/bss until their
|
||||
* initialization is performed.
|
||||
|
@ -31,13 +31,13 @@
|
|||
|
||||
void _PrepC(void)
|
||||
{
|
||||
_bss_zero();
|
||||
z_bss_zero();
|
||||
#ifdef CONFIG_XIP
|
||||
_data_copy();
|
||||
z_data_copy();
|
||||
#endif
|
||||
#if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT)
|
||||
soc_interrupt_init();
|
||||
#endif
|
||||
_Cstart();
|
||||
z_cstart();
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
|
|
@ -59,6 +59,6 @@ aa_loop:
|
|||
|
||||
/*
|
||||
* Jump into C domain. _PrepC zeroes BSS, copies rw data into RAM,
|
||||
* and then enters kernel _Cstart
|
||||
* and then enters kernel z_cstart
|
||||
*/
|
||||
call _PrepC
|
||||
|
|
|
@ -78,7 +78,7 @@ SECTION_FUNC(exception.other, __swap)
|
|||
* Prior to unlocking irq, load return value of
|
||||
* __swap to temp register t2 (from
|
||||
* _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN,
|
||||
* unless someone has previously called _set_thread_return_value(..).
|
||||
* unless someone has previously called z_set_thread_return_value(..).
|
||||
*/
|
||||
la t0, _kernel
|
||||
|
||||
|
@ -113,8 +113,8 @@ SECTION_FUNC(TEXT, _thread_entry_wrapper)
|
|||
* the thread stack (initialized via function _thread).
|
||||
* In this case, thread_entry_t, * void *, void * and void * are stored
|
||||
* in registers a0, a1, a2 and a3. These registers are used as arguments
|
||||
* to function _thread_entry. Hence, just call _thread_entry with
|
||||
* to function z_thread_entry. Hence, just call z_thread_entry with
|
||||
* return address set to 0 to indicate a non-returning function call.
|
||||
*/
|
||||
|
||||
jal x0, _thread_entry
|
||||
jal x0, z_thread_entry
|
||||
|
|
|
@ -15,13 +15,13 @@ void _thread_entry_wrapper(k_thread_entry_t thread,
|
|||
void *arg2,
|
||||
void *arg3);
|
||||
|
||||
void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
size_t stack_size, k_thread_entry_t thread_func,
|
||||
void *arg1, void *arg2, void *arg3,
|
||||
int priority, unsigned int options)
|
||||
{
|
||||
char *stack_memory = K_THREAD_STACK_BUFFER(stack);
|
||||
_ASSERT_VALID_PRIO(priority, thread_func);
|
||||
Z_ASSERT_VALID_PRIO(priority, thread_func);
|
||||
|
||||
struct __esf *stack_init;
|
||||
|
||||
|
|
|
@ -32,16 +32,16 @@ static ALWAYS_INLINE void kernel_arch_init(void)
|
|||
}
|
||||
|
||||
static ALWAYS_INLINE void
|
||||
_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
z_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
{
|
||||
thread->arch.swap_return_value = value;
|
||||
}
|
||||
|
||||
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
|
||||
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *esf);
|
||||
|
||||
|
||||
#define _is_in_isr() (_kernel.nested != 0U)
|
||||
#define z_is_in_isr() (_kernel.nested != 0U)
|
||||
|
||||
#ifdef CONFIG_IRQ_OFFLOAD
|
||||
int _irq_do_offload(void);
|
||||
|
|
|
@ -55,7 +55,7 @@ struct _caller_saved {
|
|||
typedef struct _caller_saved _caller_saved_t;
|
||||
|
||||
struct _thread_arch {
|
||||
u32_t swap_return_value; /* Return value of _Swap() */
|
||||
u32_t swap_return_value; /* Return value of z_swap() */
|
||||
};
|
||||
|
||||
typedef struct _thread_arch _thread_arch_t;
|
||||
|
|
|
@ -44,7 +44,7 @@ extern u64_t __idle_time_stamp; /* timestamp when CPU went idle */
|
|||
*/
|
||||
void k_cpu_idle(void)
|
||||
{
|
||||
_int_latency_stop();
|
||||
z_int_latency_stop();
|
||||
z_sys_trace_idle();
|
||||
#if defined(CONFIG_BOOT_TIME_MEASUREMENT)
|
||||
__idle_time_stamp = (u64_t)k_cycle_get_32();
|
||||
|
@ -75,7 +75,7 @@ void k_cpu_idle(void)
|
|||
|
||||
void k_cpu_atomic_idle(unsigned int key)
|
||||
{
|
||||
_int_latency_stop();
|
||||
z_int_latency_stop();
|
||||
z_sys_trace_idle();
|
||||
|
||||
__asm__ volatile (
|
||||
|
@ -96,7 +96,7 @@ void k_cpu_atomic_idle(unsigned int key)
|
|||
|
||||
/* restore interrupt lockout state before returning to caller */
|
||||
if ((key & 0x200) == 0) {
|
||||
_int_latency_start();
|
||||
z_int_latency_start();
|
||||
__asm__ volatile("cli");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
GTEXT(__start)
|
||||
|
||||
/* externs */
|
||||
GTEXT(_Cstart)
|
||||
GTEXT(z_cstart)
|
||||
|
||||
GDATA(_idt_base_address)
|
||||
GDATA(_interrupt_stack)
|
||||
|
@ -247,7 +247,7 @@ __csSet:
|
|||
/*
|
||||
* Set the stack pointer to the area used for the interrupt stack.
|
||||
* Note this stack is used during the execution of __start() and
|
||||
* _Cstart() until the multi-tasking kernel is initialized. The
|
||||
* z_cstart() until the multi-tasking kernel is initialized. The
|
||||
* dual-purposing of this area of memory is safe since
|
||||
* interrupts are disabled until the first context switch.
|
||||
*
|
||||
|
@ -377,7 +377,7 @@ __csSet:
|
|||
|
||||
/* Jump to C portion of kernel initialization and never return */
|
||||
|
||||
jmp _Cstart
|
||||
jmp z_cstart
|
||||
|
||||
|
||||
_x86_bss_zero:
|
||||
|
|
|
@ -134,7 +134,7 @@ SECTION_FUNC(TEXT, _exception_enter)
|
|||
|
||||
/*
|
||||
* Set the _EXC_ACTIVE state bit of the current thread.
|
||||
* This enables _Swap() to preserve the thread's FP registers
|
||||
* This enables z_swap() to preserve the thread's FP registers
|
||||
* (where needed) if the exception handler causes a context switch.
|
||||
* It also indicates to debug tools that an exception is being
|
||||
* handled in the event of a context switch.
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
* @file
|
||||
* @brief Kernel fatal error handler
|
||||
*
|
||||
* This module provides the _NanoFatalErrorHandler() routine.
|
||||
* This module provides the z_NanoFatalErrorHandler() routine.
|
||||
*/
|
||||
|
||||
#include <toolchain.h>
|
||||
|
@ -43,9 +43,9 @@ static bool check_stack_bounds(u32_t addr, size_t size, u16_t cs)
|
|||
{
|
||||
u32_t start, end;
|
||||
|
||||
if (_is_in_isr()) {
|
||||
if (z_is_in_isr()) {
|
||||
/* We were servicing an interrupt */
|
||||
start = (u32_t)_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
|
||||
start = (u32_t)Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
|
||||
end = start + CONFIG_ISR_STACK_SIZE;
|
||||
} else if ((cs & 0x3) != 0 ||
|
||||
(_current->base.user_options & K_USER) == 0) {
|
||||
|
@ -134,7 +134,7 @@ static void unwind_stack(u32_t base_ptr, u16_t cs)
|
|||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
|
||||
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
LOG_PANIC();
|
||||
|
@ -208,10 +208,10 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
|
|||
* fatal error handling policy defined for the platform.
|
||||
*/
|
||||
|
||||
_SysFatalErrorHandler(reason, pEsf);
|
||||
z_SysFatalErrorHandler(reason, pEsf);
|
||||
}
|
||||
|
||||
FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
|
||||
FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
|
||||
{
|
||||
struct _x86_syscall_stack_frame *ssf =
|
||||
(struct _x86_syscall_stack_frame *)ssf_ptr;
|
||||
|
@ -225,14 +225,14 @@ FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
|
|||
oops.esp = ssf->esp;
|
||||
}
|
||||
|
||||
_NanoFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, &oops);
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, &oops);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_KERNEL_OOPS
|
||||
FUNC_NORETURN void _do_kernel_oops(const NANO_ESF *esf)
|
||||
{
|
||||
u32_t *stack_ptr = (u32_t *)esf->esp;
|
||||
_NanoFatalErrorHandler(*stack_ptr, esf);
|
||||
z_NanoFatalErrorHandler(*stack_ptr, esf);
|
||||
}
|
||||
|
||||
extern void (*_kernel_oops_handler)(void);
|
||||
|
@ -242,7 +242,7 @@ NANO_CPU_INT_REGISTER(_kernel_oops_handler, NANO_SOFT_IRQ,
|
|||
#endif
|
||||
|
||||
/*
|
||||
* Define a default ESF for use with _NanoFatalErrorHandler() in the event
|
||||
* Define a default ESF for use with z_NanoFatalErrorHandler() in the event
|
||||
* the caller does not have a NANO_ESF to pass
|
||||
*/
|
||||
const NANO_ESF _default_esf = {
|
||||
|
@ -280,7 +280,7 @@ static FUNC_NORETURN void generic_exc_handle(unsigned int vector,
|
|||
if ((BIT(vector) & _EXC_ERROR_CODE_FAULTS) != 0) {
|
||||
printk("***** Exception code: 0x%x\n", pEsf->errorCode);
|
||||
}
|
||||
_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, pEsf);
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, pEsf);
|
||||
}
|
||||
|
||||
#define _EXC_FUNC(vector) \
|
||||
|
@ -351,7 +351,7 @@ static void dump_mmu_flags(struct x86_mmu_pdpt *pdpt, void *addr)
|
|||
{
|
||||
x86_page_entry_data_t pde_flags, pte_flags;
|
||||
|
||||
_x86_mmu_get_flags(pdpt, addr, &pde_flags, &pte_flags);
|
||||
z_x86_mmu_get_flags(pdpt, addr, &pde_flags, &pte_flags);
|
||||
|
||||
printk("PDE: ");
|
||||
dump_entry_flags(pde_flags);
|
||||
|
@ -414,10 +414,10 @@ void page_fault_handler(NANO_ESF *esf)
|
|||
#endif
|
||||
#ifdef CONFIG_THREAD_STACK_INFO
|
||||
if (check_stack_bounds(esf->esp, 0, esf->cs)) {
|
||||
_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
|
||||
}
|
||||
#endif
|
||||
_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
_EXCEPTION_CONNECT_CODE(page_fault_handler, IV_PAGE_FAULT);
|
||||
|
@ -437,7 +437,7 @@ static FUNC_NORETURN __used void _df_handler_top(void);
|
|||
extern char z_trampoline_stack_end[];
|
||||
#endif
|
||||
|
||||
_GENERIC_SECTION(.tss)
|
||||
Z_GENERIC_SECTION(.tss)
|
||||
struct task_state_segment _main_tss = {
|
||||
.ss0 = DATA_SEG,
|
||||
#ifdef CONFIG_X86_KPTI
|
||||
|
@ -450,7 +450,7 @@ struct task_state_segment _main_tss = {
|
|||
};
|
||||
|
||||
/* Special TSS for handling double-faults with a known good stack */
|
||||
_GENERIC_SECTION(.tss)
|
||||
Z_GENERIC_SECTION(.tss)
|
||||
struct task_state_segment _df_tss = {
|
||||
.esp = (u32_t)(_df_stack + sizeof(_df_stack)),
|
||||
.cs = CODE_SEG,
|
||||
|
@ -476,7 +476,7 @@ static FUNC_NORETURN __used void _df_handler_bottom(void)
|
|||
reason = _NANO_ERR_STACK_CHK_FAIL;
|
||||
}
|
||||
#endif
|
||||
_NanoFatalErrorHandler(reason, (NANO_ESF *)&_df_esf);
|
||||
z_NanoFatalErrorHandler(reason, (NANO_ESF *)&_df_esf);
|
||||
}
|
||||
|
||||
static FUNC_NORETURN __used void _df_handler_top(void)
|
||||
|
@ -499,7 +499,7 @@ static FUNC_NORETURN __used void _df_handler_top(void)
|
|||
_df_esf.eflags = _main_tss.eflags;
|
||||
|
||||
/* Restore the main IA task to a runnable state */
|
||||
_main_tss.esp = (u32_t)(_ARCH_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
_main_tss.esp = (u32_t)(Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
CONFIG_ISR_STACK_SIZE);
|
||||
_main_tss.cs = CODE_SEG;
|
||||
_main_tss.ds = DATA_SEG;
|
||||
|
|
|
@ -88,7 +88,7 @@ static inline void _FpCtxInit(struct k_thread *thread)
|
|||
* Enable preservation of floating point context information.
|
||||
*
|
||||
* The transition from "non-FP supporting" to "FP supporting" must be done
|
||||
* atomically to avoid confusing the floating point logic used by _Swap(), so
|
||||
* atomically to avoid confusing the floating point logic used by z_swap(), so
|
||||
* this routine locks interrupts to ensure that a context switch does not occur.
|
||||
* The locking isn't really needed when the routine is called by a cooperative
|
||||
* thread (since context switching can't occur), but it is harmless.
|
||||
|
@ -167,7 +167,7 @@ void k_float_enable(struct k_thread *thread, unsigned int options)
|
|||
*
|
||||
* The saved FP context is needed in case the thread
|
||||
* we enabled FP support for is currently pre-empted,
|
||||
* since _Swap() uses it to restore FP context when
|
||||
* since z_swap() uses it to restore FP context when
|
||||
* the thread re-activates.
|
||||
*
|
||||
* Saving the FP context reinits the FPU, and thus
|
||||
|
@ -187,7 +187,7 @@ void k_float_enable(struct k_thread *thread, unsigned int options)
|
|||
* Disable preservation of floating point context information.
|
||||
*
|
||||
* The transition from "FP supporting" to "non-FP supporting" must be done
|
||||
* atomically to avoid confusing the floating point logic used by _Swap(), so
|
||||
* atomically to avoid confusing the floating point logic used by z_swap(), so
|
||||
* this routine locks interrupts to ensure that a context switch does not occur.
|
||||
* The locking isn't really needed when the routine is called by a cooperative
|
||||
* thread (since context switching can't occur), but it is harmless.
|
||||
|
|
|
@ -33,13 +33,13 @@
|
|||
GTEXT(__swap)
|
||||
|
||||
#ifdef CONFIG_SYS_POWER_MANAGEMENT
|
||||
GTEXT(_sys_power_save_idle_exit)
|
||||
GTEXT(z_sys_power_save_idle_exit)
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef CONFIG_INT_LATENCY_BENCHMARK
|
||||
GTEXT(_int_latency_start)
|
||||
GTEXT(_int_latency_stop)
|
||||
GTEXT(z_int_latency_start)
|
||||
GTEXT(z_int_latency_stop)
|
||||
#endif
|
||||
/**
|
||||
*
|
||||
|
@ -150,7 +150,7 @@ SECTION_FUNC(TEXT, _interrupt_enter)
|
|||
* interrupt.
|
||||
*/
|
||||
|
||||
call _int_latency_start
|
||||
call z_int_latency_start
|
||||
#endif
|
||||
|
||||
call z_sys_trace_isr_enter
|
||||
|
@ -196,7 +196,7 @@ alreadyOnIntStack:
|
|||
#ifdef CONFIG_INT_LATENCY_BENCHMARK
|
||||
pushl %eax
|
||||
pushl %edx
|
||||
call _int_latency_stop
|
||||
call z_int_latency_stop
|
||||
popl %edx
|
||||
popl %eax
|
||||
#endif
|
||||
|
@ -237,7 +237,7 @@ alreadyOnIntStack:
|
|||
_irq_controller_eoi_macro
|
||||
|
||||
#ifdef CONFIG_INT_LATENCY_BENCHMARK
|
||||
call _int_latency_start
|
||||
call z_int_latency_start
|
||||
#endif
|
||||
|
||||
/* determine whether exiting from a nested interrupt */
|
||||
|
@ -275,7 +275,7 @@ alreadyOnIntStack:
|
|||
popl %esp /* switch back to outgoing thread's stack */
|
||||
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
call _check_stack_sentinel
|
||||
call z_check_stack_sentinel
|
||||
#endif
|
||||
pushfl /* push KERNEL_LOCK_KEY argument */
|
||||
#ifdef CONFIG_X86_IAMCU
|
||||
|
@ -309,7 +309,7 @@ alreadyOnIntStack:
|
|||
|
||||
/* Restore volatile registers and return to the interrupted thread */
|
||||
#ifdef CONFIG_INT_LATENCY_BENCHMARK
|
||||
call _int_latency_stop
|
||||
call z_int_latency_stop
|
||||
#endif
|
||||
popl %edi
|
||||
popl %ecx
|
||||
|
@ -331,7 +331,7 @@ noReschedule:
|
|||
popl %esp /* pop thread stack pointer */
|
||||
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
call _check_stack_sentinel
|
||||
call z_check_stack_sentinel
|
||||
#endif
|
||||
|
||||
/* fall through to 'nestedInterrupt' */
|
||||
|
@ -345,7 +345,7 @@ noReschedule:
|
|||
|
||||
nestedInterrupt:
|
||||
#ifdef CONFIG_INT_LATENCY_BENCHMARK
|
||||
call _int_latency_stop
|
||||
call z_int_latency_stop
|
||||
#endif
|
||||
|
||||
popl %edi
|
||||
|
@ -360,7 +360,7 @@ nestedInterrupt:
|
|||
handle_idle:
|
||||
pushl %eax
|
||||
pushl %edx
|
||||
/* Populate 'ticks' argument to _sys_power_save_idle_exit */
|
||||
/* Populate 'ticks' argument to z_sys_power_save_idle_exit */
|
||||
#ifdef CONFIG_X86_IAMCU
|
||||
movl _kernel_offset_to_idle(%ecx), %eax
|
||||
#else
|
||||
|
@ -371,13 +371,13 @@ handle_idle:
|
|||
movl $0, _kernel_offset_to_idle(%ecx)
|
||||
|
||||
/*
|
||||
* Beware that a timer driver's _sys_power_save_idle_exit() implementation might
|
||||
* Beware that a timer driver's z_sys_power_save_idle_exit() implementation might
|
||||
* expect that interrupts are disabled when invoked. This ensures that
|
||||
* the calculation and programming of the device for the next timer
|
||||
* deadline is not interrupted.
|
||||
*/
|
||||
|
||||
call _sys_power_save_idle_exit
|
||||
call z_sys_power_save_idle_exit
|
||||
#ifndef CONFIG_X86_IAMCU
|
||||
/* SYS V: discard 'ticks' argument passed on the stack */
|
||||
add $0x4, %esp
|
||||
|
@ -457,7 +457,7 @@ SECTION_FUNC(TEXT, _SpuriousIntHandler)
|
|||
movl $_NANO_ERR_SPURIOUS_INT, %eax
|
||||
#endif
|
||||
/* call the fatal error handler */
|
||||
call _NanoFatalErrorHandler
|
||||
call z_NanoFatalErrorHandler
|
||||
|
||||
/* handler doesn't return */
|
||||
|
||||
|
|
|
@ -48,32 +48,32 @@ void *__attribute__((section(".spurNoErrIsr")))
|
|||
*/
|
||||
|
||||
#ifdef CONFIG_SYS_POWER_MANAGEMENT
|
||||
void _arch_irq_direct_pm(void)
|
||||
void z_arch_irq_direct_pm(void)
|
||||
{
|
||||
if (_kernel.idle) {
|
||||
s32_t idle_val = _kernel.idle;
|
||||
|
||||
_kernel.idle = 0;
|
||||
_sys_power_save_idle_exit(idle_val);
|
||||
z_sys_power_save_idle_exit(idle_val);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void _arch_isr_direct_header(void)
|
||||
void z_arch_isr_direct_header(void)
|
||||
{
|
||||
_int_latency_start();
|
||||
z_int_latency_start();
|
||||
z_sys_trace_isr_enter();
|
||||
|
||||
/* We're not going to unlock IRQs, but we still need to increment this
|
||||
* so that _is_in_isr() works
|
||||
* so that z_is_in_isr() works
|
||||
*/
|
||||
++_kernel.nested;
|
||||
}
|
||||
|
||||
void _arch_isr_direct_footer(int swap)
|
||||
void z_arch_isr_direct_footer(int swap)
|
||||
{
|
||||
_irq_controller_eoi();
|
||||
_int_latency_stop();
|
||||
z_int_latency_stop();
|
||||
sys_trace_isr_exit();
|
||||
--_kernel.nested;
|
||||
|
||||
|
@ -87,7 +87,7 @@ void _arch_isr_direct_footer(int swap)
|
|||
_kernel.ready_q.cache != _current) {
|
||||
unsigned int flags;
|
||||
|
||||
/* Fetch EFLAGS argument to _Swap() */
|
||||
/* Fetch EFLAGS argument to z_swap() */
|
||||
__asm__ volatile (
|
||||
"pushfl\n\t"
|
||||
"popl %0\n\t"
|
||||
|
@ -95,7 +95,7 @@ void _arch_isr_direct_footer(int swap)
|
|||
:
|
||||
: "memory"
|
||||
);
|
||||
(void)_Swap_irqlock(flags);
|
||||
(void)z_swap_irqlock(flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -301,7 +301,7 @@ static void idt_vector_install(int vector, void *irq_handler)
|
|||
* the processor.
|
||||
*/
|
||||
|
||||
int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
||||
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
||||
void (*routine)(void *parameter), void *parameter,
|
||||
u32_t flags)
|
||||
{
|
||||
|
@ -310,7 +310,7 @@ int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
|||
key = irq_lock();
|
||||
|
||||
#ifdef CONFIG_X86_FIXED_IRQ_MAPPING
|
||||
vector = _IRQ_TO_INTERRUPT_VECTOR(irq);
|
||||
vector = Z_IRQ_TO_INTERRUPT_VECTOR(irq);
|
||||
#else
|
||||
vector = priority_to_free_vector(priority);
|
||||
/* 0 indicates not used, vectors for interrupts start at 32 */
|
||||
|
@ -318,7 +318,7 @@ int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
|||
"IRQ %d already configured", irq);
|
||||
_irq_to_interrupt_vector[irq] = vector;
|
||||
#endif
|
||||
_irq_controller_irq_config(vector, irq, flags);
|
||||
z_irq_controller_irq_config(vector, irq, flags);
|
||||
|
||||
stub_idx = next_irq_stub++;
|
||||
__ASSERT(stub_idx < CONFIG_X86_DYNAMIC_IRQ_STUBS,
|
||||
|
|
|
@ -74,7 +74,7 @@
|
|||
* potential security leaks.
|
||||
*
|
||||
* @return -EAGAIN, or a return value set by a call to
|
||||
* _set_thread_return_value()
|
||||
* z_set_thread_return_value()
|
||||
*
|
||||
* C function prototype:
|
||||
*
|
||||
|
@ -130,7 +130,7 @@ SECTION_FUNC(TEXT, __swap)
|
|||
* Carve space for the return value. Setting it to a default of
|
||||
* -EAGAIN eliminates the need for the timeout code to set it.
|
||||
* If another value is ever needed, it can be modified with
|
||||
* _set_thread_return_value().
|
||||
* z_set_thread_return_value().
|
||||
*/
|
||||
|
||||
pushl _k_neg_eagain
|
||||
|
@ -336,7 +336,7 @@ CROHandlingDone:
|
|||
movl _thread_offset_to_esp(%eax), %esp
|
||||
|
||||
|
||||
/* load return value from a possible _set_thread_return_value() */
|
||||
/* load return value from a possible z_set_thread_return_value() */
|
||||
|
||||
popl %eax
|
||||
|
||||
|
@ -351,7 +351,7 @@ CROHandlingDone:
|
|||
* %eax may contain one of these values:
|
||||
*
|
||||
* - the return value for __swap() that was set up by a call to
|
||||
* _set_thread_return_value()
|
||||
* z_set_thread_return_value()
|
||||
* - -EINVAL
|
||||
*/
|
||||
|
||||
|
@ -365,7 +365,7 @@ CROHandlingDone:
|
|||
/* save %eax since it used as the return value for __swap */
|
||||
pushl %eax
|
||||
/* interrupts are being reenabled, stop accumulating time */
|
||||
call _int_latency_stop
|
||||
call z_int_latency_stop
|
||||
/* restore __swap's %eax */
|
||||
popl %eax
|
||||
|
||||
|
@ -398,11 +398,11 @@ time_read_not_needed:
|
|||
*
|
||||
* @brief Adjust stack/parameters before invoking thread entry function
|
||||
*
|
||||
* This function adjusts the initial stack frame created by _new_thread() such
|
||||
* This function adjusts the initial stack frame created by z_new_thread() such
|
||||
* that the GDB stack frame unwinders recognize it as the outermost frame in
|
||||
* the thread's stack. For targets that use the IAMCU calling convention, the
|
||||
* first three arguments are popped into eax, edx, and ecx. The function then
|
||||
* jumps to _thread_entry().
|
||||
* jumps to z_thread_entry().
|
||||
*
|
||||
* GDB normally stops unwinding a stack when it detects that it has
|
||||
* reached a function called main(). Kernel threads, however, do not have
|
||||
|
@ -411,9 +411,9 @@ time_read_not_needed:
|
|||
*
|
||||
* SYS V Systems:
|
||||
*
|
||||
* Given the initial thread created by _new_thread(), GDB expects to find a
|
||||
* Given the initial thread created by z_new_thread(), GDB expects to find a
|
||||
* return address on the stack immediately above the thread entry routine
|
||||
* _thread_entry, in the location occupied by the initial EFLAGS.
|
||||
* z_thread_entry, in the location occupied by the initial EFLAGS.
|
||||
* GDB attempts to examine the memory at this return address, which typically
|
||||
* results in an invalid access to page 0 of memory.
|
||||
*
|
||||
|
@ -422,17 +422,17 @@ time_read_not_needed:
|
|||
* an invalid access to address zero and returns an error, which causes the
|
||||
* GDB stack unwinder to stop somewhat gracefully.
|
||||
*
|
||||
* The initial EFLAGS cannot be overwritten until after _Swap() has swapped in
|
||||
* the new thread for the first time. This routine is called by _Swap() the
|
||||
* The initial EFLAGS cannot be overwritten until after z_swap() has swapped in
|
||||
* the new thread for the first time. This routine is called by z_swap() the
|
||||
* first time that the new thread is swapped in, and it jumps to
|
||||
* _thread_entry after it has done its work.
|
||||
* z_thread_entry after it has done its work.
|
||||
*
|
||||
* IAMCU Systems:
|
||||
*
|
||||
* There is no EFLAGS on the stack when we get here. _thread_entry() takes
|
||||
* There is no EFLAGS on the stack when we get here. z_thread_entry() takes
|
||||
* four arguments, and we need to pop off the first three into the
|
||||
* appropriate registers. Instead of using the 'call' instruction, we push
|
||||
* a NULL return address onto the stack and jump into _thread_entry,
|
||||
* a NULL return address onto the stack and jump into z_thread_entry,
|
||||
* ensuring the stack won't be unwound further. Placing some kind of return
|
||||
* address on the stack is mandatory so this isn't conditionally compiled.
|
||||
*
|
||||
|
@ -443,13 +443,13 @@ time_read_not_needed:
|
|||
* |__________________| |
|
||||
* | param1 | V
|
||||
* |__________________|
|
||||
* | pEntry | <---- ESP when invoked by _Swap() on IAMCU
|
||||
* | pEntry | <---- ESP when invoked by z_swap() on IAMCU
|
||||
* |__________________|
|
||||
* | initial EFLAGS | <---- ESP when invoked by _Swap() on Sys V
|
||||
* | initial EFLAGS | <---- ESP when invoked by z_swap() on Sys V
|
||||
* |__________________| (Zeroed by this routine on Sys V)
|
||||
*
|
||||
* The address of the thread entry function needs to be in %edi when this is
|
||||
* invoked. It will either be _thread_entry, or if userspace is enabled,
|
||||
* invoked. It will either be z_thread_entry, or if userspace is enabled,
|
||||
* _arch_drop_to_user_mode if this is a user thread.
|
||||
*
|
||||
* @return this routine does NOT return.
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
* @file
|
||||
* @brief Common system fatal error handler
|
||||
*
|
||||
* This module provides the _SysFatalErrorHandler() routine which is common to
|
||||
* This module provides the z_SysFatalErrorHandler() routine which is common to
|
||||
* supported platforms.
|
||||
*/
|
||||
|
||||
|
@ -39,7 +39,7 @@
|
|||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
|
||||
FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
ARG_UNUSED(pEsf);
|
||||
|
@ -55,7 +55,7 @@ FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
|
|||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
if (k_is_in_isr() || _is_thread_essential()) {
|
||||
if (k_is_in_isr() || z_is_thread_essential()) {
|
||||
printk("Fatal fault in %s! Spinning...\n",
|
||||
k_is_in_isr() ? "ISR" : "essential thread");
|
||||
goto hang_system;
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
/* forward declaration */
|
||||
|
||||
/* Initial thread stack frame, such that everything is laid out as expected
|
||||
* for when _Swap() switches to it for the first time.
|
||||
* for when z_swap() switches to it for the first time.
|
||||
*/
|
||||
struct _x86_initial_frame {
|
||||
u32_t swap_retval;
|
||||
|
@ -58,7 +58,7 @@ struct _x86_initial_frame {
|
|||
* @param priority thread priority
|
||||
* @param options thread options: K_ESSENTIAL, K_FP_REGS, K_SSE_REGS
|
||||
*/
|
||||
void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
size_t stack_size, k_thread_entry_t entry,
|
||||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned int options)
|
||||
|
@ -67,7 +67,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
char *stack_high;
|
||||
struct _x86_initial_frame *initial_frame;
|
||||
|
||||
_ASSERT_VALID_PRIO(priority, entry);
|
||||
Z_ASSERT_VALID_PRIO(priority, entry);
|
||||
stack_buf = K_THREAD_STACK_BUFFER(stack);
|
||||
_new_thread_init(thread, stack_buf, stack_size, priority, options);
|
||||
|
||||
|
@ -75,7 +75,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
if ((options & K_USER) == 0) {
|
||||
/* Running in kernel mode, kernel stack region is also a guard
|
||||
* page */
|
||||
_x86_mmu_set_flags(&z_x86_kernel_pdpt,
|
||||
z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
|
||||
(void *)(stack_buf - MMU_PAGE_SIZE),
|
||||
MMU_PAGE_SIZE, MMU_ENTRY_NOT_PRESENT,
|
||||
MMU_PTE_P_MASK);
|
||||
|
@ -83,16 +83,16 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
#endif /* CONFIG_X86_USERSPACE */
|
||||
|
||||
#if CONFIG_X86_STACK_PROTECTION
|
||||
_x86_mmu_set_flags(&z_x86_kernel_pdpt, stack, MMU_PAGE_SIZE,
|
||||
z_x86_mmu_set_flags(&z_x86_kernel_pdpt, stack, MMU_PAGE_SIZE,
|
||||
MMU_ENTRY_NOT_PRESENT, MMU_PTE_P_MASK);
|
||||
#endif
|
||||
|
||||
stack_high = (char *)STACK_ROUND_DOWN(stack_buf + stack_size);
|
||||
|
||||
/* Create an initial context on the stack expected by _Swap() */
|
||||
/* Create an initial context on the stack expected by z_swap() */
|
||||
initial_frame = (struct _x86_initial_frame *)
|
||||
(stack_high - sizeof(struct _x86_initial_frame));
|
||||
/* _thread_entry() arguments */
|
||||
/* z_thread_entry() arguments */
|
||||
initial_frame->entry = entry;
|
||||
initial_frame->p1 = parameter1;
|
||||
initial_frame->p2 = parameter2;
|
||||
|
@ -102,22 +102,22 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
#ifdef CONFIG_X86_USERSPACE
|
||||
if ((options & K_USER) != 0) {
|
||||
#ifdef _THREAD_WRAPPER_REQUIRED
|
||||
initial_frame->edi = (u32_t)_arch_user_mode_enter;
|
||||
initial_frame->edi = (u32_t)z_arch_user_mode_enter;
|
||||
initial_frame->thread_entry = _x86_thread_entry_wrapper;
|
||||
#else
|
||||
initial_frame->thread_entry = _arch_user_mode_enter;
|
||||
initial_frame->thread_entry = z_arch_user_mode_enter;
|
||||
#endif /* _THREAD_WRAPPER_REQUIRED */
|
||||
} else
|
||||
#endif /* CONFIG_X86_USERSPACE */
|
||||
{
|
||||
#ifdef _THREAD_WRAPPER_REQUIRED
|
||||
initial_frame->edi = (u32_t)_thread_entry;
|
||||
initial_frame->edi = (u32_t)z_thread_entry;
|
||||
initial_frame->thread_entry = _x86_thread_entry_wrapper;
|
||||
#else
|
||||
initial_frame->thread_entry = _thread_entry;
|
||||
initial_frame->thread_entry = z_thread_entry;
|
||||
#endif
|
||||
}
|
||||
/* Remaining _x86_initial_frame members can be garbage, _thread_entry()
|
||||
/* Remaining _x86_initial_frame members can be garbage, z_thread_entry()
|
||||
* doesn't care about their state when execution begins
|
||||
*/
|
||||
thread->callee_saved.esp = (unsigned long)initial_frame;
|
||||
|
@ -136,7 +136,7 @@ void _x86_swap_update_page_tables(struct k_thread *incoming,
|
|||
ROUND_UP(outgoing->stack_info.size, MMU_PAGE_SIZE));
|
||||
|
||||
/* Userspace can now access the incoming thread's stack */
|
||||
_x86_mmu_set_flags(&USER_PDPT,
|
||||
z_x86_mmu_set_flags(&USER_PDPT,
|
||||
(void *)incoming->stack_info.start,
|
||||
ROUND_UP(incoming->stack_info.size, MMU_PAGE_SIZE),
|
||||
MMU_ENTRY_PRESENT | K_MEM_PARTITION_P_RW_U_RW,
|
||||
|
@ -164,13 +164,13 @@ void _x86_swap_update_page_tables(struct k_thread *incoming,
|
|||
/* Ensure that the outgoing mem domain configuration
|
||||
* is set back to default state.
|
||||
*/
|
||||
_arch_mem_domain_destroy(outgoing->mem_domain_info.mem_domain);
|
||||
_arch_mem_domain_configure(incoming);
|
||||
z_arch_mem_domain_destroy(outgoing->mem_domain_info.mem_domain);
|
||||
z_arch_mem_domain_configure(incoming);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
|
||||
FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
|
||||
void *p1, void *p2, void *p3)
|
||||
{
|
||||
u32_t stack_end;
|
||||
|
@ -182,7 +182,7 @@ FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
|
|||
_current->stack_info.size);
|
||||
|
||||
/* Set up the kernel stack used during privilege elevation */
|
||||
_x86_mmu_set_flags(&z_x86_kernel_pdpt,
|
||||
z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
|
||||
(void *)(_current->stack_info.start - MMU_PAGE_SIZE),
|
||||
MMU_PAGE_SIZE,
|
||||
(MMU_ENTRY_PRESENT | MMU_ENTRY_WRITE |
|
||||
|
|
|
@ -366,7 +366,7 @@ SECTION_FUNC(TEXT, _x86_userspace_enter)
|
|||
mov %bx, %ds
|
||||
mov %bx, %es
|
||||
|
||||
/* Push arguments to _thread_entry() */
|
||||
/* Push arguments to z_thread_entry() */
|
||||
push %esi /* p3 */
|
||||
#ifndef CONFIG_X86_IAMCU
|
||||
push %ecx /* p2 */
|
||||
|
@ -377,7 +377,7 @@ SECTION_FUNC(TEXT, _x86_userspace_enter)
|
|||
push $0
|
||||
|
||||
/* Save stack pointer at this position, this is where it will be
|
||||
* when we land in _thread_entry()
|
||||
* when we land in z_thread_entry()
|
||||
*/
|
||||
mov %esp, %edi
|
||||
|
||||
|
@ -389,7 +389,7 @@ SECTION_FUNC(TEXT, _x86_userspace_enter)
|
|||
push %edi /* ESP */
|
||||
pushfl /* EFLAGS */
|
||||
push $USER_CODE_SEG /* CS */
|
||||
push $_thread_entry /* EIP */
|
||||
push $z_thread_entry /* EIP */
|
||||
|
||||
#ifdef CONFIG_EXECUTION_BENCHMARKING
|
||||
/* Save the eax and edx registers before reading the time stamp
|
||||
|
@ -404,5 +404,5 @@ SECTION_FUNC(TEXT, _x86_userspace_enter)
|
|||
pop %eax
|
||||
#endif
|
||||
|
||||
/* We will land in _thread_entry() in user mode after this */
|
||||
/* We will land in z_thread_entry() in user mode after this */
|
||||
KPTI_IRET_USER
|
||||
|
|
|
@ -45,7 +45,7 @@ MMU_BOOT_REGION((u32_t)&__kernel_ram_start, (u32_t)&__kernel_ram_size,
|
|||
MMU_ENTRY_EXECUTE_DISABLE);
|
||||
|
||||
|
||||
void _x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
|
||||
void z_x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
|
||||
x86_page_entry_data_t *pde_flags,
|
||||
x86_page_entry_data_t *pte_flags)
|
||||
{
|
||||
|
@ -63,7 +63,7 @@ void _x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
|
|||
}
|
||||
|
||||
|
||||
int _arch_buffer_validate(void *addr, size_t size, int write)
|
||||
int z_arch_buffer_validate(void *addr, size_t size, int write)
|
||||
{
|
||||
u32_t start_pde_num;
|
||||
u32_t end_pde_num;
|
||||
|
@ -180,7 +180,7 @@ static inline void tlb_flush_page(void *addr)
|
|||
}
|
||||
|
||||
|
||||
void _x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
|
||||
void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
|
||||
size_t size,
|
||||
x86_page_entry_data_t flags,
|
||||
x86_page_entry_data_t mask)
|
||||
|
@ -231,12 +231,12 @@ void z_x86_reset_pages(void *start, size_t size)
|
|||
/* Clear both present bit and access flags. Only applies
|
||||
* to threads running in user mode.
|
||||
*/
|
||||
_x86_mmu_set_flags(&z_x86_user_pdpt, start, size,
|
||||
z_x86_mmu_set_flags(&z_x86_user_pdpt, start, size,
|
||||
MMU_ENTRY_NOT_PRESENT,
|
||||
K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK);
|
||||
#else
|
||||
/* Mark as supervisor read-write, user mode no access */
|
||||
_x86_mmu_set_flags(&z_x86_kernel_pdpt, start, size,
|
||||
z_x86_mmu_set_flags(&z_x86_kernel_pdpt, start, size,
|
||||
K_MEM_PARTITION_P_RW_U_NA,
|
||||
K_MEM_PARTITION_PERM_MASK);
|
||||
#endif /* CONFIG_X86_KPTI */
|
||||
|
@ -255,7 +255,7 @@ static inline void activate_partition(struct k_mem_partition *partition)
|
|||
mask = K_MEM_PARTITION_PERM_MASK;
|
||||
#endif /* CONFIG_X86_KPTI */
|
||||
|
||||
_x86_mmu_set_flags(&USER_PDPT,
|
||||
z_x86_mmu_set_flags(&USER_PDPT,
|
||||
(void *)partition->start,
|
||||
partition->size, attr, mask);
|
||||
}
|
||||
|
@ -307,7 +307,7 @@ out:
|
|||
}
|
||||
|
||||
/* Load the partitions of the thread. */
|
||||
void _arch_mem_domain_configure(struct k_thread *thread)
|
||||
void z_arch_mem_domain_configure(struct k_thread *thread)
|
||||
{
|
||||
_x86_mem_domain_pages_update(thread->mem_domain_info.mem_domain,
|
||||
X86_MEM_DOMAIN_SET_PAGES);
|
||||
|
@ -316,13 +316,13 @@ void _arch_mem_domain_configure(struct k_thread *thread)
|
|||
/* Destroy or reset the mmu page tables when necessary.
|
||||
* Needed when either swap takes place or k_mem_domain_destroy is called.
|
||||
*/
|
||||
void _arch_mem_domain_destroy(struct k_mem_domain *domain)
|
||||
void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
|
||||
{
|
||||
_x86_mem_domain_pages_update(domain, X86_MEM_DOMAIN_RESET_PAGES);
|
||||
}
|
||||
|
||||
/* Reset/destroy one partition spcified in the argument of the API. */
|
||||
void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
|
||||
void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
|
||||
u32_t partition_id)
|
||||
{
|
||||
struct k_mem_partition *partition;
|
||||
|
@ -349,7 +349,7 @@ void _arch_mem_domain_partition_add(struct k_mem_domain *domain,
|
|||
activate_partition(partition);
|
||||
}
|
||||
|
||||
int _arch_mem_domain_max_partitions_get(void)
|
||||
int z_arch_mem_domain_max_partitions_get(void)
|
||||
{
|
||||
return CONFIG_MAX_DOMAIN_PARTITIONS;
|
||||
}
|
||||
|
|
|
@ -41,7 +41,7 @@
|
|||
#endif
|
||||
|
||||
/* Some configurations require that the stack/registers be adjusted before
|
||||
* _thread_entry. See discussion in swap.S for _x86_thread_entry_wrapper()
|
||||
* z_thread_entry. See discussion in swap.S for _x86_thread_entry_wrapper()
|
||||
*/
|
||||
#if defined(CONFIG_X86_IAMCU) || defined(CONFIG_DEBUG_INFO)
|
||||
#define _THREAD_WRAPPER_REQUIRED
|
||||
|
|
|
@ -39,7 +39,7 @@ static inline void kernel_arch_init(void)
|
|||
_kernel.irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
CONFIG_ISR_STACK_SIZE;
|
||||
#if CONFIG_X86_STACK_PROTECTION
|
||||
_x86_mmu_set_flags(&z_x86_kernel_pdpt, _interrupt_stack, MMU_PAGE_SIZE,
|
||||
z_x86_mmu_set_flags(&z_x86_kernel_pdpt, _interrupt_stack, MMU_PAGE_SIZE,
|
||||
MMU_ENTRY_NOT_PRESENT, MMU_PTE_P_MASK);
|
||||
#endif
|
||||
}
|
||||
|
@ -58,9 +58,9 @@ static inline void kernel_arch_init(void)
|
|||
* @return N/A
|
||||
*/
|
||||
static ALWAYS_INLINE void
|
||||
_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
z_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
||||
{
|
||||
/* write into 'eax' slot created in _Swap() entry */
|
||||
/* write into 'eax' slot created in z_swap() entry */
|
||||
|
||||
*(unsigned int *)(thread->callee_saved.esp) = value;
|
||||
}
|
||||
|
@ -129,7 +129,7 @@ extern FUNC_NORETURN void _x86_userspace_enter(k_thread_entry_t user_entry,
|
|||
}
|
||||
#endif
|
||||
|
||||
#define _is_in_isr() (_kernel.nested != 0U)
|
||||
#define z_is_in_isr() (_kernel.nested != 0U)
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
|
|
|
@ -236,7 +236,7 @@ typedef struct s_preempFloatReg {
|
|||
* The thread control stucture definition. It contains the
|
||||
* various fields to manage a _single_ thread. The TCS will be aligned
|
||||
* to the appropriate architecture specific boundary via the
|
||||
* _new_thread() call.
|
||||
* z_new_thread() call.
|
||||
*/
|
||||
|
||||
struct _thread_arch {
|
||||
|
@ -244,7 +244,7 @@ struct _thread_arch {
|
|||
#if defined(CONFIG_FP_SHARING)
|
||||
/*
|
||||
* Nested exception count to maintain setting of EXC_ACTIVE flag across
|
||||
* outermost exception. EXC_ACTIVE is used by _Swap() lazy FP
|
||||
* outermost exception. EXC_ACTIVE is used by z_swap() lazy FP
|
||||
* save/restore and by debug tools.
|
||||
*/
|
||||
unsigned excNestCount; /* nested exception count */
|
||||
|
|
|
@ -8,12 +8,12 @@
|
|||
* @file
|
||||
* @brief Stack frame created by swap (IA-32)
|
||||
*
|
||||
* This file details the stack frame generated by _Swap() when it saves a task
|
||||
* This file details the stack frame generated by z_swap() when it saves a task
|
||||
* or thread's context. This is specific to the IA-32 processor architecture.
|
||||
*
|
||||
* NOTE: _Swap() does not use this file as it uses the push instruction to
|
||||
* NOTE: z_swap() does not use this file as it uses the push instruction to
|
||||
* save a context. Changes to the file will not automatically be picked up by
|
||||
* _Swap(). Conversely, changes to _Swap() should be mirrored here if the
|
||||
* z_swap(). Conversely, changes to z_swap() should be mirrored here if the
|
||||
* stack frame is modified.
|
||||
*/
|
||||
|
||||
|
@ -33,8 +33,8 @@ typedef struct s_SwapStk {
|
|||
unsigned int ebx; /* EBX register */
|
||||
unsigned int esi; /* ESI register */
|
||||
unsigned int edi; /* EDI register */
|
||||
unsigned int retAddr; /* Return address of caller of _Swap() */
|
||||
unsigned int param; /* Parameter passed to _Swap() */
|
||||
unsigned int retAddr; /* Return address of caller of z_swap() */
|
||||
unsigned int param; /* Parameter passed to z_swap() */
|
||||
} tSwapStk;
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
|
|
@ -15,7 +15,7 @@ struct device;
struct NANO_ESF {
};

void _new_thread(struct k_thread *t, k_thread_stack_t *stack,
void z_new_thread(struct k_thread *t, k_thread_stack_t *stack,
size_t sz, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, unsigned int opts)

@ -29,7 +29,7 @@ void _new_thread(struct k_thread *t, k_thread_stack_t *stack,
_new_thread_init(t, base, sz, prio, opts);

t->switch_handle = (void *)xuk_setup_stack((long) top,
(void *)_thread_entry,
(void *)z_thread_entry,
eflags, (long *)args,
nargs);
}

@ -53,18 +53,18 @@ void _unhandled_vector(int vector, int err, struct xuk_entry_frame *f)
printk("*** R8 0x%llx R9 0x%llx R10 0x%llx R11 0x%llx\n",
f->r8, f->r9, f->r10, f->r11);

_NanoFatalErrorHandler(x86_64_except_reason, NULL);
z_NanoFatalErrorHandler(x86_64_except_reason, NULL);
}

void _isr_entry(void)
{
_arch_curr_cpu()->nested++;
z_arch_curr_cpu()->nested++;
}

void *_isr_exit_restore_stack(void *interrupted)
{
bool nested = (--_arch_curr_cpu()->nested) > 0;
void *next = _get_next_switch_handle(interrupted);
bool nested = (--z_arch_curr_cpu()->nested) > 0;
void *next = z_get_next_switch_handle(interrupted);

return (nested || next == interrupted) ? NULL : next;
}

@ -76,7 +76,7 @@ struct {
} cpu_init[CONFIG_MP_NUM_CPUS];

/* Called from Zephyr initialization */
void _arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
void (*fn)(int, void *), void *arg)
{
cpu_init[cpu_num].arg = arg;

@ -130,18 +130,18 @@ void _cpu_start(int cpu)
/* The SMP CPU startup function pointers act as init
* flags. Zero them here because this code is running
* BEFORE .bss is zeroed! Should probably move that
* out of _Cstart() for this architecture...
* out of z_cstart() for this architecture...
*/
for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
cpu_init[i].fn = 0;
}

/* Enter Zephyr */
_Cstart();
z_cstart();

} else if (cpu < CONFIG_MP_NUM_CPUS) {
/* SMP initialization. First spin, waiting for
* _arch_start_cpu() to be called from the main CPU
* z_arch_start_cpu() to be called from the main CPU
*/
while (!cpu_init[cpu].fn) {
}

@ -157,14 +157,14 @@ void _cpu_start(int cpu)

/* Returns the initial stack to use for CPU startup on auxiliary (not
* cpu 0) processors to the xuk layer, which gets selected by the
* non-arch Zephyr kernel and stashed by _arch_start_cpu()
* non-arch Zephyr kernel and stashed by z_arch_start_cpu()
*/
unsigned int _init_cpu_stack(int cpu)
{
return cpu_init[cpu].esp;
}

int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter), void *parameter,
u32_t flags)
{

@ -176,12 +176,12 @@ int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
return 0;
}

void _arch_irq_disable(unsigned int irq)
void z_arch_irq_disable(unsigned int irq)
{
xuk_set_isr_mask(irq, 1);
}

void _arch_irq_enable(unsigned int irq)
void z_arch_irq_enable(unsigned int irq)
{
xuk_set_isr_mask(irq, 0);
}

@ -195,13 +195,13 @@ const NANO_ESF _default_esf;

int x86_64_except_reason;

void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
void z_NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
{
_SysFatalErrorHandler(reason, esf);
z_SysFatalErrorHandler(reason, esf);
}

/* App-overridable handler. Does nothing here */
void __weak _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
void __weak z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
{
ARG_UNUSED(reason);
ARG_UNUSED(esf);

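The __weak default above is what lets an application supply its own fatal-error policy for z_SysFatalErrorHandler(). A minimal, generic sketch of the same weak/strong override pattern, with purely illustrative names rather than the kernel's:

#include <stdio.h>

/* Weak default, analogous to the __weak z_SysFatalErrorHandler() above:
 * it is used only if no other object file provides a strong definition.
 */
__attribute__((weak)) void fatal_policy(unsigned int reason)
{
	printf("default policy: reason %u ignored\n", reason);
}

/* An application would place a non-weak fatal_policy() with the same
 * signature in one of its own source files; the linker then picks that
 * definition instead of the weak one.
 */
int main(void)
{
	fatal_policy(3); /* calls whichever definition the link selected */
	return 0;
}
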
@ -12,11 +12,11 @@
static inline void kernel_arch_init(void)
{
/* This is a noop, we already took care of things before
* _Cstart() is entered
* z_cstart() is entered
*/
}

static inline struct _cpu *_arch_curr_cpu(void)
static inline struct _cpu *z_arch_curr_cpu(void)
{
long long ret, off = 0;

@ -27,7 +27,7 @@ static inline struct _cpu *_arch_curr_cpu(void)
return (struct _cpu *)(long)ret;
}

static inline unsigned int _arch_irq_lock(void)
static inline unsigned int z_arch_irq_lock(void)
{
unsigned long long key;

@ -35,7 +35,7 @@ static inline unsigned int _arch_irq_lock(void)
return (int)key;
}

static inline void _arch_irq_unlock(unsigned int key)
static inline void z_arch_irq_unlock(unsigned int key)
{
if (key & 0x200) {
__asm__ volatile("sti");

@ -47,8 +47,8 @@ static inline void arch_nop(void)
__asm__ volatile("nop");
}

void _arch_irq_disable(unsigned int irq);
void _arch_irq_enable(unsigned int irq);
void z_arch_irq_disable(unsigned int irq);
void z_arch_irq_enable(unsigned int irq);

/* Not a standard Zephyr function, but probably will be */
static inline unsigned long long _arch_k_cycle_get_64(void)

@ -59,17 +59,17 @@ static inline unsigned long long _arch_k_cycle_get_64(void)
return (((unsigned long long)hi) << 32) | lo;
}

static inline unsigned int _arch_k_cycle_get_32(void)
static inline unsigned int z_arch_k_cycle_get_32(void)
{
#ifdef CONFIG_HPET_TIMER
extern u32_t _timer_cycle_get_32(void);
return _timer_cycle_get_32();
extern u32_t z_timer_cycle_get_32(void);
return z_timer_cycle_get_32();
#else
return (u32_t)_arch_k_cycle_get_64();
#endif
}

#define _is_in_isr() (_arch_curr_cpu()->nested != 0)
#define z_is_in_isr() (z_arch_curr_cpu()->nested != 0)

static inline void _arch_switch(void *switch_to, void **switched_from)
{

@ -88,8 +88,8 @@ static inline u32_t x86_apic_scaled_tsc(void)

void x86_apic_set_timeout(u32_t cyc_from_now);

#define _ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags) \
_arch_irq_connect_dynamic(irq, pri, isr, arg, flags)
#define Z_ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags) \
z_arch_irq_connect_dynamic(irq, pri, isr, arg, flags)

extern int x86_64_except_reason;

@ -97,7 +97,7 @@ extern int x86_64_except_reason;
/* Vector 5 is the "bounds" exception which is otherwise vestigial
* (BOUND is an illegal instruction in long mode)
*/
#define _ARCH_EXCEPT(reason) do { \
#define Z_ARCH_EXCEPT(reason) do { \
x86_64_except_reason = reason; \
__asm__ volatile("int $5"); \
} while (false)

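z_arch_irq_lock() and z_arch_irq_unlock() above are the x86_64 backends of Zephyr's public irq_lock()/irq_unlock(); the returned key records the caller's prior interrupt state (here bit 0x200, the IF flag), so nested critical sections restore rather than force interrupts on. A hedged usage sketch, assuming a Zephyr build environment (the shared counter is illustrative):

#include <kernel.h>

static int shared_count;

void bump_shared_count(void)
{
	unsigned int key = irq_lock(); /* saves the caller's interrupt state */

	shared_count++;                /* short critical section */

	irq_unlock(key);               /* restores the saved state */
}
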
@ -17,12 +17,12 @@
* __stack from linker script (see LSP Ref Manual)
* _bss_table_start from linker script (see LSP Ref Manual)
* _bss_table_end from linker script (see LSP Ref Manual)
* _Cstart Entry point into Zephyr C domain
* z_cstart Entry point into Zephyr C domain
* __stack from linker script (see LSP Ref Manual)
*/

.global __start
.type _Cstart, @function
.type z_cstart, @function

/* Macros to abstract away ABI differences */

@ -178,7 +178,7 @@ _start:
#endif /* !XCHAL_HAVE_BOOTLOADER */

/* Enter C domain, never returns from here */
CALL _Cstart
CALL z_cstart

.size _start, . - _start

@ -37,7 +37,7 @@ const NANO_ESF _default_esf = {
*
* This routine is called when fatal error conditions are detected by software
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* invokes the user provided routine z_SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* The caller is expected to always provide a usable ESF. In the event that the

@ -49,7 +49,7 @@ const NANO_ESF _default_esf = {
*
* @return This function does not return.
*/
XTENSA_ERR_NORET void _NanoFatalErrorHandler(unsigned int reason,
XTENSA_ERR_NORET void z_NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf)
{
LOG_PANIC();

@ -92,7 +92,7 @@ XTENSA_ERR_NORET void _NanoFatalErrorHandler(unsigned int reason,
* appropriate to the various errors are something the customer must
* decide.
*/
_SysFatalErrorHandler(reason, pEsf);
z_SysFatalErrorHandler(reason, pEsf);
}

@ -190,7 +190,7 @@ XTENSA_ERR_NORET void FatalErrorHandler(void)
{
printk("*** Unhandled exception ****\n");
dump_exc_state();
_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
z_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
}

XTENSA_ERR_NORET void ReservedInterruptHandler(unsigned int intNo)

@ -200,7 +200,7 @@ XTENSA_ERR_NORET void ReservedInterruptHandler(unsigned int intNo)
printk("INTENABLE = 0x%x\n"
"INTERRUPT = 0x%x (%d)\n",
get_sreg(INTENABLE), (1 << intNo), intNo);
_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
z_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
}

void exit(int return_code)

@ -239,7 +239,7 @@ void exit(int return_code)
*
* @return N/A
*/
XTENSA_ERR_NORET __weak void _SysFatalErrorHandler(unsigned int reason,
XTENSA_ERR_NORET __weak void z_SysFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf)
{
ARG_UNUSED(pEsf);

@ -253,7 +253,7 @@ XTENSA_ERR_NORET __weak void _SysFatalErrorHandler(unsigned int reason,
if (reason == _NANO_ERR_KERNEL_PANIC) {
goto hang_system;
}
if (k_is_in_isr() || _is_thread_essential()) {
if (k_is_in_isr() || z_is_thread_essential()) {
printk("Fatal fault in %s! Spinning...\n",
k_is_in_isr() ? "ISR" : "essential thread");
goto hang_system;

@ -27,7 +27,7 @@
* @return N/A
*/

void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
{
__ASSERT(prio < XCHAL_EXCM_LEVEL + 1,
"invalid priority %d! values must be less than %d\n",

@ -38,7 +38,7 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
}

#ifdef CONFIG_DYNAMIC_INTERRUPTS
int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter), void *parameter,
u32_t flags)
{

@ -27,7 +27,7 @@ void irq_offload(irq_offload_routine_t routine, void *parameter)
{
IRQ_CONNECT(CONFIG_IRQ_OFFLOAD_INTNUM, XCHAL_EXCM_LEVEL,
_irq_do_offload, NULL, 0);
_arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM);
z_arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM);
offload_routine = routine;
offload_param = parameter;
_xt_set_intset(1 << CONFIG_IRQ_OFFLOAD_INTNUM);

@ -35,5 +35,5 @@ void irq_offload(irq_offload_routine_t routine, void *parameter)
* Enable the software interrupt, in case it is disabled, so that IRQ
* offload is serviced.
*/
_arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM);
z_arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM);
}

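The irq_offload() implementation above is what lets test code run a routine in real interrupt context on Xtensa by pending a software interrupt. A minimal usage sketch under a Zephyr build, with the flag variable and wrapper name being illustrative:

#include <stdbool.h>
#include <kernel.h>
#include <irq_offload.h>

static volatile bool ran_in_isr;

static void offloaded_routine(void *param)
{
	ARG_UNUSED(param);
	ran_in_isr = k_is_in_isr(); /* expected to be true here */
}

bool run_offload_example(void)
{
	ran_in_isr = false;
	irq_offload(offloaded_routine, NULL);
	return ran_in_isr;
}
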
@ -25,7 +25,7 @@ extern void _xt_user_exit(void);
* needed anymore.
*
* The initial context is a basic stack frame that contains arguments for
* _thread_entry() return address, that points at _thread_entry()
* z_thread_entry() return address, that points at z_thread_entry()
* and status register.
*
* <options> is currently unused.

@ -43,7 +43,7 @@ extern void _xt_user_exit(void);
* @return N/A
*/

void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stackSize, k_thread_entry_t pEntry,
void *p1, void *p2, void *p3,
int priority, unsigned int options)

@ -90,7 +90,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
/* Explicitly initialize certain saved registers */

/* task entrypoint */
pInitCtx->pc = (u32_t)_thread_entry;
pInitCtx->pc = (u32_t)z_thread_entry;

/* physical top of stack frame */
pInitCtx->a1 = (u32_t)pInitCtx + XT_STK_FRMSZ;

@ -34,7 +34,7 @@ _zxt_dispatch:
bnez a2, .L_frxt_dispatch_stk

.L_frxt_dispatch_sol:
/* Solicited stack frame. Restore retval from _Swap */
/* Solicited stack frame. Restore retval from z_swap */
l32i a2, a3, THREAD_OFFSET(retval)
l32i a3, sp, XT_SOL_ps

@ -71,9 +71,9 @@ _zxt_dispatch:
#endif
#ifdef CONFIG_STACK_SENTINEL
#ifdef __XTENSA_CALL0_ABI__
call0 _check_stack_sentinel
call0 z_check_stack_sentinel
#else
call4 _check_stack_sentinel
call4 z_check_stack_sentinel
#endif
#endif
/*

@ -341,10 +341,10 @@ _zxt_tick_timer_init:
*/
#ifdef __XTENSA_CALL0_ABI__
movi a2, XT_TIMER_INTEN
call0 _xt_ints_on
call0 z_xt_ints_on
#else
movi a6, XT_TIMER_INTEN
call4 _xt_ints_on
call4 z_xt_ints_on
#endif

#endif

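The CONFIG_STACK_SENTINEL path above calls z_check_stack_sentinel on the context-switch path; conceptually, the check verifies that a known word written at the far end of a thread's stack is still intact. A rough, illustrative C sketch of the idea, with the constant and function names being hypothetical rather than the kernel's:

#include <stdint.h>

#define STACK_SENTINEL_VALUE 0xF0F0F0F0u /* illustrative canary value */

/* Hypothetical check: the word at the lowest address of the stack must
 * still hold the canary written at thread creation; if it does not, the
 * stack has overflowed and a fatal error should be raised.
 */
int stack_sentinel_ok(const uint32_t *stack_bottom)
{
	return *stack_bottom == STACK_SENTINEL_VALUE;
}
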
@ -29,10 +29,10 @@ void *xtensa_init_stack(int *stack_top,

(void)memset(bsa, 0, bsasz);

bsa[BSA_PC_OFF/4] = _thread_entry;
bsa[BSA_PC_OFF/4] = z_thread_entry;
bsa[BSA_PS_OFF/4] = (void *)(PS_WOE | PS_UM | PS_CALLINC(1));

/* Arguments to _thread_entry(). Remember these start at A6,
/* Arguments to z_thread_entry(). Remember these start at A6,
* which will be rotated into A2 by the ENTRY instruction that
* begins the C function. And A4-A7 and A8-A11 are optional
* quads that live below the BSA!

@ -59,7 +59,7 @@ void *xtensa_init_stack(int *stack_top,
* utilities/testables.
*/
#ifdef CONFIG_XTENSA_ASM2
void _new_thread(struct k_thread *thread, k_thread_stack_t *stack, size_t sz,
void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack, size_t sz,
k_thread_entry_t entry, void *p1, void *p2, void *p3,
int prio, unsigned int opts)
{

@ -77,7 +77,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack, size_t sz,
#endif

#ifdef CONFIG_XTENSA_ASM2
void _irq_spurious(void *arg)
void z_irq_spurious(void *arg)
{
int irqs, ie;

@ -87,7 +87,7 @@ void _irq_spurious(void *arg)
__asm__ volatile("rsr.intenable %0" : "=r"(ie));
printk(" ** Spurious INTERRUPT(s) %p, INTENABLE = %p\n",
(void *)irqs, (void *)ie);
_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
z_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
}
#endif

@ -143,7 +143,7 @@ void *xtensa_int##l##_c(void *interrupted_stack) \
irqs ^= m; \
__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
} \
return _get_next_switch_handle(interrupted_stack); \
return z_get_next_switch_handle(interrupted_stack); \
}

DEF_INT_C_HANDLER(2)

@ -191,7 +191,7 @@ void *xtensa_excint1_c(int *interrupted_stack)
*/
printk(" ** FATAL EXCEPTION\n");
printk(" ** CPU %d EXCCAUSE %d PS %p PC %p VADDR %p\n",
_arch_curr_cpu()->id, cause, (void *)bsa[BSA_PS_OFF/4],
z_arch_curr_cpu()->id, cause, (void *)bsa[BSA_PS_OFF/4],
(void *)bsa[BSA_PC_OFF/4], (void *)vaddr);

dump_stack(interrupted_stack);

@ -201,9 +201,9 @@ void *xtensa_excint1_c(int *interrupted_stack)
* as these are software errors. Should clean this
* up.
*/
_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
z_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
}

return _get_next_switch_handle(interrupted_stack);
return z_get_next_switch_handle(interrupted_stack);
}

@ -31,7 +31,7 @@ xt_exc_handler _xt_exception_table[XCHAL_EXCCAUSE_NUM] __aligned(4) = {
#endif

#if defined(CONFIG_SW_ISR_TABLE) && defined(XCHAL_HAVE_INTERRUPTS)
void _irq_spurious(void *arg)
void z_irq_spurious(void *arg)
{
ReservedInterruptHandler((unsigned int)arg);
CODE_UNREACHABLE;

@ -41,7 +41,7 @@ _xt_vpri_mask: .word 0xFFFFFFFF /* Virtual priority mask */

/*
-------------------------------------------------------------------------------
unsigned int _xt_ints_on ( unsigned int mask )
unsigned int z_xt_ints_on ( unsigned int mask )

Enables a set of interrupts. Does not simply set INTENABLE directly, but
computes it as a function of the current virtual priority.

@ -51,10 +51,10 @@ _xt_vpri_mask: .word 0xFFFFFFFF /* Virtual priority mask */

.text
.align 4
.global _xt_ints_on
.type _xt_ints_on,@function
.global z_xt_ints_on
.type z_xt_ints_on,@function

_xt_ints_on:
z_xt_ints_on:

ENTRY0
#if XCHAL_HAVE_INTERRUPTS

@ -74,12 +74,12 @@ _xt_ints_on:
#endif
RET0

.size _xt_ints_on, . - _xt_ints_on
.size z_xt_ints_on, . - z_xt_ints_on

/*
-------------------------------------------------------------------------------
unsigned int _xt_ints_off ( unsigned int mask )
unsigned int z_xt_ints_off ( unsigned int mask )

Disables a set of interrupts. Does not simply set INTENABLE directly,
but computes it as a function of the current virtual priority.

@ -89,10 +89,10 @@ _xt_ints_on:

.text
.align 4
.global _xt_ints_off
.type _xt_ints_off,@function
.global z_xt_ints_off
.type z_xt_ints_off,@function

_xt_ints_off:
z_xt_ints_off:

ENTRY0
#if XCHAL_HAVE_INTERRUPTS

@ -113,6 +113,6 @@ _xt_ints_off:
#endif
RET0

.size _xt_ints_off, . - _xt_ints_off
.size z_xt_ints_off, . - z_xt_ints_off

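As the comments above note, z_xt_ints_on() and z_xt_ints_off() do not write INTENABLE blindly; the effective enable set is derived from the current virtual priority mask (_xt_vpri_mask). A rough C sketch of that idea, with illustrative variable names standing in for the special registers handled by the assembly above:

#include <stdint.h>

static uint32_t vpri_mask = 0xFFFFFFFFu; /* stands in for _xt_vpri_mask */
static uint32_t intenable;               /* stands in for the INTENABLE SR */

/* Enable interrupts in 'mask', but only those the virtual priority allows. */
void ints_on_sketch(uint32_t mask)
{
	intenable = (intenable | mask) & vpri_mask;
}

/* Disabling simply clears the requested bits. */
void ints_off_sketch(uint32_t mask)
{
	intenable &= ~mask;
}
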
@ -139,7 +139,7 @@
* mask -- interrupt bitmask for this level
*/
.extern _kernel
.extern _sys_power_save_idle_exit
.extern z_sys_power_save_idle_exit

.macro dispatch_c_isr level mask

@ -203,14 +203,14 @@
beqz a2, 10f
xor a4, a2, a2
s32i a4, a3, _kernel_offset_to_idle
call0 _sys_power_save_idle_exit
call0 z_sys_power_save_idle_exit
mov a2, a12
#else
l32i a6, a3, _kernel_offset_to_idle
beqz a6, 10f
xor a4, a6, a6
s32i a4, a3, _kernel_offset_to_idle
call4 _sys_power_save_idle_exit
call4 z_sys_power_save_idle_exit
#endif /* __XTENSA_CALL0_ABI__ */
10:
#endif /* CONFIG_SYS_POWER_MANAGEMENT */

@ -40,7 +40,7 @@ extern void _xt_coproc_init(void);

extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);

static ALWAYS_INLINE _cpu_t *_arch_curr_cpu(void)
static ALWAYS_INLINE _cpu_t *z_arch_curr_cpu(void)
{
#ifdef CONFIG_XTENSA_ASM2
void *val;

@ -110,7 +110,7 @@ static ALWAYS_INLINE void kernel_arch_init(void)
*/
#if !CONFIG_USE_SWITCH
static ALWAYS_INLINE void
_set_thread_return_value(struct k_thread *thread, unsigned int value)
z_set_thread_return_value(struct k_thread *thread, unsigned int value)
{
thread->callee_saved.retval = value;
}

@ -124,7 +124,7 @@ extern void k_cpu_atomic_idle(unsigned int key);
}
#endif

#define _is_in_isr() (_arch_curr_cpu()->nested != 0U)
#define z_is_in_isr() (z_arch_curr_cpu()->nested != 0U)

#endif /* _ASMLANGUAGE */

@ -16,7 +16,7 @@
* mask - Bit mask of interrupts to be enabled.
*/
#if CONFIG_XTENSA_ASM2
static inline void _xt_ints_on(unsigned int mask)
static inline void z_xt_ints_on(unsigned int mask)
{
int val;

@ -25,7 +25,7 @@ static inline void _xt_ints_on(unsigned int mask)
__asm__ volatile("wsr.intenable %0; rsync" : : "r"(val));
}
#else
extern void _xt_ints_on(unsigned int mask);
extern void z_xt_ints_on(unsigned int mask);
#endif

@ -35,7 +35,7 @@ extern void _xt_ints_on(unsigned int mask);
* mask - Bit mask of interrupts to be disabled.
*/
#if CONFIG_XTENSA_ASM2
static inline void _xt_ints_off(unsigned int mask)
static inline void z_xt_ints_off(unsigned int mask)
{
int val;

@ -44,7 +44,7 @@ static inline void _xt_ints_off(unsigned int mask)
__asm__ volatile("wsr.intenable %0; rsync" : : "r"(val));
}
#else
extern void _xt_ints_off(unsigned int mask);
extern void z_xt_ints_off(unsigned int mask);
#endif

/*

@ -17,7 +17,7 @@ extern "C" {
|
|||
|
||||
void _isr_declare(unsigned int irq_p, int flags, void isr_p(void *),
|
||||
void *isr_param_p);
|
||||
void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
||||
void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
||||
|
||||
/**
|
||||
* Configure a static interrupt.
|
||||
|
@ -30,10 +30,10 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
|||
*
|
||||
* @return The vector assigned to this interrupt
|
||||
*/
|
||||
#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
|
||||
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
|
||||
({ \
|
||||
_isr_declare(irq_p, 0, isr_p, isr_param_p); \
|
||||
_irq_priority_set(irq_p, priority_p, flags_p); \
|
||||
z_irq_priority_set(irq_p, priority_p, flags_p); \
|
||||
irq_p; \
|
||||
})
|
||||
|
||||
|
@ -43,10 +43,10 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
|||
*
|
||||
* See include/irq.h for details.
|
||||
*/
|
||||
#define _ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
|
||||
#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
|
||||
({ \
|
||||
_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, NULL); \
|
||||
_irq_priority_set(irq_p, priority_p, flags_p); \
|
||||
z_irq_priority_set(irq_p, priority_p, flags_p); \
|
||||
irq_p; \
|
||||
})
|
||||
|
||||
|
@ -62,7 +62,7 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
|||
* All pre/post irq work of the interrupt is handled in the board
|
||||
* posix_irq_handler() both for direct and normal interrupts together
|
||||
*/
|
||||
#define _ARCH_ISR_DIRECT_DECLARE(name) \
|
||||
#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
|
||||
static inline int name##_body(void); \
|
||||
int name(void) \
|
||||
{ \
|
||||
|
@ -72,14 +72,14 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
|||
} \
|
||||
static inline int name##_body(void)
|
||||
|
||||
#define _ARCH_ISR_DIRECT_HEADER() do { } while (0)
|
||||
#define _ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)
|
||||
#define Z_ARCH_ISR_DIRECT_HEADER() do { } while (0)
|
||||
#define Z_ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)
|
||||
|
||||
#ifdef CONFIG_SYS_POWER_MANAGEMENT
|
||||
extern void posix_irq_check_idle_exit(void);
|
||||
#define _ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
|
||||
#define Z_ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
|
||||
#else
|
||||
#define _ARCH_ISR_DIRECT_PM() do { } while (0)
|
||||
#define Z_ARCH_ISR_DIRECT_PM() do { } while (0)
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
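Z_ARCH_IRQ_CONNECT() above is the POSIX-arch backend behind Zephyr's public IRQ_CONNECT() macro. A typical driver-side usage sketch under a Zephyr build; the IRQ number, priority and handler name are illustrative only:

#include <kernel.h>
#include <irq.h>

#define MY_DEV_IRQ  5 /* illustrative IRQ line */
#define MY_DEV_PRIO 1 /* illustrative priority */

static void my_dev_isr(void *arg)
{
	ARG_UNUSED(arg);
	/* acknowledge the device and do the minimal ISR work here */
}

static int my_dev_init(void)
{
	IRQ_CONNECT(MY_DEV_IRQ, MY_DEV_PRIO, my_dev_isr, NULL, 0);
	irq_enable(MY_DEV_IRQ);
	return 0;
}
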
@ -32,9 +32,9 @@ static inline void vector_to_irq(int irq_nbr, int *may_swap)
{
/*
* As in this architecture an irq (code) executes in 0 time,
* it is a bit senseless to call _int_latency_start/stop()
* it is a bit senseless to call z_int_latency_start/stop()
*/
/* _int_latency_start(); */
/* z_int_latency_start(); */

sys_trace_isr_enter();

@ -59,7 +59,7 @@ static inline void vector_to_irq(int irq_nbr, int *may_swap)
}

sys_trace_isr_exit();
/* _int_latency_stop(); */
/* z_int_latency_stop(); */
}

/**

@ -114,7 +114,7 @@ void posix_irq_handler(void)
&& (hw_irq_ctrl_get_cur_prio() == 256)
&& (_kernel.ready_q.cache != _current)) {

(void)_Swap_irqlock(irq_lock);
(void)z_swap_irqlock(irq_lock);
}
}

@ -178,7 +178,7 @@ unsigned int posix_irq_lock(void)
return hw_irq_ctrl_change_lock(true);
}

unsigned int _arch_irq_lock(void)
unsigned int z_arch_irq_lock(void)
{
return posix_irq_lock();
}

@ -201,7 +201,7 @@ void posix_irq_unlock(unsigned int key)
hw_irq_ctrl_change_lock(key);
}

void _arch_irq_unlock(unsigned int key)
void z_arch_irq_unlock(unsigned int key)
{
posix_irq_unlock(key);
}

@ -212,17 +212,17 @@ void posix_irq_full_unlock(void)
hw_irq_ctrl_change_lock(false);
}

void _arch_irq_enable(unsigned int irq)
void z_arch_irq_enable(unsigned int irq)
{
hw_irq_ctrl_enable_irq(irq);
}

void _arch_irq_disable(unsigned int irq)
void z_arch_irq_disable(unsigned int irq)
{
hw_irq_ctrl_disable_irq(irq);
}

int _arch_irq_is_enabled(unsigned int irq)
int z_arch_irq_is_enabled(unsigned int irq)
{
return hw_irq_ctrl_is_irq_enabled(irq);
}

@ -265,7 +265,7 @@ void _isr_declare(unsigned int irq_p, int flags, void isr_p(void *),
*
* @return N/A
*/
void _irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
void z_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{
hw_irq_ctrl_prio_set(irq, prio);
}

@ -317,7 +317,7 @@ void irq_offload(irq_offload_routine_t routine, void *parameter)
off_routine = routine;
off_parameter = parameter;
_isr_declare(OFFLOAD_SW_IRQ, 0, offload_sw_irq_handler, NULL);
_arch_irq_enable(OFFLOAD_SW_IRQ);
z_arch_irq_enable(OFFLOAD_SW_IRQ);
posix_sw_set_pending_IRQ(OFFLOAD_SW_IRQ);
_arch_irq_disable(OFFLOAD_SW_IRQ);
z_arch_irq_disable(OFFLOAD_SW_IRQ);
}

@ -17,7 +17,7 @@ extern "C" {
|
|||
|
||||
void _isr_declare(unsigned int irq_p, int flags, void isr_p(void *),
|
||||
void *isr_param_p);
|
||||
void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
||||
void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
||||
|
||||
/**
|
||||
* Configure a static interrupt.
|
||||
|
@ -30,10 +30,10 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
|||
*
|
||||
* @return The vector assigned to this interrupt
|
||||
*/
|
||||
#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
|
||||
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
|
||||
({ \
|
||||
_isr_declare(irq_p, 0, isr_p, isr_param_p); \
|
||||
_irq_priority_set(irq_p, priority_p, flags_p); \
|
||||
z_irq_priority_set(irq_p, priority_p, flags_p); \
|
||||
irq_p; \
|
||||
})
|
||||
|
||||
|
@ -43,10 +43,10 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
|||
*
|
||||
* See include/irq.h for details.
|
||||
*/
|
||||
#define _ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
|
||||
#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
|
||||
({ \
|
||||
_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, NULL); \
|
||||
_irq_priority_set(irq_p, priority_p, flags_p); \
|
||||
z_irq_priority_set(irq_p, priority_p, flags_p); \
|
||||
irq_p; \
|
||||
})
|
||||
|
||||
|
@ -62,7 +62,7 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
|||
* All pre/post irq work of the interrupt is handled in the board
|
||||
* posix_irq_handler() both for direct and normal interrupts together
|
||||
*/
|
||||
#define _ARCH_ISR_DIRECT_DECLARE(name) \
|
||||
#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
|
||||
static inline int name##_body(void); \
|
||||
int name(void) \
|
||||
{ \
|
||||
|
@ -72,14 +72,14 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
|
|||
} \
|
||||
static inline int name##_body(void)
|
||||
|
||||
#define _ARCH_ISR_DIRECT_HEADER() do { } while (0)
|
||||
#define _ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)
|
||||
#define Z_ARCH_ISR_DIRECT_HEADER() do { } while (0)
|
||||
#define Z_ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)
|
||||
|
||||
#ifdef CONFIG_SYS_POWER_MANAGEMENT
|
||||
extern void posix_irq_check_idle_exit(void);
|
||||
#define _ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
|
||||
#define Z_ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
|
||||
#else
|
||||
#define _ARCH_ISR_DIRECT_PM() do { } while (0)
|
||||
#define Z_ARCH_ISR_DIRECT_PM() do { } while (0)
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -87,9 +87,9 @@ static inline void vector_to_irq(int irq_nbr, int *may_swap)

/*
* As in this architecture an irq (code) executes in 0 time,
* it is a bit senseless to call _int_latency_start/stop()
* it is a bit senseless to call z_int_latency_start/stop()
*/
/* _int_latency_start(); */
/* z_int_latency_start(); */
sys_trace_isr_enter();

if (irq_vector_table[irq_nbr].func == NULL) { /* LCOV_EXCL_BR_LINE */

@ -113,7 +113,7 @@ static inline void vector_to_irq(int irq_nbr, int *may_swap)
}

sys_trace_isr_exit();
/* _int_latency_stop(); */
/* z_int_latency_stop(); */

bs_trace_raw_time(7, "Irq %i (%s) ended\n", irq_nbr, irqnames[irq_nbr]);
}

@ -172,7 +172,7 @@ void posix_irq_handler(void)
&& (CPU_will_be_awaken_from_WFE == false)
&& (_kernel.ready_q.cache != _current)) {

_Swap_irqlock(irq_lock);
z_swap_irqlock(irq_lock);
}
}

@ -236,7 +236,7 @@ unsigned int posix_irq_lock(void)
return hw_irq_ctrl_change_lock(true);
}

unsigned int _arch_irq_lock(void)
unsigned int z_arch_irq_lock(void)
{
return posix_irq_lock();
}

@ -259,7 +259,7 @@ void posix_irq_unlock(unsigned int key)
hw_irq_ctrl_change_lock(key);
}

void _arch_irq_unlock(unsigned int key)
void z_arch_irq_unlock(unsigned int key)
{
posix_irq_unlock(key);
}

@ -270,22 +270,22 @@ void posix_irq_full_unlock(void)
hw_irq_ctrl_change_lock(false);
}

void _arch_irq_enable(unsigned int irq)
void z_arch_irq_enable(unsigned int irq)
{
hw_irq_ctrl_enable_irq(irq);
}

void _arch_irq_disable(unsigned int irq)
void z_arch_irq_disable(unsigned int irq)
{
hw_irq_ctrl_disable_irq(irq);
}

int _arch_irq_is_enabled(unsigned int irq)
int z_arch_irq_is_enabled(unsigned int irq)
{
return hw_irq_ctrl_is_irq_enabled(irq);
}

void _arch_isr_direct_header(void)
void z_arch_isr_direct_header(void)
{
/* Nothing to be done */
}

@ -328,7 +328,7 @@ void _isr_declare(unsigned int irq_p, int flags, void isr_p(void *),
*
* @return N/A
*/
void _irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
void z_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{
hw_irq_ctrl_prio_set(irq, prio);
}

@ -380,9 +380,9 @@ void irq_offload(irq_offload_routine_t routine, void *parameter)
off_routine = routine;
off_parameter = parameter;
_isr_declare(OFFLOAD_SW_IRQ, 0, offload_sw_irq_handler, NULL);
_arch_irq_enable(OFFLOAD_SW_IRQ);
z_arch_irq_enable(OFFLOAD_SW_IRQ);
posix_sw_set_pending_IRQ(OFFLOAD_SW_IRQ);
_arch_irq_disable(OFFLOAD_SW_IRQ);
z_arch_irq_disable(OFFLOAD_SW_IRQ);
}

/**

@ -10,11 +10,11 @@
Z_SYSCALL_HANDLER(aio_cmp_disable, dev, index)
{
Z_OOPS(Z_SYSCALL_DRIVER_AIO_CMP(dev, disable));
return _impl_aio_cmp_disable((struct device *)dev, index);
return z_impl_aio_cmp_disable((struct device *)dev, index);
}

Z_SYSCALL_HANDLER(aio_cmp_get_pending_int, dev)
{
Z_OOPS(Z_SYSCALL_DRIVER_AIO_CMP(dev, get_pending_int));
return _impl_aio_get_pending_int((struct device *)dev, index);
return z_impl_aio_get_pending_int((struct device *)dev, index);
}

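The handlers above show the renamed convention in which the kernel-side body of a system call is the z_impl_ variant, while the Z_SYSCALL_HANDLER() wrapper validates the arguments coming from user mode and then forwards the call. A schematic, hand-written sketch of that split; it only mirrors the shape of the flow and is not the generated handler code, and the operation name is hypothetical:

/* Kernel-side implementation: what the system call actually does. */
int z_impl_example_op(int value)
{
	return value * 2;
}

/* User-mode entry: a wrapper checks untrusted input, then calls the
 * z_impl_ function. Real wrappers are produced by the kernel's syscall
 * machinery (Z_SYSCALL_HANDLER() above), not written by hand like this.
 */
int handler_example_op(int value)
{
	if (value < 0) {
		return -1; /* reject bad input from user mode */
	}
	return z_impl_example_op(value);
}
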
Some files were not shown because too many files have changed in this diff.