arch: arm64: irq/switch: accessing nested using _cpu_t
With _kernel_offset_to_nested, we are only able to access the nested counter of the first CPU. Since we are going to support SMP, we need to access the nested counter of the current CPU. To get the current CPU, introduce z_arm64_curr_cpu() for asm usage, because arch_curr_cpu() cannot be compiled in asm code. Signed-off-by: Peng Fan <peng.fan@nxp.com>
This commit is contained in:
parent
cfc7673c28
commit
e10d9364d0
5 changed files with 21 additions and 15 deletions
|
@@ -70,3 +70,8 @@ void z_irq_spurious(const void *unused)
|
||||||
|
|
||||||
z_arm64_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
|
z_arm64_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
_cpu_t *z_arm64_curr_cpu(void)
|
||||||
|
{
|
||||||
|
return arch_curr_cpu();
|
||||||
|
}
|
||||||
|
|
|
@@ -33,7 +33,7 @@ GTEXT(_isr_wrapper)
|
||||||
SECTION_FUNC(TEXT, _isr_wrapper)
|
SECTION_FUNC(TEXT, _isr_wrapper)
|
||||||
|
|
||||||
/* ++(_kernel->nested) to be checked by arch_is_in_isr() */
|
/* ++(_kernel->nested) to be checked by arch_is_in_isr() */
|
||||||
inc_nest_counter x0, x1
|
inc_nest_counter
|
||||||
|
|
||||||
#ifdef CONFIG_TRACING
|
#ifdef CONFIG_TRACING
|
||||||
bl sys_trace_isr_enter
|
bl sys_trace_isr_enter
|
||||||
|
@@ -80,7 +80,8 @@ spurious_continue:
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* if (--(_kernel->nested) != 0) exit */
|
/* if (--(_kernel->nested) != 0) exit */
|
||||||
dec_nest_counter x0, x1
|
dec_nest_counter
|
||||||
|
|
||||||
bne exit
|
bne exit
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@@ -13,22 +13,22 @@
|
||||||
* Increment nested counter
|
* Increment nested counter
|
||||||
*/
|
*/
|
||||||
|
|
||||||
.macro inc_nest_counter xreg0, xreg1
|
.macro inc_nest_counter
|
||||||
ldr \xreg0, =_kernel
|
bl z_arm64_curr_cpu
|
||||||
ldr \xreg1, [\xreg0, #_kernel_offset_to_nested]
|
ldr x1, [x0, #___cpu_t_nested_OFFSET]
|
||||||
add \xreg1, \xreg1, #1
|
add x1, x1, #1
|
||||||
str \xreg1, [\xreg0, #_kernel_offset_to_nested]
|
str x1, [x0, #___cpu_t_nested_OFFSET]
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Decrement nested counter and update condition flags
|
* Decrement nested counter and update condition flags
|
||||||
*/
|
*/
|
||||||
|
|
||||||
.macro dec_nest_counter xreg0, xreg1
|
.macro dec_nest_counter
|
||||||
ldr \xreg0, =_kernel
|
bl z_arm64_curr_cpu
|
||||||
ldr \xreg1, [\xreg0, #_kernel_offset_to_nested]
|
ldr x1, [x0, #___cpu_t_nested_OFFSET]
|
||||||
subs \xreg1, \xreg1, #1
|
subs x1, x1, #1
|
||||||
str \xreg1, [\xreg0, #_kernel_offset_to_nested]
|
str x1, [x0, #___cpu_t_nested_OFFSET]
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
#endif /* _ASMLANGUAGE */
|
#endif /* _ASMLANGUAGE */
|
||||||
|
|
|
@@ -119,12 +119,12 @@ SECTION_FUNC(TEXT, z_arm64_sync_exc)
|
||||||
b inv
|
b inv
|
||||||
offload:
|
offload:
|
||||||
/* ++(_kernel->nested) to be checked by arch_is_in_isr() */
|
/* ++(_kernel->nested) to be checked by arch_is_in_isr() */
|
||||||
inc_nest_counter x0, x1
|
inc_nest_counter
|
||||||
|
|
||||||
bl z_irq_do_offload
|
bl z_irq_do_offload
|
||||||
|
|
||||||
/* --(_kernel->nested) */
|
/* --(_kernel->nested) */
|
||||||
dec_nest_counter x0, x1
|
dec_nest_counter
|
||||||
b z_arm64_exit_exc
|
b z_arm64_exit_exc
|
||||||
#endif
|
#endif
|
||||||
b inv
|
b inv
|
||||||
|
|
|
@@ -34,7 +34,7 @@ extern void z_arm64_offload(void);
|
||||||
|
|
||||||
static ALWAYS_INLINE bool arch_is_in_isr(void)
|
static ALWAYS_INLINE bool arch_is_in_isr(void)
|
||||||
{
|
{
|
||||||
return _kernel.cpus[0].nested != 0U;
|
return arch_curr_cpu()->nested != 0U;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue