aarch64: Rework {inc,dec}_nest_counter

There are several issues with the current implementation of the
{inc,dec}_nest_counter macros.

The first problem is that it's internally using a call to a misplaced
function called z_arm64_curr_cpu() (for some unknown reason hosted in
irq_manage.c) that could potentially clobber the caller-saved registers
without any notice to the user of the macro.

The second problem is that, being macros, the registers they clobber
should be specified at the calling site; this is not possible with the
current implementation.

To fix these issues and make the call quicker, this patch rewrites the
code in assembly leveraging the availability of the _curr_cpu array. It
now clobbers only two registers passed from the calling site.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
This commit is contained in:
Carlo Caione 2021-03-08 15:47:02 +01:00 committed by Anas Nashif
commit bdbe33b795
4 changed files with 47 additions and 21 deletions

View file

@ -70,8 +70,3 @@ void z_irq_spurious(const void *unused)
z_arm64_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
}
_cpu_t *z_arm64_curr_cpu(void)
{
return arch_curr_cpu();
}

View file

@ -33,7 +33,7 @@ GTEXT(_isr_wrapper)
SECTION_FUNC(TEXT, _isr_wrapper)
/* ++(_kernel->nested) to be checked by arch_is_in_isr() */
inc_nest_counter
inc_nest_counter x0, x1
#ifdef CONFIG_TRACING
bl sys_trace_isr_enter
@ -80,7 +80,7 @@ spurious_continue:
#endif
/* if (--(_kernel->nested) != 0) exit */
dec_nest_counter
dec_nest_counter x0, x1
bne exit

View file

@ -9,31 +9,64 @@
#ifdef _ASMLANGUAGE
GDATA(_curr_cpu)
GDATA(_kernel)
/*
* Get CPU id
*/
.macro z_arm64_get_cpu_id xreg0
/* Read the Multiprocessor Affinity Register to identify this CPU */
mrs \xreg0, mpidr_el1
/* FIXME: aff3 not taken into consideration */
/* \xreg0 = Aff2:Aff1:Aff0, i.e. the low 24 bits of MPIDR_EL1 */
ubfx \xreg0, \xreg0, #0, #24
.endm
/*
* Get CPU pointer
*/
.macro get_cpu xreg0, xreg1
/* \xreg0 = &_curr_cpu[0], the per-CPU pointer table */
ldr \xreg0, =_curr_cpu
/* \xreg1 = current CPU id (clobbered as scratch) */
z_arm64_get_cpu_id \xreg1
/* Index the table: entries are 8-byte pointers, hence lsl #3 */
add \xreg0, \xreg0, \xreg1, lsl #3
/* \xreg0 = _curr_cpu[cpu_id], the current CPU's _cpu_t pointer */
ldr \xreg0, [\xreg0]
.endm
/*
* Increment nested counter
*/
.macro inc_nest_counter
bl z_arm64_curr_cpu
ldr x1, [x0, #___cpu_t_nested_OFFSET]
add x1, x1, #1
str x1, [x0, #___cpu_t_nested_OFFSET]
/* Increment the interrupt-nesting counter.
 * Clobbers: \xreg0, \xreg1 (caller-supplied scratch registers).
 */
.macro inc_nest_counter xreg0, xreg1
#ifdef CONFIG_SMP
/* SMP: the nested counter lives in this CPU's _cpu_t structure */
get_cpu \xreg0, \xreg1
ldr \xreg1, [\xreg0, #___cpu_t_nested_OFFSET]
add \xreg1, \xreg1, #1
str \xreg1, [\xreg0, #___cpu_t_nested_OFFSET]
#else
/* UP: a single nested counter is reachable directly via _kernel */
ldr \xreg0, =_kernel
ldr \xreg1, [\xreg0, #_kernel_offset_to_nested]
add \xreg1, \xreg1, #1
str \xreg1, [\xreg0, #_kernel_offset_to_nested]
#endif
.endm
/*
* Decrement nested counter and update condition flags
*/
.macro dec_nest_counter
bl z_arm64_curr_cpu
ldr x1, [x0, #___cpu_t_nested_OFFSET]
subs x1, x1, #1
str x1, [x0, #___cpu_t_nested_OFFSET]
/* Decrement the interrupt-nesting counter.
 * Uses SUBS so the condition flags reflect the new value, letting the
 * caller branch on it (e.g. "bne exit" when the counter is non-zero).
 * Clobbers: \xreg0, \xreg1 (caller-supplied scratch registers), flags.
 */
.macro dec_nest_counter xreg0, xreg1
#ifdef CONFIG_SMP
/* SMP: the nested counter lives in this CPU's _cpu_t structure */
get_cpu \xreg0, \xreg1
ldr \xreg1, [\xreg0, #___cpu_t_nested_OFFSET]
subs \xreg1, \xreg1, #1
str \xreg1, [\xreg0, #___cpu_t_nested_OFFSET]
#else
/* UP: a single nested counter is reachable directly via _kernel */
ldr \xreg0, =_kernel
ldr \xreg1, [\xreg0, #_kernel_offset_to_nested]
subs \xreg1, \xreg1, #1
str \xreg1, [\xreg0, #_kernel_offset_to_nested]
#endif
.endm
#endif /* _ASMLANGUAGE */

View file

@ -20,8 +20,6 @@
_ASM_FILE_PROLOGUE
GDATA(_kernel)
/*
* Routine to handle context switches
*
@ -119,12 +117,12 @@ SECTION_FUNC(TEXT, z_arm64_sync_exc)
b inv
offload:
/* ++(_kernel->nested) to be checked by arch_is_in_isr() */
inc_nest_counter
inc_nest_counter x0, x1
bl z_irq_do_offload
/* --(_kernel->nested) */
dec_nest_counter
dec_nest_counter x0, x1
b z_arm64_exit_exc
#endif
b inv