kernel: arch: move arch_swap() declaration
Moves the arch_swap() declaration out of kernel_arch_interface.h and into the various architectures' kernel_arch_func.h. This permits arch_swap() to be inlined on ARM, but extern'd on the other architectures that still implement arch_swap(). Inlining this function on ARM has shown at least a +5% performance boost according to the thread_metric benchmark on the disco_l475_iot1 board.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
parent fd1e9f7445
commit 909ff45f0c
12 changed files with 60 additions and 85 deletions
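In outline, the change swaps one shared prototype for per-architecture declarations: on ARM, kernel_arch_func.h now carries an ALWAYS_INLINE definition of arch_swap(), while the other ports keep their out-of-line implementation and merely gain a local prototype. Below is a minimal, compilable sketch of that inline-vs-extern pattern; sketch.c and the INLINE_SWAP macro are hypothetical stand-ins for illustration, not part of the commit (the real hunks follow):

/* sketch.c -- toy model of the header change; not the Zephyr sources.
 * Build the "ARM-style" variant with:   cc -DINLINE_SWAP sketch.c
 * Build the "extern-style" variant with: cc sketch.c
 */
#include <stdio.h>

#ifdef INLINE_SWAP
/* ARM-style: the definition lives in the header, so the compiler can
 * inline arch_swap() at every call site (the source of the ~5% win).
 */
static inline int arch_swap(unsigned int key)
{
	return (int)key; /* the real code pends a PendSV/SVC here */
}
#else
/* Other-arch style: the header only declares arch_swap(); the body
 * stays in a separate swap.c translation unit.
 */
int arch_swap(unsigned int key);

int arch_swap(unsigned int key) /* normally lives in swap.c */
{
	return (int)key;
}
#endif

int main(void)
{
	printf("arch_swap returned %d\n", arch_swap(42u));
	return 0;
}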
@@ -24,4 +24,4 @@ zephyr_library_sources_ifdef(CONFIG_SEMIHOST semihost.c)
 zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE __aeabi_read_tp.S)
 zephyr_library_sources_ifdef(CONFIG_ARCH_CACHE cache.c)
 zephyr_library_sources_ifdef(CONFIG_USE_SWITCH switch.S)
-zephyr_library_sources_ifndef(CONFIG_USE_SWITCH swap.c swap_helper.S exc_exit.S)
+zephyr_library_sources_ifndef(CONFIG_USE_SWITCH swap_helper.S exc_exit.S)
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2018 Linaro, Limited
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-#include <zephyr/kernel.h>
-#include <kernel_internal.h>
-
-#include <errno.h>
-
-/* The 'key' actually represents the BASEPRI register
- * prior to disabling interrupts via the BASEPRI mechanism.
- *
- * arch_swap() itself does not do much.
- */
-int arch_swap(unsigned int key)
-{
-	/* store off key and return value */
-	arch_current_thread()->arch.basepri = key;
-	arch_current_thread()->arch.swap_return_value = -EAGAIN;
-
-	z_arm_cortex_r_svc();
-	irq_unlock(key);
-
-	/* Context switch is performed here. Returning implies the
-	 * thread has been context-switched-in again.
-	 */
-	return arch_current_thread()->arch.swap_return_value;
-}
@@ -11,7 +11,6 @@ zephyr_library_sources(
   scb.c
   thread_abort.c
   vector_table.S
-  swap.c
   swap_helper.S
   irq_manage.c
   prep_c.c
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2018 Linaro, Limited
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-#include <zephyr/kernel.h>
-#include <kernel_internal.h>
-
-#include <errno.h>
-
-/* The 'key' actually represents the BASEPRI register
- * prior to disabling interrupts via the BASEPRI mechanism.
- *
- * arch_swap() itself does not do much.
- *
- * It simply stores the intlock key (the BASEPRI value) parameter into
- * current->basepri, and then triggers a PendSV exception, which does
- * the heavy lifting of context switching.
-
- * This is the only place we have to save BASEPRI since the other paths to
- * z_arm_pendsv all come from handling an interrupt, which means we know the
- * interrupts were not locked: in that case the BASEPRI value is 0.
- *
- * Given that arch_swap() is called to effect a cooperative context switch,
- * only the caller-saved integer registers need to be saved in the thread of the
- * outgoing thread. This is all performed by the hardware, which stores it in
- * its exception stack frame, created when handling the z_arm_pendsv exception.
- *
- * On ARMv6-M, the intlock key is represented by the PRIMASK register,
- * as BASEPRI is not available.
- */
-int arch_swap(unsigned int key)
-{
-	/* store off key and return value */
-	arch_current_thread()->arch.basepri = key;
-	arch_current_thread()->arch.swap_return_value = -EAGAIN;
-
-	/* set pending bit to make sure we will take a PendSV exception */
-	SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
-
-	/* clear mask or enable all irqs to take a pendsv */
-	irq_unlock(0);
-
-	/* Context switch is performed here. Returning implies the
-	 * thread has been context-switched-in again.
-	 */
-	return arch_current_thread()->arch.swap_return_value;
-}
@@ -37,6 +37,21 @@ static ALWAYS_INLINE void arch_kernel_init(void)
 
 #ifndef CONFIG_USE_SWITCH
 
+static ALWAYS_INLINE int arch_swap(unsigned int key)
+{
+	/* store off key and return value */
+	arch_current_thread()->arch.basepri = key;
+	arch_current_thread()->arch.swap_return_value = -EAGAIN;
+
+	z_arm_cortex_r_svc();
+	irq_unlock(key);
+
+	/* Context switch is performed here. Returning implies the
+	 * thread has been context-switched-in again.
+	 */
+	return arch_current_thread()->arch.swap_return_value;
+}
+
 static ALWAYS_INLINE void
 arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
@@ -84,6 +84,25 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
 
 extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
 
+static ALWAYS_INLINE int arch_swap(unsigned int key)
+{
+	/* store off key and return value */
+	arch_current_thread()->arch.basepri = key;
+	arch_current_thread()->arch.swap_return_value = -EAGAIN;
+
+	/* set pending bit to make sure we will take a PendSV exception */
+	SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
+
+	/* clear mask or enable all irqs to take a pendsv */
+	irq_unlock(0);
+
+	/* Context switch is performed here. Returning implies the
+	 * thread has been context-switched-in again.
+	 */
+	return arch_current_thread()->arch.swap_return_value;
+}
+
+
 #endif /* _ASMLANGUAGE */
 
 #ifdef __cplusplus
@@ -47,6 +47,8 @@ static inline bool arch_is_in_isr(void)
 	return _current_cpu->nested != 0U;
 }
 
+int arch_swap(unsigned int key);
+
 #ifdef CONFIG_IRQ_OFFLOAD
 void z_irq_do_offload(void);
 #endif
@@ -51,6 +51,8 @@ static inline bool arch_is_in_isr(void)
 	return _kernel.cpus[0].nested != 0U;
 }
 
+int arch_swap(unsigned int key);
+
 #ifdef CONFIG_IRQ_OFFLOAD
 void z_irq_do_offload(void);
 #endif
@@ -42,6 +42,8 @@ static inline bool arch_is_in_isr(void)
 	return _kernel.cpus[0].nested != 0U;
 }
 
+int arch_swap(unsigned int key);
+
 #endif /* _ASMLANGUAGE */
 
 #endif /* ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_FUNC_H_ */
@@ -37,6 +37,8 @@ arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 
 extern void arch_cpu_atomic_idle(unsigned int key);
 
+int arch_swap(unsigned int key);
+
 /* ASM code to fiddle with registers to enable the MMU with PAE paging */
 void z_x86_enable_paging(void);
 
@@ -129,7 +129,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
  * location, which must be updated.
  */
 static inline void arch_switch(void *switch_to, void **switched_from);
-#else
+#endif /* CONFIG_USE_SWITCH */
+
+#if !defined(CONFIG_USE_SWITCH) || defined(__DOXYGEN__)
+#if defined(__DOXYGEN__)
 /**
  * Cooperatively context switch
  *
@@ -143,6 +146,7 @@ static inline void arch_switch(void *switch_to, void **switched_from);
  * blocking operation.
  */
 int arch_swap(unsigned int key);
+#endif /* __DOXYGEN__ */
 
 /**
  * Set the return value for the specified thread.
@@ -154,7 +158,7 @@ int arch_swap(unsigned int key);
  */
 static ALWAYS_INLINE void
 arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
-#endif /* CONFIG_USE_SWITCH */
+#endif /* !CONFIG_USE_SWITCH || __DOXYGEN__ */
 
 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
 /**
@@ -429,6 +429,13 @@ static void alt_thread_entry(void *p1, void *p2, void *p3)
 		      "Alternative thread: switch flag not false on thread exit\n");
 }
 
+#if !defined(CONFIG_NO_OPTIMIZATIONS)
+static int __noinline arch_swap_wrapper(void)
+{
+	return arch_swap(BASEPRI_MODIFIED_1);
+}
+#endif
+
 ZTEST(arm_thread_swap, test_arm_thread_swap)
 {
 	int test_flag;
@@ -609,9 +616,11 @@ ZTEST(arm_thread_swap, test_arm_thread_swap)
 
 	/* Fake a different irq_unlock key when performing swap.
 	 * This will be verified by the alternative test thread.
+	 *
+	 * Force an indirect call to arch_swap() to prevent the compiler from
+	 * changing the saved callee registers as arch_swap() is inlined.
 	 */
-	register int swap_return_val __asm__("r0") =
-		arch_swap(BASEPRI_MODIFIED_1);
+	register int swap_return_val __asm__("r0") = arch_swap_wrapper();
 
 #endif /* CONFIG_NO_OPTIMIZATIONS */
 
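A note on the test change above: once arch_swap() can be inlined, the call in test_arm_thread_swap no longer forms a real call boundary, so the compiler may keep live values in callee-saved registers across it, invalidating the test's register checks; the __noinline wrapper reinstates a genuine call. A toy illustration of that pattern, assuming a GCC/Clang toolchain (noinline_demo.c and the fake_swap* names are hypothetical, not the test code):

/* noinline_demo.c -- toy model of forcing an out-of-line call.
 * __attribute__((noinline)) guarantees a genuine call instruction, so
 * the ABI's caller/callee-saved register contract applies at the call
 * site even when the inner function would otherwise be inlined away.
 */
#include <stdio.h>

static inline int fake_swap(unsigned int key)
{
	return (int)key; /* stands in for the now-inlinable arch_swap() */
}

__attribute__((noinline)) static int fake_swap_wrapper(void)
{
	return fake_swap(1u); /* forced out of line, like arch_swap_wrapper() */
}

int main(void)
{
	printf("%d\n", fake_swap_wrapper());
	return 0;
}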