From c9d78401cc35976fafaad758f2ab6338e04bbbcf Mon Sep 17 00:00:00 2001
From: Danny Oerndrup
Date: Fri, 13 Dec 2019 11:24:56 +0100
Subject: [PATCH] spinlock: Make SPIN_VALIDATE a Kconfig option.

SPIN_VALIDATE is, as before, enabled by default when there are fewer
than 4 CPUs and either no flash or a flash size greater than 32kB.

Small targets that need asserts enabled can now choose whether to
enable spinlock validation, and thereby decide whether the added
overhead is acceptable.

Signed-off-by: Danny Oerndrup
---
 include/spinlock.h   | 29 ++++++++++++-----------------
 kernel/sched.c       |  2 +-
 kernel/thread.c      |  5 ++---
 subsys/debug/Kconfig | 10 ++++++++++
 4 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/include/spinlock.h b/include/spinlock.h
index cb69f0e7255..2f5ef5814ea 100644
--- a/include/spinlock.h
+++ b/include/spinlock.h
@@ -10,23 +10,17 @@
 
 /* There's a spinlock validation framework available when asserts are
  * enabled. It adds a relatively hefty overhead (about 3k or so) to
- * kernel code size, don't use on platforms known to be small. (Note
- * we're using the kconfig value here. This isn't defined for every
- * board, but the default of zero works well as an "infinity"
- * fallback. There is a DT_FLASH_SIZE parameter too, but that seems
- * even more poorly supported.
+ * kernel code size, don't use on platforms known to be small.
  */
-#if (CONFIG_FLASH_SIZE == 0) || (CONFIG_FLASH_SIZE > 32)
-#if defined(CONFIG_ASSERT) && (CONFIG_MP_NUM_CPUS < 4)
+#ifdef CONFIG_SPIN_VALIDATE
 #include <sys/__assert.h>
 #include <stdbool.h>
 struct k_spinlock;
 bool z_spin_lock_valid(struct k_spinlock *l);
 bool z_spin_unlock_valid(struct k_spinlock *l);
 void z_spin_lock_set_owner(struct k_spinlock *l);
-#define SPIN_VALIDATE
-#endif
-#endif
+BUILD_ASSERT_MSG(CONFIG_MP_NUM_CPUS < 4, "Too many CPUs for mask");
+#endif /* CONFIG_SPIN_VALIDATE */
 
 struct k_spinlock_key {
 	int key;
@@ -39,15 +33,16 @@ struct k_spinlock {
 	atomic_t locked;
 #endif
 
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	/* Stores the thread that holds the lock with the locking CPU
 	 * ID in the bottom two bits.
 	 */
 	uintptr_t thread_cpu;
 #endif
 
-#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && !defined(SPIN_VALIDATE)
-	/* If CONFIG_SMP and SPIN_VALIDATE are both not defined
+#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
+	!defined(CONFIG_SPIN_VALIDATE)
+	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
 	 * the k_spinlock struct will have no members. The result
 	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
 	 *
@@ -75,7 +70,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	 */
 	k.key = arch_irq_lock();
 
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
 #endif
 
@@ -84,7 +79,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	}
 #endif
 
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	z_spin_lock_set_owner(l);
 #endif
 	return k;
@@ -94,7 +89,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 				       k_spinlock_key_t key)
 {
 	ARG_UNUSED(l);
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif
 
@@ -117,7 +112,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
 {
 	ARG_UNUSED(l);
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif
 #ifdef CONFIG_SMP
diff --git a/kernel/sched.c b/kernel/sched.c
index 1e9690941e8..fa0433b8d92 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -650,7 +650,7 @@ void *z_get_next_switch_handle(void *interrupted)
 #endif
 	_current_cpu->swap_ok = 0;
 	set_current(th);
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	/* Changed _current! Update the spinlock
 	 * bookeeping so the validation doesn't get
 	 * confused when the "wrong" thread tries to
diff --git a/kernel/thread.c b/kernel/thread.c
index ef50f137bcb..17f03dd82c3 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -853,7 +853,7 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
 /* These spinlock assertion predicates are defined here because having
  * them in spinlock.h is a giant header ordering headache.
  */
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 bool z_spin_lock_valid(struct k_spinlock *l)
 {
 	uintptr_t thread_cpu = l->thread_cpu;
@@ -879,8 +879,7 @@ void z_spin_lock_set_owner(struct k_spinlock *l)
 {
 	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
 }
-
-#endif
+#endif /* CONFIG_SPIN_VALIDATE */
 
 int z_impl_k_float_disable(struct k_thread *thread)
 {
diff --git a/subsys/debug/Kconfig b/subsys/debug/Kconfig
index 43865e98561..86b6251c5b0 100644
--- a/subsys/debug/Kconfig
+++ b/subsys/debug/Kconfig
@@ -163,6 +163,16 @@ config ASSERT_LEVEL
 	  Level 1: on + warning in every file that includes __assert.h
 	  Level 2: on + no warning
 
+config SPIN_VALIDATE
+	bool "Enable spinlock validation"
+	depends on ASSERT
+	depends on MP_NUM_CPUS < 4
+	default y if !FLASH || FLASH_SIZE > 32
+	help
+	  There's a spinlock validation framework available when asserts are
+	  enabled. It adds a relatively hefty overhead (about 3k or so) to
+	  kernel code size, don't use on platforms known to be small.
+
 config FORCE_NO_ASSERT
 	bool "Force-disable no assertions"
 	help
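Usage note: with this change, a small-flash target that still wants
asserts can opt out of the validation overhead from its application
configuration. A minimal sketch, assuming a standard Zephyr application
with a prj.conf (the fragment below is illustrative, not part of this
patch):

    # Keep asserts enabled, but skip the ~3k spinlock-validation overhead
    CONFIG_ASSERT=y
    CONFIG_SPIN_VALIDATE=n

Leaving CONFIG_SPIN_VALIDATE unset keeps the previous behavior: it
defaults to y whenever asserts are on, there are fewer than 4 CPUs, and
the target either has no flash or more than 32kB of it.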
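Background on the BUILD_ASSERT_MSG(CONFIG_MP_NUM_CPUS < 4, ...) added to
spinlock.h: the validation framework packs the locking CPU's ID into the
bottom two bits of the owner thread pointer stored in thread_cpu, so
only CPU IDs 0..3 fit. A minimal standalone sketch of that encoding (the
helper names pack_owner/owner_cpu/owner_thread are illustrative, not
kernel API):

    #include <stdint.h>

    /* Pack a word-aligned thread pointer and a CPU ID (0..3) into one
     * word, mirroring z_spin_lock_set_owner() in the patch:
     *     l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
     * Thread structs are at least 4-byte aligned, so the low two bits
     * of the pointer are free to hold the CPU ID.
     */
    static uintptr_t pack_owner(uintptr_t thread, unsigned int cpu_id)
    {
            return thread | (uintptr_t)(cpu_id & 0x3);
    }

    /* Recover the CPU ID from the bottom two bits. */
    static unsigned int owner_cpu(uintptr_t thread_cpu)
    {
            return (unsigned int)(thread_cpu & 0x3);
    }

    /* Recover the owning thread pointer by masking the CPU bits off. */
    static uintptr_t owner_thread(uintptr_t thread_cpu)
    {
            return thread_cpu & ~(uintptr_t)0x3;
    }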