spinlock: Make SPIN_VALIDATE a Kconfig option

SPIN_VALIDATE is, as before, enabled by default when there are fewer than 4 CPUs and either no flash or a flash size greater than 32 kB. Small targets that need asserts enabled can now choose whether to enable spinlock validation, and thereby decide whether the added overhead is acceptable.

Signed-off-by: Danny Oerndrup <daor@demant.com>
parent e181e1b773
commit c9d78401cc

4 changed files with 25 additions and 21 deletions
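To illustrate the resulting knob: a flash-constrained application that still wants asserts can now opt out of the validation explicitly, for example in its prj.conf (a sketch using the option names this commit introduces; the scenario is hypothetical):

# prj.conf sketch for a small-flash target: keep asserts,
# skip the ~3k spinlock-validation overhead.
CONFIG_ASSERT=y
CONFIG_SPIN_VALIDATE=n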
--- a/include/spinlock.h
+++ b/include/spinlock.h
@@ -10,23 +10,17 @@

 /* There's a spinlock validation framework available when asserts are
  * enabled. It adds a relatively hefty overhead (about 3k or so) to
- * kernel code size, don't use on platforms known to be small. (Note
- * we're using the kconfig value here. This isn't defined for every
- * board, but the default of zero works well as an "infinity"
- * fallback. There is a DT_FLASH_SIZE parameter too, but that seems
- * even more poorly supported.
+ * kernel code size, don't use on platforms known to be small.
  */
-#if (CONFIG_FLASH_SIZE == 0) || (CONFIG_FLASH_SIZE > 32)
-#if defined(CONFIG_ASSERT) && (CONFIG_MP_NUM_CPUS < 4)
+#ifdef CONFIG_SPIN_VALIDATE
 #include <sys/__assert.h>
 #include <stdbool.h>
 struct k_spinlock;
 bool z_spin_lock_valid(struct k_spinlock *l);
 bool z_spin_unlock_valid(struct k_spinlock *l);
 void z_spin_lock_set_owner(struct k_spinlock *l);
-#define SPIN_VALIDATE
-#endif
-#endif
+BUILD_ASSERT_MSG(CONFIG_MP_NUM_CPUS < 4, "Too many CPUs for mask");
+#endif /* CONFIG_SPIN_VALIDATE */

 struct k_spinlock_key {
 	int key;
@@ -39,15 +33,16 @@ struct k_spinlock {
 	atomic_t locked;
 #endif

-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	/* Stores the thread that holds the lock with the locking CPU
 	 * ID in the bottom two bits.
 	 */
 	uintptr_t thread_cpu;
 #endif

-#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && !defined(SPIN_VALIDATE)
-	/* If CONFIG_SMP and SPIN_VALIDATE are both not defined
+#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
+	!defined(CONFIG_SPIN_VALIDATE)
+	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
 	 * the k_spinlock struct will have no members. The result
 	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
 	 *
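For context on the thread_cpu field gated above: thread objects are word-aligned, so the two low bits of the owner pointer are free to carry the locking CPU's ID, which is what the BUILD_ASSERT_MSG in the first hunk enforces with its CONFIG_MP_NUM_CPUS < 4 limit. A standalone sketch of the encoding and the recursion check (hypothetical helper names, not the kernel's verbatim code):

#include <stdbool.h>
#include <stdint.h>

struct k_thread;

/* Pack the owning thread and the locking CPU's id into one word.
 * Assumes struct k_thread objects are at least 4-byte aligned, so
 * the two low bits of the pointer are zero and can hold a CPU id
 * below 4 -- hence the build assert on CONFIG_MP_NUM_CPUS.
 */
static uintptr_t owner_pack(struct k_thread *thread, uintptr_t cpu_id)
{
	return (uintptr_t)thread | cpu_id;
}

/* Taking the lock is legitimate only if it is free, or held by a
 * different CPU (which will spin until release); already held by the
 * current CPU means a recursive acquisition, i.e. a guaranteed
 * deadlock.
 */
static bool lock_attempt_valid(uintptr_t thread_cpu, uintptr_t cpu_id)
{
	return (thread_cpu == 0) || ((thread_cpu & 3) != cpu_id);
}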
@@ -75,7 +70,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	 */
 	k.key = arch_irq_lock();

-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
 #endif

@@ -84,7 +79,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	}
 #endif

-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	z_spin_lock_set_owner(l);
 #endif
 	return k;
@@ -94,7 +89,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 			    k_spinlock_key_t key)
 {
 	ARG_UNUSED(l);
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif

@@ -117,7 +112,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
 {
 	ARG_UNUSED(l);
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif
 #ifdef CONFIG_SMP
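As a usage illustration of the bug class these assertions catch (a hypothetical snippet, not part of this change):

#include <spinlock.h>

static struct k_spinlock my_lock;

void recursive_lock_bug(void)
{
	k_spinlock_key_t key = k_spin_lock(&my_lock);

	/* Re-acquiring on the same CPU: normally a silent deadlock;
	 * with CONFIG_SPIN_VALIDATE=y it fails the "Recursive
	 * spinlock" assertion immediately.
	 */
	(void)k_spin_lock(&my_lock);

	k_spin_unlock(&my_lock, key);
}

Unlocking a lock taken by another thread or CPU is the mirror-image failure, caught by the "Not my spinlock!" assertion in k_spin_unlock() and k_spin_release().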
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -650,7 +650,7 @@ void *z_get_next_switch_handle(void *interrupted)
 #endif
 			_current_cpu->swap_ok = 0;
 			set_current(th);
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 			/* Changed _current! Update the spinlock
 			 * bookeeping so the validation doesn't get
 			 * confused when the "wrong" thread tries to
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -853,7 +853,7 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
 /* These spinlock assertion predicates are defined here because having
  * them in spinlock.h is a giant header ordering headache.
  */
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 bool z_spin_lock_valid(struct k_spinlock *l)
 {
 	uintptr_t thread_cpu = l->thread_cpu;
@@ -879,8 +879,7 @@ void z_spin_lock_set_owner(struct k_spinlock *l)
 {
 	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
 }
-
-#endif
+#endif /* CONFIG_SPIN_VALIDATE */

 int z_impl_k_float_disable(struct k_thread *thread)
 {
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -163,6 +163,16 @@ config ASSERT_LEVEL
 	  Level 1: on + warning in every file that includes __assert.h
 	  Level 2: on + no warning

+config SPIN_VALIDATE
+	bool "Enable spinlock validation"
+	depends on ASSERT
+	depends on MP_NUM_CPUS < 4
+	default y if !FLASH || FLASH_SIZE > 32
+	help
+	  There's a spinlock validation framework available when asserts are
+	  enabled. It adds a relatively hefty overhead (about 3k or so) to
+	  kernel code size, don't use on platforms known to be small.
+
 config FORCE_NO_ASSERT
 	bool "Force-disable no assertions"
 	help