diff --git a/include/irq.h b/include/irq.h
index 29da432af09..359139483ad 100644
--- a/include/irq.h
+++ b/include/irq.h
@@ -190,7 +190,12 @@ extern "C" {
  *
  * @return Lock-out key.
  */
+#ifdef CONFIG_SMP
+unsigned int _smp_global_lock(void);
+#define irq_lock() _smp_global_lock()
+#else
 #define irq_lock() _arch_irq_lock()
+#endif
 
 /**
  * @brief Unlock interrupts.
@@ -206,7 +211,12 @@ extern "C" {
  *
  * @return N/A
  */
+#ifdef CONFIG_SMP
+void _smp_global_unlock(unsigned int key);
+#define irq_unlock(key) _smp_global_unlock(key)
+#else
 #define irq_unlock(key) _arch_irq_unlock(key)
+#endif
 
 /**
  * @brief Enable an IRQ.
diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 07ae850605c..7aa9d15084e 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -22,6 +22,7 @@ add_library(kernel
   thread_abort.c
   version.c
   work_q.c
+  smp.c
 )
 
 target_include_directories(kernel PRIVATE ${PROJECT_SOURCE_DIR}/include/posix)
diff --git a/kernel/smp.c b/kernel/smp.c
new file mode 100644
index 00000000000..1a1b6686885
--- /dev/null
+++ b/kernel/smp.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 Intel corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <kernel.h>
+#include <kernel_structs.h>
+#include <spinlock.h>
+
+static struct k_spinlock global_spinlock;
+
+static volatile int recursive_count;
+
+/* FIXME: this value of key works on all known architectures as an
+ * "invalid state" that will never be legitimately returned from
+ * _arch_irq_lock(). But we should force the architecture code to
+ * define something for us.
+ */
+#define KEY_RECURSIVE 0xffffffff
+
+unsigned int _smp_global_lock(void)
+{
+	/* OK to test this outside the lock. If it's non-zero, then
+	 * we hold the lock by definition
+	 */
+	if (recursive_count) {
+		recursive_count++;
+
+		return KEY_RECURSIVE;
+	}
+
+	unsigned int k = k_spin_lock(&global_spinlock).key;
+
+	recursive_count = 1;
+	return k;
+}
+
+void _smp_global_unlock(unsigned int key)
+{
+	if (key == KEY_RECURSIVE) {
+		recursive_count--;
+		return;
+	}
+
+	k_spinlock_key_t sk = { .key = key };
+
+	recursive_count = 0;
+	k_spin_unlock(&global_spinlock, sk);
+}
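
For reviewers, a minimal usage sketch (not part of the patch) of the nesting behaviour that the recursive_count / KEY_RECURSIVE logic in kernel/smp.c provides when CONFIG_SMP is enabled. The function name nested_critical_section() is hypothetical; only irq_lock() and irq_unlock() as redefined in include/irq.h above are assumed, in a Zephyr application build of this era.

/* Hypothetical example: nested irq_lock()/irq_unlock() with CONFIG_SMP=y. */
#include <kernel.h>

void nested_critical_section(void)
{
	unsigned int outer = irq_lock();  /* takes global_spinlock, returns the arch key */
	unsigned int inner = irq_lock();  /* lock already held: bumps recursive_count and
	                                   * returns KEY_RECURSIVE (0xffffffff)
	                                   */

	/* ... work that must run with interrupts masked and the global lock held ... */

	irq_unlock(inner);                /* KEY_RECURSIVE: only decrements recursive_count */
	irq_unlock(outer);                /* real key: clears the count and releases
	                                   * global_spinlock via k_spin_unlock()
	                                   */
}

The point of the sketch: only the outermost pair actually touches the spinlock, so nested critical sections on the same CPU do not deadlock, while the single global lock preserves the old uniprocessor irq_lock() semantics across CPUs.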