kernel: make k_current_get() work without syscall

We cache the current thread ID in a thread-local variable
at thread entry, and have k_current_get() return that,
eliminating system call overhead for this API.

DL: changed _current to use z_current_get() as it is
    being used during boot where TLS is not available.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Andrew Boie 2020-11-06 13:11:12 -08:00 committed by Christopher Friedt
commit f07df42d49
4 changed files with 35 additions and 6 deletions
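
For context (not part of the commit), a minimal caller-side sketch: with
CONFIG_THREAD_LOCAL_STORAGE=y, the k_current_get() calls below compile to a
read of the thread-local z_tls_current cache; without it, each call falls
back to the z_current_get() system call. The worker() thread function and
its output are hypothetical.

#include <zephyr.h>
#include <sys/printk.h>

/* Hypothetical worker thread: k_current_get() in the loop no longer pays
 * syscall overhead when CONFIG_THREAD_LOCAL_STORAGE is enabled, since it
 * reads the z_tls_current value cached at thread entry.
 */
void worker(void *p1, void *p2, void *p3)
{
	k_tid_t me = k_current_get();

	while (1) {
		printk("thread %p alive\n", me);
		k_msleep(1000);
	}
}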

@@ -487,13 +487,35 @@ __syscall void k_yield(void);
  */
 __syscall void k_wakeup(k_tid_t thread);
 
+/**
+ * @brief Get thread ID of the current thread.
+ *
+ * This unconditionally queries the kernel via a system call.
+ *
+ * @return ID of current thread.
+ */
+__syscall k_tid_t z_current_get(void);
+
+#ifdef CONFIG_THREAD_LOCAL_STORAGE
+/* Thread-local cache of current thread ID, set in z_thread_entry() */
+extern __thread k_tid_t z_tls_current;
+#endif
+
 /**
  * @brief Get thread ID of the current thread.
  *
  * @return ID of current thread.
  *
  */
-__syscall k_tid_t k_current_get(void) __attribute_const__;
+__attribute_const__
+static inline k_tid_t k_current_get(void)
+{
+#ifdef CONFIG_THREAD_LOCAL_STORAGE
+	return z_tls_current;
+#else
+	return z_current_get();
+#endif
+}
 
 /**
  * @brief Abort a thread.

@@ -187,7 +187,7 @@ bool z_smp_cpu_mobile(void);
 #define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
 			arch_curr_cpu(); })
-#define _current k_current_get()
+#define _current z_current_get()
 #else
 #define _current_cpu (&_kernel.cpus[0])
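
This keeps the kernel-internal _current alias on the syscall-implementation
path (z_current_get()) rather than the TLS-backed inline k_current_get(),
because _current is evaluated during early boot before z_tls_current has
been set up (see the DL note in the commit message).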

@@ -1365,7 +1365,7 @@ static inline void z_vrfy_k_wakeup(k_tid_t thread)
 #include <syscalls/k_wakeup_mrsh.c>
 #endif
 
-k_tid_t z_impl_k_current_get(void)
+k_tid_t z_impl_z_current_get(void)
 {
 #ifdef CONFIG_SMP
 	/* In SMP, _current is a field read from _current_cpu, which
@@ -1384,11 +1384,11 @@ k_tid_t z_impl_k_current_get(void)
 }
 
 #ifdef CONFIG_USERSPACE
-static inline k_tid_t z_vrfy_k_current_get(void)
+static inline k_tid_t z_vrfy_z_current_get(void)
 {
-	return z_impl_k_current_get();
+	return z_impl_z_current_get();
 }
-#include <syscalls/k_current_get_mrsh.c>
+#include <syscalls/z_current_get_mrsh.c>
 #endif
 
 int z_impl_k_is_preempt_thread(void)

@@ -13,6 +13,10 @@
 
 #include <kernel.h>
 
+#ifdef CONFIG_THREAD_LOCAL_STORAGE
+__thread k_tid_t z_tls_current;
+#endif
+
 /*
  * Common thread entry point function (used by all threads)
  *
@@ -26,6 +30,9 @@
 FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
 				  void *p1, void *p2, void *p3)
 {
+#ifdef CONFIG_THREAD_LOCAL_STORAGE
+	z_tls_current = z_current_get();
+#endif
 	entry(p1, p2, p3);
 
 	k_thread_abort(k_current_get());
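
The cache is filled exactly once per thread: z_thread_entry() performs a
single z_current_get() lookup before invoking the thread's entry function,
so every subsequent k_current_get() in that thread resolves to the
z_tls_current read added to kernel.h above.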