diff --git a/include/kernel.h b/include/kernel.h
index 09c0b52e6b5..80b44467d47 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -452,6 +452,12 @@ struct _thread_base {
 	/* Recursive count of irq_lock() calls */
 	u8_t global_lock_count;
+
+#endif
+
+#ifdef CONFIG_SCHED_CPU_MASK
+	/* "May run on" bits for each CPU */
+	u8_t cpu_mask;
 #endif
 
 	/* data returned by APIs */
@@ -1035,6 +1041,52 @@ __syscall void k_thread_priority_set(k_tid_t thread, int prio);
 __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
 #endif
 
+#ifdef CONFIG_SCHED_CPU_MASK
+/**
+ * @brief Sets all CPU enable masks to zero
+ *
+ * After this returns, the thread will no longer be schedulable on any
+ * CPUs. The thread must not be currently runnable.
+ *
+ * @param thread Thread to operate upon
+ * @return Zero on success, otherwise error code
+ */
+int k_thread_cpu_mask_clear(k_tid_t thread);
+
+/**
+ * @brief Sets all CPU enable masks to one
+ *
+ * After this returns, the thread will be schedulable on any CPU. The
+ * thread must not be currently runnable.
+ *
+ * @param thread Thread to operate upon
+ * @return Zero on success, otherwise error code
+ */
+int k_thread_cpu_mask_enable_all(k_tid_t thread);
+
+/**
+ * @brief Enable thread to run on specified CPU
+ *
+ * The thread must not be currently runnable.
+ *
+ * @param thread Thread to operate upon
+ * @param cpu CPU index
+ * @return Zero on success, otherwise error code
+ */
+int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
+
+/**
+ * @brief Prevent thread from running on specified CPU
+ *
+ * The thread must not be currently runnable.
+ *
+ * @param thread Thread to operate upon
+ * @param cpu CPU index
+ * @return Zero on success, otherwise error code
+ */
+int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
+#endif
+
 /**
  * @brief Suspend a thread.
  *
diff --git a/kernel/Kconfig b/kernel/Kconfig
index b223e47f0d8..add91f7648d 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -125,6 +125,24 @@ config SCHED_DEADLINE
 	  single priority will choose the next expiring deadline and
 	  not simply the least recently added thread.
 
+config SCHED_CPU_MASK
+	bool "Enable CPU mask affinity/pinning API"
+	depends on SCHED_DUMB
+	help
+	  When true, the app will have access to the
+	  k_thread_cpu_mask_*() APIs which control per-CPU affinity
+	  masks in SMP mode, allowing apps to pin threads to specific
+	  CPUs or disallow threads from running on given CPUs. Note
+	  that as currently implemented, this involves an inherent
+	  O(N) scaling in the number of idle-but-runnable threads, and
+	  thus works only with the DUMB scheduler (as SCALABLE and
+	  MULTIQ would see no benefit).
+
+	  Note that this setting does not technically depend on SMP
+	  and is implemented without it for testing purposes, but for
+	  obvious reasons makes sense as an application API only where
+	  there is more than one CPU. With one CPU, it's just a
+	  higher overhead version of k_thread_start/stop().
+
 config MAIN_STACK_SIZE
 	int "Size of stack for initialization and main thread"
diff --git a/kernel/init.c b/kernel/init.c
index 02ad7c50e7a..07f95611f32 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -471,7 +471,12 @@ FUNC_NORETURN void _Cstart(void)
 	kernel_arch_init();
 
 #ifdef CONFIG_MULTITHREADING
-	struct k_thread dummy_thread = { .base.thread_state = _THREAD_DUMMY };
+	struct k_thread dummy_thread = {
+		.base.thread_state = _THREAD_DUMMY,
+# ifdef CONFIG_SCHED_CPU_MASK
+		.base.cpu_mask = -1,
+# endif
+	};
 	_current = &dummy_thread;
 #endif
diff --git a/kernel/sched.c b/kernel/sched.c
index 4b25626d8c3..7a3092a54b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -17,7 +17,11 @@
 #if defined(CONFIG_SCHED_DUMB)
 #define _priq_run_add _priq_dumb_add
 #define _priq_run_remove _priq_dumb_remove
-#define _priq_run_best _priq_dumb_best
+# if defined(CONFIG_SCHED_CPU_MASK)
+# define _priq_run_best _priq_dumb_mask_best
+# else
+# define _priq_run_best _priq_dumb_best
+# endif
 #elif defined(CONFIG_SCHED_SCALABLE)
 #define _priq_run_add _priq_rb_add
 #define _priq_run_remove _priq_rb_remove
@@ -155,6 +159,23 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *th, int preempt_ok)
 	return false;
 }
 
+#ifdef CONFIG_SCHED_CPU_MASK
+static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
+{
+	/* With masks enabled we need to be prepared to walk the list
+	 * looking for one we can run
+	 */
+	struct k_thread *t;
+
+	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
+		if ((t->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
+			return t;
+		}
+	}
+	return NULL;
+}
+#endif
+
 static ALWAYS_INLINE struct k_thread *next_up(void)
 {
 #ifndef CONFIG_SMP
@@ -933,3 +954,47 @@ int _impl_k_is_preempt_thread(void)
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER0_SIMPLE(k_is_preempt_thread);
 #endif
+
+#ifdef CONFIG_SCHED_CPU_MASK
+# ifdef CONFIG_SMP
+/* Right now we use a single byte for this mask */
+BUILD_ASSERT_MSG(CONFIG_MP_NUM_CPUS <= 8, "Too many CPUs for mask word");
+# endif
+
+
+static int cpu_mask_mod(k_tid_t t, u32_t enable_mask, u32_t disable_mask)
+{
+	int ret = 0;
+
+	LOCKED(&sched_lock) {
+		if (_is_thread_prevented_from_running(t)) {
+			t->base.cpu_mask |= enable_mask;
+			t->base.cpu_mask &= ~disable_mask;
+		} else {
+			ret = -EINVAL;
+		}
+	}
+	return ret;
+}
+
+int k_thread_cpu_mask_clear(k_tid_t thread)
+{
+	return cpu_mask_mod(thread, 0, 0xffffffff);
+}
+
+int k_thread_cpu_mask_enable_all(k_tid_t thread)
+{
+	return cpu_mask_mod(thread, 0xffffffff, 0);
+}
+
+int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
+{
+	return cpu_mask_mod(thread, BIT(cpu), 0);
+}
+
+int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
+{
+	return cpu_mask_mod(thread, 0, BIT(cpu));
+}
+
+#endif /* CONFIG_SCHED_CPU_MASK */
diff --git a/kernel/thread.c b/kernel/thread.c
index ab2442809a5..92a975c5118 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -390,6 +390,9 @@ void _setup_new_thread(struct k_thread *new_thread,
 	/* Any given thread has access to itself */
 	k_object_access_grant(new_thread, new_thread);
 #endif
+#ifdef CONFIG_SCHED_CPU_MASK
+	new_thread->base.cpu_mask = -1;
+#endif
 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
 	/* _current may be null if the dummy thread is not used */
 	if (!_current) {
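
For reference, a minimal usage sketch of the API this patch adds, written as hypothetical application code (it is not part of the diff above). It assumes CONFIG_SCHED_CPU_MASK=y (and therefore CONFIG_SCHED_DUMB=y), and it creates the worker thread with a K_FOREVER start delay so the thread is not runnable while its mask is modified, as the new calls require; the names worker_fn, worker_stack and start_pinned_worker are illustrative only.

/* Hypothetical application snippet: pin a worker thread to CPU 1.
 * Not part of this patch.
 */
#include <kernel.h>

#define WORKER_STACK_SIZE 1024
K_THREAD_STACK_DEFINE(worker_stack, WORKER_STACK_SIZE);
static struct k_thread worker_thread;

static void worker_fn(void *p1, void *p2, void *p3)
{
	/* ... work that should only ever run on CPU 1 ... */
}

void start_pinned_worker(void)
{
	/* K_FOREVER keeps the thread non-runnable until k_thread_start(),
	 * which matters here: the mask calls return -EINVAL if the thread
	 * is already runnable.
	 */
	k_tid_t tid = k_thread_create(&worker_thread, worker_stack,
				      K_THREAD_STACK_SIZEOF(worker_stack),
				      worker_fn, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(1), 0, K_FOREVER);

	/* New threads start with all CPU bits set (cpu_mask = -1), so
	 * clear the mask first, then enable only CPU 1.
	 */
	k_thread_cpu_mask_clear(tid);
	k_thread_cpu_mask_enable(tid, 1);

	k_thread_start(tid);
}

At schedule time, _priq_dumb_mask_best() simply walks the DUMB ready queue and skips any thread whose cpu_mask bit for the current CPU is clear, which is why the Kconfig option is restricted to SCHED_DUMB and carries the O(N) caveat in its help text.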