From d7ad176be656892d505be1e4da2569aaecea9fcd Mon Sep 17 00:00:00 2001
From: Benjamin Walsh
Date: Thu, 10 Nov 2016 14:46:58 -0500
Subject: [PATCH] kernel: export k_sched_lock and k_sched_unlock.

Oversight. These functions are used extensively in the kernel guts, but
are also supposed to be an API.

k_sched_lock used to be implemented as a static inline. However, until
the header files are cleaned up and everything, including applications,
gets access to the kernel internal data structures, it must be
implemented as a function. To reduce the cost to the kernel internals,
the new internal _sched_lock() contains the same implementation, but is
inlined.

Change-Id: If2f61d7714f87d81ddbeed69fedd111b8ce01376
Signed-off-by: Benjamin Walsh
---
 include/kernel.h                | 32 ++++++++++++++++++++++++++++++++
 kernel/unified/include/ksched.h | 12 ++++++------
 kernel/unified/legacy_timer.c   |  6 +++---
 kernel/unified/mem_pool.c       |  6 +++---
 kernel/unified/mutex.c          |  4 ++--
 kernel/unified/pipes.c          |  4 ++--
 kernel/unified/sched.c          | 10 ++++++++++
 kernel/unified/thread.c         |  4 ++--
 8 files changed, 60 insertions(+), 18 deletions(-)

diff --git a/include/kernel.h b/include/kernel.h
index 3b940b94d6a..71461d0ded4 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -420,6 +420,38 @@ extern void k_sched_time_slice_set(int32_t slice, int prio);
  */
 extern int k_is_in_isr(void);
 
+/*
+ * @brief Lock the scheduler
+ *
+ * Prevent another thread from preempting the current thread.
+ *
+ * @note If the thread does an operation that causes it to pend, it will still
+ * be context switched out.
+ *
+ * @note Similar to irq_lock, the scheduler lock state is tracked per-thread.
+ *
+ * This should be chosen over irq_lock when possible, essentially when the
+ * data it protects is not accessible from ISRs. However, the associated
+ * k_sched_unlock() is costlier than irq_unlock, so if the amount of
+ * processing is very small, irq_lock might be a better choice.
+ *
+ * Can be called recursively.
+ *
+ * @return N/A
+ */
+extern void k_sched_lock(void);
+
+/*
+ * @brief Unlock the scheduler
+ *
+ * Re-enable scheduling previously disabled by k_sched_lock(). Must be called
+ * as many times as k_sched_lock() was called. Threads are rescheduled
+ * upon exit.
+ *
+ * @return N/A
+ */
+extern void k_sched_unlock(void);
+
 /**
  * @brief Set current thread's custom data.
  *
diff --git a/kernel/unified/include/ksched.h b/kernel/unified/include/ksched.h
index 6fadbbee233..7162bcb2e0e 100644
--- a/kernel/unified/include/ksched.h
+++ b/kernel/unified/include/ksched.h
@@ -193,14 +193,14 @@ static inline int _must_switch_threads(void)
 }
 
 /*
- * Application API.
+ * Internal equivalent to k_sched_lock so that it does not incur a function
+ * call penalty in the kernel guts.
  *
- * lock the scheduler: prevents another thread from preempting the current one
- * except if the current thread does an operation that causes it to pend
- *
- * Can be called recursively.
+ * Must be kept in sync until the header files are cleaned up and the
+ * applications have access to the kernel internal data structures (through
+ * APIs, of course).
  */
-static inline void k_sched_lock(void)
+static inline void _sched_lock(void)
 {
 	__ASSERT(!_is_in_isr(), "");
 
diff --git a/kernel/unified/legacy_timer.c b/kernel/unified/legacy_timer.c
index 5021fb10877..aae48913514 100644
--- a/kernel/unified/legacy_timer.c
+++ b/kernel/unified/legacy_timer.c
@@ -70,7 +70,7 @@ SYS_INIT(init_dyamic_timers, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 
 ktimer_t task_timer_alloc(void)
 {
-	k_sched_lock();
+	_sched_lock();
 
 	/*
 	 * This conversion works only if timeout member
@@ -85,7 +85,7 @@ ktimer_t task_timer_alloc(void)
 void task_timer_free(ktimer_t timer)
 {
 	k_timer_stop(timer);
-	k_sched_lock();
+	_sched_lock();
 	sys_dlist_append(&timer_pool, &timer->timeout.node);
 	k_sched_unlock();
 }
@@ -105,7 +105,7 @@ void task_timer_start(ktimer_t timer, int32_t duration,
 
 bool _timer_pool_is_empty(void)
 {
-	k_sched_lock();
+	_sched_lock();
 
 	bool is_empty = sys_dlist_is_empty(&timer_pool);
 
diff --git a/kernel/unified/mem_pool.c b/kernel/unified/mem_pool.c
index c5ef31c33a5..930c67d06dc 100644
--- a/kernel/unified/mem_pool.c
+++ b/kernel/unified/mem_pool.c
@@ -467,7 +467,7 @@ static void block_waiters_check(struct k_mem_pool *pool)
 
 void k_mem_pool_defrag(struct k_mem_pool *pool)
 {
-	k_sched_lock();
+	_sched_lock();
 
 	/* do complete defragmentation of memory pool (i.e. all block sets) */
 	defrag(pool, pool->nr_of_block_sets - 1, 0);
@@ -483,7 +483,7 @@ int k_mem_pool_alloc(struct k_mem_pool *pool, struct k_mem_block *block,
 	char *found_block;
 	int offset;
 
-	k_sched_lock();
+	_sched_lock();
 
 	/* locate block set to try allocating from */
 	offset = compute_block_set_index(pool, size);
@@ -529,7 +529,7 @@ void k_mem_pool_free(struct k_mem_block *block)
 	int offset;
 	struct k_mem_pool *pool = block->pool_id;
 
-	k_sched_lock();
+	_sched_lock();
 
 	/* determine block set that block belongs to */
 	offset = compute_block_set_index(pool, block->req_size);
diff --git a/kernel/unified/mutex.c b/kernel/unified/mutex.c
index 8ef9f5ae8b3..ee51c67c4e9 100644
--- a/kernel/unified/mutex.c
+++ b/kernel/unified/mutex.c
@@ -131,7 +131,7 @@ int k_mutex_lock(struct k_mutex *mutex, int32_t timeout)
 {
 	int new_prio, key;
 
-	k_sched_lock();
+	_sched_lock();
 
 	if (likely(mutex->lock_count == 0 || mutex->owner == _current)) {
 
@@ -217,7 +217,7 @@ void k_mutex_unlock(struct k_mutex *mutex)
 	__ASSERT(mutex->lock_count > 0, "");
 	__ASSERT(mutex->owner == _current, "");
 
-	k_sched_lock();
+	_sched_lock();
 
 	RECORD_STATE_CHANGE();
 
diff --git a/kernel/unified/pipes.c b/kernel/unified/pipes.c
index bd8a7027d32..97083445ec3 100644
--- a/kernel/unified/pipes.c
+++ b/kernel/unified/pipes.c
@@ -422,7 +422,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 		return -EIO;
 	}
 
-	k_sched_lock();
+	_sched_lock();
 	irq_unlock(key);
 
 	/*
@@ -562,7 +562,7 @@ int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 		return -EIO;
 	}
 
-	k_sched_lock();
+	_sched_lock();
 	irq_unlock(key);
 
 	num_bytes_read = _pipe_buffer_get(pipe, data, bytes_to_read);
diff --git a/kernel/unified/sched.c b/kernel/unified/sched.c
index 47c2199df14..ce1575a74ca 100644
--- a/kernel/unified/sched.c
+++ b/kernel/unified/sched.c
@@ -99,6 +99,16 @@ void _reschedule_threads(int key)
 	}
 }
 
+void k_sched_lock(void)
+{
+	__ASSERT(!_is_in_isr(), "");
+
+	atomic_inc(&_nanokernel.current->sched_locked);
+
+	K_DEBUG("scheduler locked (%p:%d)\n",
+		_current, _current->sched_locked);
+}
+
 void k_sched_unlock(void)
 {
 	__ASSERT(_nanokernel.current->sched_locked > 0, "");
diff --git a/kernel/unified/thread.c b/kernel/unified/thread.c
index 15349614f29..b1e37ca4b49 100644
--- a/kernel/unified/thread.c
+++ b/kernel/unified/thread.c
@@ -277,7 +277,7 @@ void _k_thread_group_op(uint32_t groups, void (*func)(struct k_thread *))
 
 	__ASSERT(!_is_in_isr(), "");
 
-	k_sched_lock();
+	_sched_lock();
 
 	/* Invoke func() on each static thread in the specified group set. */
 
@@ -393,7 +393,7 @@ void _init_static_threads(void)
 		thread_data->thread->init_data = thread_data;
 	}
 
-	k_sched_lock();
+	_sched_lock();
 
 	/* Start all (legacy) threads that are part of the EXE task group */
 	_k_thread_group_op(K_TASK_GROUP_EXE, _k_thread_single_start);
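
The doc comment added to include/kernel.h above describes the intended usage:
take the scheduler lock around updates to data shared between preemptible
threads but never touched from ISRs, and rely on the fact that the lock nests.
The sketch below illustrates that usage; it is hypothetical application code,
not part of this patch, and the names request_count, enqueue_request and
enqueue_two are made up for illustration.

#include <kernel.h>

/* Shared between preemptible threads only; never accessed from an ISR,
 * so a scheduler lock (rather than irq_lock) is sufficient.
 */
static int request_count;

static void enqueue_request(void)
{
	k_sched_lock();		/* current thread can no longer be preempted */
	request_count++;	/* safe: no other thread runs until we unlock */
	k_sched_unlock();	/* may reschedule if a higher-priority thread is ready */
}

static void enqueue_two(void)
{
	k_sched_lock();		/* outer lock */
	enqueue_request();	/* nested lock/unlock is fine: the lock is recursive */
	enqueue_request();
	k_sched_unlock();	/* preemption re-enabled only once every lock is matched */
}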
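
As the sched.c hunk shows, k_sched_lock() asserts it is not called from an ISR
and increments a per-thread counter (sched_locked); the k_sched_unlock() body
is truncated in this patch, but per the header comment it must balance each
lock and reschedules on exit. The following is a minimal self-contained model
of those counter semantics, purely for illustration: it is not the kernel
implementation, and model_sched_lock, model_sched_unlock and fake_thread are
invented names.

#include <assert.h>
#include <stdio.h>

struct fake_thread {
	int sched_locked;	/* 0 means preemption is allowed */
};

static struct fake_thread current_thread;

static void model_sched_lock(void)
{
	current_thread.sched_locked++;	/* recursive: just bump the count */
}

static void model_sched_unlock(void)
{
	assert(current_thread.sched_locked > 0);	/* must balance a prior lock */
	current_thread.sched_locked--;
	if (current_thread.sched_locked == 0) {
		printf("preemption re-enabled; a reschedule would happen here\n");
	}
}

int main(void)
{
	model_sched_lock();
	model_sched_lock();	/* nested lock */
	model_sched_unlock();	/* still locked: count is 1 */
	model_sched_unlock();	/* now unlocked */
	return 0;
}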