kernel: export k_sched_lock and k_sched_unlock.

This was an oversight: these functions are used extensively in the kernel
guts, but they are also supposed to be part of the public API.

k_sched_lock() used to be implemented as a static inline. However, until
the header files are cleaned up and everything, including applications,
gets access to the kernel's internal data structures, it must be
implemented as a function. To avoid imposing that cost on the kernel
internals, the new internal _sched_lock() contains the same
implementation, but is inlined.

Change-Id: If2f61d7714f87d81ddbeed69fedd111b8ce01376
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
commit d7ad176be6
Author: Benjamin Walsh, 2016-11-10 14:46:58 -05:00 (committed by Anas Nashif)
8 changed files with 60 additions and 18 deletions
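
For illustration only (not part of this commit): a minimal sketch of how an
application can use the now-exported pair to protect data that is shared
between threads but never touched from ISRs. The variable and function names
below are hypothetical, and the include assumes a typical Zephyr application
build of that era where <zephyr.h> pulls in the kernel API.

#include <zephyr.h>

static int shared_count;        /* hypothetical data shared between threads */

void bump_shared_count(void)
{
        k_sched_lock();         /* no other thread can preempt us from here on */
        shared_count++;         /* safe against other threads, but not against ISRs */
        k_sched_unlock();       /* may reschedule if a higher-priority thread became ready */
}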


@@ -420,6 +420,38 @@ extern void k_sched_time_slice_set(int32_t slice, int prio);
*/
extern int k_is_in_isr(void);
/**
* @brief Lock the scheduler
*
* Prevent another thread from preempting the current thread.
*
* @note If the thread does an operation that causes it to pend, it will still
* be context switched out.
*
* @note Similar to irq_lock, the scheduler lock state is tracked per-thread.
*
* This should be chosen over irq_lock when possible, basically when the data
* protected by it is not accessible from ISRs. However, the associated
* k_sched_unlock() is heavier to use than irq_unlock, so if the amount of
* processing is really small, irq_lock might be a better choice.
*
* Can be called recursively.
*
* @return N/A
*/
extern void k_sched_lock(void);
/**
* @brief Unlock the scheduler
*
* Re-enable scheduling previously disabled by k_sched_lock(). Must be called
* an equal amount of times k_sched_lock() was called. Threads are rescheduled
* upon exit.
*
* @return N/A
*/
extern void k_sched_unlock(void);
/**
* @brief Set current thread's custom data.
*
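
As the comment block above notes, the lock state is tracked per-thread and
calls can nest. A hedged sketch of what that permits; the function names are
hypothetical:

#include <zephyr.h>

static void refresh_stats(void)
{
        k_sched_lock();         /* nested call: this thread's count goes 1 -> 2 */
        /* ... update statistics shared with other threads ... */
        k_sched_unlock();       /* count back to 1; the scheduler stays locked */
}

static void update_all(void)
{
        k_sched_lock();         /* count 0 -> 1 */
        refresh_stats();        /* safe to call even though it locks again */
        k_sched_unlock();       /* count 1 -> 0; a reschedule may happen here */
}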


@@ -193,14 +193,14 @@ static inline int _must_switch_threads(void)
}
/*
* Application API.
* Internal equivalent to k_sched_lock so that it does not incur a function
* call penalty in the kernel guts.
*
* lock the scheduler: prevents another thread from preempting the current one
* except if the current thread does an operation that causes it to pend
*
* Can be called recursively.
* Must be kept in sync until the header files are cleaned-up and the
* applications have access to the kernel internal data structures (through
* APIs of course).
*/
static inline void k_sched_lock(void)
static inline void _sched_lock(void)
{
__ASSERT(!_is_in_isr(), "");
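
Judging from the exported k_sched_lock() body added later in this commit, the
inlined internal helper presumably ends up along these lines. This is a sketch
that relies on kernel-internal symbols (_nanokernel, _current, K_DEBUG), not
the verbatim source:

static inline void _sched_lock(void)
{
        __ASSERT(!_is_in_isr(), "");

        /* same bookkeeping as the exported k_sched_lock(); the two must be
         * kept in sync by hand, as the comment above warns
         */
        atomic_inc(&_nanokernel.current->sched_locked);

        K_DEBUG("scheduler locked (%p:%d)\n",
                _current, _current->sched_locked);
}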


@@ -70,7 +70,7 @@ SYS_INIT(init_dyamic_timers, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
ktimer_t task_timer_alloc(void)
{
k_sched_lock();
_sched_lock();
/*
* This conversion works only if timeout member
@@ -85,7 +85,7 @@ ktimer_t task_timer_alloc(void)
void task_timer_free(ktimer_t timer)
{
k_timer_stop(timer);
k_sched_lock();
_sched_lock();
sys_dlist_append(&timer_pool, &timer->timeout.node);
k_sched_unlock();
}
@@ -105,7 +105,7 @@ void task_timer_start(ktimer_t timer, int32_t duration,
bool _timer_pool_is_empty(void)
{
k_sched_lock();
_sched_lock();
bool is_empty = sys_dlist_is_empty(&timer_pool);


@@ -467,7 +467,7 @@ static void block_waiters_check(struct k_mem_pool *pool)
void k_mem_pool_defrag(struct k_mem_pool *pool)
{
k_sched_lock();
_sched_lock();
/* do complete defragmentation of memory pool (i.e. all block sets) */
defrag(pool, pool->nr_of_block_sets - 1, 0);
@@ -483,7 +483,7 @@ int k_mem_pool_alloc(struct k_mem_pool *pool, struct k_mem_block *block,
char *found_block;
int offset;
k_sched_lock();
_sched_lock();
/* locate block set to try allocating from */
offset = compute_block_set_index(pool, size);
@@ -529,7 +529,7 @@ void k_mem_pool_free(struct k_mem_block *block)
int offset;
struct k_mem_pool *pool = block->pool_id;
k_sched_lock();
_sched_lock();
/* determine block set that block belongs to */
offset = compute_block_set_index(pool, block->req_size);


@@ -131,7 +131,7 @@ int k_mutex_lock(struct k_mutex *mutex, int32_t timeout)
{
int new_prio, key;
k_sched_lock();
_sched_lock();
if (likely(mutex->lock_count == 0 || mutex->owner == _current)) {
@@ -217,7 +217,7 @@ void k_mutex_unlock(struct k_mutex *mutex)
__ASSERT(mutex->lock_count > 0, "");
__ASSERT(mutex->owner == _current, "");
k_sched_lock();
_sched_lock();
RECORD_STATE_CHANGE();


@@ -422,7 +422,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
return -EIO;
}
k_sched_lock();
_sched_lock();
irq_unlock(key);
/*
@@ -562,7 +562,7 @@ int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
return -EIO;
}
k_sched_lock();
_sched_lock();
irq_unlock(key);
num_bytes_read = _pipe_buffer_get(pipe, data, bytes_to_read);


@@ -99,6 +99,16 @@ void _reschedule_threads(int key)
}
}
void k_sched_lock(void)
{
__ASSERT(!_is_in_isr(), "");
atomic_inc(&_nanokernel.current->sched_locked);
K_DEBUG("scheduler locked (%p:%d)\n",
_current, _current->sched_locked);
}
void k_sched_unlock(void)
{
__ASSERT(_nanokernel.current->sched_locked > 0, "");
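
The k_sched_unlock() body is cut off above. Given its doc comment ("Threads
are rescheduled upon exit") and the _reschedule_threads() helper visible at
the top of this hunk, the remainder plausibly decrements the per-thread count
under an IRQ lock and then reschedules. A hedged sketch only, not the verbatim
source:

void k_sched_unlock(void)
{
        __ASSERT(_nanokernel.current->sched_locked > 0, "");
        __ASSERT(!_is_in_isr(), "");

        int key = irq_lock();

        /* undo one k_sched_lock(); at zero the thread is preemptible again */
        atomic_dec(&_nanokernel.current->sched_locked);

        K_DEBUG("scheduler unlocked (%p:%d)\n",
                _current, _current->sched_locked);

        /* let a higher-priority ready thread run, as the doc comment promises */
        _reschedule_threads(key);
}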


@@ -277,7 +277,7 @@ void _k_thread_group_op(uint32_t groups, void (*func)(struct k_thread *))
__ASSERT(!_is_in_isr(), "");
k_sched_lock();
_sched_lock();
/* Invoke func() on each static thread in the specified group set. */
@ -393,7 +393,7 @@ void _init_static_threads(void)
thread_data->thread->init_data = thread_data;
}
k_sched_lock();
_sched_lock();
/* Start all (legacy) threads that are part of the EXE task group */
_k_thread_group_op(K_TASK_GROUP_EXE, _k_thread_single_start);