kernel: have k_sched_lock call _sched_lock
Having two implementations of the same thing is bad, especially when one can just call the other inline version.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit 9f38d2a91a
parent 624a75c76e
2 changed files with 3 additions and 20 deletions
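For context, k_sched_lock() and k_sched_unlock() are Zephyr's public APIs for temporarily disabling preemption of the current thread. Below is a minimal usage sketch, not taken from this commit: the nesting behaviour follows from the per-thread counter visible in the removed code further down, and update_shared_state()/shared_counter are made-up names for illustration.

#include <kernel.h>		/* Zephyr kernel API header of this era */

static volatile int shared_counter;	/* hypothetical shared state */

void update_shared_state(void)
{
	k_sched_lock();		/* other threads can no longer preempt us       */
	shared_counter++;	/* protected against other threads (not ISRs)   */

	k_sched_lock();		/* locks nest: the per-thread count moves again */
	shared_counter++;
	k_sched_unlock();	/* inner unlock: scheduler still locked         */

	k_sched_unlock();	/* outermost unlock: preemption enabled again   */
}

Note that this only holds off other threads; interrupts still run, and the implementation asserts !_is_in_isr() because locking the scheduler only makes sense from thread context.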
@@ -239,12 +239,8 @@ static inline int _must_switch_threads(void)
 }
 
 /*
- * Internal equivalent to k_sched_lock so that it does not incur a function
- * call penalty in the kernel guts.
- *
- * Must be kept in sync until the header files are cleaned-up and the
- * applications have access to the kernel internal deta structures (through
- * APIs of course).
+ * Called directly by other internal kernel code.
+ * Exposed to applications via k_sched_lock(), which just calls this
  */
 static inline void _sched_lock(void)
 {
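The body of _sched_lock() sits outside this hunk, so the diff does not show it. Judging from the duplicated logic that the second hunk removes from k_sched_lock(), it presumably looks roughly like the sketch below; this is a reconstruction for illustration, not the verbatim contents of the header.

/* Sketch only: reconstructed from the code removed from k_sched_lock()
 * in the next hunk, not copied from the header file.
 */
static inline void _sched_lock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	__ASSERT(_current->base.sched_locked != 1, "");
	__ASSERT(!_is_in_isr(), "");

	/* sched_locked is a per-thread nesting count; a non-zero value
	 * keeps the current thread from being preempted.
	 */
	--_current->base.sched_locked;

	/* In an inline, the barrier does real work: it stops the compiler
	 * from hoisting protected accesses above the lock.
	 */
	compiler_barrier();

	K_DEBUG("scheduler locked (%p:%d)\n",
		_current, _current->base.sched_locked);
#endif
}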
@@ -137,20 +137,7 @@ void _reschedule_threads(int key)
 
 void k_sched_lock(void)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
-	__ASSERT(_current->base.sched_locked != 1, "");
-	__ASSERT(!_is_in_isr(), "");
-
-	--_current->base.sched_locked;
-
-	/* Probably not needed since we're in a real function,
-	 * but it doesn't hurt.
-	 */
-	compiler_barrier();
-
-	K_DEBUG("scheduler locked (%p:%d)\n",
-		_current, _current->base.sched_locked);
-#endif
+	_sched_lock();
 }
 
 void k_sched_unlock(void)
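k_sched_unlock() is the counterpart and is untouched by this commit. Assuming the counter semantics implied by the removed code (locking decrements sched_locked, so unlocking must increment it and give the scheduler a chance to pick a new thread), a plausible shape is sketched below. The irq_lock()/_reschedule_threads() pairing is an assumption based on the _reschedule_threads(int key) signature in the hunk header above, not a quote of the actual source.

/* Plausible counterpart sketch, not the actual kernel source. */
void k_sched_unlock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	__ASSERT(_current->base.sched_locked != 0, "");
	__ASSERT(!_is_in_isr(), "");

	int key = irq_lock();

	/* undo one level of nesting; zero means preemptible again */
	++_current->base.sched_locked;

	K_DEBUG("scheduler unlocked (%p:%d)\n",
		_current, _current->base.sched_locked);

	/* if the lock count dropped back to zero, let a higher-priority
	 * ready thread run now
	 */
	_reschedule_threads(key);
#endif
}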