kernel: enable and optimize coop-only configurations

Some kernel operations, like scheduler locking, can be optimized out,
since coop threads lock the scheduler by their very nature. Also, the
interrupt exit path for all architectures does not have to do any
rescheduling, again by the nature of non-preemptible threads.

Change-Id: I270e926df3ce46e11d77270330f2f4b463971763
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
Benjamin Walsh authored on 2016-12-14 14:34:29 -05:00, committed by Benjamin Walsh
commit 8e4a534ea1
7 changed files with 50 additions and 0 deletions
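
Before the per-file hunks, a minimal sketch (not part of the diff) of what the guarded scheduler-lock helpers reduce to when CONFIG_PREEMPT_ENABLED is not defined, based on the ksched.h hunk near the end of the diff: the lock bookkeeping disappears entirely, because a cooperative thread can never be preempted in the first place.

/* Coop-only build (CONFIG_PREEMPT_ENABLED undefined): roughly what the
 * preprocessor leaves of the helpers guarded below. Only a forward
 * declaration of struct k_thread is needed for this sketch.
 */
struct k_thread;

static inline int _is_preempt(struct k_thread *thread)
{
	return 0;	/* no thread is ever preemptible */
}

static inline void _sched_lock(void)
{
	/* empty: a coop thread effectively holds the scheduler by nature */
}

static inline void _sched_unlock_no_reschedule(void)
{
	/* empty for the same reason */
}

The sched.c hunk applies the same guard to the public k_sched_lock()/k_sched_unlock() pair.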

View file

@@ -127,6 +127,7 @@ SECTION_FUNC(TEXT, _firq_exit)
#endif
#endif
#ifdef CONFIG_PREEMPT_ENABLED
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
@@ -162,6 +163,8 @@ _firq_check_for_swap:
/* fall to no rescheduling */
#endif /* CONFIG_PREEMPT_ENABLED */
.balign 4
_firq_no_reschedule:
/*
@@ -195,6 +198,8 @@ _firq_no_reschedule:
#endif
rtie
#ifdef CONFIG_PREEMPT_ENABLED
.balign 4
_firq_reschedule:
@@ -294,6 +299,7 @@ _firq_return_from_firq:
/* LP registers are already restored, just switch back to bank 0 */
rtie
#endif /* CONFIG_PREEMPT_ENABLED */
/**
*

View file

@@ -96,6 +96,8 @@ SECTION_FUNC(TEXT, _rirq_enter)
SECTION_FUNC(TEXT, _rirq_exit)
#ifdef CONFIG_PREEMPT_ENABLED
mov r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
@@ -246,6 +248,8 @@ _rirq_return_from_rirq:
/* fall through to rtie instruction */
#endif /* CONFIG_PREEMPT_ENABLED */
.balign 4
_rirq_no_reschedule:

View file

@@ -87,6 +87,7 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
#ifdef CONFIG_PREEMPT_ENABLED
ldr r0, =_kernel
ldr r1, [r0, #_kernel_offset_to_current]
@@ -115,5 +116,6 @@ _ExcExitWithGdbStub:
_GDB_STUB_EXC_EXIT
_EXIT_EXC:
#endif /* CONFIG_PREEMPT_ENABLED */
bx lr

View file

@@ -125,6 +125,7 @@ on_irq_stack:
movhi r10, %hi(_kernel)
ori r10, r10, %lo(_kernel)
#ifdef CONFIG_PREEMPT_ENABLED
ldw r11, _kernel_offset_to_current(r10)
/* Determine whether the exception of the ISR requires context
* switch
@@ -164,6 +165,9 @@ on_irq_stack:
call _Swap
jmpi _exception_exit
#else
jmpi no_reschedule
#endif /* CONFIG_PREEMPT_ENABLED */
not_interrupt:

View file

@@ -284,6 +284,7 @@ alreadyOnIntStack:
jne nestedInterrupt /* 'iret' if nested case */
#ifdef CONFIG_PREEMPT_ENABLED
movl _kernel_offset_to_current(%ecx), %edx
/*
@@ -376,6 +377,7 @@ alreadyOnIntStack:
/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
iret
#endif /* CONFIG_PREEMPT_ENABLED */
noReschedule:
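
The five interrupt-exit hunks above all guard the same decision. Expressed in C it is roughly the following; this is a sketch only (the function name is made up for illustration), reusing the helpers from the ksched.h and sched.c hunks below rather than the literal assembly.

/* Hypothetical C rendering of the check each arch's exit stub now wraps in
 * CONFIG_PREEMPT_ENABLED. In a coop-only build it reduces to "return 0",
 * which is why the reschedule path and its labels can be compiled out.
 */
static inline int irq_exit_must_reschedule(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	return _is_preempt(_current) && __must_switch_threads();
#else
	return 0;	/* non-preemptible threads: never reschedule on IRQ exit */
#endif
}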

View file

@@ -125,19 +125,35 @@ static inline int _is_higher_prio_than_current(struct k_thread *thread)
/* is thread currently cooperative ? */
static inline int _is_coop(struct k_thread *thread)
{
#if defined(CONFIG_PREEMPT_ENABLED) && defined(CONFIG_COOP_ENABLED)
return thread->base.prio < 0;
#elif defined(CONFIG_COOP_ENABLED)
return 1;
#elif defined(CONFIG_PREEMPT_ENABLED)
return 0;
#else
#error "Impossible configuration"
#endif
}
/* is thread currently preemptible ? */
static inline int _is_preempt(struct k_thread *thread)
{
#ifdef CONFIG_PREEMPT_ENABLED
return !_is_coop(thread) && !atomic_get(&thread->base.sched_locked);
#else
return 0;
#endif
}
/* is current thread preemptible and we are not running in ISR context */
static inline int _is_current_execution_context_preemptible(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
return !_is_in_isr() && _is_preempt(_current);
#else
return 0;
#endif
}
/* find out if priority is under priority inheritance ceiling */
@@ -220,12 +236,14 @@ static inline int _must_switch_threads(void)
*/
static inline void _sched_lock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
__ASSERT(!_is_in_isr(), "");
atomic_inc(&_current->base.sched_locked);
K_DEBUG("scheduler locked (%p:%d)\n",
_current, _current->base.sched_locked);
#endif
}
/**
@@ -236,9 +254,11 @@ static inline void _sched_lock(void)
*/
static inline void _sched_unlock_no_reschedule(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
__ASSERT(!_is_in_isr(), "");
atomic_dec(&_current->base.sched_locked);
#endif
}
static inline void _set_thread_states(struct k_thread *thread, uint32_t states)
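
The four-way #if ladder added to _is_coop() above can be exercised in isolation. Below is a minimal, standalone sketch (plain C, not Zephyr code; the struct and field names are stand-ins) that mirrors the ladder and can be built with -DCONFIG_COOP_ENABLED and/or -DCONFIG_PREEMPT_ENABLED to see which branch survives preprocessing.

#include <stdio.h>

/* Stand-in for struct k_thread; only the priority matters for this check. */
struct fake_thread { int prio; };

static int is_coop(const struct fake_thread *t)
{
#if defined(CONFIG_PREEMPT_ENABLED) && defined(CONFIG_COOP_ENABLED)
	return t->prio < 0;	/* mixed build: negative priorities are coop */
#elif defined(CONFIG_COOP_ENABLED)
	return 1;		/* coop-only build: every thread is coop */
#elif defined(CONFIG_PREEMPT_ENABLED)
	return 0;		/* preempt-only build: no thread is coop */
#else
#error "Impossible configuration"
#endif
}

int main(void)
{
	struct fake_thread a = { .prio = -1 }, b = { .prio = 5 };

	printf("prio -1 -> coop? %d\n", is_coop(&a));
	printf("prio  5 -> coop? %d\n", is_coop(&b));
	return 0;
}

With only CONFIG_COOP_ENABLED defined both lines print 1, with only CONFIG_PREEMPT_ENABLED both print 0, and with both defined the sign of the priority decides; this is exactly why _is_preempt() and the scheduler-lock helpers can be guarded the way the rest of the hunk shows.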

View file

@@ -111,6 +111,7 @@ void _remove_thread_from_ready_q(struct k_thread *thread)
/* must be called with interrupts locked */
void _reschedule_threads(int key)
{
#ifdef CONFIG_PREEMPT_ENABLED
K_DEBUG("rescheduling threads\n");
if (_must_switch_threads()) {
@@ -119,20 +120,26 @@ void _reschedule_threads(int key)
} else {
irq_unlock(key);
}
#else
irq_unlock(key);
#endif
}
void k_sched_lock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
__ASSERT(!_is_in_isr(), "");
atomic_inc(&_current->base.sched_locked);
K_DEBUG("scheduler locked (%p:%d)\n",
_current, _current->base.sched_locked);
#endif
}
void k_sched_unlock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
__ASSERT(_current->base.sched_locked > 0, "");
__ASSERT(!_is_in_isr(), "");
@@ -144,6 +151,7 @@ void k_sched_unlock(void)
_current, _current->base.sched_locked);
_reschedule_threads(key);
#endif
}
/*
@@ -206,6 +214,7 @@ void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout)
*/
int __must_switch_threads(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
K_DEBUG("current prio: %d, highest prio: %d\n",
_current->base.prio, _get_highest_ready_prio());
@@ -213,6 +222,9 @@ int __must_switch_threads(void)
_dump_ready_q();
return _is_prio_higher(_get_highest_ready_prio(), _current->base.prio);
#else
return 0;
#endif
}
int k_thread_priority_get(k_tid_t thread)
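
From the application side, the net effect of the sched.c guards above can be illustrated with a short usage sketch (assuming the public k_sched_lock()/k_sched_unlock() API; the shared counter and function are hypothetical, not part of the commit): a preemptible build gets a real critical section, while a coop-only build compiles both calls down to empty bodies because the current thread cannot be preempted anyway.

#include <kernel.h>

static volatile int shared_count;	/* hypothetical shared state */

void bump_shared_count(void)
{
	/* With CONFIG_PREEMPT_ENABLED this keeps a higher-priority preemptible
	 * thread from running between the read and the write. In a coop-only
	 * build both calls are no-ops and correctness still holds, since a
	 * cooperative thread only yields when it chooses to.
	 */
	k_sched_lock();
	shared_count++;
	k_sched_unlock();
}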