xtensa, kernel/sched: Move next switch_handle selection to the scheduler
The xtensa asm2 layer had a function to select the next switch handle to return into following an exception. There is no arch-specific code there; it is just scheduler logic. Move it to the scheduler where it belongs.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
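For context, here is a minimal sketch (not part of this diff; the handler name and the IRQ dispatch step are hypothetical) of how an architecture's C-level interrupt exit path is expected to use the relocated scheduler hook: it hands the scheduler the switch handle of the interrupted context and returns into whichever handle the scheduler picks.

    /* Hypothetical arch-side interrupt handler, mirroring the xtensa
     * handlers changed below; not code from this commit.
     */
    void *arch_irq_handler_c(void *interrupted_stack)
    {
    	dispatch_pending_irqs();	/* hypothetical per-arch IRQ demux */

    	/* The scheduler returns the same handle if the interrupted
    	 * thread should keep running, or the next ready thread's
    	 * switch handle if a context switch is needed.
    	 */
    	return _get_next_switch_handle(interrupted_stack);
    }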
parent e82578919a
commit 9d367eeb0a

3 changed files with 31 additions and 25 deletions
@@ -137,29 +137,6 @@ static void dump_stack(int *stack)
 	printk(" ** SAR %p\n", (void *)bsa[BSA_SAR_OFF/4]);
 }
 
-#if CONFIG_XTENSA_ASM2
-static inline void *restore_stack(void *interrupted_stack)
-{
-	if (!_is_preempt(_current) &&
-	    !(_current->base.thread_state & _THREAD_DEAD)) {
-		return interrupted_stack;
-	}
-
-	int key = irq_lock();
-
-	_current->switch_handle = interrupted_stack;
-	_current = _get_next_ready_thread();
-
-	void *ret = _current->switch_handle;
-
-	irq_unlock(key);
-
-	_check_stack_sentinel();
-
-	return ret;
-}
-#endif
-
 /* The wrapper code lives here instead of in the python script that
  * generates _xtensa_handle_one_int*(). Seems cleaner, still kind of
  * ugly.
@@ -174,7 +151,7 @@ void *xtensa_int##l##_c(void *interrupted_stack) \
 		irqs ^= m; \
 		__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
 	} \
-	return restore_stack(interrupted_stack); \
+	return _get_next_switch_handle(interrupted_stack); \
 }
 
 DEF_INT_C_HANDLER(2)
@@ -235,6 +212,6 @@ void *xtensa_excint1_c(int *interrupted_stack)
 		_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
 	}
 
-	return restore_stack(interrupted_stack);
+	return _get_next_switch_handle(interrupted_stack);
 }
 
@@ -541,4 +541,10 @@ static inline int _is_thread_user(void)
 #endif
 }
 #endif /* CONFIG_USERSPACE */
+
+/**
+ * Returns the switch_handle of the next thread to run following an interrupt.
+ */
+void *_get_next_switch_handle(void *interrupted);
+
 #endif /* _ksched__h_ */
@@ -538,3 +538,26 @@ struct k_thread *_get_next_ready_thread(void)
 	return NULL;
 }
 #endif
+
+#ifdef CONFIG_USE_SWITCH
+void *_get_next_switch_handle(void *interrupted)
+{
+	if (!_is_preempt(_current) &&
+	    !(_current->base.thread_state & _THREAD_DEAD)) {
+		return interrupted;
+	}
+
+	int key = irq_lock();
+
+	_current->switch_handle = interrupted;
+	_current = _get_next_ready_thread();
+
+	void *ret = _current->switch_handle;
+
+	irq_unlock(key);
+
+	_check_stack_sentinel();
+
+	return ret;
+}
+#endif