kernel: sched: remove dead code

Due to recent scheduler changes, z_find_first_thread_to_unpend and
z_remove_thread_from_ready_q are no longer used. Remove the dead code.

Fixes: #32691

Signed-off-by: Spoorthy Priya Yerabolu <spoorthy.priya.yerabolu@intel.com>
Authored by Spoorthy Priya Yerabolu on 2021-03-05 01:32:39 -08:00; committed by Ioannis Glaropoulos
commit 4118ed1d4d
2 changed files with 0 additions and 24 deletions

Changed file 1 of 2:

@@ -38,7 +38,6 @@ BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
 void z_sched_init(void);
 void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
-void z_remove_thread_from_ready_q(struct k_thread *thread);
 int z_is_thread_time_slicing(struct k_thread *thread);
 void z_unpend_thread_no_timeout(struct k_thread *thread);
 struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q);
@@ -55,8 +54,6 @@ int z_unpend_all(_wait_q_t *wait_q);
 void z_thread_priority_set(struct k_thread *thread, int prio);
 bool z_set_prio(struct k_thread *thread, int prio);
 void *z_get_next_switch_handle(void *interrupted);
-struct k_thread *z_find_first_thread_to_unpend(_wait_q_t *wait_q,
-                                               struct k_thread *from);
 void idle(void *a, void *b, void *c);
 void z_time_slice(int ticks);
 void z_reset_time_slice(void);

Changed file 2 of 2:

@@ -595,13 +595,6 @@ static void unready_thread(struct k_thread *thread)
 	update_cache(thread == _current);
 }
 
-void z_remove_thread_from_ready_q(struct k_thread *thread)
-{
-	LOCKED(&sched_spinlock) {
-		unready_thread(thread);
-	}
-}
-
 /* sched_spinlock must be held */
 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
 {
@@ -643,20 +636,6 @@ void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
 	pend(thread, wait_q, timeout);
 }
 
-ALWAYS_INLINE struct k_thread *z_find_first_thread_to_unpend(_wait_q_t *wait_q,
-                                                             struct k_thread *from)
-{
-	ARG_UNUSED(from);
-
-	struct k_thread *ret = NULL;
-
-	LOCKED(&sched_spinlock) {
-		ret = _priq_wait_best(&wait_q->waitq);
-	}
-
-	return ret;
-}
-
 static inline void unpend_thread_no_timeout(struct k_thread *thread)
 {
 	_priq_wait_remove(&pended_on(thread)->waitq, thread);
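
Note on where the removed functionality lives on: z_remove_thread_from_ready_q was only a locked wrapper around the file-local unready_thread() (visible in the hunk above), so callers in the same file can take sched_spinlock and call unready_thread() directly. The lookup that z_find_first_thread_to_unpend performed (_priq_wait_best under sched_spinlock) remains reachable through the surviving z_unpend1_no_timeout declared in the header. The sketch below is illustrative only: wake_one_waiter is a hypothetical caller, not kernel code, and unlike the removed helper, z_unpend1_no_timeout also removes the thread it returns from the wait queue.

/* Illustrative sketch only (hypothetical helper, not part of this commit):
 * wake the highest-priority thread pended on a wait queue.  Assumes the
 * kernel-internal declarations from the header shown in the first hunk.
 */
#include <ksched.h>

static void wake_one_waiter(_wait_q_t *wait_q)
{
	/* Picks the best waiter under sched_spinlock and unpends it,
	 * which is why a separate "find first thread to unpend" helper
	 * is no longer needed.
	 */
	struct k_thread *thread = z_unpend1_no_timeout(wait_q);

	if (thread != NULL) {
		z_ready_thread(thread);
	}
}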