kernel/queue: Clean up scheduler API usage
This was the only spot where the scheduler-internal _peek_first_pending_thread() API was used. Given that this kind of thing is inherently racy (the thread may not stay pending as long as you expect if a timeout expires, etc.), it would be nice to retire it. And as it happens, all the queue code was using it for was to detect the case of a non-empty wait_q over which it was looping, which is trivial to do without API support.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
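As a rough illustration of the pattern this change adopts (condensed from the diff below to the !CONFIG_POLL path only, so it is not a verbatim copy of the file), the drain loop itself now records whether any thread was woken, and that flag decides between rescheduling and a plain unlock:

	int need_sched = 0;
	unsigned int key = irq_lock();
	struct k_thread *thread;

	/* Hand an item directly to each pending reader and remember
	 * whether anyone was actually unpended.
	 */
	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
		prepare_thread_to_run(thread, head);
		head = *(void **)head;
		need_sched = 1;	/* replaces the _peek_first_pending_thread() probe */
	}

	/* Whatever is left over goes onto the queue's data list. */
	if (head) {
		sys_slist_append_list(&queue->data_q, head, tail);
	}

	if (need_sched) {
		/* At least one thread became runnable; the irq key is
		 * handed to the scheduler instead of being unlocked here.
		 */
		_reschedule_threads(key);
	} else {
		irq_unlock(key);
	}

The point of the cleanup is that the "was anyone waiting?" question is answered by the loop itself, so no separate (and racy) peek at the wait_q is needed.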
commit 345553b19b
parent 85bc0a3fe6

1 changed file with 9 additions and 9 deletions
@@ -148,26 +148,21 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 {
 	__ASSERT(head && tail, "invalid head or tail");
 
+	int need_sched = 0;
 	unsigned int key = irq_lock();
 #if !defined(CONFIG_POLL)
-	struct k_thread *first_thread, *thread;
+	struct k_thread *thread;
 
-	first_thread = _peek_first_pending_thread(&queue->wait_q);
 	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
 		prepare_thread_to_run(thread, head);
 		head = *(void **)head;
+		need_sched = 1;
 	}
 
 	if (head) {
 		sys_slist_append_list(&queue->data_q, head, tail);
 	}
 
-	if (first_thread) {
-		if (!_is_in_isr() && _must_switch_threads()) {
-			(void)_Swap(key);
-			return;
-		}
-	}
 #else
 	sys_slist_append_list(&queue->data_q, head, tail);
 	if (handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE)) {
@@ -176,7 +171,12 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 	}
 #endif /* !CONFIG_POLL */
 
-	irq_unlock(key);
+	if (need_sched) {
+		_reschedule_threads(key);
+		return;
+	} else {
+		irq_unlock(key);
+	}
 }
 
 void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)