kernel: workq: Add internal function z_work_submit_to_queue()
This adds the internal function z_work_submit_to_queue(), which submits a work item to a queue but does not force the current thread to yield, unlike the public function k_work_submit_to_queue(). When called from poll.c in the context of k_work_poll events, it ensures that the thread does not yield while it is still in the context of the spinlock of the object that became available.

Fixes #45267

Signed-off-by: Lucas Dietrich <ld.adecy@gmail.com>
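For context, a hedged sketch (not part of the commit; obj_lock, notify_workq and notify_work are hypothetical names, the queue and work item are assumed to be initialized elsewhere, and the include path assumes a recent Zephyr tree) of the situation the new helper is meant for:

/* Hypothetical sketch: submitting work while a spinlock is held.
 * Only z_work_submit_to_queue() itself comes from this commit.
 */
#include <zephyr/kernel.h>

/* Kernel-internal helper introduced by this commit; declared the same
 * way kernel/poll.c does it below.
 */
extern int z_work_submit_to_queue(struct k_work_q *queue,
				  struct k_work *work);

static struct k_spinlock obj_lock;   /* lock of the object that became available */
static struct k_work_q notify_workq; /* assumed started with k_work_queue_start() */
static struct k_work notify_work;    /* assumed initialized with k_work_init() */

static void notify_under_lock(void)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	/* The public k_work_submit_to_queue() may force the current
	 * thread to yield, i.e. switch away while obj_lock is still
	 * held; the internal variant only queues the item and returns.
	 */
	z_work_submit_to_queue(&notify_workq, &notify_work);

	k_spin_unlock(&obj_lock, key);
}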
parent d778d5c711
commit 9a848b3ad4
2 changed files with 27 additions and 5 deletions
kernel/poll.c

@@ -576,6 +576,9 @@ static void triggered_work_expiration_handler(struct _timeout *timeout)
 	k_work_submit_to_queue(twork->workq, &twork->work);
 }
 
+extern int z_work_submit_to_queue(struct k_work_q *queue,
+				  struct k_work *work);
+
 static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
 {
 	struct z_poller *poller = event->poller;
@@ -587,7 +590,7 @@ static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
 
 		z_abort_timeout(&twork->timeout);
 		twork->poll_result = 0;
-		k_work_submit_to_queue(work_q, &twork->work);
+		z_work_submit_to_queue(work_q, &twork->work);
 	}
 
 	return 0;
kernel/work.c

@@ -355,19 +355,38 @@ static int submit_to_queue_locked(struct k_work *work,
 	return ret;
 }
 
-int k_work_submit_to_queue(struct k_work_q *queue,
+/* Submit work to a queue but do not yield the current thread.
+ *
+ * Intended for internal use.
+ *
+ * See also submit_to_queue_locked().
+ *
+ * @param queuep pointer to a queue reference.
+ * @param work the work structure to be submitted
+ *
+ * @retval see submit_to_queue_locked()
+ */
+int z_work_submit_to_queue(struct k_work_q *queue,
 			  struct k_work *work)
 {
 	__ASSERT_NO_MSG(work != NULL);
 
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);
-
 	int ret = submit_to_queue_locked(work, &queue);
 
 	k_spin_unlock(&lock, key);
 
+	return ret;
+}
+
+int k_work_submit_to_queue(struct k_work_q *queue,
+			  struct k_work *work)
+{
+	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);
+
+	int ret = z_work_submit_to_queue(queue, work);
+
 	/* submit_to_queue_locked() won't reschedule on its own
 	 * (really it should, otherwise this process will result in
 	 * spurious calls to z_swap() due to the race), so do it here
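The path this change fixes is the triggered-work path: the polled object becomes available, the kernel signals the event while holding that object's spinlock, and signal_triggered_work() submits the work item from inside that context. A hedged usage sketch of such a setup (sem, poll_workq, poll_work and poll_work_handler are hypothetical names; the work queue is assumed to be started elsewhere):

/* Hypothetical sketch: a triggered work item whose internal submission
 * now goes through z_work_submit_to_queue().
 */
#include <zephyr/kernel.h>

static struct k_sem sem;
static struct k_work_q poll_workq;   /* assumed started with k_work_queue_start() */
static struct k_work_poll poll_work;
static struct k_poll_event events[1];

static void poll_work_handler(struct k_work *work)
{
	/* Runs on poll_workq once the semaphore becomes available. */
	ARG_UNUSED(work);
}

void setup_triggered_work(void)
{
	k_sem_init(&sem, 0, 1);
	k_work_poll_init(&poll_work, poll_work_handler);
	k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &sem);

	/* A later k_sem_give(&sem) makes the kernel run
	 * signal_triggered_work() while the spinlock of the signalled
	 * object is held; with this commit that internal submission no
	 * longer yields.
	 */
	k_work_poll_submit_to_queue(&poll_workq, &poll_work,
				    events, 1, K_FOREVER);
}

Note that the public k_work_submit_to_queue() keeps its existing behaviour (tracing hook plus a reschedule after the submission, as the retained comment above shows); only internal callers that already run inside a locked context switch to the non-yielding variant.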