net: context: Fix use of k_delayed_work_cancel with SYN backlog
This patch fixes a regression which the original patch introducing this code (improving concurrent connection handling) had on *sequential* connection handling. Without this patch, with the default CONFIG_NET_TCP_BACKLOG_SIZE of 1, after each connection request there was 1s (ACK timeout) of "dead time" during which a new connection wasn't processed. This happened because the return value of k_delayed_work_remaining_get() was tested with the wrong sense. But there's no need to use k_delayed_work_remaining_get() at all; instead, just call k_delayed_work_cancel() and dispatch on its return code.

Note that there's still a problem of synchronizing access to the global array tcp_backlog, as the worker (which modifies it) may preempt the packet handling code (which also modifies it).

Signed-off-by: Paul Sokolovsky <paul.sokolovsky@linaro.org>
This commit is contained in:
parent ee989be286
commit 4334144caa
1 changed file with 14 additions and 14 deletions
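As a rough illustration of the pattern the commit message describes (cancel first, then dispatch on the return code), here is a minimal sketch against the legacy Zephyr k_delayed_work API. The struct pending_entry and release_entry() names are illustrative stand-ins, not identifiers from the patch:

#include <zephyr.h>
#include <string.h>
#include <stdbool.h>

/* Illustrative stand-in for a backlog slot; the real code uses
 * the global tcp_backlog array of struct tcp_backlog_entry.
 */
struct pending_entry {
	struct k_delayed_work ack_timer;
	bool cancelled;
};

static void release_entry(struct pending_entry *e)
{
	if (k_delayed_work_cancel(&e->ack_timer) < 0) {
		/* Negative return: the handler has already started (or is
		 * about to run) and can no longer be stopped. Leave the
		 * entry alone and let the handler observe the flag.
		 */
		e->cancelled = true;
	} else {
		/* 0: the timeout was still pending and is now cancelled,
		 * so it is safe to reclaim the entry immediately.
		 */
		memset(e, 0, sizeof(*e));
	}
}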
@@ -258,15 +258,15 @@ static int tcp_backlog_ack(struct net_pkt *pkt, struct net_context *context)
 	context->tcp->send_seq = tcp_backlog[r].send_seq;
 	context->tcp->send_ack = tcp_backlog[r].send_ack;
 
-	/* For now, remember to check that the delayed work wasn't already
-	 * scheduled to run, and if yes, don't zero out here. Improve the
-	 * delayed work cancellation, but for now use a boolean to keep this
-	 * in sync
-	 */
-	if (k_delayed_work_remaining_get(&tcp_backlog[r].ack_timer) > 0) {
+	if (k_delayed_work_cancel(&tcp_backlog[r].ack_timer) < 0) {
+		/* Too late to cancel - just set flag for worker.
+		 * TODO: Note that in this case, we can be preempted
+		 * anytime (could have been preempted even before we did
+		 * the check), so access to tcp_backlog should be synchronized
+		 * between this function and worker.
+		 */
 		tcp_backlog[r].cancelled = true;
 	} else {
-		k_delayed_work_cancel(&tcp_backlog[r].ack_timer);
 		memset(&tcp_backlog[r], 0, sizeof(struct tcp_backlog_entry));
 	}
 
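The "flag for worker" handshake in the new comment implies a timeout handler shaped roughly like the sketch below. This is an assumption about the other side of the race, not code from this patch; ack_timeout() and pending_entry are the same hypothetical names as above:

/* Sketch of the worker side of the handshake: the timeout handler
 * checks the flag before doing any work on the entry.
 */
static void ack_timeout(struct k_work *work)
{
	struct pending_entry *e =
		CONTAINER_OF(work, struct pending_entry, ack_timer.work);

	if (e->cancelled) {
		/* The foreground path lost the cancel race; just
		 * reclaim the slot and do nothing else.
		 */
		memset(e, 0, sizeof(*e));
		return;
	}

	/* ... otherwise handle the ACK timeout for this entry ... */
}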
@@ -293,15 +293,15 @@ static int tcp_backlog_rst(struct net_pkt *pkt)
 		return -EINVAL;
 	}
 
-	/* For now, remember to check that the delayed work wasn't already
-	 * scheduled to run, and if yes, don't zero out here. Improve the
-	 * delayed work cancellation, but for now use a boolean to keep this
-	 * in sync
-	 */
-	if (k_delayed_work_remaining_get(&tcp_backlog[r].ack_timer) > 0) {
+	if (k_delayed_work_cancel(&tcp_backlog[r].ack_timer) < 0) {
+		/* Too late to cancel - just set flag for worker.
+		 * TODO: Note that in this case, we can be preempted
+		 * anytime (could have been preempted even before we did
+		 * the check), so access to tcp_backlog should be synchronized
+		 * between this function and worker.
+		 */
 		tcp_backlog[r].cancelled = true;
 	} else {
-		k_delayed_work_cancel(&tcp_backlog[r].ack_timer);
 		memset(&tcp_backlog[r], 0, sizeof(struct tcp_backlog_entry));
 	}
 
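The TODO about synchronizing tcp_backlog access is left open by this patch. One conceivable approach, purely a sketch of the idea and not what the patch does, is to guard both paths with Zephyr's irq_lock()/irq_unlock() so the worker and the packet path cannot interleave their updates:

/* Hypothetical follow-up for the TODO: take an IRQ lock around the
 * cancel-and-reclaim sequence (and likewise inside the handler) so
 * the two sides cannot preempt each other mid-update.
 */
static void release_entry_locked(struct pending_entry *e)
{
	unsigned int key = irq_lock();

	if (k_delayed_work_cancel(&e->ack_timer) < 0) {
		e->cancelled = true;
	} else {
		memset(e, 0, sizeof(*e));
	}

	irq_unlock(key);
}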