2019-10-21 11:12:35 +03:00
|
|
|
/*
|
2020-02-26 17:38:45 +02:00
|
|
|
* Copyright (c) 2018-2020 Intel Corporation
|
2019-10-21 11:12:35 +03:00
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <logging/log.h>
|
|
|
|
LOG_MODULE_REGISTER(net_tcp, CONFIG_NET_TCP_LOG_LEVEL);
|
|
|
|
|
|
|
|
#include <stdarg.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <zephyr.h>
|
2020-06-11 21:28:55 -07:00
|
|
|
#include <random/rand32.h>
|
2019-10-21 11:12:35 +03:00
|
|
|
#include <net/net_pkt.h>
|
|
|
|
#include <net/net_context.h>
|
2020-02-26 17:57:48 +02:00
|
|
|
#include <net/udp.h>
|
2020-02-26 20:54:11 +02:00
|
|
|
#include "ipv4.h"
|
2020-04-03 15:12:56 +03:00
|
|
|
#include "ipv6.h"
|
2019-10-21 11:12:35 +03:00
|
|
|
#include "connection.h"
|
|
|
|
#include "net_stats.h"
|
|
|
|
#include "net_private.h"
|
|
|
|
#include "tcp2_priv.h"
|
|
|
|
|
2020-09-18 10:37:32 +03:00
|
|
|
#define FIN_TIMEOUT_MS MSEC_PER_SEC
|
|
|
|
#define FIN_TIMEOUT K_MSEC(FIN_TIMEOUT_MS)
|
|
|
|
|
2019-11-26 15:29:56 +02:00
|
|
|
static int tcp_rto = CONFIG_NET_TCP_INIT_RETRANSMISSION_TIMEOUT;
|
2020-09-10 17:24:46 +03:00
|
|
|
static int tcp_retries = CONFIG_NET_TCP_RETRY_COUNT;
|
2019-10-21 11:12:35 +03:00
|
|
|
static int tcp_window = NET_IPV6_MTU;
|
|
|
|
|
|
|
|
static sys_slist_t tcp_conns = SYS_SLIST_STATIC_INIT(&tcp_conns);
|
|
|
|
|
2019-10-23 10:56:38 +03:00
|
|
|
static K_MEM_SLAB_DEFINE(tcp_conns_slab, sizeof(struct tcp),
|
|
|
|
CONFIG_NET_MAX_CONTEXTS, 4);
|
|
|
|
|
2019-10-21 11:12:35 +03:00
|
|
|
static void tcp_in(struct tcp *conn, struct net_pkt *pkt);
|
|
|
|
|
|
|
|
int (*tcp_send_cb)(struct net_pkt *pkt) = NULL;
|
2020-03-10 14:14:20 +02:00
|
|
|
size_t (*tcp_recv_cb)(struct tcp *conn, struct net_pkt *pkt) = NULL;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-06-01 12:58:38 +03:00
|
|
|
static int tcp_pkt_linearize(struct net_pkt *pkt, size_t pos, size_t len)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-06-01 12:58:38 +03:00
|
|
|
struct net_buf *buf, *first = pkt->cursor.buf, *second = first->frags;
|
|
|
|
int ret = 0;
|
|
|
|
size_t len1, len2;
|
|
|
|
|
|
|
|
if (net_pkt_get_len(pkt) < (pos + len)) {
|
2020-06-03 06:20:22 -05:00
|
|
|
NET_ERR("Insufficient packet len=%zd (pos+len=%zu)",
|
2020-06-01 12:58:38 +03:00
|
|
|
net_pkt_get_len(pkt), pos + len);
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
buf = net_pkt_get_frag(pkt, TCP_PKT_ALLOC_TIMEOUT);
|
|
|
|
|
|
|
|
if (!buf || buf->size < len) {
|
|
|
|
if (buf) {
|
|
|
|
net_buf_unref(buf);
|
|
|
|
}
|
|
|
|
ret = -ENOBUFS;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
net_buf_linearize(buf->data, buf->size, pkt->frags, pos, len);
|
|
|
|
net_buf_add(buf, len);
|
|
|
|
|
|
|
|
len1 = first->len - (pkt->cursor.pos - pkt->cursor.buf->data);
|
|
|
|
len2 = len - len1;
|
|
|
|
|
|
|
|
first->len -= len1;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-06-01 12:58:38 +03:00
|
|
|
while (len2) {
|
|
|
|
size_t pull_len = MIN(second->len, len2);
|
|
|
|
struct net_buf *next;
|
|
|
|
|
|
|
|
len2 -= pull_len;
|
|
|
|
net_buf_pull(second, pull_len);
|
|
|
|
next = second->frags;
|
|
|
|
if (second->len == 0) {
|
|
|
|
net_buf_unref(second);
|
|
|
|
}
|
|
|
|
second = next;
|
|
|
|
}
|
|
|
|
|
|
|
|
buf->frags = second;
|
|
|
|
first->frags = buf;
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct tcphdr *th_get(struct net_pkt *pkt)
|
|
|
|
{
|
|
|
|
size_t ip_len = net_pkt_ip_hdr_len(pkt) + net_pkt_ip_opts_len(pkt);
|
|
|
|
struct tcphdr *th = NULL;
|
|
|
|
again:
|
2020-02-26 17:38:45 +02:00
|
|
|
net_pkt_cursor_init(pkt);
|
|
|
|
net_pkt_set_overwrite(pkt, true);
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-06-01 12:58:38 +03:00
|
|
|
if (net_pkt_skip(pkt, ip_len) != 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!net_pkt_is_contiguous(pkt, sizeof(*th))) {
|
|
|
|
if (tcp_pkt_linearize(pkt, ip_len, sizeof(*th)) < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-06-01 12:58:38 +03:00
|
|
|
goto again;
|
|
|
|
}
|
2020-02-26 17:38:45 +02:00
|
|
|
|
2020-06-01 12:58:38 +03:00
|
|
|
th = net_pkt_cursor_get_pos(pkt);
|
|
|
|
out:
|
|
|
|
return th;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static size_t tcp_endpoint_len(sa_family_t af)
|
|
|
|
{
|
|
|
|
return (af == AF_INET) ? sizeof(struct sockaddr_in) :
|
|
|
|
sizeof(struct sockaddr_in6);
|
|
|
|
}
|
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
static int tcp_endpoint_set(union tcp_endpoint *ep, struct net_pkt *pkt,
|
|
|
|
enum pkt_addr src)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-04-21 14:12:33 +03:00
|
|
|
int ret = 0;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
switch (net_pkt_family(pkt)) {
|
2020-04-16 11:29:06 +03:00
|
|
|
case AF_INET:
|
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV4)) {
|
2020-04-21 14:12:33 +03:00
|
|
|
struct net_ipv4_hdr *ip = NET_IPV4_HDR(pkt);
|
2020-09-10 17:29:40 +03:00
|
|
|
struct tcphdr *th;
|
|
|
|
|
|
|
|
th = th_get(pkt);
|
|
|
|
if (!th) {
|
|
|
|
return -ENOBUFS;
|
|
|
|
}
|
2020-04-16 11:29:06 +03:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
memset(ep, 0, sizeof(*ep));
|
|
|
|
|
2020-04-16 11:29:06 +03:00
|
|
|
ep->sin.sin_port = src == TCP_EP_SRC ? th->th_sport :
|
|
|
|
th->th_dport;
|
2020-04-21 14:12:33 +03:00
|
|
|
net_ipaddr_copy(&ep->sin.sin_addr,
|
|
|
|
src == TCP_EP_SRC ?
|
|
|
|
&ip->src : &ip->dst);
|
|
|
|
ep->sa.sa_family = AF_INET;
|
|
|
|
} else {
|
|
|
|
ret = -EINVAL;
|
2020-04-16 11:29:06 +03:00
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
break;
|
|
|
|
|
2020-04-16 11:29:06 +03:00
|
|
|
case AF_INET6:
|
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV6)) {
|
2020-04-21 14:12:33 +03:00
|
|
|
struct net_ipv6_hdr *ip = NET_IPV6_HDR(pkt);
|
2020-09-10 17:29:40 +03:00
|
|
|
struct tcphdr *th;
|
|
|
|
|
|
|
|
th = th_get(pkt);
|
|
|
|
if (!th) {
|
|
|
|
return -ENOBUFS;
|
|
|
|
}
|
2020-04-16 11:29:06 +03:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
memset(ep, 0, sizeof(*ep));
|
|
|
|
|
2020-04-16 11:29:06 +03:00
|
|
|
ep->sin6.sin6_port = src == TCP_EP_SRC ? th->th_sport :
|
|
|
|
th->th_dport;
|
2020-04-21 14:12:33 +03:00
|
|
|
net_ipaddr_copy(&ep->sin6.sin6_addr,
|
|
|
|
src == TCP_EP_SRC ?
|
|
|
|
&ip->src : &ip->dst);
|
|
|
|
ep->sa.sa_family = AF_INET6;
|
|
|
|
} else {
|
|
|
|
ret = -EINVAL;
|
2020-04-16 11:29:06 +03:00
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
break;
|
2020-04-16 11:29:06 +03:00
|
|
|
|
2019-10-21 11:12:35 +03:00
|
|
|
default:
|
2020-04-21 14:12:33 +03:00
|
|
|
NET_ERR("Unknown address family: %hu", net_pkt_family(pkt));
|
|
|
|
ret = -EINVAL;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
return ret;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
static const char *tcp_flags(uint8_t flags)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2019-11-27 14:47:01 +02:00
|
|
|
#define BUF_SIZE 25 /* 6 * 4 + 1 */
|
2019-10-21 11:12:35 +03:00
|
|
|
static char buf[BUF_SIZE];
|
2019-11-27 14:47:01 +02:00
|
|
|
int len = 0;
|
|
|
|
|
|
|
|
buf[0] = '\0';
|
|
|
|
|
|
|
|
if (flags) {
|
|
|
|
if (flags & SYN) {
|
|
|
|
len += snprintk(buf + len, BUF_SIZE - len, "SYN,");
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2019-11-27 14:47:01 +02:00
|
|
|
if (flags & FIN) {
|
|
|
|
len += snprintk(buf + len, BUF_SIZE - len, "FIN,");
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2019-11-27 14:47:01 +02:00
|
|
|
if (flags & ACK) {
|
|
|
|
len += snprintk(buf + len, BUF_SIZE - len, "ACK,");
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2019-11-27 14:47:01 +02:00
|
|
|
if (flags & PSH) {
|
|
|
|
len += snprintk(buf + len, BUF_SIZE - len, "PSH,");
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2019-11-27 14:47:01 +02:00
|
|
|
if (flags & RST) {
|
|
|
|
len += snprintk(buf + len, BUF_SIZE - len, "RST,");
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2019-11-27 14:47:01 +02:00
|
|
|
if (flags & URG) {
|
|
|
|
len += snprintk(buf + len, BUF_SIZE - len, "URG,");
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2019-11-27 14:47:01 +02:00
|
|
|
|
|
|
|
buf[len - 1] = '\0'; /* delete the last comma */
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
#undef BUF_SIZE
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
2020-05-13 17:45:47 +03:00
|
|
|
static size_t tcp_data_len(struct net_pkt *pkt)
|
|
|
|
{
|
|
|
|
struct tcphdr *th = th_get(pkt);
|
|
|
|
size_t tcp_options_len = (th->th_off - 5) * 4;
|
2020-07-27 09:27:50 +03:00
|
|
|
int len = net_pkt_get_len(pkt) - net_pkt_ip_hdr_len(pkt) -
|
2020-05-13 17:45:47 +03:00
|
|
|
net_pkt_ip_opts_len(pkt) - sizeof(*th) - tcp_options_len;
|
|
|
|
|
2020-07-27 09:27:50 +03:00
|
|
|
return len > 0 ? (size_t)len : 0;
|
2020-05-13 17:45:47 +03:00
|
|
|
}
|
|
|
|
|
2019-10-21 11:12:35 +03:00
|
|
|
static const char *tcp_th(struct net_pkt *pkt)
|
|
|
|
{
|
|
|
|
#define BUF_SIZE 80
|
|
|
|
static char buf[BUF_SIZE];
|
2019-11-28 19:18:22 +02:00
|
|
|
int len = 0;
|
2019-10-21 11:12:35 +03:00
|
|
|
struct tcphdr *th = th_get(pkt);
|
|
|
|
|
2019-11-28 19:18:22 +02:00
|
|
|
buf[0] = '\0';
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
if (th->th_off < 5) {
|
2019-11-28 19:18:22 +02:00
|
|
|
len += snprintk(buf + len, BUF_SIZE - len,
|
2020-05-27 11:26:57 -05:00
|
|
|
"bogus th_off: %hu", (uint16_t)th->th_off);
|
2019-10-21 11:12:35 +03:00
|
|
|
goto end;
|
|
|
|
}
|
|
|
|
|
2019-11-28 19:18:22 +02:00
|
|
|
len += snprintk(buf + len, BUF_SIZE - len,
|
|
|
|
"%s Seq=%u", tcp_flags(th->th_flags), th_seq(th));
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2019-11-28 19:18:22 +02:00
|
|
|
if (th->th_flags & ACK) {
|
|
|
|
len += snprintk(buf + len, BUF_SIZE - len,
|
|
|
|
" Ack=%u", th_ack(th));
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
2020-02-26 17:38:45 +02:00
|
|
|
len += snprintk(buf + len, BUF_SIZE - len,
|
|
|
|
" Len=%ld", (long)tcp_data_len(pkt));
|
2019-10-21 11:12:35 +03:00
|
|
|
end:
|
|
|
|
#undef BUF_SIZE
|
2019-11-28 19:18:22 +02:00
|
|
|
return buf;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void tcp_send(struct net_pkt *pkt)
|
|
|
|
{
|
2020-03-06 15:12:24 +02:00
|
|
|
NET_DBG("%s", log_strdup(tcp_th(pkt)));
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
tcp_pkt_ref(pkt);
|
|
|
|
|
|
|
|
if (tcp_send_cb) {
|
|
|
|
if (tcp_send_cb(pkt) < 0) {
|
|
|
|
NET_ERR("net_send_data()");
|
|
|
|
tcp_pkt_unref(pkt);
|
|
|
|
}
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (net_send_data(pkt) < 0) {
|
|
|
|
NET_ERR("net_send_data()");
|
|
|
|
tcp_pkt_unref(pkt);
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
tcp_pkt_unref(pkt);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcp_send_queue_flush(struct tcp *conn)
|
|
|
|
{
|
|
|
|
struct net_pkt *pkt;
|
|
|
|
|
net: tcp2: fix sysworkq corruption in tcp_conn_unref()
Bug description:
When in tcp_conn_unref(), in case one of the delayed works is already
submitted to sysworkq (after delay period), e.g. send_timer, the check
of k_delayed_work_remaining_get() prevents calling
k_delayed_work_cancel().
This leads to corrupting sysworkq when zeroing struct tcp* conn.
Note that the "next" pointer for the work queue is part of the struct
work (in _reserved field). Which is, in this case, a member of struct
tcp.
Scenario leading to the bug:
(1) net_tcp_connect() is called from a work in sysworkq
(2) net_tcp_connect() submits conn->send_timer to sysworkq
(3) while net_tcp_connect() is waiting on connect_sem, delay period
passes (z_timeout) and send_timer enters sysworkq work slist
(4) also, some other code (app) submits more works to queue, now pointed
by conn->send_timer in sysworkq work list
(5) connection fails (no answer to SYN), causing a call to
tcp_conn_unref()
(6) tcp_conn_unref() is calling tcp_send_queue_flush()
(7) checking k_delayed_work_remaining_get(&conn->send_timer) returns 0
due to delay period end, but send_timer is still in sysworkq work
slist (sysworkq thread still hasn't handled the work)
(8) BUG!: no call to k_delayed_work_cancel(&conn->send_timer)
(9) back in tcp_conn_unref(), a call to memset(conn, 0, sizeof(*conn))
zeroes conn->send_timer
(10) conn->send_timer is pointed to in sysworkq work slist, but is
zeroed, clearing pointer to following works submitted in stage (4)
(11) EFFECT! the works in stage (4) are never executed!!
NOTES:
* k_delayed_work_cancel(), handles both states:
(1) delayed work pends on timeout and
(2) work already in queue.
So there is no need to check k_delayed_work_remaining_get()
* This is also relevant for conn->send_data_timer
Solution:
removing checks of k_delayed_work_remaining_get(), always calling
k_delayed_work_cancel() for work in struct tcp, in unref, before memset
Signed-off-by: David Komel <a8961713@gmail.com>
2020-09-23 11:49:17 +03:00
|
|
|
k_delayed_work_cancel(&conn->send_timer);
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
while ((pkt = tcp_slist(&conn->send_queue, get,
|
|
|
|
struct net_pkt, next))) {
|
|
|
|
tcp_pkt_unref(pkt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-23 10:56:38 +03:00
|
|
|
static int tcp_conn_unref(struct tcp *conn)
|
|
|
|
{
|
2020-06-08 19:20:22 +03:00
|
|
|
int key, ref_count = atomic_get(&conn->ref_count);
|
2019-10-23 10:56:38 +03:00
|
|
|
|
|
|
|
NET_DBG("conn: %p, ref_count=%d", conn, ref_count);
|
|
|
|
|
2020-06-08 19:20:22 +03:00
|
|
|
#if !defined(CONFIG_NET_TEST_PROTOCOL)
|
|
|
|
if (conn->in_connect) {
|
|
|
|
NET_DBG("conn: %p is waiting on connect semaphore", conn);
|
|
|
|
tcp_send_queue_flush(conn);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_TEST_PROTOCOL */
|
|
|
|
|
|
|
|
ref_count = atomic_dec(&conn->ref_count) - 1;
|
|
|
|
|
2019-10-23 10:56:38 +03:00
|
|
|
if (ref_count) {
|
2020-03-03 18:39:24 +02:00
|
|
|
tp_out(net_context_get_family(conn->context), conn->iface,
|
|
|
|
"TP_TRACE", "event", "CONN_DELETE");
|
2019-10-23 10:56:38 +03:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
key = irq_lock();
|
|
|
|
|
|
|
|
if (conn->context->conn_handler) {
|
|
|
|
net_conn_unregister(conn->context->conn_handler);
|
|
|
|
conn->context->conn_handler = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (conn->context->recv_cb) {
|
|
|
|
conn->context->recv_cb(conn->context, NULL, NULL, NULL,
|
|
|
|
-ECONNRESET, conn->recv_user_data);
|
|
|
|
}
|
|
|
|
|
|
|
|
conn->context->tcp = NULL;
|
|
|
|
|
|
|
|
net_context_unref(conn->context);
|
|
|
|
|
|
|
|
tcp_send_queue_flush(conn);
|
|
|
|
|
net: tcp2: fix sysworkq corruption in tcp_conn_unref()
Bug description:
When in tcp_conn_unref(), in case one of the delayed works is already
submitted to sysworkq (after delay period), e.g. send_timer, the check
of k_delayed_work_remaining_get() prevents calling
k_delayed_work_cancel().
This leads to corrupting sysworkq when zeroing struct tcp* conn.
Note that the "next" pointer for the work queue is part of the struct
work (in _reserved field). Which is, in this case, a member of struct
tcp.
Scenario leading to the bug:
(1) net_tcp_connect() is called from a work in sysworkq
(2) net_tcp_connect() submits conn->send_timer to sysworkq
(3) while net_tcp_connect() is waiting on connect_sem, delay period
passes (z_timeout) and send_timer enters sysworkq work slist
(4) also, some other code (app) submits more works to queue, now pointed
by conn->send_timer in sysworkq work list
(5) connection fails (no answer to SYN), causing a call to
tcp_conn_unref()
(6) tcp_conn_unref() is calling tcp_send_queue_flush()
(7) checking k_delayed_work_remaining_get(&conn->send_timer) returns 0
due to delay period end, but send_timer is still in sysworkq work
slist (sysworkq thread still hasn't handled the work)
(8) BUG!: no call to k_delayed_work_cancel(&conn->send_timer)
(9) back in tcp_conn_unref(), a call to memset(conn, 0, sizeof(*conn))
zeroes conn->send_timer
(10) conn->send_timer is pointed to in sysworkq work slist, but is
zeroed, clearing pointer to following works submitted in stage (4)
(11) EFFECT! the works in stage (4) are never executed!!
NOTES:
* k_delayed_work_cancel(), handles both states:
(1) delayed work pends on timeout and
(2) work already in queue.
So there is no need to check k_delayed_work_remaining_get()
* This is also relevant for conn->send_data_timer
Solution:
removing checks of k_delayed_work_remaining_get(), always calling
k_delayed_work_cancel() for work in struct tcp, in unref, before memset
Signed-off-by: David Komel <a8961713@gmail.com>
2020-09-23 11:49:17 +03:00
|
|
|
k_delayed_work_cancel(&conn->send_data_timer);
|
2020-05-15 18:17:37 +03:00
|
|
|
tcp_pkt_unref(conn->send_data);
|
|
|
|
|
2020-03-26 11:24:43 +02:00
|
|
|
k_delayed_work_cancel(&conn->timewait_timer);
|
2020-09-18 10:37:32 +03:00
|
|
|
k_delayed_work_cancel(&conn->fin_timer);
|
2020-03-26 11:24:43 +02:00
|
|
|
|
2020-09-09 12:38:39 +03:00
|
|
|
sys_slist_find_and_remove(&tcp_conns, &conn->next);
|
2019-10-23 10:56:38 +03:00
|
|
|
|
2020-07-27 12:30:05 +03:00
|
|
|
memset(conn, 0, sizeof(*conn));
|
|
|
|
|
2019-10-23 10:56:38 +03:00
|
|
|
k_mem_slab_free(&tcp_conns_slab, (void **)&conn);
|
|
|
|
|
|
|
|
irq_unlock(key);
|
|
|
|
out:
|
|
|
|
return ref_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
int net_tcp_unref(struct net_context *context)
|
|
|
|
{
|
|
|
|
int ref_count = 0;
|
|
|
|
|
|
|
|
NET_DBG("context: %p, conn: %p", context, context->tcp);
|
|
|
|
|
|
|
|
if (context->tcp) {
|
|
|
|
ref_count = tcp_conn_unref(context->tcp);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ref_count;
|
|
|
|
}
|
|
|
|
|
2020-03-06 13:43:43 +02:00
|
|
|
static void tcp_send_process(struct k_work *work)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-03-06 13:43:43 +02:00
|
|
|
struct tcp *conn = CONTAINER_OF(work, struct tcp, send_timer);
|
2019-10-21 11:12:35 +03:00
|
|
|
struct net_pkt *pkt = tcp_slist(&conn->send_queue, peek_head,
|
|
|
|
struct net_pkt, next);
|
|
|
|
|
2020-09-15 09:15:50 +03:00
|
|
|
if (!pkt) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-03-06 15:12:24 +02:00
|
|
|
NET_DBG("%s %s", log_strdup(tcp_th(pkt)), conn->in_retransmission ?
|
2019-10-21 11:12:35 +03:00
|
|
|
"in_retransmission" : "");
|
|
|
|
|
|
|
|
if (conn->in_retransmission) {
|
|
|
|
if (conn->send_retries > 0) {
|
2020-09-10 17:29:40 +03:00
|
|
|
struct net_pkt *clone = tcp_pkt_clone(pkt);
|
|
|
|
|
|
|
|
if (clone) {
|
|
|
|
tcp_send(clone);
|
|
|
|
conn->send_retries--;
|
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
} else {
|
2019-10-23 10:56:38 +03:00
|
|
|
tcp_conn_unref(conn);
|
|
|
|
conn = NULL;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
} else {
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t fl = th_get(pkt)->th_flags;
|
2019-10-21 11:12:35 +03:00
|
|
|
bool forget = ACK == fl || PSH == fl || (ACK | PSH) == fl ||
|
|
|
|
RST & fl;
|
|
|
|
|
|
|
|
pkt = forget ? tcp_slist(&conn->send_queue, get, struct net_pkt,
|
|
|
|
next) : tcp_pkt_clone(pkt);
|
2020-05-13 14:46:03 +03:00
|
|
|
if (!pkt) {
|
|
|
|
NET_ERR("net_pkt alloc failure");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-10-21 11:12:35 +03:00
|
|
|
tcp_send(pkt);
|
|
|
|
|
2020-03-06 13:43:43 +02:00
|
|
|
if (forget == false && !k_delayed_work_remaining_get(
|
|
|
|
&conn->send_timer)) {
|
2019-10-21 11:12:35 +03:00
|
|
|
conn->send_retries = tcp_retries;
|
|
|
|
conn->in_retransmission = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (conn && conn->in_retransmission) {
|
2020-03-06 13:43:43 +02:00
|
|
|
k_delayed_work_submit(&conn->send_timer, K_MSEC(tcp_rto));
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcp_send_timer_cancel(struct tcp *conn)
|
|
|
|
{
|
2020-01-08 09:42:23 +02:00
|
|
|
NET_ASSERT(conn->in_retransmission == true, "Not in retransmission");
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-03-06 13:43:43 +02:00
|
|
|
k_delayed_work_cancel(&conn->send_timer);
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
{
|
|
|
|
struct net_pkt *pkt = tcp_slist(&conn->send_queue, get,
|
|
|
|
struct net_pkt, next);
|
2020-09-18 10:37:32 +03:00
|
|
|
if (pkt) {
|
|
|
|
NET_DBG("%s", log_strdup(tcp_th(pkt)));
|
|
|
|
tcp_pkt_unref(pkt);
|
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (sys_slist_is_empty(&conn->send_queue)) {
|
|
|
|
conn->in_retransmission = false;
|
|
|
|
} else {
|
|
|
|
conn->send_retries = tcp_retries;
|
2020-03-06 13:43:43 +02:00
|
|
|
k_delayed_work_submit(&conn->send_timer, K_MSEC(tcp_rto));
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static const char *tcp_state_to_str(enum tcp_state state, bool prefix)
|
|
|
|
{
|
|
|
|
const char *s = NULL;
|
|
|
|
#define _(_x) case _x: do { s = #_x; goto out; } while (0)
|
|
|
|
switch (state) {
|
|
|
|
_(TCP_LISTEN);
|
|
|
|
_(TCP_SYN_SENT);
|
|
|
|
_(TCP_SYN_RECEIVED);
|
|
|
|
_(TCP_ESTABLISHED);
|
2020-03-25 18:35:47 +02:00
|
|
|
_(TCP_FIN_WAIT_1);
|
|
|
|
_(TCP_FIN_WAIT_2);
|
2019-10-21 11:12:35 +03:00
|
|
|
_(TCP_CLOSE_WAIT);
|
|
|
|
_(TCP_CLOSING);
|
|
|
|
_(TCP_LAST_ACK);
|
|
|
|
_(TCP_TIME_WAIT);
|
|
|
|
_(TCP_CLOSED);
|
|
|
|
}
|
|
|
|
#undef _
|
2020-01-08 09:42:23 +02:00
|
|
|
NET_ASSERT(s, "Invalid TCP state: %u", state);
|
2019-10-21 11:12:35 +03:00
|
|
|
out:
|
|
|
|
return prefix ? s : (s + 4);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const char *tcp_conn_state(struct tcp *conn, struct net_pkt *pkt)
|
|
|
|
{
|
2020-09-17 14:11:28 +03:00
|
|
|
#define BUF_SIZE 160
|
2019-10-21 11:12:35 +03:00
|
|
|
static char buf[BUF_SIZE];
|
|
|
|
|
2019-12-12 16:52:01 +02:00
|
|
|
snprintk(buf, BUF_SIZE, "%s [%s Seq=%u Ack=%u]", pkt ? tcp_th(pkt) : "",
|
2019-10-21 11:12:35 +03:00
|
|
|
tcp_state_to_str(conn->state, false),
|
|
|
|
conn->seq, conn->ack);
|
|
|
|
#undef BUF_SIZE
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
2020-07-28 09:56:08 +03:00
|
|
|
static uint8_t *tcp_options_get(struct net_pkt *pkt, int tcp_options_len,
|
|
|
|
uint8_t *buf, size_t buf_len)
|
2020-04-16 18:54:28 +03:00
|
|
|
{
|
|
|
|
struct net_pkt_cursor backup;
|
2020-07-28 09:56:08 +03:00
|
|
|
int ret;
|
2020-04-16 18:54:28 +03:00
|
|
|
|
|
|
|
net_pkt_cursor_backup(pkt, &backup);
|
|
|
|
net_pkt_cursor_init(pkt);
|
|
|
|
net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) + net_pkt_ip_opts_len(pkt) +
|
|
|
|
sizeof(struct tcphdr));
|
2020-07-28 09:56:08 +03:00
|
|
|
ret = net_pkt_read(pkt, buf, MIN(tcp_options_len, buf_len));
|
|
|
|
if (ret < 0) {
|
|
|
|
buf = NULL;
|
|
|
|
}
|
|
|
|
|
2020-04-16 18:54:28 +03:00
|
|
|
net_pkt_cursor_restore(pkt, &backup);
|
|
|
|
|
2020-07-28 09:56:08 +03:00
|
|
|
return buf;
|
2020-04-16 18:54:28 +03:00
|
|
|
}
|
|
|
|
|
2020-04-22 15:19:52 +03:00
|
|
|
static bool tcp_options_check(struct tcp_options *recv_options,
|
|
|
|
struct net_pkt *pkt, ssize_t len)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-07-28 09:56:08 +03:00
|
|
|
uint8_t options_buf[40]; /* TCP header max options size is 40 */
|
2019-10-21 11:12:35 +03:00
|
|
|
bool result = len > 0 && ((len % 4) == 0) ? true : false;
|
2020-07-28 09:56:08 +03:00
|
|
|
uint8_t *options = tcp_options_get(pkt, len, options_buf,
|
|
|
|
sizeof(options_buf));
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t opt, opt_len;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
NET_DBG("len=%zd", len);
|
|
|
|
|
2020-04-22 15:19:52 +03:00
|
|
|
recv_options->mss_found = false;
|
|
|
|
recv_options->wnd_found = false;
|
|
|
|
|
2020-07-28 09:56:08 +03:00
|
|
|
for ( ; options && len >= 1; options += opt_len, len -= opt_len) {
|
2019-10-21 11:12:35 +03:00
|
|
|
opt = options[0];
|
|
|
|
|
2020-04-01 13:50:44 +03:00
|
|
|
if (opt == TCPOPT_END) {
|
|
|
|
break;
|
|
|
|
} else if (opt == TCPOPT_NOP) {
|
2020-04-02 14:03:20 +03:00
|
|
|
opt_len = 1;
|
2019-12-14 12:41:23 +02:00
|
|
|
continue;
|
2020-04-02 14:03:20 +03:00
|
|
|
} else {
|
|
|
|
if (len < 2) { /* Only END and NOP can have length 1 */
|
2020-04-15 13:17:16 +03:00
|
|
|
NET_ERR("Illegal option %d with length %zd",
|
2020-04-02 14:03:20 +03:00
|
|
|
opt, len);
|
|
|
|
result = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
opt_len = options[1];
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2020-05-27 11:26:57 -05:00
|
|
|
NET_DBG("opt: %hu, opt_len: %hu", (uint16_t)opt, (uint16_t)opt_len);
|
2019-12-14 12:41:23 +02:00
|
|
|
|
|
|
|
if (opt_len < 2 || opt_len > len) {
|
|
|
|
result = false;
|
2019-10-21 11:12:35 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (opt) {
|
|
|
|
case TCPOPT_MAXSEG:
|
|
|
|
if (opt_len != 4) {
|
|
|
|
result = false;
|
|
|
|
goto end;
|
|
|
|
}
|
2020-04-22 15:19:52 +03:00
|
|
|
|
2020-06-23 19:44:49 +02:00
|
|
|
recv_options->mss =
|
|
|
|
ntohs(UNALIGNED_GET((uint16_t *)(options + 2)));
|
2020-04-22 15:19:52 +03:00
|
|
|
recv_options->mss_found = true;
|
2020-05-13 19:52:59 +03:00
|
|
|
NET_DBG("MSS=%hu", recv_options->mss);
|
2019-10-21 11:12:35 +03:00
|
|
|
break;
|
|
|
|
case TCPOPT_WINDOW:
|
|
|
|
if (opt_len != 3) {
|
|
|
|
result = false;
|
|
|
|
goto end;
|
|
|
|
}
|
2020-04-22 15:19:52 +03:00
|
|
|
|
|
|
|
recv_options->window = opt;
|
|
|
|
recv_options->wnd_found = true;
|
2019-10-21 11:12:35 +03:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
end:
|
|
|
|
if (false == result) {
|
|
|
|
NET_WARN("Invalid TCP options");
|
|
|
|
}
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2020-07-27 09:27:50 +03:00
|
|
|
static int tcp_data_get(struct tcp *conn, struct net_pkt *pkt)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-07-27 09:27:50 +03:00
|
|
|
int len = tcp_data_len(pkt);
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-03-10 14:14:20 +02:00
|
|
|
if (tcp_recv_cb) {
|
|
|
|
tcp_recv_cb(conn, pkt);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2019-10-21 11:12:35 +03:00
|
|
|
if (len > 0) {
|
2019-10-22 15:04:02 +03:00
|
|
|
if (conn->context->recv_cb) {
|
2020-05-13 14:07:07 +03:00
|
|
|
struct net_pkt *up =
|
|
|
|
net_pkt_clone(pkt, TCP_PKT_ALLOC_TIMEOUT);
|
|
|
|
|
|
|
|
if (!up) {
|
|
|
|
len = -ENOBUFS;
|
|
|
|
goto out;
|
|
|
|
}
|
2019-10-22 15:04:02 +03:00
|
|
|
|
|
|
|
net_pkt_cursor_init(up);
|
|
|
|
net_pkt_set_overwrite(up, true);
|
2020-02-26 17:38:45 +02:00
|
|
|
|
|
|
|
net_pkt_skip(up, net_pkt_get_len(up) - len);
|
2019-10-22 15:04:02 +03:00
|
|
|
|
|
|
|
net_context_packet_received(
|
|
|
|
(struct net_conn *)conn->context->conn_handler,
|
|
|
|
up, NULL, NULL, conn->recv_user_data);
|
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2020-03-10 14:14:20 +02:00
|
|
|
out:
|
2019-10-21 11:12:35 +03:00
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
static int tcp_finalize_pkt(struct net_pkt *pkt)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-04-03 15:12:56 +03:00
|
|
|
net_pkt_cursor_init(pkt);
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
|
|
|
|
return net_ipv4_finalize(pkt, IPPROTO_TCP);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2020-03-09 14:41:15 +02:00
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
|
|
|
|
return net_ipv6_finalize(pkt, IPPROTO_TCP);
|
|
|
|
}
|
2020-03-09 14:41:15 +02:00
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
return -EINVAL;
|
2020-02-26 20:54:11 +02:00
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
static int tcp_header_add(struct tcp *conn, struct net_pkt *pkt, uint8_t flags,
|
|
|
|
uint32_t seq)
|
2020-02-26 20:54:11 +02:00
|
|
|
{
|
2020-04-03 15:12:56 +03:00
|
|
|
NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct tcphdr);
|
|
|
|
struct tcphdr *th;
|
2020-02-26 20:54:11 +02:00
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
th = (struct tcphdr *)net_pkt_get_data(pkt, &tcp_access);
|
|
|
|
if (!th) {
|
|
|
|
return -ENOBUFS;
|
|
|
|
}
|
2020-02-26 20:54:11 +02:00
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
memset(th, 0, sizeof(struct tcphdr));
|
2020-02-26 20:54:11 +02:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
th->th_sport = conn->src.sin.sin_port;
|
|
|
|
th->th_dport = conn->dst.sin.sin_port;
|
2020-02-26 20:54:11 +02:00
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
th->th_off = 5;
|
|
|
|
th->th_flags = flags;
|
2020-05-13 14:42:50 +03:00
|
|
|
th->th_win = htons(conn->recv_win);
|
2020-05-13 17:33:53 +03:00
|
|
|
th->th_seq = htonl(seq);
|
2020-02-26 20:54:11 +02:00
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
if (ACK & flags) {
|
|
|
|
th->th_ack = htonl(conn->ack);
|
2020-02-26 20:54:11 +02:00
|
|
|
}
|
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
return net_pkt_set_data(pkt, &tcp_access);
|
|
|
|
}
|
2020-02-26 20:54:11 +02:00
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
static int ip_header_add(struct tcp *conn, struct net_pkt *pkt)
|
|
|
|
{
|
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
|
|
|
|
return net_context_create_ipv4_new(conn->context, pkt,
|
2020-04-21 14:12:33 +03:00
|
|
|
&conn->src.sin.sin_addr,
|
|
|
|
&conn->dst.sin.sin_addr);
|
2020-04-03 15:12:56 +03:00
|
|
|
}
|
2020-02-26 20:54:11 +02:00
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
|
|
|
|
return net_context_create_ipv6_new(conn->context, pkt,
|
2020-04-21 14:12:33 +03:00
|
|
|
&conn->src.sin6.sin6_addr,
|
|
|
|
&conn->dst.sin6.sin6_addr);
|
2020-04-03 15:12:56 +03:00
|
|
|
}
|
2020-02-26 20:54:11 +02:00
|
|
|
|
2020-04-03 15:12:56 +03:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2020-02-26 20:54:11 +02:00
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
static int tcp_out_ext(struct tcp *conn, uint8_t flags, struct net_pkt *data,
|
|
|
|
uint32_t seq)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-03-09 14:41:15 +02:00
|
|
|
struct net_pkt *pkt;
|
2020-09-10 17:29:40 +03:00
|
|
|
int ret = 0;
|
2020-04-03 15:12:56 +03:00
|
|
|
|
2020-04-22 15:09:23 +03:00
|
|
|
pkt = tcp_pkt_alloc(conn, sizeof(struct tcphdr));
|
2020-04-03 15:12:56 +03:00
|
|
|
if (!pkt) {
|
2020-09-10 17:29:40 +03:00
|
|
|
ret = -ENOBUFS;
|
2020-05-13 17:33:53 +03:00
|
|
|
goto out;
|
2020-03-09 14:41:15 +02:00
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-05-13 17:33:53 +03:00
|
|
|
if (data) {
|
|
|
|
/* Append the data buffer to the pkt */
|
|
|
|
net_pkt_append_buffer(pkt, data->buffer);
|
|
|
|
data->buffer = NULL;
|
2020-04-03 15:12:56 +03:00
|
|
|
}
|
2020-03-09 14:41:15 +02:00
|
|
|
|
2020-05-13 17:33:53 +03:00
|
|
|
ret = ip_header_add(conn, pkt);
|
|
|
|
if (ret < 0) {
|
|
|
|
tcp_pkt_unref(pkt);
|
|
|
|
goto out;
|
2020-04-03 15:12:56 +03:00
|
|
|
}
|
2020-03-09 14:41:15 +02:00
|
|
|
|
2020-05-13 17:33:53 +03:00
|
|
|
ret = tcp_header_add(conn, pkt, flags, seq);
|
|
|
|
if (ret < 0) {
|
|
|
|
tcp_pkt_unref(pkt);
|
|
|
|
goto out;
|
2020-04-03 15:12:56 +03:00
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-05-13 17:33:53 +03:00
|
|
|
ret = tcp_finalize_pkt(pkt);
|
|
|
|
if (ret < 0) {
|
|
|
|
tcp_pkt_unref(pkt);
|
|
|
|
goto out;
|
2020-03-09 14:41:15 +02:00
|
|
|
}
|
|
|
|
|
2020-03-06 15:12:24 +02:00
|
|
|
NET_DBG("%s", log_strdup(tcp_th(pkt)));
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
if (tcp_send_cb) {
|
2020-09-10 17:29:40 +03:00
|
|
|
ret = tcp_send_cb(pkt);
|
2019-10-21 11:12:35 +03:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
sys_slist_append(&conn->send_queue, &pkt->next);
|
|
|
|
|
2020-03-06 13:43:43 +02:00
|
|
|
tcp_send_process((struct k_work *)&conn->send_timer);
|
2019-10-21 11:12:35 +03:00
|
|
|
out:
|
2020-09-10 17:29:40 +03:00
|
|
|
return ret;
|
2020-05-13 17:33:53 +03:00
|
|
|
}
|
2020-04-03 15:12:56 +03:00
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
static void tcp_out(struct tcp *conn, uint8_t flags)
|
2020-05-13 17:33:53 +03:00
|
|
|
{
|
2020-09-10 17:29:40 +03:00
|
|
|
(void)tcp_out_ext(conn, flags, NULL /* no data */, conn->seq);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
2020-05-15 18:17:37 +03:00
|
|
|
static int tcp_pkt_pull(struct net_pkt *pkt, size_t len)
|
|
|
|
{
|
|
|
|
int total = net_pkt_get_len(pkt);
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (len > total) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
net_pkt_cursor_init(pkt);
|
|
|
|
net_pkt_set_overwrite(pkt, true);
|
|
|
|
net_pkt_pull(pkt, len);
|
|
|
|
net_pkt_trim_buffer(pkt);
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
static int tcp_pkt_peek(struct net_pkt *to, struct net_pkt *from, size_t pos,
|
|
|
|
size_t len)
|
2020-05-15 18:17:37 +03:00
|
|
|
{
|
|
|
|
net_pkt_cursor_init(to);
|
|
|
|
net_pkt_cursor_init(from);
|
|
|
|
|
|
|
|
if (pos) {
|
|
|
|
net_pkt_set_overwrite(from, true);
|
|
|
|
net_pkt_skip(from, pos);
|
|
|
|
}
|
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
return net_pkt_copy(to, from, len);
|
2020-05-15 18:17:37 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool tcp_window_full(struct tcp *conn)
|
|
|
|
{
|
|
|
|
bool window_full = !(conn->unacked_len < conn->send_win);
|
|
|
|
|
|
|
|
NET_DBG("conn: %p window_full=%hu", conn, window_full);
|
|
|
|
|
|
|
|
return window_full;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tcp_unsent_len(struct tcp *conn)
|
|
|
|
{
|
|
|
|
int unsent_len;
|
|
|
|
|
|
|
|
if (conn->unacked_len > conn->send_data_total) {
|
|
|
|
NET_ERR("total=%zu, unacked_len=%d",
|
|
|
|
conn->send_data_total, conn->unacked_len);
|
|
|
|
unsent_len = -ERANGE;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsent_len = conn->send_data_total - conn->unacked_len;
|
|
|
|
out:
|
|
|
|
NET_DBG("unsent_len=%d", unsent_len);
|
|
|
|
|
|
|
|
return unsent_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tcp_send_data(struct tcp *conn)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
int pos, len;
|
|
|
|
struct net_pkt *pkt;
|
|
|
|
|
|
|
|
pos = conn->unacked_len;
|
|
|
|
len = MIN3(conn->send_data_total - conn->unacked_len,
|
|
|
|
conn->send_win - conn->unacked_len,
|
|
|
|
conn_mss(conn));
|
|
|
|
|
|
|
|
pkt = tcp_pkt_alloc(conn, len);
|
|
|
|
if (!pkt) {
|
|
|
|
NET_ERR("conn: %p packet allocation failed, len=%d", conn, len);
|
|
|
|
ret = -ENOBUFS;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
ret = tcp_pkt_peek(pkt, conn->send_data, pos, len);
|
|
|
|
if (ret < 0) {
|
|
|
|
tcp_pkt_unref(pkt);
|
|
|
|
ret = -ENOBUFS;
|
|
|
|
goto out;
|
|
|
|
}
|
2020-05-15 18:17:37 +03:00
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
ret = tcp_out_ext(conn, PSH | ACK, pkt, conn->seq + conn->unacked_len);
|
|
|
|
if (ret == 0) {
|
|
|
|
conn->unacked_len += len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The data we want to send, has been moved to the send queue so we
|
|
|
|
* can unref the head net_pkt. If there was an error, we need to remove
|
|
|
|
* the packet anyway.
|
|
|
|
*/
|
|
|
|
tcp_pkt_unref(pkt);
|
2020-05-15 18:17:37 +03:00
|
|
|
|
|
|
|
conn_send_data_dump(conn);
|
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
out:
|
2020-05-15 18:17:37 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Send all queued but unsent data from the send_data packet by packet
|
|
|
|
* until the receiver's window is full. */
|
|
|
|
static int tcp_send_queued_data(struct tcp *conn)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
bool subscribe = false;
|
|
|
|
|
|
|
|
if (conn->data_mode == TCP_DATA_MODE_RESEND) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (tcp_unsent_len(conn) > 0) {
|
|
|
|
|
|
|
|
if (tcp_window_full(conn)) {
|
|
|
|
subscribe = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = tcp_send_data(conn);
|
|
|
|
if (ret < 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (conn->unacked_len) {
|
|
|
|
subscribe = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (k_delayed_work_remaining_get(&conn->send_data_timer)) {
|
|
|
|
subscribe = false;
|
|
|
|
}
|
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
/* If we have out-of-bufs case, then do not start retransmit timer
|
|
|
|
* yet. The socket layer will catch this and resend data if needed.
|
|
|
|
*/
|
|
|
|
if (ret == -ENOBUFS) {
|
|
|
|
NET_DBG("No bufs, cancelling retransmit timer");
|
|
|
|
k_delayed_work_cancel(&conn->send_data_timer);
|
|
|
|
}
|
|
|
|
|
2020-05-15 18:17:37 +03:00
|
|
|
if (subscribe) {
|
|
|
|
conn->send_data_retries = 0;
|
|
|
|
k_delayed_work_submit(&conn->send_data_timer, K_MSEC(tcp_rto));
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcp_resend_data(struct k_work *work)
|
|
|
|
{
|
|
|
|
struct tcp *conn = CONTAINER_OF(work, struct tcp, send_data_timer);
|
|
|
|
bool conn_unref = false;
|
2020-09-10 17:29:40 +03:00
|
|
|
int ret;
|
2020-05-15 18:17:37 +03:00
|
|
|
|
|
|
|
NET_DBG("send_data_retries=%hu", conn->send_data_retries);
|
|
|
|
|
|
|
|
if (conn->send_data_retries >= tcp_retries) {
|
|
|
|
NET_DBG("conn: %p close, data retransmissions exceeded", conn);
|
|
|
|
conn_unref = true;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
conn->data_mode = TCP_DATA_MODE_RESEND;
|
|
|
|
conn->unacked_len = 0;
|
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
ret = tcp_send_data(conn);
|
|
|
|
if (ret == 0) {
|
|
|
|
conn->send_data_retries++;
|
2020-09-18 10:37:32 +03:00
|
|
|
|
|
|
|
if (conn->in_close && conn->send_data_total == 0) {
|
|
|
|
NET_DBG("TCP connection in active close, "
|
|
|
|
"not disposing yet (waiting %dms)",
|
|
|
|
FIN_TIMEOUT_MS);
|
|
|
|
k_delayed_work_submit(&conn->fin_timer, FIN_TIMEOUT);
|
|
|
|
|
|
|
|
conn_state(conn, TCP_FIN_WAIT_1);
|
|
|
|
|
|
|
|
ret = tcp_out_ext(conn, FIN | ACK, NULL,
|
|
|
|
conn->seq + conn->unacked_len);
|
|
|
|
if (ret == 0) {
|
|
|
|
conn_seq(conn, + 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
goto out;
|
|
|
|
}
|
2020-09-10 17:29:40 +03:00
|
|
|
}
|
|
|
|
|
2020-05-15 18:17:37 +03:00
|
|
|
k_delayed_work_submit(&conn->send_data_timer, K_MSEC(tcp_rto));
|
2020-09-10 17:29:40 +03:00
|
|
|
|
2020-05-15 18:17:37 +03:00
|
|
|
out:
|
|
|
|
if (conn_unref) {
|
|
|
|
tcp_conn_unref(conn);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-26 11:24:43 +02:00
|
|
|
static void tcp_timewait_timeout(struct k_work *work)
|
|
|
|
{
|
|
|
|
struct tcp *conn = CONTAINER_OF(work, struct tcp, timewait_timer);
|
|
|
|
|
|
|
|
NET_DBG("conn: %p %s", conn, log_strdup(tcp_conn_state(conn, NULL)));
|
|
|
|
|
2020-09-18 10:37:32 +03:00
|
|
|
/* Extra unref from net_tcp_put() */
|
|
|
|
net_context_unref(conn->context);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcp_fin_timeout(struct k_work *work)
|
|
|
|
{
|
|
|
|
struct tcp *conn = CONTAINER_OF(work, struct tcp, fin_timer);
|
|
|
|
|
|
|
|
NET_DBG("Did not receive FIN in %dms", FIN_TIMEOUT_MS);
|
|
|
|
NET_DBG("conn: %p %s", conn, log_strdup(tcp_conn_state(conn, NULL)));
|
|
|
|
|
|
|
|
/* Extra unref from net_tcp_put() */
|
|
|
|
net_context_unref(conn->context);
|
2020-03-26 11:24:43 +02:00
|
|
|
}
|
|
|
|
|
2019-10-23 10:56:38 +03:00
|
|
|
static void tcp_conn_ref(struct tcp *conn)
|
|
|
|
{
|
|
|
|
int ref_count = atomic_inc(&conn->ref_count) + 1;
|
|
|
|
|
|
|
|
NET_DBG("conn: %p, ref_count: %d", conn, ref_count);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct tcp *tcp_conn_alloc(void)
|
|
|
|
{
|
|
|
|
struct tcp *conn = NULL;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = k_mem_slab_alloc(&tcp_conns_slab, (void **)&conn, K_NO_WAIT);
|
|
|
|
if (ret) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(conn, 0, sizeof(*conn));
|
|
|
|
|
2020-03-11 15:00:47 +02:00
|
|
|
k_mutex_init(&conn->lock);
|
|
|
|
|
2019-10-23 10:56:38 +03:00
|
|
|
conn->state = TCP_LISTEN;
|
|
|
|
|
2020-05-13 14:42:50 +03:00
|
|
|
conn->recv_win = tcp_window;
|
2019-10-23 10:56:38 +03:00
|
|
|
|
2020-05-13 12:11:24 +03:00
|
|
|
conn->seq = (IS_ENABLED(CONFIG_NET_TEST_PROTOCOL) ||
|
|
|
|
IS_ENABLED(CONFIG_NET_TEST)) ? 0 : sys_rand32_get();
|
|
|
|
|
2019-10-23 10:56:38 +03:00
|
|
|
sys_slist_init(&conn->send_queue);
|
|
|
|
|
2020-03-06 13:43:43 +02:00
|
|
|
k_delayed_work_init(&conn->send_timer, tcp_send_process);
|
2019-10-23 10:56:38 +03:00
|
|
|
|
2020-03-26 11:24:43 +02:00
|
|
|
k_delayed_work_init(&conn->timewait_timer, tcp_timewait_timeout);
|
2020-09-18 10:37:32 +03:00
|
|
|
k_delayed_work_init(&conn->fin_timer, tcp_fin_timeout);
|
2020-03-26 11:24:43 +02:00
|
|
|
|
2020-05-15 18:17:37 +03:00
|
|
|
conn->send_data = tcp_pkt_alloc(conn, 0);
|
|
|
|
k_delayed_work_init(&conn->send_data_timer, tcp_resend_data);
|
|
|
|
|
2020-06-08 19:20:22 +03:00
|
|
|
k_sem_init(&conn->connect_sem, 0, UINT_MAX);
|
|
|
|
conn->in_connect = false;
|
|
|
|
|
2019-10-23 10:56:38 +03:00
|
|
|
tcp_conn_ref(conn);
|
|
|
|
|
2020-09-09 12:38:39 +03:00
|
|
|
sys_slist_append(&tcp_conns, &conn->next);
|
2019-10-23 10:56:38 +03:00
|
|
|
out:
|
|
|
|
NET_DBG("conn: %p", conn);
|
|
|
|
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
|
|
|
|
int net_tcp_get(struct net_context *context)
|
|
|
|
{
|
|
|
|
int ret = 0, key = irq_lock();
|
|
|
|
struct tcp *conn;
|
|
|
|
|
|
|
|
conn = tcp_conn_alloc();
|
|
|
|
if (conn == NULL) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Mutually link the net_context and tcp connection */
|
|
|
|
conn->context = context;
|
|
|
|
context->tcp = conn;
|
|
|
|
out:
|
|
|
|
irq_unlock(key);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-10-21 11:12:35 +03:00
|
|
|
static bool tcp_endpoint_cmp(union tcp_endpoint *ep, struct net_pkt *pkt,
|
2020-04-16 11:29:06 +03:00
|
|
|
enum pkt_addr which)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-04-21 14:12:33 +03:00
|
|
|
union tcp_endpoint ep_tmp;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
if (tcp_endpoint_set(&ep_tmp, pkt, which) < 0) {
|
|
|
|
return false;
|
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
return !memcmp(ep, &ep_tmp, tcp_endpoint_len(ep->sa.sa_family));
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool tcp_conn_cmp(struct tcp *conn, struct net_pkt *pkt)
|
|
|
|
{
|
2020-04-21 14:12:33 +03:00
|
|
|
return tcp_endpoint_cmp(&conn->src, pkt, TCP_EP_DST) &&
|
|
|
|
tcp_endpoint_cmp(&conn->dst, pkt, TCP_EP_SRC);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct tcp *tcp_conn_search(struct net_pkt *pkt)
|
|
|
|
{
|
|
|
|
bool found = false;
|
|
|
|
struct tcp *conn;
|
2020-09-09 12:40:50 +03:00
|
|
|
struct tcp *tmp;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-09-09 12:40:50 +03:00
|
|
|
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&tcp_conns, conn, tmp, next) {
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
found = tcp_conn_cmp(conn, pkt);
|
|
|
|
if (found) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return found ? conn : NULL;
|
|
|
|
}
|
|
|
|
|
2019-10-23 10:56:38 +03:00
|
|
|
static struct tcp *tcp_conn_new(struct net_pkt *pkt);
|
|
|
|
|
2019-11-26 16:33:44 +02:00
|
|
|
static enum net_verdict tcp_recv(struct net_conn *net_conn,
|
|
|
|
struct net_pkt *pkt,
|
|
|
|
union net_ip_header *ip,
|
|
|
|
union net_proto_header *proto,
|
|
|
|
void *user_data)
|
2019-10-23 10:56:38 +03:00
|
|
|
{
|
2019-12-14 14:28:56 +02:00
|
|
|
struct tcp *conn;
|
|
|
|
struct tcphdr *th;
|
2019-10-23 10:56:38 +03:00
|
|
|
|
|
|
|
ARG_UNUSED(net_conn);
|
|
|
|
ARG_UNUSED(proto);
|
|
|
|
|
2019-12-14 14:28:56 +02:00
|
|
|
conn = tcp_conn_search(pkt);
|
|
|
|
if (conn) {
|
|
|
|
goto in;
|
|
|
|
}
|
|
|
|
|
|
|
|
th = th_get(pkt);
|
2019-10-23 10:56:38 +03:00
|
|
|
|
2020-03-06 15:41:52 +02:00
|
|
|
if (th->th_flags & SYN && !(th->th_flags & ACK)) {
|
2019-12-14 14:28:56 +02:00
|
|
|
struct tcp *conn_old = ((struct net_context *)user_data)->tcp;
|
2019-10-23 10:56:38 +03:00
|
|
|
|
|
|
|
conn = tcp_conn_new(pkt);
|
2020-09-18 12:04:02 +03:00
|
|
|
if (!conn) {
|
|
|
|
NET_ERR("Cannot allocate a new TCP connection");
|
|
|
|
goto in;
|
|
|
|
}
|
2019-10-23 10:56:38 +03:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
net_ipaddr_copy(&conn_old->context->remote, &conn->dst.sa);
|
2019-10-23 10:56:38 +03:00
|
|
|
|
|
|
|
conn_old->accept_cb(conn->context,
|
2019-12-14 14:28:56 +02:00
|
|
|
&conn_old->context->remote,
|
|
|
|
sizeof(struct sockaddr), 0,
|
|
|
|
conn_old->context);
|
2019-10-23 10:56:38 +03:00
|
|
|
}
|
2019-12-14 14:28:56 +02:00
|
|
|
in:
|
2019-10-23 10:56:38 +03:00
|
|
|
if (conn) {
|
|
|
|
tcp_in(conn, pkt);
|
|
|
|
}
|
2020-02-26 17:57:48 +02:00
|
|
|
|
2019-10-23 10:56:38 +03:00
|
|
|
return NET_DROP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create a new tcp connection, as a part of it, create and register
|
|
|
|
* net_context
|
|
|
|
*/
|
|
|
|
static struct tcp *tcp_conn_new(struct net_pkt *pkt)
|
|
|
|
{
|
|
|
|
struct tcp *conn = NULL;
|
|
|
|
struct net_context *context = NULL;
|
|
|
|
sa_family_t af = net_pkt_family(pkt);
|
2020-04-21 14:16:56 +03:00
|
|
|
struct sockaddr local_addr = { 0 };
|
2019-10-23 10:56:38 +03:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = net_context_get(af, SOCK_STREAM, IPPROTO_TCP, &context);
|
|
|
|
if (ret < 0) {
|
|
|
|
NET_ERR("net_context_get(): %d", ret);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
conn = context->tcp;
|
|
|
|
conn->iface = pkt->iface;
|
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
net_context_set_family(conn->context, net_pkt_family(pkt));
|
2020-02-26 17:57:48 +02:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
if (tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC) < 0) {
|
|
|
|
net_context_unref(context);
|
|
|
|
conn = NULL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST) < 0) {
|
|
|
|
net_context_unref(context);
|
|
|
|
conn = NULL;
|
|
|
|
goto err;
|
|
|
|
}
|
2019-10-23 10:56:38 +03:00
|
|
|
|
2020-03-06 15:12:24 +02:00
|
|
|
NET_DBG("conn: src: %s, dst: %s",
|
2020-04-21 14:12:33 +03:00
|
|
|
log_strdup(net_sprint_addr(conn->src.sa.sa_family,
|
|
|
|
(const void *)&conn->src.sin.sin_addr)),
|
|
|
|
log_strdup(net_sprint_addr(conn->dst.sa.sa_family,
|
|
|
|
(const void *)&conn->dst.sin.sin_addr)));
|
2019-10-23 10:56:38 +03:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
memcpy(&context->remote, &conn->dst, sizeof(context->remote));
|
2019-10-23 10:56:38 +03:00
|
|
|
context->flags |= NET_CONTEXT_REMOTE_ADDR_SET;
|
|
|
|
|
2020-04-21 14:16:56 +03:00
|
|
|
net_sin_ptr(&context->local)->sin_family = af;
|
|
|
|
|
|
|
|
local_addr.sa_family = net_context_get_family(context);
|
|
|
|
|
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV6) &&
|
|
|
|
net_context_get_family(context) == AF_INET6) {
|
|
|
|
if (net_sin6_ptr(&context->local)->sin6_addr) {
|
|
|
|
net_ipaddr_copy(&net_sin6(&local_addr)->sin6_addr,
|
|
|
|
net_sin6_ptr(&context->local)->sin6_addr);
|
|
|
|
}
|
|
|
|
} else if (IS_ENABLED(CONFIG_NET_IPV4) &&
|
|
|
|
net_context_get_family(context) == AF_INET) {
|
|
|
|
if (net_sin_ptr(&context->local)->sin_addr) {
|
|
|
|
net_ipaddr_copy(&net_sin(&local_addr)->sin_addr,
|
|
|
|
net_sin_ptr(&context->local)->sin_addr);
|
|
|
|
}
|
|
|
|
}
|
2019-10-23 10:56:38 +03:00
|
|
|
|
|
|
|
NET_DBG("context: local: %s, remote: %s",
|
2020-04-21 14:16:56 +03:00
|
|
|
log_strdup(net_sprint_addr(
|
|
|
|
local_addr.sa_family,
|
|
|
|
(const void *)&net_sin(&local_addr)->sin_addr)),
|
|
|
|
log_strdup(net_sprint_addr(
|
|
|
|
context->remote.sa_family,
|
|
|
|
(const void *)&net_sin(&context->remote)->sin_addr)));
|
2019-10-23 10:56:38 +03:00
|
|
|
|
|
|
|
ret = net_conn_register(IPPROTO_TCP, af,
|
2020-04-21 14:16:56 +03:00
|
|
|
&context->remote, &local_addr,
|
2020-04-21 14:12:33 +03:00
|
|
|
ntohs(conn->dst.sin.sin_port),/* local port */
|
|
|
|
ntohs(conn->src.sin.sin_port),/* remote port */
|
2019-11-26 16:33:44 +02:00
|
|
|
tcp_recv, context,
|
2019-10-23 10:56:38 +03:00
|
|
|
&context->conn_handler);
|
|
|
|
if (ret < 0) {
|
|
|
|
NET_ERR("net_conn_register(): %d", ret);
|
|
|
|
net_context_unref(context);
|
|
|
|
conn = NULL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
err:
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
|
2019-10-21 11:12:35 +03:00
|
|
|
/* TCP state machine, everything happens here */
|
|
|
|
static void tcp_in(struct tcp *conn, struct net_pkt *pkt)
|
|
|
|
{
|
2020-03-06 15:21:49 +02:00
|
|
|
struct tcphdr *th = pkt ? th_get(pkt) : NULL;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t next = 0, fl = th ? th->th_flags : 0;
|
2020-04-02 14:03:20 +03:00
|
|
|
size_t tcp_options_len = th ? (th->th_off - 5) * 4 : 0;
|
2020-01-11 11:23:42 +02:00
|
|
|
size_t len;
|
2020-09-10 17:29:40 +03:00
|
|
|
int ret;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-03-11 15:00:47 +02:00
|
|
|
k_mutex_lock(&conn->lock, K_FOREVER);
|
|
|
|
|
2020-03-06 15:12:24 +02:00
|
|
|
NET_DBG("%s", log_strdup(tcp_conn_state(conn, pkt)));
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
if (th && th->th_off < 5) {
|
|
|
|
tcp_out(conn, RST);
|
|
|
|
conn_state(conn, TCP_CLOSED);
|
|
|
|
goto next_state;
|
|
|
|
}
|
|
|
|
|
2020-04-22 15:19:52 +03:00
|
|
|
if (tcp_options_len && !tcp_options_check(&conn->recv_options, pkt,
|
|
|
|
tcp_options_len)) {
|
2020-04-02 14:03:20 +03:00
|
|
|
NET_DBG("DROP: Invalid TCP option list");
|
|
|
|
tcp_out(conn, RST);
|
|
|
|
conn_state(conn, TCP_CLOSED);
|
|
|
|
goto next_state;
|
|
|
|
}
|
|
|
|
|
2020-05-13 14:42:50 +03:00
|
|
|
if (th) {
|
2020-09-16 17:47:26 +03:00
|
|
|
size_t max_win;
|
|
|
|
|
2020-05-13 14:42:50 +03:00
|
|
|
conn->send_win = ntohs(th->th_win);
|
2020-09-16 17:47:26 +03:00
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_NET_TCP_MAX_SEND_WINDOW_SIZE)
|
|
|
|
if (CONFIG_NET_TCP_MAX_SEND_WINDOW_SIZE) {
|
|
|
|
max_win = CONFIG_NET_TCP_MAX_SEND_WINDOW_SIZE;
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
/* Adjust the window so that we do not run out of bufs
|
|
|
|
* while waiting acks.
|
|
|
|
*/
|
|
|
|
max_win = (CONFIG_NET_BUF_TX_COUNT *
|
|
|
|
CONFIG_NET_BUF_DATA_SIZE) / 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
max_win = MAX(max_win, NET_IPV6_MTU);
|
|
|
|
if ((size_t)conn->send_win > max_win) {
|
|
|
|
NET_DBG("Lowering send window from %zd to %zd",
|
|
|
|
(size_t)conn->send_win, max_win);
|
|
|
|
|
|
|
|
conn->send_win = max_win;
|
|
|
|
}
|
2020-05-13 14:42:50 +03:00
|
|
|
}
|
|
|
|
|
2019-10-21 11:12:35 +03:00
|
|
|
if (FL(&fl, &, RST)) {
|
|
|
|
conn_state(conn, TCP_CLOSED);
|
|
|
|
}
|
|
|
|
next_state:
|
2020-01-11 11:23:42 +02:00
|
|
|
len = pkt ? tcp_data_len(pkt) : 0;
|
|
|
|
|
2019-10-21 11:12:35 +03:00
|
|
|
switch (conn->state) {
|
|
|
|
case TCP_LISTEN:
|
|
|
|
if (FL(&fl, ==, SYN)) {
|
|
|
|
conn_ack(conn, th_seq(th) + 1); /* capture peer's isn */
|
|
|
|
tcp_out(conn, SYN | ACK);
|
|
|
|
conn_seq(conn, + 1);
|
|
|
|
next = TCP_SYN_RECEIVED;
|
|
|
|
} else {
|
|
|
|
tcp_out(conn, SYN);
|
|
|
|
conn_seq(conn, + 1);
|
|
|
|
next = TCP_SYN_SENT;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case TCP_SYN_RECEIVED:
|
2020-01-11 13:04:41 +02:00
|
|
|
if (FL(&fl, &, ACK, th_ack(th) == conn->seq &&
|
|
|
|
th_seq(th) == conn->ack)) {
|
2019-10-21 11:12:35 +03:00
|
|
|
tcp_send_timer_cancel(conn);
|
|
|
|
next = TCP_ESTABLISHED;
|
2019-11-27 15:09:44 +02:00
|
|
|
net_context_set_state(conn->context,
|
|
|
|
NET_CONTEXT_CONNECTED);
|
2020-06-08 17:50:13 +03:00
|
|
|
|
2020-01-11 13:10:05 +02:00
|
|
|
if (len) {
|
2020-05-13 14:07:07 +03:00
|
|
|
if (tcp_data_get(conn, pkt) < 0) {
|
|
|
|
break;
|
|
|
|
}
|
2020-01-11 13:11:36 +02:00
|
|
|
conn_ack(conn, + len);
|
|
|
|
tcp_out(conn, ACK);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case TCP_SYN_SENT:
|
|
|
|
/* if we are in SYN SENT and receive only a SYN without an
|
|
|
|
* ACK , shouldn't we go to SYN RECEIVED state? See Figure
|
|
|
|
* 6 of RFC 793
|
|
|
|
*/
|
2020-06-08 17:50:13 +03:00
|
|
|
if (FL(&fl, &, SYN | ACK, th && th_ack(th) == conn->seq)) {
|
2019-10-21 11:12:35 +03:00
|
|
|
tcp_send_timer_cancel(conn);
|
2020-06-08 17:50:13 +03:00
|
|
|
conn_ack(conn, th_seq(th) + 1);
|
|
|
|
if (len) {
|
2020-05-13 14:07:07 +03:00
|
|
|
if (tcp_data_get(conn, pkt) < 0) {
|
|
|
|
break;
|
|
|
|
}
|
2020-06-08 17:50:13 +03:00
|
|
|
conn_ack(conn, + len);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2020-06-08 19:20:22 +03:00
|
|
|
k_sem_give(&conn->connect_sem);
|
2020-06-08 17:50:13 +03:00
|
|
|
next = TCP_ESTABLISHED;
|
|
|
|
net_context_set_state(conn->context,
|
|
|
|
NET_CONTEXT_CONNECTED);
|
|
|
|
tcp_out(conn, ACK);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case TCP_ESTABLISHED:
|
|
|
|
/* full-close */
|
2020-03-09 18:19:13 +02:00
|
|
|
if (th && FL(&fl, ==, (FIN | ACK), th_seq(th) == conn->ack)) {
|
2020-04-24 15:49:29 +03:00
|
|
|
conn_ack(conn, + 1);
|
|
|
|
tcp_out(conn, FIN | ACK);
|
|
|
|
next = TCP_LAST_ACK;
|
|
|
|
break;
|
|
|
|
} else if (th && FL(&fl, ==, FIN, th_seq(th) == conn->ack)) {
|
2019-10-21 11:12:35 +03:00
|
|
|
conn_ack(conn, + 1);
|
|
|
|
tcp_out(conn, ACK);
|
|
|
|
next = TCP_CLOSE_WAIT;
|
|
|
|
break;
|
2020-09-03 17:07:23 +03:00
|
|
|
} else if (th && FL(&fl, ==, (FIN | ACK | PSH),
|
|
|
|
th_seq(th) == conn->ack)) {
|
2020-09-04 15:20:02 +03:00
|
|
|
if (len) {
|
|
|
|
if (tcp_data_get(conn, pkt) < 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-03 17:07:23 +03:00
|
|
|
conn_ack(conn, + len + 1);
|
|
|
|
tcp_out(conn, FIN | ACK);
|
|
|
|
next = TCP_LAST_ACK;
|
|
|
|
break;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2020-04-24 15:49:29 +03:00
|
|
|
|
2020-05-15 18:17:37 +03:00
|
|
|
if (th && net_tcp_seq_cmp(th_ack(th), conn->seq) > 0) {
|
2020-05-27 11:26:57 -05:00
|
|
|
uint32_t len_acked = th_ack(th) - conn->seq;
|
2020-05-15 18:17:37 +03:00
|
|
|
|
|
|
|
NET_DBG("conn: %p len_acked=%u", conn, len_acked);
|
|
|
|
|
|
|
|
if ((conn->send_data_total < len_acked) ||
|
|
|
|
(tcp_pkt_pull(conn->send_data,
|
|
|
|
len_acked) < 0)) {
|
|
|
|
NET_ERR("conn: %p, Invalid len_acked=%u "
|
|
|
|
"(total=%zu)", conn, len_acked,
|
|
|
|
conn->send_data_total);
|
|
|
|
tcp_out(conn, RST);
|
|
|
|
conn_state(conn, TCP_CLOSED);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
conn->send_data_total -= len_acked;
|
|
|
|
conn->unacked_len -= len_acked;
|
|
|
|
conn_seq(conn, + len_acked);
|
|
|
|
|
|
|
|
conn_send_data_dump(conn);
|
|
|
|
|
|
|
|
if (!k_delayed_work_remaining_get(&conn->send_data_timer)) {
|
2020-09-10 17:29:40 +03:00
|
|
|
NET_DBG("conn: %p, Missing a subscription "
|
2020-05-15 18:17:37 +03:00
|
|
|
"of the send_data queue timer", conn);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
conn->send_data_retries = 0;
|
|
|
|
k_delayed_work_cancel(&conn->send_data_timer);
|
|
|
|
if (conn->data_mode == TCP_DATA_MODE_RESEND) {
|
|
|
|
conn->unacked_len = 0;
|
|
|
|
}
|
|
|
|
conn->data_mode = TCP_DATA_MODE_SEND;
|
|
|
|
|
2020-09-18 10:37:32 +03:00
|
|
|
/* We are closing the connection, send a FIN to peer */
|
|
|
|
if (conn->in_close && conn->send_data_total == 0) {
|
|
|
|
tcp_send_timer_cancel(conn);
|
|
|
|
next = TCP_FIN_WAIT_1;
|
|
|
|
|
|
|
|
tcp_out(conn, FIN | ACK);
|
|
|
|
conn_seq(conn, + 1);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
ret = tcp_send_queued_data(conn);
|
|
|
|
if (ret < 0 && ret != -ENOBUFS) {
|
2020-05-15 18:17:37 +03:00
|
|
|
tcp_out(conn, RST);
|
|
|
|
conn_state(conn, TCP_CLOSED);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-09 12:25:11 +03:00
|
|
|
if (th && len) {
|
2020-03-09 18:19:13 +02:00
|
|
|
if (th_seq(th) == conn->ack) {
|
2020-05-13 14:07:07 +03:00
|
|
|
if (tcp_data_get(conn, pkt) < 0) {
|
|
|
|
break;
|
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
conn_ack(conn, + len);
|
|
|
|
tcp_out(conn, ACK);
|
2020-05-13 14:33:47 +03:00
|
|
|
} else if (net_tcp_seq_greater(conn->ack, th_seq(th))) {
|
2020-03-09 18:19:13 +02:00
|
|
|
tcp_out(conn, ACK); /* peer has resent */
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
}
|
2020-04-24 15:49:29 +03:00
|
|
|
break;
|
2019-10-21 11:12:35 +03:00
|
|
|
case TCP_CLOSE_WAIT:
|
2020-04-24 15:49:29 +03:00
|
|
|
tcp_out(conn, FIN);
|
2019-10-21 11:12:35 +03:00
|
|
|
next = TCP_LAST_ACK;
|
|
|
|
break;
|
|
|
|
case TCP_LAST_ACK:
|
2020-04-24 15:49:29 +03:00
|
|
|
if (th && FL(&fl, ==, ACK, th_seq(th) == conn->ack)) {
|
2019-10-21 11:12:35 +03:00
|
|
|
tcp_send_timer_cancel(conn);
|
|
|
|
next = TCP_CLOSED;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case TCP_CLOSED:
|
2019-10-23 10:56:38 +03:00
|
|
|
tcp_conn_unref(conn);
|
2019-10-21 11:12:35 +03:00
|
|
|
break;
|
2020-03-25 18:35:47 +02:00
|
|
|
case TCP_FIN_WAIT_1:
|
2020-04-07 15:32:08 +03:00
|
|
|
if (th && FL(&fl, ==, (FIN | ACK), th_seq(th) == conn->ack)) {
|
|
|
|
tcp_send_timer_cancel(conn);
|
2020-04-20 14:33:24 +03:00
|
|
|
conn_ack(conn, + 1);
|
|
|
|
tcp_out(conn, ACK);
|
2020-04-07 15:32:08 +03:00
|
|
|
next = TCP_TIME_WAIT;
|
2020-04-19 18:16:18 +03:00
|
|
|
} else if (th && FL(&fl, ==, FIN, th_seq(th) == conn->ack)) {
|
|
|
|
tcp_send_timer_cancel(conn);
|
|
|
|
conn_ack(conn, + 1);
|
|
|
|
tcp_out(conn, ACK);
|
|
|
|
next = TCP_CLOSING;
|
|
|
|
} else if (th && FL(&fl, ==, ACK, th_seq(th) == conn->ack)) {
|
|
|
|
tcp_send_timer_cancel(conn);
|
|
|
|
next = TCP_FIN_WAIT_2;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case TCP_FIN_WAIT_2:
|
2020-09-18 10:37:32 +03:00
|
|
|
if (th && (FL(&fl, ==, FIN, th_seq(th) == conn->ack) ||
|
|
|
|
FL(&fl, ==, FIN | ACK, th_seq(th) == conn->ack))) {
|
|
|
|
/* Received FIN on FIN_WAIT_2, so cancel the timer */
|
|
|
|
k_delayed_work_cancel(&conn->fin_timer);
|
|
|
|
|
2020-04-19 18:16:18 +03:00
|
|
|
conn_ack(conn, + 1);
|
|
|
|
tcp_out(conn, ACK);
|
|
|
|
next = TCP_TIME_WAIT;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case TCP_CLOSING:
|
|
|
|
if (th && FL(&fl, ==, ACK, th_seq(th) == conn->ack)) {
|
|
|
|
tcp_send_timer_cancel(conn);
|
|
|
|
next = TCP_TIME_WAIT;
|
2020-04-07 15:32:08 +03:00
|
|
|
}
|
|
|
|
break;
|
2020-03-26 11:24:43 +02:00
|
|
|
case TCP_TIME_WAIT:
|
|
|
|
k_delayed_work_submit(&conn->timewait_timer,
|
|
|
|
K_MSEC(CONFIG_NET_TCP_TIME_WAIT_DELAY));
|
|
|
|
break;
|
2019-10-21 11:12:35 +03:00
|
|
|
default:
|
2020-01-08 09:42:23 +02:00
|
|
|
NET_ASSERT(false, "%s is unimplemented",
|
|
|
|
tcp_state_to_str(conn->state, true));
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (next) {
|
2019-12-12 19:20:06 +02:00
|
|
|
pkt = NULL;
|
2019-10-21 11:12:35 +03:00
|
|
|
th = NULL;
|
|
|
|
conn_state(conn, next);
|
|
|
|
next = 0;
|
|
|
|
goto next_state;
|
|
|
|
}
|
2020-03-11 15:00:47 +02:00
|
|
|
|
|
|
|
k_mutex_unlock(&conn->lock);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
2020-03-27 13:09:10 +02:00
|
|
|
/* Active connection close: send FIN and go to FIN_WAIT_1 state */
|
2019-10-21 11:12:35 +03:00
|
|
|
int net_tcp_put(struct net_context *context)
|
|
|
|
{
|
2019-08-15 15:31:11 +03:00
|
|
|
struct tcp *conn = context->tcp;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-09-18 19:24:03 +03:00
|
|
|
if (!conn) {
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
2020-09-18 10:37:32 +03:00
|
|
|
k_mutex_lock(&conn->lock, K_FOREVER);
|
|
|
|
|
2020-03-06 15:12:24 +02:00
|
|
|
NET_DBG("%s", conn ? log_strdup(tcp_conn_state(conn, NULL)) : "");
|
2020-09-17 13:17:44 +03:00
|
|
|
NET_DBG("context %p %s", context,
|
|
|
|
log_strdup(({ const char *state = net_context_state(context);
|
|
|
|
state ? state : "<unknown>"; })));
|
2019-08-15 15:31:11 +03:00
|
|
|
|
2020-06-08 15:48:15 +03:00
|
|
|
if (conn && conn->state == TCP_ESTABLISHED) {
|
2020-09-18 10:37:32 +03:00
|
|
|
/* Send all remaining data if possible. */
|
|
|
|
if (conn->send_data_total > 0) {
|
2020-09-22 11:06:26 -05:00
|
|
|
NET_DBG("conn %p pending %zu bytes", conn,
|
2020-09-18 10:37:32 +03:00
|
|
|
conn->send_data_total);
|
|
|
|
conn->in_close = true;
|
|
|
|
|
|
|
|
/* How long to wait until all the data has been sent?
|
|
|
|
*/
|
|
|
|
k_delayed_work_submit(&conn->send_data_timer,
|
|
|
|
K_MSEC(tcp_rto));
|
|
|
|
} else {
|
|
|
|
int ret;
|
2020-04-20 14:33:24 +03:00
|
|
|
|
2020-09-18 10:37:32 +03:00
|
|
|
NET_DBG("TCP connection in active close, not "
|
|
|
|
"disposing yet (waiting %dms)", FIN_TIMEOUT_MS);
|
|
|
|
k_delayed_work_submit(&conn->fin_timer, FIN_TIMEOUT);
|
2020-04-20 14:33:24 +03:00
|
|
|
|
2020-09-18 10:37:32 +03:00
|
|
|
ret = tcp_out_ext(conn, FIN | ACK, NULL,
|
|
|
|
conn->seq + conn->unacked_len);
|
|
|
|
if (ret == 0) {
|
|
|
|
conn_seq(conn, + 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
conn_state(conn, TCP_FIN_WAIT_1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make sure we do not delete the connection yet until we have
|
|
|
|
* sent the final ACK.
|
|
|
|
*/
|
|
|
|
net_context_ref(context);
|
2019-08-15 15:31:11 +03:00
|
|
|
}
|
|
|
|
|
2020-09-18 10:37:32 +03:00
|
|
|
k_mutex_unlock(&conn->lock);
|
|
|
|
|
2019-08-15 15:31:11 +03:00
|
|
|
net_context_unref(context);
|
|
|
|
|
|
|
|
return 0;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
int net_tcp_listen(struct net_context *context)
|
|
|
|
{
|
|
|
|
/* when created, tcp connections are in state TCP_LISTEN */
|
|
|
|
net_context_set_state(context, NET_CONTEXT_LISTENING);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
int net_tcp_update_recv_wnd(struct net_context *context, int32_t delta)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
|
|
|
ARG_UNUSED(context);
|
|
|
|
ARG_UNUSED(delta);
|
|
|
|
|
|
|
|
return -EPROTONOSUPPORT;
|
|
|
|
}
|
|
|
|
|
2020-05-15 18:17:37 +03:00
|
|
|
/* net_context queues the outgoing data for the TCP connection */
|
2020-03-09 14:41:15 +02:00
|
|
|
int net_tcp_queue_data(struct net_context *context, struct net_pkt *pkt)
|
2019-10-19 10:08:06 +03:00
|
|
|
{
|
|
|
|
struct tcp *conn = context->tcp;
|
2020-09-10 17:29:40 +03:00
|
|
|
struct net_buf *orig_buf = NULL;
|
2020-03-09 14:41:15 +02:00
|
|
|
int ret = 0;
|
2020-05-15 18:17:37 +03:00
|
|
|
size_t len;
|
2019-10-19 10:08:06 +03:00
|
|
|
|
2020-03-09 14:41:15 +02:00
|
|
|
if (!conn || conn->state != TCP_ESTABLISHED) {
|
2020-09-10 17:29:40 +03:00
|
|
|
return -ENOTCONN;
|
2019-10-19 10:08:06 +03:00
|
|
|
}
|
|
|
|
|
2020-05-15 18:17:37 +03:00
|
|
|
k_mutex_lock(&conn->lock, K_FOREVER);
|
|
|
|
|
2020-09-16 19:41:31 +03:00
|
|
|
if (tcp_window_full(conn)) {
|
2020-09-18 10:39:17 +03:00
|
|
|
/* Trigger resend if the timer is not active */
|
|
|
|
if (!k_delayed_work_remaining_get(&conn->send_data_timer)) {
|
|
|
|
NET_DBG("Window full, trigger resend");
|
|
|
|
tcp_resend_data(
|
|
|
|
(struct k_work *)&conn->send_data_timer);
|
|
|
|
}
|
|
|
|
|
2020-09-16 19:41:31 +03:00
|
|
|
ret = -EAGAIN;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2020-05-15 18:17:37 +03:00
|
|
|
len = net_pkt_get_len(pkt);
|
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
if (conn->send_data->buffer) {
|
|
|
|
orig_buf = net_buf_frag_last(conn->send_data->buffer);
|
|
|
|
}
|
|
|
|
|
2020-05-15 18:17:37 +03:00
|
|
|
net_pkt_append_buffer(conn->send_data, pkt->buffer);
|
|
|
|
conn->send_data_total += len;
|
|
|
|
NET_DBG("conn: %p Queued %zu bytes (total %zu)", conn, len,
|
|
|
|
conn->send_data_total);
|
|
|
|
pkt->buffer = NULL;
|
|
|
|
|
|
|
|
ret = tcp_send_queued_data(conn);
|
2020-09-10 17:29:40 +03:00
|
|
|
if (ret < 0 && ret != -ENOBUFS) {
|
2020-05-15 18:17:37 +03:00
|
|
|
tcp_conn_unref(conn);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2020-09-10 17:29:40 +03:00
|
|
|
if (ret == -ENOBUFS) {
|
|
|
|
/* Restore the original data so that we do not resend the pkt
|
|
|
|
* data multiple times.
|
|
|
|
*/
|
|
|
|
conn->send_data_total -= len;
|
|
|
|
|
|
|
|
if (orig_buf) {
|
|
|
|
pkt->buffer = orig_buf->frags;
|
|
|
|
orig_buf->frags = NULL;
|
|
|
|
} else {
|
|
|
|
pkt->buffer = conn->send_data->buffer;
|
|
|
|
conn->send_data->buffer = NULL;
|
|
|
|
}
|
2020-09-10 17:30:57 +03:00
|
|
|
} else {
|
|
|
|
/* We should not free the pkt if there was an error. It will be
|
|
|
|
* freed in net_context.c:context_sendto()
|
|
|
|
*/
|
|
|
|
tcp_pkt_unref(pkt);
|
2020-09-10 17:29:40 +03:00
|
|
|
}
|
|
|
|
out:
|
2020-05-15 18:17:37 +03:00
|
|
|
k_mutex_unlock(&conn->lock);
|
2020-09-10 17:29:40 +03:00
|
|
|
|
2019-10-19 10:08:06 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* net context is about to send out queued data - inform caller only */
|
2019-10-21 11:12:35 +03:00
|
|
|
int net_tcp_send_data(struct net_context *context, net_context_send_cb_t cb,
|
|
|
|
void *user_data)
|
|
|
|
{
|
2019-10-19 10:08:06 +03:00
|
|
|
if (cb) {
|
|
|
|
cb(context, 0, user_data);
|
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-10-22 11:26:54 +03:00
|
|
|
/* When connect() is called on a TCP socket, register the socket for incoming
|
|
|
|
* traffic with net context and give the TCP packet receiving function, which
|
|
|
|
* in turn will call tcp_in() to deliver the TCP packet to the stack
|
|
|
|
*/
|
|
|
|
int net_tcp_connect(struct net_context *context,
|
|
|
|
const struct sockaddr *remote_addr,
|
|
|
|
struct sockaddr *local_addr,
|
2020-05-27 11:26:57 -05:00
|
|
|
uint16_t remote_port, uint16_t local_port,
|
2020-04-03 13:08:19 +03:00
|
|
|
k_timeout_t timeout, net_context_connect_cb_t cb,
|
|
|
|
void *user_data)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-02-26 14:46:26 +02:00
|
|
|
struct tcp *conn;
|
2020-05-27 16:16:01 +03:00
|
|
|
int ret = 0;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-03-09 21:19:34 +02:00
|
|
|
NET_DBG("context: %p, local: %s, remote: %s", context,
|
2020-04-21 14:16:56 +03:00
|
|
|
log_strdup(net_sprint_addr(
|
|
|
|
local_addr->sa_family,
|
|
|
|
(const void *)&net_sin(local_addr)->sin_addr)),
|
|
|
|
log_strdup(net_sprint_addr(
|
|
|
|
remote_addr->sa_family,
|
|
|
|
(const void *)&net_sin(remote_addr)->sin_addr)));
|
2020-03-09 21:19:34 +02:00
|
|
|
|
2020-02-26 14:46:26 +02:00
|
|
|
conn = context->tcp;
|
|
|
|
conn->iface = net_context_get_iface(context);
|
|
|
|
|
2019-10-22 11:26:54 +03:00
|
|
|
switch (net_context_get_family(context)) {
|
2020-02-26 14:46:26 +02:00
|
|
|
const struct in_addr *ip4;
|
2020-03-09 21:19:34 +02:00
|
|
|
const struct in6_addr *ip6;
|
2020-02-26 14:46:26 +02:00
|
|
|
|
2019-10-22 11:26:54 +03:00
|
|
|
case AF_INET:
|
2020-04-21 14:12:33 +03:00
|
|
|
memset(&conn->src, 0, sizeof(struct sockaddr_in));
|
|
|
|
memset(&conn->dst, 0, sizeof(struct sockaddr_in));
|
2020-02-26 14:46:26 +02:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
conn->src.sa.sa_family = AF_INET;
|
|
|
|
conn->dst.sa.sa_family = AF_INET;
|
2020-02-26 14:46:26 +02:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
conn->dst.sin.sin_port = remote_port;
|
|
|
|
conn->src.sin.sin_port = local_port;
|
2020-02-26 14:46:26 +02:00
|
|
|
|
|
|
|
/* we have to select the source address here as
|
|
|
|
* net_context_create_ipv4_new() is not called in the packet
|
|
|
|
* output chain
|
|
|
|
*/
|
2020-04-21 14:12:33 +03:00
|
|
|
ip4 = net_if_ipv4_select_src_addr(
|
|
|
|
net_context_get_iface(context),
|
|
|
|
&net_sin(remote_addr)->sin_addr);
|
|
|
|
conn->src.sin.sin_addr = *ip4;
|
|
|
|
net_ipaddr_copy(&conn->dst.sin.sin_addr,
|
|
|
|
&net_sin(remote_addr)->sin_addr);
|
2019-10-22 11:26:54 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case AF_INET6:
|
2020-04-21 14:12:33 +03:00
|
|
|
memset(&conn->src, 0, sizeof(struct sockaddr_in6));
|
|
|
|
memset(&conn->dst, 0, sizeof(struct sockaddr_in6));
|
2020-03-09 21:19:34 +02:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
conn->src.sin6.sin6_family = AF_INET6;
|
|
|
|
conn->dst.sin6.sin6_family = AF_INET6;
|
2020-03-09 21:19:34 +02:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
conn->dst.sin6.sin6_port = remote_port;
|
|
|
|
conn->src.sin6.sin6_port = local_port;
|
2020-03-09 21:19:34 +02:00
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
ip6 = net_if_ipv6_select_src_addr(
|
|
|
|
net_context_get_iface(context),
|
|
|
|
&net_sin6(remote_addr)->sin6_addr);
|
|
|
|
conn->src.sin6.sin6_addr = *ip6;
|
|
|
|
net_ipaddr_copy(&conn->dst.sin6.sin6_addr,
|
|
|
|
&net_sin6(remote_addr)->sin6_addr);
|
2019-10-22 11:26:54 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2020-05-27 16:16:01 +03:00
|
|
|
ret = -EPROTONOSUPPORT;
|
2019-10-22 11:26:54 +03:00
|
|
|
}
|
|
|
|
|
2020-04-21 14:12:33 +03:00
|
|
|
NET_DBG("conn: %p src: %s, dst: %s", conn,
|
|
|
|
log_strdup(net_sprint_addr(conn->src.sa.sa_family,
|
|
|
|
(const void *)&conn->src.sin.sin_addr)),
|
|
|
|
log_strdup(net_sprint_addr(conn->dst.sa.sa_family,
|
|
|
|
(const void *)&conn->dst.sin.sin_addr)));
|
2020-03-09 21:19:34 +02:00
|
|
|
|
2019-10-22 11:26:54 +03:00
|
|
|
net_context_set_state(context, NET_CONTEXT_CONNECTING);
|
|
|
|
|
|
|
|
ret = net_conn_register(net_context_get_ip_proto(context),
|
|
|
|
net_context_get_family(context),
|
|
|
|
remote_addr, local_addr,
|
|
|
|
ntohs(remote_port), ntohs(local_port),
|
2019-11-26 16:33:44 +02:00
|
|
|
tcp_recv, context,
|
2019-10-22 11:26:54 +03:00
|
|
|
&context->conn_handler);
|
|
|
|
if (ret < 0) {
|
2020-05-27 16:16:01 +03:00
|
|
|
goto out;
|
2019-10-22 11:26:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Input of a (nonexistent) packet with no flags set will cause
|
|
|
|
* a TCP connection to be established
|
|
|
|
*/
|
|
|
|
tcp_in(conn, NULL);
|
|
|
|
|
2020-06-08 19:20:22 +03:00
|
|
|
if (!IS_ENABLED(CONFIG_NET_TEST_PROTOCOL)) {
|
|
|
|
conn->in_connect = true;
|
|
|
|
|
|
|
|
if (k_sem_take(&conn->connect_sem, timeout) != 0 &&
|
|
|
|
conn->state != TCP_ESTABLISHED) {
|
|
|
|
conn->in_connect = false;
|
|
|
|
tcp_conn_unref(conn);
|
|
|
|
ret = -ETIMEDOUT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
conn->in_connect = false;
|
|
|
|
}
|
2020-05-27 16:16:01 +03:00
|
|
|
out:
|
|
|
|
NET_DBG("conn: %p, ret=%d", conn, ret);
|
|
|
|
|
|
|
|
return ret;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
int net_tcp_accept(struct net_context *context, net_tcp_accept_cb_t cb,
|
|
|
|
void *user_data)
|
|
|
|
{
|
2019-10-22 11:26:54 +03:00
|
|
|
struct tcp *conn = context->tcp;
|
|
|
|
struct sockaddr local_addr = { };
|
2020-05-27 11:26:57 -05:00
|
|
|
uint16_t local_port, remote_port;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2020-04-02 18:14:50 +03:00
|
|
|
if (!conn) {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2019-10-22 11:26:54 +03:00
|
|
|
|
2020-04-02 18:14:50 +03:00
|
|
|
NET_DBG("context: %p, tcp: %p, cb: %p", context, conn, cb);
|
2019-10-22 11:26:54 +03:00
|
|
|
|
2020-04-02 18:14:50 +03:00
|
|
|
if (conn->state != TCP_LISTEN) {
|
2019-10-22 11:26:54 +03:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-04-02 18:14:50 +03:00
|
|
|
conn->accept_cb = cb;
|
2019-10-22 11:26:54 +03:00
|
|
|
local_addr.sa_family = net_context_get_family(context);
|
|
|
|
|
|
|
|
switch (local_addr.sa_family) {
|
|
|
|
struct sockaddr_in *in;
|
|
|
|
struct sockaddr_in6 *in6;
|
|
|
|
|
|
|
|
case AF_INET:
|
|
|
|
in = (struct sockaddr_in *)&local_addr;
|
|
|
|
|
|
|
|
if (net_sin_ptr(&context->local)->sin_addr) {
|
|
|
|
net_ipaddr_copy(&in->sin_addr,
|
|
|
|
net_sin_ptr(&context->local)->sin_addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
in->sin_port =
|
|
|
|
net_sin((struct sockaddr *)&context->local)->sin_port;
|
|
|
|
local_port = ntohs(in->sin_port);
|
|
|
|
remote_port = ntohs(net_sin(&context->remote)->sin_port);
|
|
|
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
case AF_INET6:
|
|
|
|
in6 = (struct sockaddr_in6 *)&local_addr;
|
|
|
|
|
|
|
|
if (net_sin6_ptr(&context->local)->sin6_addr) {
|
|
|
|
net_ipaddr_copy(&in6->sin6_addr,
|
|
|
|
net_sin6_ptr(&context->local)->sin6_addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
in6->sin6_port =
|
|
|
|
net_sin6((struct sockaddr *)&context->local)->sin6_port;
|
|
|
|
local_port = ntohs(in6->sin6_port);
|
|
|
|
remote_port = ntohs(net_sin6(&context->remote)->sin6_port);
|
|
|
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
context->user_data = user_data;
|
|
|
|
|
2020-07-31 14:43:49 +03:00
|
|
|
/* Remove the temporary connection handler and register
|
|
|
|
* a proper now as we have an established connection.
|
|
|
|
*/
|
|
|
|
net_conn_unregister(context->conn_handler);
|
|
|
|
|
2019-10-22 11:26:54 +03:00
|
|
|
return net_conn_register(net_context_get_ip_proto(context),
|
|
|
|
local_addr.sa_family,
|
|
|
|
context->flags & NET_CONTEXT_REMOTE_ADDR_SET ?
|
|
|
|
&context->remote : NULL,
|
|
|
|
&local_addr,
|
|
|
|
remote_port, local_port,
|
2019-11-26 16:33:44 +02:00
|
|
|
tcp_recv, context,
|
2019-10-22 11:26:54 +03:00
|
|
|
&context->conn_handler);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
int net_tcp_recv(struct net_context *context, net_context_recv_cb_t cb,
|
|
|
|
void *user_data)
|
|
|
|
{
|
2019-10-22 15:04:02 +03:00
|
|
|
struct tcp *conn = context->tcp;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2019-10-22 15:04:02 +03:00
|
|
|
NET_DBG("context: %p, cb: %p, user_data: %p", context, cb, user_data);
|
|
|
|
|
|
|
|
context->recv_cb = cb;
|
|
|
|
|
|
|
|
if (conn) {
|
|
|
|
conn->recv_user_data = user_data;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
int net_tcp_finalize(struct net_pkt *pkt)
|
|
|
|
{
|
2019-10-22 18:06:50 +03:00
|
|
|
NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);
|
|
|
|
struct net_tcp_hdr *tcp_hdr;
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2019-10-22 18:06:50 +03:00
|
|
|
tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt, &tcp_access);
|
|
|
|
if (!tcp_hdr) {
|
|
|
|
return -ENOBUFS;
|
|
|
|
}
|
|
|
|
|
|
|
|
tcp_hdr->chksum = 0U;
|
|
|
|
|
|
|
|
if (net_if_need_calc_tx_checksum(net_pkt_iface(pkt))) {
|
|
|
|
tcp_hdr->chksum = net_calc_chksum_tcp(pkt);
|
|
|
|
}
|
|
|
|
|
|
|
|
return net_pkt_set_data(pkt, &tcp_access);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
struct net_tcp_hdr *net_tcp_input(struct net_pkt *pkt,
|
|
|
|
struct net_pkt_data_access *tcp_access)
|
|
|
|
{
|
2019-10-22 18:07:46 +03:00
|
|
|
struct net_tcp_hdr *tcp_hdr;
|
|
|
|
|
|
|
|
if (IS_ENABLED(CONFIG_NET_TCP_CHECKSUM) &&
|
|
|
|
net_if_need_calc_rx_checksum(net_pkt_iface(pkt)) &&
|
|
|
|
net_calc_chksum_tcp(pkt) != 0U) {
|
|
|
|
NET_DBG("DROP: checksum mismatch");
|
|
|
|
goto drop;
|
|
|
|
}
|
|
|
|
|
|
|
|
tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt, tcp_access);
|
|
|
|
if (tcp_hdr && !net_pkt_set_data(pkt, tcp_access)) {
|
|
|
|
return tcp_hdr;
|
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
|
2019-10-22 18:07:46 +03:00
|
|
|
drop:
|
|
|
|
net_stats_update_tcp_seg_chkerr(net_pkt_iface(pkt));
|
2019-10-21 11:12:35 +03:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(CONFIG_NET_TEST_PROTOCOL)
|
2020-03-10 14:14:20 +02:00
|
|
|
static enum net_verdict tcp_input(struct net_conn *net_conn,
|
|
|
|
struct net_pkt *pkt,
|
|
|
|
union net_ip_header *ip,
|
|
|
|
union net_proto_header *proto,
|
|
|
|
void *user_data)
|
|
|
|
{
|
|
|
|
struct tcphdr *th = th_get(pkt);
|
|
|
|
|
|
|
|
if (th) {
|
|
|
|
struct tcp *conn = tcp_conn_search(pkt);
|
|
|
|
|
|
|
|
if (conn == NULL && SYN == th->th_flags) {
|
|
|
|
struct net_context *context =
|
|
|
|
tcp_calloc(1, sizeof(struct net_context));
|
|
|
|
net_tcp_get(context);
|
2020-04-21 14:12:33 +03:00
|
|
|
net_context_set_family(context, net_pkt_family(pkt));
|
2020-03-10 14:14:20 +02:00
|
|
|
conn = context->tcp;
|
2020-04-21 14:12:33 +03:00
|
|
|
tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC);
|
|
|
|
tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST);
|
2020-03-10 14:14:20 +02:00
|
|
|
/* Make an extra reference, the sanity check suite
|
|
|
|
* will delete the connection explicitly
|
|
|
|
*/
|
|
|
|
tcp_conn_ref(conn);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (conn) {
|
|
|
|
conn->iface = pkt->iface;
|
|
|
|
tcp_in(conn, pkt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NET_DROP;
|
|
|
|
}
|
|
|
|
|
|
|
|
static size_t tp_tcp_recv_cb(struct tcp *conn, struct net_pkt *pkt)
|
|
|
|
{
|
|
|
|
ssize_t len = tcp_data_len(pkt);
|
|
|
|
struct net_pkt *up = tcp_pkt_clone(pkt);
|
|
|
|
|
|
|
|
NET_DBG("pkt: %p, len: %zu", pkt, net_pkt_get_len(pkt));
|
|
|
|
|
|
|
|
net_pkt_cursor_init(up);
|
|
|
|
net_pkt_set_overwrite(up, true);
|
|
|
|
|
|
|
|
net_pkt_pull(up, net_pkt_get_len(up) - len);
|
|
|
|
|
|
|
|
net_tcp_queue_data(conn->context, up);
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
2019-12-23 11:54:35 +02:00
|
|
|
static ssize_t tp_tcp_recv(int fd, void *buf, size_t len, int flags)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-03-09 15:44:04 +02:00
|
|
|
return 0;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void tp_init(struct tcp *conn, struct tp *tp)
|
|
|
|
{
|
|
|
|
struct tp out = {
|
|
|
|
.msg = "",
|
|
|
|
.status = "",
|
|
|
|
.state = tcp_state_to_str(conn->state, true),
|
|
|
|
.seq = conn->seq,
|
|
|
|
.ack = conn->ack,
|
|
|
|
.rcv = "",
|
|
|
|
.data = "",
|
|
|
|
.op = "",
|
|
|
|
};
|
|
|
|
|
|
|
|
*tp = out;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcp_to_json(struct tcp *conn, void *data, size_t *data_len)
|
|
|
|
{
|
|
|
|
struct tp tp;
|
|
|
|
|
|
|
|
tp_init(conn, &tp);
|
|
|
|
|
|
|
|
tp_encode(&tp, data, data_len);
|
|
|
|
}
|
|
|
|
|
2020-02-26 17:57:48 +02:00
|
|
|
enum net_verdict tp_input(struct net_conn *net_conn,
|
|
|
|
struct net_pkt *pkt,
|
|
|
|
union net_ip_header *ip_hdr,
|
|
|
|
union net_proto_header *proto,
|
|
|
|
void *user_data)
|
2019-10-21 11:12:35 +03:00
|
|
|
{
|
2020-02-26 17:57:48 +02:00
|
|
|
struct net_udp_hdr *uh = net_udp_get_hdr(pkt, NULL);
|
2019-10-21 11:12:35 +03:00
|
|
|
size_t data_len = ntohs(uh->len) - sizeof(*uh);
|
|
|
|
struct tcp *conn = tcp_conn_search(pkt);
|
|
|
|
size_t json_len = 0;
|
|
|
|
struct tp *tp;
|
|
|
|
struct tp_new *tp_new;
|
|
|
|
enum tp_type type;
|
|
|
|
bool responded = false;
|
|
|
|
static char buf[512];
|
|
|
|
|
2020-02-26 17:57:48 +02:00
|
|
|
net_pkt_cursor_init(pkt);
|
|
|
|
net_pkt_set_overwrite(pkt, true);
|
|
|
|
net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) +
|
|
|
|
net_pkt_ip_opts_len(pkt) + sizeof(*uh));
|
2019-10-21 11:12:35 +03:00
|
|
|
net_pkt_read(pkt, buf, data_len);
|
|
|
|
buf[data_len] = '\0';
|
|
|
|
data_len += 1;
|
|
|
|
|
|
|
|
type = json_decode_msg(buf, data_len);
|
|
|
|
|
|
|
|
data_len = ntohs(uh->len) - sizeof(*uh);
|
2020-02-26 17:57:48 +02:00
|
|
|
|
2019-10-21 11:12:35 +03:00
|
|
|
net_pkt_cursor_init(pkt);
|
2020-02-26 17:57:48 +02:00
|
|
|
net_pkt_set_overwrite(pkt, true);
|
|
|
|
net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) +
|
|
|
|
net_pkt_ip_opts_len(pkt) + sizeof(*uh));
|
2019-10-21 11:12:35 +03:00
|
|
|
net_pkt_read(pkt, buf, data_len);
|
|
|
|
buf[data_len] = '\0';
|
|
|
|
data_len += 1;
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case TP_CONFIG_REQUEST:
|
|
|
|
tp_new = json_to_tp_new(buf, data_len);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
tp = json_to_tp(buf, data_len);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case TP_COMMAND:
|
|
|
|
if (is("CONNECT", tp->op)) {
|
2020-03-03 18:39:24 +02:00
|
|
|
tp_output(pkt->family, pkt->iface, buf, 1);
|
2019-10-21 11:12:35 +03:00
|
|
|
responded = true;
|
2019-10-23 10:56:38 +03:00
|
|
|
{
|
|
|
|
struct net_context *context = tcp_calloc(1,
|
|
|
|
sizeof(struct net_context));
|
|
|
|
net_tcp_get(context);
|
2020-04-21 14:12:33 +03:00
|
|
|
net_context_set_family(context,
|
|
|
|
net_pkt_family(pkt));
|
2019-10-23 10:56:38 +03:00
|
|
|
conn = context->tcp;
|
2020-04-21 14:12:33 +03:00
|
|
|
tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC);
|
|
|
|
tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST);
|
2019-10-23 10:56:38 +03:00
|
|
|
conn->iface = pkt->iface;
|
|
|
|
tcp_conn_ref(conn);
|
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
conn->seq = tp->seq;
|
|
|
|
tcp_in(conn, NULL);
|
|
|
|
}
|
|
|
|
if (is("CLOSE", tp->op)) {
|
|
|
|
tp_trace = false;
|
2019-10-23 10:56:38 +03:00
|
|
|
{
|
|
|
|
struct net_context *context;
|
|
|
|
|
|
|
|
conn = (void *)sys_slist_peek_head(&tcp_conns);
|
|
|
|
context = conn->context;
|
2020-03-03 18:39:24 +02:00
|
|
|
while (tcp_conn_unref(conn))
|
|
|
|
;
|
2019-10-23 10:56:38 +03:00
|
|
|
tcp_free(context);
|
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
tp_mem_stat();
|
|
|
|
tp_nbuf_stat();
|
|
|
|
tp_pkt_stat();
|
|
|
|
tp_seq_stat();
|
|
|
|
}
|
|
|
|
if (is("CLOSE2", tp->op)) {
|
2019-10-23 10:56:38 +03:00
|
|
|
struct tcp *conn =
|
|
|
|
(void *)sys_slist_peek_head(&tcp_conns);
|
|
|
|
net_tcp_put(conn->context);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
if (is("RECV", tp->op)) {
|
|
|
|
#define HEXSTR_SIZE 64
|
|
|
|
char hexstr[HEXSTR_SIZE];
|
2019-12-23 11:54:35 +02:00
|
|
|
ssize_t len = tp_tcp_recv(0, buf, sizeof(buf), 0);
|
2019-10-21 11:12:35 +03:00
|
|
|
|
|
|
|
tp_init(conn, tp);
|
|
|
|
bin2hex(buf, len, hexstr, HEXSTR_SIZE);
|
|
|
|
tp->data = hexstr;
|
|
|
|
NET_DBG("%zd = tcp_recv(\"%s\")", len, tp->data);
|
|
|
|
json_len = sizeof(buf);
|
|
|
|
tp_encode(tp, buf, &json_len);
|
|
|
|
}
|
|
|
|
if (is("SEND", tp->op)) {
|
|
|
|
ssize_t len = tp_str_to_hex(buf, sizeof(buf), tp->data);
|
|
|
|
struct tcp *conn =
|
|
|
|
(void *)sys_slist_peek_head(&tcp_conns);
|
|
|
|
|
2020-03-03 18:39:24 +02:00
|
|
|
tp_output(pkt->family, pkt->iface, buf, 1);
|
2019-10-21 11:12:35 +03:00
|
|
|
responded = true;
|
|
|
|
NET_DBG("tcp_send(\"%s\")", tp->data);
|
2020-03-10 14:14:20 +02:00
|
|
|
{
|
2020-04-03 15:22:26 +03:00
|
|
|
struct net_pkt *data_pkt;
|
|
|
|
|
2020-04-22 15:09:23 +03:00
|
|
|
data_pkt = tcp_pkt_alloc(conn, len);
|
2020-04-03 15:22:26 +03:00
|
|
|
net_pkt_write(data_pkt, buf, len);
|
|
|
|
net_pkt_cursor_init(data_pkt);
|
|
|
|
net_tcp_queue_data(conn->context, data_pkt);
|
2020-03-10 14:14:20 +02:00
|
|
|
}
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case TP_CONFIG_REQUEST:
|
|
|
|
tp_new_find_and_apply(tp_new, "tcp_rto", &tcp_rto, TP_INT);
|
|
|
|
tp_new_find_and_apply(tp_new, "tcp_retries", &tcp_retries,
|
|
|
|
TP_INT);
|
|
|
|
tp_new_find_and_apply(tp_new, "tcp_window", &tcp_window,
|
|
|
|
TP_INT);
|
|
|
|
tp_new_find_and_apply(tp_new, "tp_trace", &tp_trace, TP_BOOL);
|
|
|
|
break;
|
|
|
|
case TP_INTROSPECT_REQUEST:
|
|
|
|
json_len = sizeof(buf);
|
|
|
|
conn = (void *)sys_slist_peek_head(&tcp_conns);
|
|
|
|
tcp_to_json(conn, buf, &json_len);
|
|
|
|
break;
|
|
|
|
case TP_DEBUG_STOP: case TP_DEBUG_CONTINUE:
|
|
|
|
tp_state = tp->type;
|
|
|
|
break;
|
|
|
|
default:
|
2020-01-08 09:42:23 +02:00
|
|
|
NET_ASSERT(false, "Unimplemented tp command: %s", tp->msg);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (json_len) {
|
2020-03-03 18:39:24 +02:00
|
|
|
tp_output(pkt->family, pkt->iface, buf, json_len);
|
2019-10-21 11:12:35 +03:00
|
|
|
} else if ((TP_CONFIG_REQUEST == type || TP_COMMAND == type)
|
|
|
|
&& responded == false) {
|
2020-03-03 18:39:24 +02:00
|
|
|
tp_output(pkt->family, pkt->iface, buf, 1);
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
|
|
|
|
2020-02-26 17:57:48 +02:00
|
|
|
return NET_DROP;
|
2019-10-21 11:12:35 +03:00
|
|
|
}
|
2020-03-10 14:14:20 +02:00
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
static void test_cb_register(sa_family_t family, uint8_t proto, uint16_t remote_port,
|
|
|
|
uint16_t local_port, net_conn_cb_t cb)
|
2020-03-10 14:14:20 +02:00
|
|
|
{
|
|
|
|
struct net_conn_handle *conn_handle = NULL;
|
|
|
|
const struct sockaddr addr = { .sa_family = family, };
|
|
|
|
|
|
|
|
int ret = net_conn_register(proto,
|
|
|
|
family,
|
|
|
|
&addr, /* remote address */
|
|
|
|
&addr, /* local address */
|
|
|
|
local_port,
|
|
|
|
remote_port,
|
|
|
|
cb,
|
|
|
|
NULL, /* user_data */
|
|
|
|
&conn_handle);
|
|
|
|
if (ret < 0) {
|
|
|
|
NET_ERR("net_conn_register(): %d", ret);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_TEST_PROTOCOL */
|
|
|
|
|
|
|
|
void net_tcp_init(void)
|
|
|
|
{
|
|
|
|
#if defined(CONFIG_NET_TEST_PROTOCOL)
|
|
|
|
/* Register inputs for TTCN-3 based TCP2 sanity check */
|
|
|
|
test_cb_register(AF_INET, IPPROTO_TCP, 4242, 4242, tcp_input);
|
|
|
|
test_cb_register(AF_INET6, IPPROTO_TCP, 4242, 4242, tcp_input);
|
|
|
|
test_cb_register(AF_INET, IPPROTO_UDP, 4242, 4242, tp_input);
|
|
|
|
test_cb_register(AF_INET6, IPPROTO_UDP, 4242, 4242, tp_input);
|
|
|
|
|
|
|
|
tcp_recv_cb = tp_tcp_recv_cb;
|
|
|
|
#endif
|
|
|
|
}
|