net: Collect net_pkt TX timing statistics when passing through the IP stack

Collect information on how long a net_pkt has travelled through the IP
stack, measured at certain points. See the networking documentation for
what these points are and how to read the timing information. This
initial commit adds support for TX timing collection.

Signed-off-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Jukka Rissanen 2020-05-13 11:09:10 +03:00 committed by Jukka Rissanen
commit 76398945c3
7 changed files with 208 additions and 5 deletions
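
The diff below records a raw cycle-counter value at a few fixed points in the TX path (when the packet is queued to its traffic-class queue, when the TX work item starts running, and after the L2 send) and later turns those values into per-leg statistics. As a rough sketch of what one such instrumentation point looks like using only the accessors added in this commit plus the standard cycle counter (the function name tx_path_checkpoint is made up for illustration):

#include <kernel.h>        /* k_cycle_get_32() */
#include <net/net_pkt.h>   /* net_pkt_set_tx_stats_tick() */

/* Hypothetical checkpoint: note that the packet passed this point "now".
 * When CONFIG_NET_PKT_TXTIME_STATS_DETAIL is disabled, the macro below
 * expands to nothing, so the call compiles away.
 */
static inline void tx_path_checkpoint(struct net_pkt *pkt)
{
	net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
}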

@@ -57,6 +57,13 @@ extern "C" {
#define NET_ASSERT(cond, ...) __ASSERT(cond, "" __VA_ARGS__)
/* This needs to be here in order to avoid circular include dependency */
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
#if !defined(NET_PKT_DETAIL_STATS_COUNT)
#define NET_PKT_DETAIL_STATS_COUNT 3
#endif
#endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL */
/** @endcond */
struct net_buf;

@@ -112,8 +112,22 @@ struct net_pkt {
#if defined(CONFIG_NET_PKT_TIMESTAMP) || \
defined(CONFIG_NET_PKT_RXTIME_STATS) || \
defined(CONFIG_NET_PKT_TXTIME_STATS)
/** Timestamp if available. */
struct net_ptp_time timestamp;
struct {
/** Timestamp if available. */
struct net_ptp_time timestamp;
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
/** Collect extra statistics for net_pkt processing
* from various points in the IP stack. See networking
* documentation where these points are located and how
* to interpret the results.
*/
struct {
uint32_t stat[NET_PKT_DETAIL_STATS_COUNT];
int count;
} detail;
#endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL */
};
#endif /* CONFIG_NET_PKT_TIMESTAMP */
#if defined(CONFIG_NET_PKT_TXTIME)
/** Network packet TX time in the future (in nanoseconds) */
@@ -817,6 +831,64 @@ static inline void net_pkt_set_txtime(struct net_pkt *pkt, uint64_t txtime)
}
#endif /* CONFIG_NET_PKT_TXTIME */
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
{
return pkt->detail.stat;
}
static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
{
return pkt->detail.count;
}
static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
{
memset(&pkt->detail, 0, sizeof(pkt->detail));
}
static ALWAYS_INLINE void net_pkt_set_stats_tick(struct net_pkt *pkt,
uint32_t tick)
{
if (pkt->detail.count >= NET_PKT_DETAIL_STATS_COUNT) {
NET_ERR("Detail stats count overflow (%d >= %d)",
pkt->detail.count, NET_PKT_DETAIL_STATS_COUNT);
return;
}
pkt->detail.stat[pkt->detail.count++] = tick;
}
#define net_pkt_set_tx_stats_tick(pkt, tick) net_pkt_set_stats_tick(pkt, tick)
#else
static inline uint32_t *net_pkt_stats_tick(struct net_pkt *pkt)
{
ARG_UNUSED(pkt);
return NULL;
}
static inline int net_pkt_stats_tick_count(struct net_pkt *pkt)
{
ARG_UNUSED(pkt);
return 0;
}
static inline void net_pkt_stats_tick_reset(struct net_pkt *pkt)
{
ARG_UNUSED(pkt);
}
static inline void net_pkt_set_stats_tick(struct net_pkt *pkt, uint32_t tick)
{
ARG_UNUSED(pkt);
ARG_UNUSED(tick);
}
#define net_pkt_set_tx_stats_tick(pkt, tick)
#endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL */
static inline size_t net_pkt_get_len(struct net_pkt *pkt)
{
return net_buf_frags_len(pkt->frags);
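
A brief sketch of how these accessors behave together, assuming CONFIG_NET_PKT_TXTIME_STATS_DETAIL is enabled (example_record_and_dump is illustrative, not part of the commit): each call to net_pkt_set_stats_tick() appends one raw cycle value to the fixed-size per-packet array, at most NET_PKT_DETAIL_STATS_COUNT entries are kept (further calls only log an error), and the reset helper clears both the array and the count.

#include <kernel.h>
#include <sys/printk.h>
#include <net/net_pkt.h>

static void example_record_and_dump(struct net_pkt *pkt)
{
	int i;

	net_pkt_stats_tick_reset(pkt);                  /* start from a clean state */

	net_pkt_set_stats_tick(pkt, k_cycle_get_32());  /* detail point #1 */
	net_pkt_set_stats_tick(pkt, k_cycle_get_32());  /* detail point #2 */

	for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
		printk("tick[%d] = %u cycles\n", i, net_pkt_stats_tick(pkt)[i]);
	}
}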

@@ -224,6 +224,10 @@ struct net_stats_rx_time {
struct net_stats_tc {
struct {
struct net_stats_tx_time tx_time;
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
struct net_stats_tx_time
tx_time_detail[NET_PKT_DETAIL_STATS_COUNT];
#endif
net_stats_t pkts;
net_stats_t bytes;
uint8_t priority;
@@ -314,6 +318,11 @@ struct net_stats {
defined(CONFIG_NET_PKT_TXTIME_STATS)
/** Network packet TX time statistics */
struct net_stats_tx_time tx_time;
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
/** Network packet TX time detail statistics */
struct net_stats_tx_time tx_time_detail[NET_PKT_DETAIL_STATS_COUNT];
#endif
#endif
#if defined(CONFIG_NET_PKT_RXTIME_STATS)
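
The new tx_time_detail[] slots are filled by the helpers further down in this diff, which add a microsecond value to a .sum field and bump a .count field for each detail point. Assuming that layout of struct net_stats_tx_time (it is not shown in this hunk), that the detail option is enabled, and that per-traffic-class statistics are compiled in, a per-point average could presumably be read back roughly like this (print_tx_detail_avg is an illustrative name only):

#include <sys/printk.h>
#include <net/net_core.h>    /* NET_PKT_DETAIL_STATS_COUNT */
#include <net/net_stats.h>

static void print_tx_detail_avg(struct net_stats *stats, int tc)
{
	int i;

	for (i = 0; i < NET_PKT_DETAIL_STATS_COUNT; i++) {
		struct net_stats_tx_time *d = &stats->tc.sent[tc].tx_time_detail[i];

		if (d->count > 0) {
			/* sum is accumulated in microseconds by the UPDATE_STAT helpers */
			printk("TC %d, point %d: avg %llu us\n", tc, i,
			       (unsigned long long)(d->sum / d->count));
		}
	}
}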

@@ -707,7 +707,7 @@ config NET_PKT_TXTIME_STATS
bool "Enable network packet TX time statistics"
select NET_PKT_TIMESTAMP
select NET_STATISTICS
depends on (NET_UDP || NET_TCP) && !NET_PKT_TXTIME
depends on (NET_UDP || NET_TCP || NET_SOCKETS_PACKET) && !NET_PKT_TXTIME && NET_NATIVE
help
Enable network packet TX time statistics support. This is used to
calculate how long on average it takes for a packet to travel from
@@ -720,6 +720,16 @@ config NET_PKT_TXTIME_STATS
Note that CONFIG_NET_PKT_TXTIME cannot be set at the same time
because net_pkt shares the time variable for statistics and TX time.
config NET_PKT_TXTIME_STATS_DETAIL
bool "Get extra transmit detail statistics in TX path"
depends on NET_PKT_TXTIME_STATS
help
Store transmit statistics detail information at certain key points in
the TX path. This is a very specialized configuration and it increases
the size of net_pkt, so in typical cases you should not enable it.
The extra statistics can be seen in net-shell using the "net stats"
command.
config NET_PROMISCUOUS_MODE
bool "Enable promiscuous mode support [EXPERIMENTAL]"
select NET_MGMT
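
Assuming a typical application build, the new option would be turned on from the application configuration together with its parent option (option names taken from the hunk above; note that NET_PKT_TXTIME must stay disabled because of the depends-on line). A sketch of the prj.conf fragment:

CONFIG_NET_PKT_TXTIME_STATS=y
CONFIG_NET_PKT_TXTIME_STATS_DETAIL=y

The resulting per-point numbers are then printed by the "net stats" shell command mentioned in the help text.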

@@ -183,6 +183,23 @@ static inline void net_context_send_cb(struct net_context *context,
}
}
static void update_txtime_stats_detail(struct net_pkt *pkt,
uint32_t start_time, uint32_t stop_time)
{
uint32_t val, prev = start_time;
int i;
for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
if (!net_pkt_stats_tick(pkt)[i]) {
break;
}
val = net_pkt_stats_tick(pkt)[i] - prev;
prev = net_pkt_stats_tick(pkt)[i];
net_pkt_stats_tick(pkt)[i] = val;
}
}
static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
{
struct net_linkaddr ll_dst = {
@@ -240,6 +257,13 @@ static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
memcpy(&start_timestamp, net_pkt_timestamp(pkt),
sizeof(start_timestamp));
pkt_priority = net_pkt_priority(pkt);
if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
/* Make sure the statistics information is not
* lost by keeping the net_pkt over L2 send.
*/
net_pkt_ref(pkt);
}
}
status = net_if_l2(iface)->send(iface, pkt);
@@ -251,11 +275,35 @@ static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
}
}
if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS) && status >= 0) {
if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
uint32_t end_tick = k_cycle_get_32();
net_pkt_set_tx_stats_tick(pkt, end_tick);
net_stats_update_tc_tx_time(iface,
pkt_priority,
start_timestamp.nanosecond,
k_cycle_get_32());
end_tick);
if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)) {
update_txtime_stats_detail(
pkt,
start_timestamp.nanosecond,
end_tick);
net_stats_update_tc_tx_time_detail(
iface, pkt_priority,
net_pkt_stats_tick(pkt));
/* For TCP connections, we might keep the pkt
* longer so that we can resend it if needed.
* Because of that we need to clear the
* statistics here.
*/
net_pkt_stats_tick_reset(pkt);
net_pkt_unref(pkt);
}
}
} else {
@@ -303,6 +351,8 @@ static void process_tx_packet(struct k_work *work)
pkt = CONTAINER_OF(work, struct net_pkt, work);
net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
iface = net_pkt_iface(pkt);
net_if_tx(iface, pkt);
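
To make the arithmetic in update_txtime_stats_detail() concrete, here is a standalone re-run of its loop with made-up numbers (plain C, not Zephyr code): the per-packet array starts out holding absolute cycle stamps and ends up holding the delta for each leg of the TX path, relative to the start time that was passed in.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t start_time = 1000;               /* hypothetical start stamp */
	uint32_t stat[3] = { 1200, 1500, 2100 };  /* raw per-point cycle values */
	uint32_t prev = start_time;
	int i;

	for (i = 0; i < 3; i++) {
		uint32_t val = stat[i] - prev;

		prev = stat[i];
		stat[i] = val;
	}

	/* stat[] now holds { 200, 300, 600 }: the cycles spent reaching each
	 * successive TX-path point. The net_stats helpers convert these to
	 * microseconds before accumulating them.
	 */
	for (i = 0; i < 3; i++) {
		printf("leg %d: %" PRIu32 " cycles\n", i, stat[i]);
	}

	return 0;
}

The net_pkt_ref()/net_pkt_unref() pair around the L2 send in the hunk above exists only so that this per-packet data is still valid when the statistics are recorded.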

@@ -334,6 +334,24 @@ static inline void net_stats_update_tx_time(struct net_if *iface,
#define net_stats_update_tx_time(iface, start_time, end_time)
#endif /* (TIMESTAMP || NET_PKT_TXTIME_STATS) && NET_STATISTICS */
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
static inline void net_stats_update_tx_time_detail(struct net_if *iface,
uint32_t detail_stat[])
{
int i;
for (i = 0; i < NET_PKT_DETAIL_STATS_COUNT; i++) {
UPDATE_STAT(iface,
stats.tx_time_detail[i].sum +=
k_cyc_to_ns_floor64(detail_stat[i]) / 1000);
UPDATE_STAT(iface,
stats.tx_time_detail[i].count += 1);
}
}
#else
#define net_stats_update_tx_time_detail(iface, detail_stat)
#endif /* NET_PKT_TXTIME_STATS_DETAIL */
#if defined(CONFIG_NET_PKT_RXTIME_STATS) && defined(CONFIG_NET_STATISTICS)
static inline void net_stats_update_rx_time(struct net_if *iface,
uint32_t start_time,
@@ -389,6 +407,28 @@ static inline void net_stats_update_tc_tx_time(struct net_if *iface,
#define net_stats_update_tc_tx_time(iface, tc, start_time, end_time)
#endif /* (NET_CONTEXT_TIMESTAMP || NET_PKT_TXTIME_STATS) && NET_STATISTICS */
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
static inline void net_stats_update_tc_tx_time_detail(struct net_if *iface,
uint8_t priority,
uint32_t detail_stat[])
{
int tc = net_tx_priority2tc(priority);
int i;
for (i = 0; i < NET_PKT_DETAIL_STATS_COUNT; i++) {
UPDATE_STAT(iface,
stats.tc.sent[tc].tx_time_detail[i].sum +=
k_cyc_to_ns_floor64(detail_stat[i]) / 1000);
UPDATE_STAT(iface,
stats.tc.sent[tc].tx_time_detail[i].count += 1);
}
net_stats_update_tx_time_detail(iface, detail_stat);
}
#else
#define net_stats_update_tc_tx_time_detail(iface, tc, detail_stat)
#endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL */
#if defined(CONFIG_NET_PKT_RXTIME_STATS) && defined(CONFIG_NET_STATISTICS) \
&& defined(CONFIG_NET_NATIVE)
static inline void net_stats_update_tc_rx_time(struct net_if *iface,
@@ -449,6 +489,19 @@ static inline void net_stats_update_tc_tx_time(struct net_if *iface,
#define net_stats_update_tc_tx_time(iface, priority, start_time, end_time)
#endif /* (NET_CONTEXT_TIMESTAMP || NET_PKT_TXTIME_STATS) && NET_STATISTICS */
#if defined(CONFIG_NET_PKT_TXTIME_STATS_DETAIL)
static inline void net_stats_update_tc_tx_time_detail(struct net_if *iface,
uint8_t pkt_priority,
uint32_t detail_stat[])
{
ARG_UNUSED(pkt_priority);
net_stats_update_tx_time_detail(iface, detail_stat);
}
#else
#define net_stats_update_tc_tx_time_detail(iface, pkt_priority, detail_stat)
#endif /* CONFIG_NET_PKT_TXTIME_STATS_DETAIL */
#if defined(CONFIG_NET_PKT_RXTIME_STATS) && defined(CONFIG_NET_STATISTICS) \
&& defined(CONFIG_NET_NATIVE)
static inline void net_stats_update_tc_rx_time(struct net_if *iface,
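
As the UPDATE_STAT lines show, the raw values handed in via detail_stat[] are cycle deltas: they are converted to nanoseconds with k_cyc_to_ns_floor64() and then divided by 1000, so the sums accumulated in the statistics are in microseconds. A minimal helper expressing that conversion (the name cycles_to_usec is illustrative only):

#include <kernel.h>   /* k_cyc_to_ns_floor64() */

static inline uint64_t cycles_to_usec(uint32_t cycle_delta)
{
	/* Same conversion as the UPDATE_STAT lines above: cycles -> ns -> us */
	return k_cyc_to_ns_floor64(cycle_delta) / 1000ULL;
}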

@@ -35,6 +35,8 @@ bool net_tc_submit_to_tx_queue(uint8_t tc, struct net_pkt *pkt)
return false;
}
net_pkt_set_tx_stats_tick(pkt, k_cycle_get_32());
k_work_submit_to_queue(&tx_classes[tc].work_q, net_pkt_work(pkt));
return true;