tests: net: Check leaks when appending frags to a cloned packet

This test checks that new frags appended to a cloned packet are properly
freed.
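
For context, the failure mode being guarded against is a frag that gets appended to the original packet after a shallow clone and is then never returned to the buffer pool. Below is a minimal sketch of that scenario, using the same net_pkt/net_buf calls as the test in the diff; the helper name, include path, and sizes are illustrative and not part of the commit:

/* Sketch only: shallow-clone a packet, append a fresh frag to the original,
 * then drop both references. All buffers, including the appended frag, should
 * end up back in the pool. Error handling is omitted for brevity.
 */
#include <net/net_pkt.h> /* include path assumed for the Zephyr version of this commit */

static void shallow_clone_append_frag_sketch(void)
{
	struct net_buf_pool *tx_data;
	struct net_pkt *pkt, *clone;
	struct net_buf *frag;

	pkt = net_pkt_alloc_with_buffer(NULL, CONFIG_NET_BUF_DATA_SIZE,
					AF_UNSPEC, 0, K_NO_WAIT);
	clone = net_pkt_shallow_clone(pkt, K_NO_WAIT);

	/* Append a new frag to the original packet only; the packet's buffer
	 * chain takes over the frag's reference.
	 */
	net_pkt_get_info(NULL, NULL, NULL, &tx_data);
	frag = net_buf_alloc_len(tx_data, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
	net_pkt_append_buffer(pkt, frag);

	net_pkt_unref(pkt);   /* buffers shared with the clone stay referenced */
	net_pkt_unref(clone); /* everything, including the new frag, must be freed here */
}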

Signed-off-by: Andriy Gelman <andriy.gelman@gmail.com>
Author:    Andriy Gelman <andriy.gelman@gmail.com>
Date:      2022-05-22 10:09:40 -04:00
Committer: Carles Cufí
Commit:    d67a130368

@@ -1094,6 +1094,88 @@ void test_net_pkt_shallow_clone_noleak_buf(void)
}
#define TEST_NET_PKT_SHALLOW_CLONE_APPEND_BUF(extra_frag_refcounts) \
void test_net_pkt_shallow_clone_append_buf_##extra_frag_refcounts(void) \
{ \
	const int bufs_to_allocate = 3; \
	const int bufs_frag = 2; \
\
	zassert_true(bufs_frag + bufs_to_allocate < CONFIG_NET_BUF_DATA_SIZE, \
		     "Total bufs to allocate must be less than available space"); \
\
	const size_t pkt_size = CONFIG_NET_BUF_DATA_SIZE * bufs_to_allocate; \
\
	struct net_pkt *pkt, *shallow_pkt; \
	struct net_buf *frag_head; \
	struct net_buf *frag; \
	struct net_buf_pool *tx_data; \
\
	pkt = net_pkt_alloc_with_buffer(NULL, pkt_size, \
					AF_UNSPEC, 0, K_NO_WAIT); \
	zassert_true(pkt != NULL, "Pkt not allocated"); \
\
	net_pkt_get_info(NULL, NULL, NULL, &tx_data); \
	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count \
		      - bufs_to_allocate, "Incorrect net buf allocation"); \
\
	shallow_pkt = net_pkt_shallow_clone(pkt, K_NO_WAIT); \
	zassert_true(shallow_pkt != NULL, "Pkt not allocated"); \
\
	/* allocate buffers for the frag */ \
	for (int i = 0; i < bufs_frag; i++) { \
		frag = net_buf_alloc_len(tx_data, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT); \
		zassert_true(frag != NULL, "Frag not allocated"); \
		net_pkt_append_buffer(pkt, frag); \
		if (i == 0) { \
			frag_head = frag; \
		} \
	} \
\
	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count \
		      - bufs_to_allocate - bufs_frag, "Incorrect net buf allocation"); \
\
	/* Note: if the frag is appended to a net buf, then the net buf */ \
	/* takes ownership of one ref count. Otherwise net_buf_unref() must */ \
	/* be called on the frag to free the buffers. */ \
\
	for (int i = 0; i < extra_frag_refcounts; i++) { \
		frag_head = net_buf_ref(frag_head); \
	} \
\
	net_pkt_unref(pkt); \
\
	/* we shouldn't have freed any buffers yet */ \
	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count \
		      - bufs_to_allocate - bufs_frag, \
		      "Incorrect net buf allocation"); \
\
	net_pkt_unref(shallow_pkt); \
\
	if (extra_frag_refcounts == 0) { \
		/* if no extra ref counts to frag were added then we should free */ \
		/* all the buffers at this point */ \
		zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count, \
			      "Leak detected"); \
	} else { \
		/* otherwise only bufs_frag should be available, and frag could */ \
		/* still be used at this point */ \
		zassert_equal(atomic_get(&tx_data->avail_count), \
			      tx_data->buf_count - bufs_frag, "Leak detected"); \
	} \
\
	for (int i = 0; i < extra_frag_refcounts; i++) { \
		net_buf_unref(frag_head); \
	} \
\
	/* all the buffers should be freed now */ \
	zassert_equal(atomic_get(&tx_data->avail_count), tx_data->buf_count, \
		      "Leak detected"); \
}
TEST_NET_PKT_SHALLOW_CLONE_APPEND_BUF(0)
TEST_NET_PKT_SHALLOW_CLONE_APPEND_BUF(1)
TEST_NET_PKT_SHALLOW_CLONE_APPEND_BUF(2)
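
For reference, each invocation above relies on the ## token pasting in the macro to define a separate test function; TEST_NET_PKT_SHALLOW_CLONE_APPEND_BUF(1), for example, expands to roughly the following sketch (body elided):

/* Approximate expansion: the argument is pasted into the function name, so
 * each invocation produces a distinct ztest case that differs only in the
 * value of extra_frag_refcounts.
 */
void test_net_pkt_shallow_clone_append_buf_1(void)
{
	/* ... macro body from above, with extra_frag_refcounts == 1 ... */
}
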
void test_main(void)
{
	eth_if = net_if_get_default();
@@ -1111,7 +1193,10 @@ void test_main(void)
	ztest_unit_test(test_net_pkt_headroom_copy),
	ztest_unit_test(test_net_pkt_get_contiguous_len),
	ztest_unit_test(test_net_pkt_remove_tail),
	ztest_unit_test(test_net_pkt_shallow_clone_noleak_buf),
	ztest_unit_test(test_net_pkt_shallow_clone_append_buf_0),
	ztest_unit_test(test_net_pkt_shallow_clone_append_buf_1),
	ztest_unit_test(test_net_pkt_shallow_clone_append_buf_2)
	);
	ztest_run_test_suite(net_pkt_tests);