net: buf: Move net_buf_pool objects to dedicated linker area

Moving the net_buf_pool objects to a dedicated area lets us access
them by array offset into this area instead of directly by pointer.
This helps reduce the size of net_buf objects by 4 bytes.

Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
This commit is contained in:
Johan Hedberg 2017-06-03 19:20:27 +03:00 committed by Johan Hedberg
commit 9703927f84
12 changed files with 105 additions and 41 deletions

View file

@@ -130,6 +130,12 @@
_k_mem_pool_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_DATA_PROLOGUE(_net_buf_pool_area, (OPTIONAL),)
{
_net_buf_pool_list = .;
KEEP(*(SORT_BY_NAME("._net_buf_pool.static.*")))
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_DATA_PROLOGUE(net_if, (OPTIONAL),)
{
__net_if_start = .;

View file

@@ -408,7 +408,7 @@ struct net_buf {
u8_t flags;
/** Where the buffer should go when freed up. */
struct net_buf_pool *pool;
u8_t pool_id;
/* Union for convenience access to the net_buf_simple members, also
* preserving the old API.
@@ -485,7 +485,7 @@ struct net_buf_pool {
.buf_count = _count, \
.uninit_count = _count, \
.avail_count = _count, \
.pool_size = sizeof(_net_buf_pool_##_pool), \
.pool_size = sizeof(_net_buf_##_pool), \
.buf_size = _size, \
.user_data_size = _ud_size, \
.destroy = _destroy, \
@@ -529,12 +529,23 @@ struct net_buf_pool {
static struct { \
struct net_buf buf; \
u8_t data[_size] __net_buf_align; \
u8_t ud[ROUND_UP(_ud_size, 4)] __net_buf_align; \
} _net_buf_pool_##_name[_count] __noinit; \
static struct net_buf_pool _name = \
NET_BUF_POOL_INITIALIZER(_name, _net_buf_pool_##_name, \
u8_t ud[ROUND_UP(_ud_size, 4)] __net_buf_align; \
} _net_buf_##_name[_count] __noinit; \
struct net_buf_pool _name __net_buf_align \
__in_section(_net_buf_pool, static, _name) = \
NET_BUF_POOL_INITIALIZER(_name, _net_buf_##_name, \
_count, _size, _ud_size, _destroy)
/**
* @brief Looks up a pool based on its ID.
*
* @param id Pool ID (e.g. from buf->pool_id).
*
* @return Pointer to pool.
*/
struct net_buf_pool *net_buf_pool_get(int id);
/**
* @brief Allocate a new buffer from a pool.
*
@@ -590,7 +601,9 @@ struct net_buf *net_buf_get(struct k_fifo *fifo, s32_t timeout);
*/
static inline void net_buf_destroy(struct net_buf *buf)
{
	/* Resolve the owning pool from its ID: net_buf no longer carries a
	 * direct pool pointer (this is what saves 4 bytes per buffer).
	 * The stale pre-change statement `k_lifo_put(&buf->pool->free, buf);`
	 * has been dropped — it referenced the removed `pool` member and
	 * would have pushed the buffer onto the free LIFO twice.
	 */
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	k_lifo_put(&pool->free, buf);
}
/**

View file

@@ -5,6 +5,9 @@
*/
#include <ztest.h>
#include <net/buf.h>
/* Unit-test stub: in a real build this symbol is placed by the linker
 * script (`_net_buf_pool_list = .;` in the _net_buf_pool_area section);
 * a single-element array is enough for these tests.
 */
struct net_buf_pool _net_buf_pool_list[1];
unsigned int irq_lock(void)
{

View file

@@ -477,7 +477,7 @@ class SizeCalculator:
"_k_fifo_area", "_k_lifo_area", "_k_stack_area",
"_k_msgq_area", "_k_mbox_area", "_k_pipe_area",
"net_if", "net_if_event", "net_stack", "net_l2_data",
"_k_queue_area"]
"_k_queue_area", "_net_buf_pool_area" ]
# These get copied into RAM only on non-XIP
ro_sections = ["text", "ctors", "init_array", "reset",
"rodata", "devconfig", "net_l2", "vector"]

View file

@@ -1055,9 +1055,12 @@ void bt_conn_recv(struct bt_conn *conn, struct net_buf *buf, u8_t flags)
int bt_conn_send_cb(struct bt_conn *conn, struct net_buf *buf,
bt_conn_tx_cb_t cb)
{
struct net_buf_pool *pool;
BT_DBG("conn handle %u buf len %u cb %p", conn->handle, buf->len, cb);
if (buf->pool->user_data_size < BT_BUF_USER_DATA_MIN) {
pool = net_buf_pool_get(buf->pool_id);
if (pool->user_data_size < BT_BUF_USER_DATA_MIN) {
BT_ERR("Too small user data size");
net_buf_unref(buf);
return -EINVAL;

View file

@@ -2537,7 +2537,10 @@ static void hci_cmd_done(u16_t opcode, u8_t status, struct net_buf *buf)
{
BT_DBG("opcode 0x%04x status 0x%02x buf %p", opcode, status, buf);
if (buf->pool != &hci_cmd_pool) {
if (net_buf_pool_get(buf->pool_id) != &hci_cmd_pool) {
BT_WARN("pool id %u pool %p != &hci_cmd_pool %p",
buf->pool_id, net_buf_pool_get(buf->pool_id),
&hci_cmd_pool);
return;
}
@@ -3845,11 +3848,14 @@ int bt_send(struct net_buf *buf)
int bt_recv(struct net_buf *buf)
{
struct net_buf_pool *pool;
bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);
BT_DBG("buf %p len %u", buf, buf->len);
if (buf->pool->user_data_size < BT_BUF_USER_DATA_MIN) {
pool = net_buf_pool_get(buf->pool_id);
if (pool->user_data_size < BT_BUF_USER_DATA_MIN) {
BT_ERR("Too small user data size");
net_buf_unref(buf);
return -EINVAL;

View file

@@ -1005,12 +1005,13 @@ static void le_disconn_rsp(struct bt_l2cap *l2cap, u8_t ident,
static inline struct net_buf *l2cap_alloc_seg(struct net_buf *buf)
{
struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
struct net_buf *seg;
/* Try to use original pool if possible */
if (buf->pool->user_data_size >= BT_BUF_USER_DATA_MIN &&
buf->pool->buf_size >= BT_L2CAP_BUF_SIZE(L2CAP_MAX_LE_MPS)) {
seg = net_buf_alloc(buf->pool, K_NO_WAIT);
if (pool->user_data_size >= BT_BUF_USER_DATA_MIN &&
pool->buf_size >= BT_L2CAP_BUF_SIZE(L2CAP_MAX_LE_MPS)) {
seg = net_buf_alloc(pool, K_NO_WAIT);
if (seg) {
net_buf_reserve(seg, BT_L2CAP_CHAN_SEND_RESERVE);
return seg;
@@ -1025,6 +1026,7 @@ static struct net_buf *l2cap_chan_create_seg(struct bt_l2cap_le_chan *ch,
struct net_buf *buf,
size_t sdu_hdr_len)
{
struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
struct net_buf *seg;
u16_t headroom;
u16_t len;
@@ -1035,9 +1037,9 @@ static struct net_buf *l2cap_chan_create_seg(struct bt_l2cap_le_chan *ch,
}
/* Segment if there is no space in the user_data */
if (buf->pool->user_data_size < BT_BUF_USER_DATA_MIN) {
if (pool->user_data_size < BT_BUF_USER_DATA_MIN) {
BT_WARN("Too small buffer user_data_size %u",
buf->pool->user_data_size);
pool->user_data_size);
goto segment;
}

View file

@@ -42,6 +42,19 @@
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif
/* Linker-defined symbol bound to the static pool structs */
extern struct net_buf_pool _net_buf_pool_list[];
/* Map a pool ID (e.g. buf->pool_id) back to its pool struct by indexing
 * the array the linker lays out in the _net_buf_pool_area section.
 */
struct net_buf_pool *net_buf_pool_get(int id)
{
	return _net_buf_pool_list + id;
}
/* Inverse of net_buf_pool_get(): a pool's ID is its offset within the
 * linker-section array of static pools.
 */
static int pool_id(struct net_buf_pool *pool)
{
	ptrdiff_t idx = pool - _net_buf_pool_list;

	return (int)idx;
}
/* Helpers to access the storage array, since we don't have access to its
* type at this point anymore.
*/
@@ -58,7 +71,7 @@ static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
buf = UNINIT_BUF(pool, pool->buf_count - uninit_count);
buf->pool = pool;
buf->pool_id = pool_id(pool);
buf->size = pool->buf_size;
return buf;
@@ -162,8 +175,8 @@ success:
net_buf_reset(buf);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
buf->pool->avail_count--;
NET_BUF_ASSERT(buf->pool->avail_count >= 0);
pool->avail_count--;
NET_BUF_ASSERT(pool->avail_count >= 0);
#endif
return buf;
@@ -235,6 +248,7 @@ void net_buf_unref(struct net_buf *buf)
while (buf) {
struct net_buf *frags = buf->frags;
struct net_buf_pool *pool;
#if defined(CONFIG_NET_BUF_LOG)
if (!buf->ref) {
@@ -243,8 +257,8 @@ void net_buf_unref(struct net_buf *buf)
return;
}
#endif
NET_BUF_DBG("buf %p ref %u pool %p frags %p", buf, buf->ref,
buf->pool, buf->frags);
NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
buf->pool_id, buf->frags);
if (--buf->ref > 0) {
return;
@@ -252,13 +266,15 @@ void net_buf_unref(struct net_buf *buf)
buf->frags = NULL;
pool = net_buf_pool_get(buf->pool_id);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
buf->pool->avail_count++;
NET_BUF_ASSERT(buf->pool->avail_count <= buf->pool->buf_count);
pool->avail_count++;
NET_BUF_ASSERT(pool->avail_count <= pool->buf_count);
#endif
if (buf->pool->destroy) {
buf->pool->destroy(buf);
if (pool->destroy) {
pool->destroy(buf);
} else {
net_buf_destroy(buf);
}
@@ -271,19 +287,22 @@ struct net_buf *net_buf_ref(struct net_buf *buf)
{
NET_BUF_ASSERT(buf);
NET_BUF_DBG("buf %p (old) ref %u pool %p",
buf, buf->ref, buf->pool);
NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
buf, buf->ref, buf->pool_id);
buf->ref++;
return buf;
}
struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout)
{
struct net_buf_pool *pool;
struct net_buf *clone;
NET_BUF_ASSERT(buf);
clone = net_buf_alloc(buf->pool, timeout);
pool = net_buf_pool_get(buf->pool_id);
clone = net_buf_alloc(pool, timeout);
if (!clone) {
return NULL;
}

View file

@@ -274,8 +274,9 @@ void net_pkt_print_frags(struct net_pkt *pkt)
NET_INFO("[%d] frag %p len %d size %d reserve %d "
"pool %p [sz %d ud_sz %d]",
count, frag, frag->len, frag_size, ll_overhead,
frag->pool, frag->pool->buf_size,
frag->pool->user_data_size);
net_buf_pool_get(frag->pool_id),
net_buf_pool_get(frag->pool_id)->buf_size,
net_buf_pool_get(frag->pool_id)->user_data_size);
count++;
@@ -703,9 +704,10 @@ void net_pkt_unref(struct net_pkt *pkt)
frag = pkt->frags;
while (frag) {
NET_DBG("%s (%s) [%d] frag %p ref %d frags %p (%s():%d)",
pool2str(frag->pool), frag->pool->name,
get_frees(frag->pool), frag, frag->ref - 1,
frag->frags, caller, line);
pool2str(net_buf_pool_get(frag->pool_id)),
net_buf_pool_get(frag->pool_id)->name,
get_frees(net_buf_pool_get(frag->pool_id)), frag,
frag->ref - 1, frag->frags, caller, line);
if (!frag->ref) {
const char *func_freed;
@@ -781,7 +783,9 @@ struct net_buf *net_pkt_frag_ref(struct net_buf *frag)
#if defined(CONFIG_NET_DEBUG_NET_PKT)
NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
pool2str(frag->pool), frag->pool->name, get_frees(frag->pool),
pool2str(net_buf_pool_get(frag->pool_id)),
net_buf_pool_get(frag->pool_id)->name,
get_frees(net_buf_pool_get(frag->pool_id)),
frag, frag->ref + 1, caller, line);
#endif
@@ -803,7 +807,9 @@ void net_pkt_frag_unref(struct net_buf *frag)
#if defined(CONFIG_NET_DEBUG_NET_PKT)
NET_DBG("%s (%s) [%d] frag %p ref %d (%s():%d)",
pool2str(frag->pool), frag->pool->name, get_frees(frag->pool),
pool2str(net_buf_pool_get(frag->pool_id)),
net_buf_pool_get(frag->pool_id)->name,
get_frees(net_buf_pool_get(frag->pool_id)),
frag, frag->ref - 1, caller, line);
if (frag->ref == 1) {

View file

@@ -664,13 +664,15 @@ static void allocs_cb(struct net_pkt *pkt,
return;
buf:
if (func_alloc) {
struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
if (in_use) {
printk("%p/%d\t%5s\t%5s\t%s():%d\n", buf, buf->ref,
str, net_pkt_pool2str(buf->pool), func_alloc,
str, net_pkt_pool2str(pool), func_alloc,
line_alloc);
} else {
printk("%p\t%5s\t%5s\t%s():%d -> %s():%d\n", buf,
str, net_pkt_pool2str(buf->pool), func_alloc,
str, net_pkt_pool2str(pool), func_alloc,
line_alloc, func_free, line_free);
}
}

View file

@@ -71,7 +71,7 @@ NET_BUF_POOL_DEFINE(big_frags_pool, 1, 1280, 0, frag_destroy_big);
static void buf_destroy(struct net_buf *buf)
{
struct net_buf_pool *pool = buf->pool;
struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
destroy_called++;
zassert_equal(pool, &bufs_pool, "Invalid free pointer in buffer");
@@ -80,7 +80,7 @@ static void buf_destroy(struct net_buf *buf)
static void frag_destroy(struct net_buf *buf)
{
struct net_buf_pool *pool = buf->pool;
struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
frag_destroy_called++;
zassert_equal(pool, &frags_pool,
@@ -90,7 +90,7 @@ static void frag_destroy(struct net_buf *buf)
static void frag_destroy_big(struct net_buf *buf)
{
struct net_buf_pool *pool = buf->pool;
struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
frag_destroy_called++;
zassert_equal(pool, &big_frags_pool,
@@ -231,7 +231,8 @@ static void net_buf_test_4(void)
frag = buf->frags;
zassert_equal(frag->pool->user_data_size, 0, "Invalid user data size");
zassert_equal(net_buf_pool_get(frag->pool_id)->user_data_size, 0,
"Invalid user data size");
i = 0;
while (frag) {

View file

@@ -5,6 +5,9 @@
*/
#include <ztest.h>
#include <net/buf.h>
/* Unit-test stub: in a real build this symbol is placed by the linker
 * script (`_net_buf_pool_list = .;` in the _net_buf_pool_area section);
 * a single-element array is enough for these tests.
 */
struct net_buf_pool _net_buf_pool_list[1];
unsigned int irq_lock(void)
{