net: buf: Redesigned API with split data and meta-data
Redesign of the net_buf_simple and net_buf structs, where the data payload portion is split to a separately allocated chunk of memory. In practice this means that buf->__buf becomes a pointer from having just been a marker (empty array) for where the payload begins right after the meta-data.

Fixes #3283

Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
parent 09dcbcc579
commit dd09cbc1c4
10 changed files with 357 additions and 146 deletions
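To make the commit message concrete, here is a minimal before/after sketch of the affected fields of struct net_buf_simple (other members elided, types as used in this tree; the "before" half is shown as a comment so the fragment stays a single compilable sketch):

    #include <zephyr/types.h>   /* u8_t, u16_t */

    /* Before: payload storage lives immediately after the metadata.
     *
     *     struct net_buf_simple {
     *             u8_t  *data;
     *             u16_t  len;
     *             u16_t  size;
     *             u8_t   __buf[0] __net_buf_align;   // marker only
     *     };
     *
     * After: payload is a separately provided/allocated chunk.
     */
    struct net_buf_simple {
            u8_t  *data;   /* current read position inside the payload */
            u16_t  len;    /* amount of data behind the data pointer */
            u16_t  size;   /* capacity of the chunk __buf points at */
            u8_t  *__buf;  /* separately allocated data storage */
    };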
@@ -29,28 +29,41 @@ extern "C" {
 /* Alignment needed for various parts of the buffer definition */
 #define __net_buf_align __aligned(sizeof(int))
 
-/** @def NET_BUF_SIMPLE
- * @brief Define a net_buf_simple stack variable and get a pointer to it.
+/** @def NET_BUF_SIMPLE_DEFINE
+ * @brief Define a net_buf_simple stack variable.
  *
- * This is a helper macro which is used to define a net_buf_simple object on
- * the stack and the get a pointer to it as follows:
- *
- * struct net_buf_simple *my_buf = NET_BUF_SIMPLE(10);
- *
- * After creating the object it needs to be initialized by calling
- * net_buf_simple_init().
+ * This is a helper macro which is used to define a net_buf_simple object
+ * on the stack.
  *
+ * @param _name Name of the net_buf_simple object.
  * @param _size Maximum data storage for the buffer.
- *
- * @return Pointer to stack-allocated net_buf_simple object.
  */
-#define NET_BUF_SIMPLE(_size) \
-        ((struct net_buf_simple *)(&(struct { \
-                struct net_buf_simple buf; \
-                u8_t data[_size] __net_buf_align; \
-        }) { \
-                .buf.size = _size, \
-        }))
+#define NET_BUF_SIMPLE_DEFINE(_name, _size) \
+        u8_t net_buf_data_##_name[_size]; \
+        struct net_buf_simple _name = { \
+                .data = net_buf_data_##_name, \
+                .len = 0, \
+                .size = _size, \
+                .__buf = net_buf_data_##_name, \
+        }
+
+/** @def NET_BUF_SIMPLE_DEFINE_STATIC
+ * @brief Define a static net_buf_simple variable.
+ *
+ * This is a helper macro which is used to define a static net_buf_simple
+ * object.
+ *
+ * @param _name Name of the net_buf_simple object.
+ * @param _size Maximum data storage for the buffer.
+ */
+#define NET_BUF_SIMPLE_DEFINE_STATIC(_name, _size) \
+        static u8_t net_buf_data_##_name[_size]; \
+        static struct net_buf_simple _name = { \
+                .data = net_buf_data_##_name, \
+                .len = 0, \
+                .size = _size, \
+                .__buf = net_buf_data_##_name, \
+        }
 
 /** @brief Simple network buffer representation.
  *
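A minimal usage sketch of the two macros added above (buffer names and sizes are illustrative, not taken from the patch):

    #include <string.h>
    #include <net/buf.h>

    /* File-scope buffer with static storage duration. */
    NET_BUF_SIMPLE_DEFINE_STATIC(beacon_buf, 64);

    void build_msg(void)
    {
            /* Stack buffer; unlike NET_BUF_SIMPLE() no init call is needed,
             * since the macro wires .__buf and .data to the backing array.
             */
            NET_BUF_SIMPLE_DEFINE(msg, 32);

            memcpy(net_buf_simple_add(&msg, 3), "abc", 3);
    }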
@@ -78,24 +91,66 @@ struct net_buf_simple {
         /** Start of the data storage. Not to be accessed directly
          * (the data pointer should be used instead).
          */
-        u8_t __buf[0] __net_buf_align;
+        u8_t *__buf;
 };
 
+/** @def NET_BUF_SIMPLE
+ * @brief Define a net_buf_simple stack variable and get a pointer to it.
+ *
+ * This is a helper macro which is used to define a net_buf_simple object on
+ * the stack and the get a pointer to it as follows:
+ *
+ * struct net_buf_simple *my_buf = NET_BUF_SIMPLE(10);
+ *
+ * After creating the object it needs to be initialized by calling
+ * net_buf_simple_init().
+ *
+ * @param _size Maximum data storage for the buffer.
+ *
+ * @return Pointer to stack-allocated net_buf_simple object.
+ */
+#define NET_BUF_SIMPLE(_size) \
+        ((struct net_buf_simple *)(&(struct { \
+                struct net_buf_simple buf; \
+                u8_t data[_size] __net_buf_align; \
+        }) { \
+                .buf.size = _size, \
+        }))
+
 /** @brief Initialize a net_buf_simple object.
  *
- * This needs to be called after creating a net_buf_simple object e.g. using
+ * This needs to be called after creating a net_buf_simple object using
  * the NET_BUF_SIMPLE macro.
  *
  * @param buf Buffer to initialize.
  * @param reserve_head Headroom to reserve.
+ *
+ * @warning This API should *only* be used when the net_buf_simple object
+ *          has been created using the NET_BUF_SIMPLE() macro. For any other
+ *          kinf of creation there is no need to call this API (in fact,
+ *          it will result in undefined behavior).
  */
 static inline void net_buf_simple_init(struct net_buf_simple *buf,
                                        size_t reserve_head)
 {
+        buf->__buf = (u8_t *)buf + sizeof(*buf);
         buf->data = buf->__buf + reserve_head;
         buf->len = 0;
 }
 
+/**
+ * @brief Reset buffer
+ *
+ * Reset buffer data so it can be reused for other purposes.
+ *
+ * @param buf Buffer to reset.
+ */
+static inline void net_buf_simple_reset(struct net_buf_simple *buf)
+{
+        buf->len = 0;
+        buf->data = buf->__buf;
+}
+
 /**
  * @brief Prepare data to be added at the end of the buffer
  *
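The legacy NET_BUF_SIMPLE() path kept above still needs the explicit init call; roughly (size and contents illustrative):

    #include <string.h>
    #include <net/buf.h>

    void scratch_example(void)
    {
            struct net_buf_simple *buf = NET_BUF_SIMPLE(16);

            /* Only needed for NET_BUF_SIMPLE(): points __buf at the storage
             * the macro placed right after the struct and reserves headroom.
             */
            net_buf_simple_init(buf, 0);

            memcpy(net_buf_simple_add(buf, 2), "hi", 2);

            /* Drop the contents and reuse the same storage. */
            net_buf_simple_reset(buf);
    }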
@@ -431,22 +486,30 @@ struct net_buf {
 
                         /** Amount of data that this buffer can store. */
                         u16_t size;
+
+                        /** Start of the data storage. Not to be accessed
+                         * directly (the data pointer should be used
+                         * instead).
+                         */
+                        u8_t *__buf;
                 };
 
                 struct net_buf_simple b;
         };
 
-        /** Start of the data storage. Not to be accessed directly
-         * (the data pointer should be used instead).
-         */
-        u8_t __buf[0] __net_buf_align;
-
-        /** After __buf (as given by the "size" field, which can be 0),
-         * there may be so-called "user data", which is actually a system
-         * metadata for this buffer. This area can be accessed using
-         * net_buf_user_data(). (Its size is equal to
-         * this->pool->user_data_size.)
-         */
+        /** System metadata for this buffer. */
+        u8_t user_data[CONFIG_NET_BUF_USER_DATA_SIZE] __net_buf_align;
+};
+
+struct net_buf_data_cb {
+        u8_t * (*alloc)(struct net_buf *buf, size_t *size, s32_t timeout);
+        u8_t * (*ref)(struct net_buf *buf, u8_t *data);
+        void   (*unref)(struct net_buf *buf, u8_t *data);
+};
+
+struct net_buf_data_alloc {
+        const struct net_buf_data_cb *cb;
+        void *alloc_data;
 };
 
 struct net_buf_pool {
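The three callbacks above make the payload allocator pluggable per pool. Purely as an illustration (not part of this patch), a heap-backed allocator could look roughly like the sketch below; the names are ours, and it assumes k_malloc()/k_free() are acceptable for the use case. Leaving .ref unset means net_buf_clone() falls back to copying the data.

    #include <zephyr.h>
    #include <net/buf.h>

    static u8_t *heap_data_alloc(struct net_buf *buf, size_t *size, s32_t timeout)
    {
            ARG_UNUSED(buf);
            ARG_UNUSED(timeout);   /* k_malloc() does not block */

            return k_malloc(*size);
    }

    static void heap_data_unref(struct net_buf *buf, u8_t *data)
    {
            ARG_UNUSED(buf);

            k_free(data);
    }

    static const struct net_buf_data_cb heap_cb = {
            .alloc = heap_data_alloc,
            .unref = heap_data_unref,
    };

    static const struct net_buf_data_alloc heap_alloc = {
            .cb = &heap_cb,
            .alloc_data = NULL,
    };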
@@ -459,12 +522,6 @@ struct net_buf_pool {
         /** Number of uninitialized buffers */
         u16_t uninit_count;
 
-        /** Data size of each buffer in the pool */
-        const u16_t buf_size;
-
-        /** Size of the user data associated with each buffer. */
-        const u16_t user_data_size;
-
 #if defined(CONFIG_NET_BUF_POOL_USAGE)
         /** Amount of available buffers in the pool. */
         s16_t avail_count;
@@ -479,39 +536,86 @@ struct net_buf_pool {
         /** Optional destroy callback when buffer is freed. */
         void (*const destroy)(struct net_buf *buf);
 
-        /** Helper to access the start of storage (for net_buf_pool_init) */
+        /** Data allocation handlers. */
+        const struct net_buf_data_alloc *alloc;
+
+        /** Start of buffer storage array */
         struct net_buf * const __bufs;
 };
 
 #if defined(CONFIG_NET_BUF_POOL_USAGE)
-#define NET_BUF_POOL_INITIALIZER(_pool, _bufs, _count, _size, _ud_size, \
-                                 _destroy) \
+#define NET_BUF_POOL_INITIALIZER(_pool, _alloc, _bufs, _count, _destroy) \
         { \
+                .alloc = _alloc, \
                 .free = _K_LIFO_INITIALIZER(_pool.free), \
-                .__bufs = (struct net_buf *)_bufs, \
+                .__bufs = _bufs, \
                 .buf_count = _count, \
                 .uninit_count = _count, \
                 .avail_count = _count, \
-                .pool_size = sizeof(_net_buf_##_pool), \
-                .buf_size = _size, \
-                .user_data_size = _ud_size, \
                 .destroy = _destroy, \
                 .name = STRINGIFY(_pool), \
         }
 #else
-#define NET_BUF_POOL_INITIALIZER(_pool, _bufs, _count, _size, _ud_size, \
-                                 _destroy) \
+#define NET_BUF_POOL_INITIALIZER(_pool, _alloc, _bufs, _count, _destroy) \
         { \
+                .alloc = _alloc, \
                 .free = _K_LIFO_INITIALIZER(_pool.free), \
-                .__bufs = (struct net_buf *)_bufs, \
+                .__bufs = _bufs, \
                 .buf_count = _count, \
                 .uninit_count = _count, \
-                .buf_size = _size, \
-                .user_data_size = _ud_size, \
                 .destroy = _destroy, \
         }
 #endif /* CONFIG_NET_BUF_POOL_USAGE */
 
+struct net_buf_pool_fixed {
+        size_t data_size;
+        u8_t *data_pool;
+};
+
+extern const struct net_buf_data_cb net_buf_fixed_cb;
+
+/** @def NET_BUF_POOL_FIXED_DEFINE
+ * @brief Define a new pool for buffers based on fixed-size data
+ *
+ * Defines a net_buf_pool struct and the necessary memory storage (array of
+ * structs) for the needed amount of buffers. After this, the buffers can be
+ * accessed from the pool through net_buf_alloc. The pool is defined as a
+ * static variable, so if it needs to be exported outside the current module
+ * this needs to happen with the help of a separate pointer rather than an
+ * extern declaration.
+ *
+ * The data payload of the buffers will be allocated from a byte array
+ * of fixed sized chunks. This kind of pool does not support blocking on
+ * the data allocation, so the timeout passed to net_buf_alloc will be
+ * always treated as K_NO_WAIT when trying to allocate the data. This means
+ * that allocation failures, i.e. NULL returns, must always be handled
+ * cleanly.
+ *
+ * If provided with a custom destroy callback, this callback is
+ * responsible for eventually calling net_buf_destroy() to complete the
+ * process of returning the buffer to the pool.
+ *
+ * @param _name      Name of the pool variable.
+ * @param _count     Number of buffers in the pool.
+ * @param _data_size Maximum data payload per buffer.
+ * @param _destroy   Optional destroy callback when buffer is freed.
+ */
+#define NET_BUF_POOL_FIXED_DEFINE(_name, _count, _data_size, _destroy) \
+        static struct net_buf net_buf_##_name[_count] __noinit; \
+        static u8_t net_buf_data_##_name[_count][_data_size]; \
+        static const struct net_buf_pool_fixed net_buf_fixed_##_name = { \
+                .data_size = _data_size, \
+                .data_pool = (u8_t *)net_buf_data_##_name, \
+        }; \
+        static const struct net_buf_data_alloc net_buf_fixed_alloc_##_name = {\
+                .cb = &net_buf_fixed_cb, \
+                .alloc_data = (void *)&net_buf_fixed_##_name, \
+        }; \
+        struct net_buf_pool _name __net_buf_align \
+                        __in_section(_net_buf_pool, static, _name) = \
+                NET_BUF_POOL_INITIALIZER(_name, &net_buf_fixed_alloc_##_name, \
+                                         net_buf_##_name, _count, _destroy)
+
 /** @def NET_BUF_POOL_DEFINE
  * @brief Define a new pool for buffers
  *
@@ -533,16 +637,8 @@ struct net_buf_pool {
  * @param _destroy Optional destroy callback when buffer is freed.
  */
 #define NET_BUF_POOL_DEFINE(_name, _count, _size, _ud_size, _destroy) \
-        static struct { \
-                struct net_buf buf; \
-                u8_t data[_size] __net_buf_align; \
-                u8_t ud[ROUND_UP(_ud_size, 4)] __net_buf_align; \
-        } _net_buf_##_name[_count] __noinit; \
-        struct net_buf_pool _name __net_buf_align \
-                        __in_section(_net_buf_pool, static, _name) = \
-                NET_BUF_POOL_INITIALIZER(_name, _net_buf_##_name, \
-                                         _count, _size, _ud_size, _destroy)
+        BUILD_ASSERT(_ud_size <= CONFIG_NET_BUF_USER_DATA_SIZE); \
+        NET_BUF_POOL_FIXED_DEFINE(_name, _count, _size, _destroy)
 
 /**
  * @brief Looks up a pool based on its ID.
@@ -581,12 +677,43 @@ int net_buf_id(struct net_buf *buf);
  * @return New buffer or NULL if out of buffers.
  */
 #if defined(CONFIG_NET_BUF_LOG)
-struct net_buf *net_buf_alloc_debug(struct net_buf_pool *pool, s32_t timeout,
-                                    const char *func, int line);
-#define net_buf_alloc(_pool, _timeout) \
-        net_buf_alloc_debug(_pool, _timeout, __func__, __LINE__)
+struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
+                                          s32_t timeout, const char *func,
+                                          int line);
+#define net_buf_alloc_fixed(_pool, _timeout) \
+        net_buf_alloc_fixed_debug(_pool, _timeout, __func__, __LINE__)
 #else
-struct net_buf *net_buf_alloc(struct net_buf_pool *pool, s32_t timeout);
+struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, s32_t timeout);
+#endif
+
+#define net_buf_alloc(_pool, _timeout) net_buf_alloc_fixed(_pool, _timeout)
+
+/**
+ * @brief Allocate a new buffer from a pool.
+ *
+ * Allocate a new buffer from a pool.
+ *
+ * @param pool Which pool to allocate the buffer from.
+ * @param size Amount of data the buffer must be able to fit.
+ * @param timeout Affects the action taken should the pool be empty.
+ *        If K_NO_WAIT, then return immediately. If K_FOREVER, then
+ *        wait as long as necessary. Otherwise, wait up to the specified
+ *        number of milliseconds before timing out. Note that some types
+ *        of data allocators do not support blocking (such as the HEAP
+ *        type). In this case it's still possible for net_buf_alloc() to
+ *        fail (return NULL) even if it was given K_FOREVER.
+ *
+ * @return New buffer or NULL if out of buffers.
+ */
+#if defined(CONFIG_NET_BUF_LOG)
+struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
+                                        s32_t timeout, const char *func,
+                                        int line);
+#define net_buf_alloc_len(_pool, _size, _timeout) \
+        net_buf_alloc_len_debug(_pool, _size, _timeout, __func__, __LINE__)
+#else
+struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
+                                  s32_t timeout);
 #endif
 
 /**
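Putting the new entry points together, a pool definition and allocation could look like the following sketch (pool name, count and sizes are illustrative, not from the patch):

    #include <zephyr.h>
    #include <net/buf.h>

    /* 10 buffers, each backed by a 128-byte fixed payload chunk. */
    NET_BUF_POOL_FIXED_DEFINE(tx_pool, 10, 128, NULL);

    void tx_example(void)
    {
            /* Same call sites as before the redesign. */
            struct net_buf *buf = net_buf_alloc(&tx_pool, K_NO_WAIT);

            /* Explicit length request; for a fixed pool the allocator clamps
             * the resulting buf->size to at most the pool's data_size.
             */
            struct net_buf *small = net_buf_alloc_len(&tx_pool, 32, K_NO_WAIT);

            if (buf) {
                    net_buf_add_u8(buf, 0x01);
                    net_buf_unref(buf);
            }
            if (small) {
                    net_buf_unref(small);
            }
    }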
||||||
* @param buf Buffer to initialize.
|
* @param buf Buffer to initialize.
|
||||||
* @param reserve How much headroom to reserve.
|
* @param reserve How much headroom to reserve.
|
||||||
*/
|
*/
|
||||||
void net_buf_reserve(struct net_buf *buf, size_t reserve);
|
void net_buf_simple_reserve(struct net_buf_simple *buf, size_t reserve);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Put a buffer into a list
|
* @brief Put a buffer into a list
|
||||||
|
@@ -733,9 +860,21 @@ struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout);
  */
 static inline void *net_buf_user_data(struct net_buf *buf)
 {
-        return (void *)ROUND_UP((buf->__buf + buf->size), sizeof(int));
+        return (void *)buf->user_data;
 }
 
+/** @def net_buf_reserve
+ * @brief Initialize buffer with the given headroom.
+ *
+ * Initializes a buffer with a given headroom. The buffer is not expected to
+ * contain any data when this API is called.
+ *
+ * @param buf Buffer to initialize.
+ * @param reserve How much headroom to reserve.
+ */
+#define net_buf_reserve(buf, reserve) net_buf_simple_reserve(&(buf)->b, \
+                                                             reserve)
+
 /**
  * @def net_buf_add
  * @brief Prepare data to be added at the end of the buffer
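With user_data now a fixed member sized by CONFIG_NET_BUF_USER_DATA_SIZE, callers keep using net_buf_user_data() unchanged and reserve headroom through the new macro. A rough sketch (struct pkt_meta is a hypothetical metadata layout of ours, not from the patch):

    #include <net/buf.h>

    /* Must fit within CONFIG_NET_BUF_USER_DATA_SIZE bytes. */
    struct pkt_meta {
            u8_t type;
    };

    void prepare(struct net_buf *buf)
    {
            struct pkt_meta *meta = net_buf_user_data(buf); /* &buf->user_data[0] */

            meta->type = 1;

            /* Expands to net_buf_simple_reserve(&buf->b, 4). */
            net_buf_reserve(buf, 4);
    }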
@@ -46,7 +46,6 @@ CONFIG_BT_DEVICE_NAME="Zephyr"
 
 CONFIG_BT_L2CAP_TX_BUF_COUNT=3
 CONFIG_BT_L2CAP_TX_MTU=65
-CONFIG_BT_L2CAP_TX_USER_DATA_SIZE=4
 
 CONFIG_BT_HCI_TX_STACK_SIZE=640
 CONFIG_BT_HCI_HOST=y
@@ -25,6 +25,11 @@ BUILD_ASSERT(CONFIG_SYSTEM_WORKQUEUE_PRIORITY < 0);
  */
 BUILD_ASSERT(CONFIG_BT_HCI_TX_PRIO < CONFIG_BT_RX_PRIO);
 
+/* The Bluetooth subsystem requires network buffers to have at least 8 bytes
+ * reserved for user data.
+ */
+BUILD_ASSERT(CONFIG_NET_BUF_USER_DATA_SIZE >= 8);
+
 #if defined(CONFIG_BT_CTLR)
 /* The Bluetooth Controller's priority receive thread priority shall be higher
  * than the Bluetooth Host's Tx and the Controller's receive thread priority.
@@ -1055,17 +1055,8 @@ void bt_conn_recv(struct bt_conn *conn, struct net_buf *buf, u8_t flags)
 int bt_conn_send_cb(struct bt_conn *conn, struct net_buf *buf,
                     bt_conn_tx_cb_t cb)
 {
-        struct net_buf_pool *pool;
-
         BT_DBG("conn handle %u buf len %u cb %p", conn->handle, buf->len, cb);
 
-        pool = net_buf_pool_get(buf->pool_id);
-        if (pool->user_data_size < BT_BUF_USER_DATA_MIN) {
-                BT_ERR("Too small user data size");
-                net_buf_unref(buf);
-                return -EINVAL;
-        }
-
         if (conn->state != BT_CONN_CONNECTED) {
                 BT_ERR("not connected!");
                 net_buf_unref(buf);
@@ -4290,19 +4290,10 @@ int bt_send(struct net_buf *buf)
 
 int bt_recv(struct net_buf *buf)
 {
-        struct net_buf_pool *pool;
-
         bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);
 
         BT_DBG("buf %p len %u", buf, buf->len);
 
-        pool = net_buf_pool_get(buf->pool_id);
-        if (pool->user_data_size < BT_BUF_USER_DATA_MIN) {
-                BT_ERR("Too small user data size");
-                net_buf_unref(buf);
-                return -EINVAL;
-        }
-
         switch (bt_buf_get_type(buf)) {
 #if defined(CONFIG_BT_CONN)
         case BT_BUF_ACL_IN:
@@ -998,13 +998,10 @@ static inline struct net_buf *l2cap_alloc_seg(struct net_buf *buf)
         struct net_buf *seg;
 
         /* Try to use original pool if possible */
-        if (pool->user_data_size >= BT_BUF_USER_DATA_MIN &&
-            pool->buf_size >= BT_L2CAP_BUF_SIZE(L2CAP_MAX_LE_MPS)) {
-                seg = net_buf_alloc(pool, K_NO_WAIT);
-                if (seg) {
-                        net_buf_reserve(seg, BT_L2CAP_CHAN_SEND_RESERVE);
-                        return seg;
-                }
+        seg = net_buf_alloc(pool, K_NO_WAIT);
+        if (seg) {
+                net_buf_reserve(seg, BT_L2CAP_CHAN_SEND_RESERVE);
+                return seg;
         }
 
         /* Fallback to using global connection tx pool */
@@ -1015,7 +1012,6 @@ static struct net_buf *l2cap_chan_create_seg(struct bt_l2cap_le_chan *ch,
                                              struct net_buf *buf,
                                              size_t sdu_hdr_len)
 {
-        struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
         struct net_buf *seg;
         u16_t headroom;
         u16_t len;
@@ -1025,13 +1021,6 @@ static struct net_buf *l2cap_chan_create_seg(struct bt_l2cap_le_chan *ch,
                 goto segment;
         }
 
-        /* Segment if there is no space in the user_data */
-        if (pool->user_data_size < BT_BUF_USER_DATA_MIN) {
-                BT_WARN("Too small buffer user_data_size %u",
-                        pool->user_data_size);
-                goto segment;
-        }
-
         headroom = BT_L2CAP_CHAN_SEND_RESERVE + sdu_hdr_len;
 
         /* Check if original buffer has enough headroom and don't have any
@@ -15,18 +15,28 @@ config NET_BUF
          This option enables support for generic network protocol
          buffers.
 
+if NET_BUF
+
+config NET_BUF_USER_DATA_SIZE
+        int "Size of user_data available in every network buffer"
+        default 0
+        range 8 65535
+        help
+          Amount of memory reserved in each network buffer for user data. In
+          most cases this can be left as the default value.
+
 config NET_BUF_LOG
         bool "Network buffer logging"
-        depends on NET_BUF
         select SYS_LOG
         default n
         help
          Enable logs and checks for the generic network buffers.
 
+if NET_BUF_LOG
+
 config SYS_LOG_NET_BUF_LEVEL
         int
         prompt "Network buffer Logging level"
-        depends on NET_BUF_LOG && SYS_LOG
+        depends on SYS_LOG
         default 1
         range 0 4
         help
@@ -41,7 +51,6 @@ config SYS_LOG_NET_BUF_LEVEL
 config NET_BUF_WARN_ALLOC_INTERVAL
         int
         prompt "Interval of Network buffer allocation warnings"
-        depends on NET_BUF_LOG
         default 1
         range 0 60
         help
@@ -52,7 +61,6 @@ config NET_BUF_WARN_ALLOC_INTERVAL
 
 config NET_BUF_SIMPLE_LOG
         bool "Network buffer memory debugging"
-        depends on NET_BUF_LOG
         select SYS_LOG
         default n
         help
@@ -60,7 +68,6 @@ config NET_BUF_SIMPLE_LOG
 
 config NET_BUF_POOL_USAGE
         bool "Network buffer pool usage tracking"
-        depends on NET_BUF
         default n
         help
          Enable network buffer pool tracking. This means that:
@@ -68,6 +75,9 @@ config NET_BUF_POOL_USAGE
          * total size of the pool is calculated
          * pool name is stored and can be shown in debugging prints
 
+endif # NET_BUF_LOG
+endif # NET_BUF
+
 config NETWORKING
         bool "Link layer and IP networking support"
         select NET_BUF
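On the application side the new symbol is set like any other Kconfig option. For instance, a Bluetooth host build would be expected to need at least the following in its prj.conf to satisfy the BUILD_ASSERT(CONFIG_NET_BUF_USER_DATA_SIZE >= 8) added in this commit (the exact value is up to the application):

    CONFIG_NET_BUF_USER_DATA_SIZE=8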
subsys/net/buf.c (150 changed lines)
@@ -55,22 +55,11 @@ static int pool_id(struct net_buf_pool *pool)
         return pool - _net_buf_pool_list;
 }
 
-/* Helpers to access the storage array, since we don't have access to its
- * type at this point anymore.
- */
-#define BUF_SIZE(pool) (sizeof(struct net_buf) + \
-                        ROUND_UP(pool->buf_size, 4) + \
-                        ROUND_UP(pool->user_data_size, 4))
-#define UNINIT_BUF(pool, n) (struct net_buf *)(((u8_t *)(pool->__bufs)) + \
-                                               ((n) * BUF_SIZE(pool)))
-
 int net_buf_id(struct net_buf *buf)
 {
         struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
-        u8_t *pool_start = (u8_t *)pool->__bufs;
-        u8_t *buf_ptr = (u8_t *)buf;
 
-        return (buf_ptr - pool_start) / BUF_SIZE(pool);
+        return buf - pool->__bufs;
 }
 
 static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
@@ -78,10 +67,9 @@ static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
 {
         struct net_buf *buf;
 
-        buf = UNINIT_BUF(pool, pool->buf_count - uninit_count);
+        buf = &pool->__bufs[pool->buf_count - uninit_count];
 
         buf->pool_id = pool_id(pool);
-        buf->size = pool->buf_size;
 
         return buf;
 }
@@ -91,23 +79,67 @@ void net_buf_reset(struct net_buf *buf)
         NET_BUF_ASSERT(buf->flags == 0);
         NET_BUF_ASSERT(buf->frags == NULL);
 
-        buf->len = 0;
-        buf->data = buf->__buf;
+        net_buf_simple_reset(&buf->b);
+}
+
+static u8_t *fixed_data_alloc(struct net_buf *buf, size_t *size, s32_t timeout)
+{
+        struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
+        const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;
+
+        *size = min(fixed->data_size, *size);
+
+        return fixed->data_pool + fixed->data_size * net_buf_id(buf);
+}
+
+static void fixed_data_unref(struct net_buf *buf, u8_t *data)
+{
+        /* Nothing needed for fixed-size data pools */
+}
+
+const struct net_buf_data_cb net_buf_fixed_cb = {
+        .alloc = fixed_data_alloc,
+        .unref = fixed_data_unref,
+};
+
+static u8_t *data_alloc(struct net_buf *buf, size_t *size, s32_t timeout)
+{
+        struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
+
+        return pool->alloc->cb->alloc(buf, size, timeout);
+}
+
+static u8_t *data_ref(struct net_buf *buf, u8_t *data)
+{
+        struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
+
+        return pool->alloc->cb->ref(buf, data);
+}
+
+static void data_unref(struct net_buf *buf, u8_t *data)
+{
+        struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
+
+        pool->alloc->cb->unref(buf, data);
 }
 
 #if defined(CONFIG_NET_BUF_LOG)
-struct net_buf *net_buf_alloc_debug(struct net_buf_pool *pool, s32_t timeout,
-                                    const char *func, int line)
+struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
+                                        s32_t timeout, const char *func,
+                                        int line)
 #else
-struct net_buf *net_buf_alloc(struct net_buf_pool *pool, s32_t timeout)
+struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
+                                  s32_t timeout)
 #endif
 {
+        u32_t alloc_start = k_uptime_get_32();
         struct net_buf *buf;
         unsigned int key;
 
         NET_BUF_ASSERT(pool);
 
-        NET_BUF_DBG("%s():%d: pool %p timeout %d", func, line, pool, timeout);
+        NET_BUF_DBG("%s():%d: pool %p size %zu timeout %d", func, line, pool,
+                    size, timeout);
 
         /* We need to lock interrupts temporarily to prevent race conditions
          * when accessing pool->uninit_count.
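To make the chunk lookup in fixed_data_alloc() above concrete (numbers are illustrative): for a pool defined with _count = 10 and _data_size = 128, the backing array is 10 x 128 bytes, so the buffer with net_buf_id(buf) == 3 gets data_pool + 128 * 3, i.e. byte offset 384, and a caller-requested *size larger than 128 is clamped down to 128.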
@@ -178,9 +210,28 @@ struct net_buf *net_buf_alloc(struct net_buf_pool *pool, s32_t timeout)
 success:
         NET_BUF_DBG("allocated buf %p", buf);
 
+        if (size) {
+                if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
+                        u32_t diff = k_uptime_get_32() - alloc_start;
+
+                        timeout -= min(timeout, diff);
+                }
+
+                buf->__buf = data_alloc(buf, &size, timeout);
+                if (!buf->__buf) {
+                        NET_BUF_ERR("%s():%d: Failed to allocate data",
+                                    func, line);
+                        net_buf_destroy(buf);
+                        return NULL;
+                }
+        } else {
+                buf->__buf = NULL;
+        }
+
         buf->ref = 1;
         buf->flags = 0;
         buf->frags = NULL;
+        buf->size = size;
         net_buf_reset(buf);
 
 #if defined(CONFIG_NET_BUF_POOL_USAGE)
@@ -191,6 +242,25 @@ success:
         return buf;
 }
 
+#if defined(CONFIG_NET_BUF_LOG)
+struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
+                                          s32_t timeout, const char *func,
+                                          int line)
+{
+        const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;
+
+        return net_buf_alloc_len_debug(pool, fixed->data_size, timeout, func,
+                                       line);
+}
+#else
+struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, s32_t timeout)
+{
+        const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;
+
+        return net_buf_alloc_len(pool, fixed->data_size, timeout);
+}
+#endif
+
 #if defined(CONFIG_NET_BUF_LOG)
 struct net_buf *net_buf_get_debug(struct k_fifo *fifo, s32_t timeout,
                                   const char *func, int line)
@@ -224,7 +294,7 @@ struct net_buf *net_buf_get(struct k_fifo *fifo, s32_t timeout)
         return buf;
 }
 
-void net_buf_reserve(struct net_buf *buf, size_t reserve)
+void net_buf_simple_reserve(struct net_buf_simple *buf, size_t reserve)
 {
         NET_BUF_ASSERT(buf);
         NET_BUF_ASSERT(buf->len == 0);
@@ -323,6 +393,12 @@ void net_buf_unref(struct net_buf *buf)
                         return;
                 }
 
+                if (buf->__buf) {
+                        data_unref(buf, buf->__buf);
+                        buf->__buf = NULL;
+                }
+
+                buf->data = NULL;
                 buf->frags = NULL;
 
                 pool = net_buf_pool_get(buf->pool_id);
@@ -354,6 +430,7 @@ struct net_buf *net_buf_ref(struct net_buf *buf)
 
 struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout)
 {
+        u32_t alloc_start = k_uptime_get_32();
         struct net_buf_pool *pool;
         struct net_buf *clone;
 
@@ -361,15 +438,38 @@ struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout)
 
         pool = net_buf_pool_get(buf->pool_id);
 
-        clone = net_buf_alloc(pool, timeout);
+        clone = net_buf_alloc_len(pool, 0, timeout);
         if (!clone) {
                 return NULL;
         }
 
-        net_buf_reserve(clone, net_buf_headroom(buf));
-
-        /* TODO: Add reference to the original buffer instead of copying it. */
-        memcpy(net_buf_add(clone, buf->len), buf->data, buf->len);
+        /* If the pool supports data referencing use that. Otherwise
+         * we need to allocate new data and make a copy.
+         */
+        if (pool->alloc->cb->ref) {
+                clone->__buf = data_ref(buf, buf->__buf);
+                clone->data = buf->data;
+                clone->len = buf->len;
+                clone->size = buf->size;
+        } else {
+                size_t size = buf->size;
+
+                if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
+                        u32_t diff = k_uptime_get_32() - alloc_start;
+
+                        timeout -= min(timeout, diff);
+                }
+
+                clone->__buf = data_alloc(clone, &size, timeout);
+                if (!clone->__buf || size < buf->size) {
+                        net_buf_destroy(clone);
+                        return NULL;
+                }
+
+                clone->size = size;
+                clone->data = clone->__buf + net_buf_headroom(buf);
+                net_buf_add_mem(clone, buf->data, buf->len);
+        }
 
         return clone;
 }
@@ -339,16 +339,6 @@ config NET_BUF_DATA_SIZE
          In order to be able to receive at least full IPv6 packet which
          has a size of 1280 bytes, the one should allocate 16 fragments here.
 
-config NET_BUF_USER_DATA_SIZE
-        int "Size of user_data reserved"
-        default 0
-        default 4 if NET_L2_BT
-        help
-          This is for drivers to set how much user_data shall be included in
-          each network data fragment.
-          Example: For Bluetooth, the user_data shall be at least 4 bytes as
-          that is used for identifying the type of data they are carrying.
-
 choice
         prompt "Default Network Interface"
         default NET_DEFAULT_IF_FIRST
@@ -272,12 +272,9 @@ void net_pkt_print_frags(struct net_pkt *pkt)
                 frag_size = frag->size;
                 ll_overhead = net_buf_headroom(frag);
 
-                NET_INFO("[%d] frag %p len %d size %d reserve %d "
-                         "pool %p [sz %d ud_sz %d]",
+                NET_INFO("[%d] frag %p len %d size %d reserve %d pool %p",
                          count, frag, frag->len, frag_size, ll_overhead,
-                         net_buf_pool_get(frag->pool_id),
-                         net_buf_pool_get(frag->pool_id)->buf_size,
-                         net_buf_pool_get(frag->pool_id)->user_data_size);
+                         net_buf_pool_get(frag->pool_id));
 
                 count++;