net: buf: Switch from k_fifo to k_lifo for free buffers
Using a LIFO instead of a FIFO has the potential benefit that more
recently in-use buffers may be "cache-hot" and therefore accessed
faster than the least recently used ones (which is what we get with
a FIFO).

Change-Id: I59bb083ca2e00d0d404406540f7db216742a27cf
Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
commit 692771fbe9
parent d659bb020e

3 changed files with 12 additions and 13 deletions
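For context: a k_lifo hands back the most recently queued item first,
which is the whole point of the change. A minimal sketch of that
behavior, illustration only and not part of the commit (free_list,
item_a and item_b are made-up names):

#include <kernel.h>

/* As with k_fifo, items queued on a k_lifo must reserve their first
 * word for the kernel's internal linked list.
 */
struct item {
        void *lifo_reserved;
        uint8_t data[64];
};

static struct k_lifo free_list;
static struct item item_a, item_b;

void demo(void)
{
        k_lifo_init(&free_list);

        k_lifo_put(&free_list, &item_a);
        k_lifo_put(&free_list, &item_b);        /* queued last... */

        /* ...returned first: item_b was touched most recently, so its
         * memory is the most likely to still be in the CPU cache.
         */
        struct item *hot = k_lifo_get(&free_list, K_NO_WAIT);

        ARG_UNUSED(hot);
}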
@@ -39,9 +39,8 @@ buffers may also contain protocol-specific context, known as user data.
 Both the maximum data and user data capacity of the buffers is
 compile-time defined when declaring the buffer pool.
 
-Since the free buffers are managed with the help of a k_fifo it means
-the buffers have native support for being passed through other FIFOs
-as well. This is a very practical feature when the buffers need to be
+The buffers have native support for being passed through k_fifo kernel
+objects. This is a very practical feature when the buffers need to be
 passed from one thread to another. However, since a net_buf may have a
 fragment chain attached to it, instead of using the :c:func:`k_fifo_put`
 and :c:func:`k_fifo_get` APIs, special :c:func:`net_buf_put` and
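The pattern this documentation describes, sketched with a hypothetical
tx_queue: net_buf_put() and net_buf_get() wrap the k_fifo operations so
that an attached fragment chain travels with the buffer.

K_FIFO_DEFINE(tx_queue);

/* Producer thread: hand off the buffer, fragments included */
void producer(struct net_buf *buf)
{
        net_buf_put(&tx_queue, buf);
}

/* Consumer thread: block until a buffer arrives, then release it */
void consumer(void)
{
        struct net_buf *buf = net_buf_get(&tx_queue, K_FOREVER);

        /* ... process buf->data / buf->len ... */

        net_buf_unref(buf);     /* last reference puts it back in its pool */
}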
@@ -434,8 +434,8 @@ struct net_buf {
 };
 
 struct net_buf_pool {
-        /** FIFO to place the buffer into when free */
-        struct k_fifo free;
+        /** LIFO to place the buffer into when free */
+        struct k_lifo free;
 
         /** Number of buffers in pool */
         const uint16_t buf_count;
@@ -459,7 +459,7 @@ struct net_buf_pool {
 #define NET_BUF_POOL_INITIALIZER(_pool, _bufs, _count, _size, _ud_size, \
                                  _destroy)                              \
         {                                                               \
-                .free = K_FIFO_INITIALIZER(_pool.free),                 \
+                .free = K_LIFO_INITIALIZER(_pool.free),                 \
                 .__bufs = (struct net_buf *)_bufs,                      \
                 .buf_count = _count,                                    \
                 .uninit_count = _count,                                 \
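For reference, pools are normally declared through a convenience macro
that expands to this initializer; assuming the NET_BUF_POOL_DEFINE()
helper from the same header, a declaration looks like:

/* A sketch: 10 buffers of 128 bytes data each, no user data, no
 * destroy callback. After this commit the pool's free list is a
 * statically initialized k_lifo instead of a k_fifo.
 */
NET_BUF_POOL_DEFINE(tx_pool, 10, 128, 0, NULL);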
@@ -553,7 +553,7 @@ struct net_buf *net_buf_get(struct k_fifo *fifo, int32_t timeout);
  */
 static inline void net_buf_destroy(struct net_buf *buf)
 {
-        k_fifo_put(&buf->pool->free, buf);
+        k_lifo_put(&buf->pool->free, buf);
 }
 
 /**
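net_buf_destroy() is also what a custom destroy callback must end with
when a pool is declared with one; a sketch (my_buf_destroy is a made-up
name):

static void my_buf_destroy(struct net_buf *buf)
{
        /* ... release any external resources tied to this buffer ... */

        /* Hand the buffer back to the pool: after this commit, a
         * k_lifo_put() into buf->pool->free.
         */
        net_buf_destroy(buf);
}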
@@ -95,10 +95,10 @@ struct net_buf *net_buf_alloc(struct net_buf_pool *pool, int32_t timeout)
 
         /* If this is not the first access to the pool, we can
          * be opportunistic and try to fetch a previously used
-         * buffer from the FIFO with K_NO_WAIT.
+         * buffer from the LIFO with K_NO_WAIT.
          */
         if (pool->uninit_count < pool->buf_count) {
-                buf = k_fifo_get(&pool->free, K_NO_WAIT);
+                buf = k_lifo_get(&pool->free, K_NO_WAIT);
                 if (buf) {
                         irq_unlock(key);
                         goto success;
@@ -116,17 +116,17 @@ struct net_buf *net_buf_alloc(struct net_buf_pool *pool, int32_t timeout)
 
 #if defined(CONFIG_NET_BUF_DEBUG)
         if (timeout == K_FOREVER) {
-                buf = k_fifo_get(&pool->free, K_NO_WAIT);
+                buf = k_lifo_get(&pool->free, K_NO_WAIT);
                 if (!buf) {
                         NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
                                      func, line, pool);
-                        buf = k_fifo_get(&pool->free, timeout);
+                        buf = k_lifo_get(&pool->free, timeout);
                 }
         } else {
-                buf = k_fifo_get(&pool->free, timeout);
+                buf = k_lifo_get(&pool->free, timeout);
         }
 #else
-        buf = k_fifo_get(&pool->free, timeout);
+        buf = k_lifo_get(&pool->free, timeout);
 #endif
         if (!buf) {
                 NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
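From the caller's side the switch is invisible; allocation still looks
like the sketch below (tx_pool as in the earlier sketch, send_data is a
made-up name), it just now tends to return the most recently freed,
cache-hot buffer first:

#include <string.h>
#include <net/buf.h>

void send_data(const uint8_t *data, uint16_t len)
{
        struct net_buf *buf = net_buf_alloc(&tx_pool, K_FOREVER);

        /* Reserve len bytes at the tail and copy the payload in */
        memcpy(net_buf_add(buf, len), data, len);

        /* ... queue or transmit the buffer ... */

        net_buf_unref(buf);
}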