From 692771fbe95e9e0555011e3f96c073c078d85664 Mon Sep 17 00:00:00 2001
From: Johan Hedberg
Date: Wed, 14 Dec 2016 08:24:19 +0200
Subject: [PATCH] net: buf: Switch from k_fifo to k_lifo for free buffers

Using a LIFO instead of a FIFO has the potential benefit that more
recently in-use buffers may be "cache-hot" and therefore accessed
faster than least recently used (which is what we get with a FIFO).

Change-Id: I59bb083ca2e00d0d404406540f7db216742a27cf
Signed-off-by: Johan Hedberg
---
 doc/subsystems/networking/buffers.rst |  5 ++---
 include/net/buf.h                     |  8 ++++----
 subsys/net/buf.c                      | 12 ++++++------
 3 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/doc/subsystems/networking/buffers.rst b/doc/subsystems/networking/buffers.rst
index 634c024d83e..1ad89016dca 100644
--- a/doc/subsystems/networking/buffers.rst
+++ b/doc/subsystems/networking/buffers.rst
@@ -39,9 +39,8 @@ buffers may also contain protocol-specific context, known as user data. Both
 the maximum data and user data capacity of the buffers is compile-time
 defined when declaring the buffer pool.
 
-Since the free buffers are managed with the help of a k_fifo it means
-the buffers have native support for being passed through other FIFOs
-as well. This is a very practical feature when the buffers need to be
+The buffers have native support for being passed through k_fifo kernel
+objects. This is a very practical feature when the buffers need to be
 passed from one thread to another. However, since a net_buf may have a
 fragment chain attached to it, instead of using the :c:func:`k_fifo_put`
 and :c:func:`k_fifo_get` APIs, special :c:func:`net_buf_put` and
diff --git a/include/net/buf.h b/include/net/buf.h
index 931eedd0b52..858cb33fc8f 100644
--- a/include/net/buf.h
+++ b/include/net/buf.h
@@ -434,8 +434,8 @@ struct net_buf {
 };
 
 struct net_buf_pool {
-	/** FIFO to place the buffer into when free */
-	struct k_fifo free;
+	/** LIFO to place the buffer into when free */
+	struct k_lifo free;
 
 	/** Number of buffers in pool */
 	const uint16_t buf_count;
@@ -459,7 +459,7 @@ struct net_buf_pool {
 #define NET_BUF_POOL_INITIALIZER(_pool, _bufs, _count, _size, _ud_size, \
 				 _destroy)                               \
 	{                                                                \
-		.free = K_FIFO_INITIALIZER(_pool.free),                  \
+		.free = K_LIFO_INITIALIZER(_pool.free),                  \
 		.__bufs = (struct net_buf *)_bufs,                       \
 		.buf_count = _count,                                     \
 		.uninit_count = _count,                                  \
@@ -553,7 +553,7 @@ struct net_buf *net_buf_get(struct k_fifo *fifo, int32_t timeout);
  */
 static inline void net_buf_destroy(struct net_buf *buf)
 {
-	k_fifo_put(&buf->pool->free, buf);
+	k_lifo_put(&buf->pool->free, buf);
 }
 
 /**
diff --git a/subsys/net/buf.c b/subsys/net/buf.c
index afa0a269e55..7e545dc4ded 100644
--- a/subsys/net/buf.c
+++ b/subsys/net/buf.c
@@ -95,10 +95,10 @@ struct net_buf *net_buf_alloc(struct net_buf_pool *pool, int32_t timeout)
 	/* If this is not the first access to the pool, we can
 	 * be opportunistic and try to fetch a previously used
-	 * buffer from the FIFO with K_NO_WAIT.
+	 * buffer from the LIFO with K_NO_WAIT.
 	 */
 	if (pool->uninit_count < pool->buf_count) {
-		buf = k_fifo_get(&pool->free, K_NO_WAIT);
+		buf = k_lifo_get(&pool->free, K_NO_WAIT);
 		if (buf) {
 			irq_unlock(key);
 			goto success;
 		}
@@ -116,17 +116,17 @@ struct net_buf *net_buf_alloc(struct net_buf_pool *pool, int32_t timeout)
 #if defined(CONFIG_NET_BUF_DEBUG)
 	if (timeout == K_FOREVER) {
-		buf = k_fifo_get(&pool->free, K_NO_WAIT);
+		buf = k_lifo_get(&pool->free, K_NO_WAIT);
 		if (!buf) {
 			NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
 				     func, line, pool);
-			buf = k_fifo_get(&pool->free, timeout);
+			buf = k_lifo_get(&pool->free, timeout);
 		}
 	} else {
-		buf = k_fifo_get(&pool->free, timeout);
+		buf = k_lifo_get(&pool->free, timeout);
 	}
 #else
-	buf = k_fifo_get(&pool->free, timeout);
+	buf = k_lifo_get(&pool->free, timeout);
 #endif
 	if (!buf) {
 		NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);