From 7720f6ed1c962094a69121065c95dc993c435caa Mon Sep 17 00:00:00 2001
From: Johan Hedberg
Date: Sun, 5 Jun 2016 16:55:47 +0300
Subject: [PATCH] net: buf: Introduce support for fragmentation

Introduce support for buffer fragment chains that are linked together.
This is done with the help of a flag while the buffer is inside a FIFO
(indicating that the next fragment follows it in the same FIFO) and
with the help of a "next" pointer while the buffer is outside of a
FIFO.

In order to do this "marshaling" properly, the new net_buf_put() API
must always be used when inserting a buffer into a FIFO.
Correspondingly, the net_buf_get() and net_buf_get_timeout() functions
are extended to support getting buffers from arbitrary FIFOs and to
reassemble the fragment chain based on the flags that the received
buffers contain.

Inserting a fragment chain into a FIFO with net_buf_put() is done
atomically with the help of irq_lock/unlock, since FIFOs support
multiple writers. However, since there is only ever a single reader
per FIFO, no similar locking is necessary on the read side.

Change-Id: I0ec579f63ea8d063f50e3f1f4c2e80ec399622d7
Signed-off-by: Johan Hedberg
---
 include/net/buf.h | 52 +++++++++++++++++++++++++++++++++---------
 net/buf.c         | 57 ++++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 94 insertions(+), 15 deletions(-)

diff --git a/include/net/buf.h b/include/net/buf.h
index c0ac1372837..9bd6ab5b42c 100644
--- a/include/net/buf.h
+++ b/include/net/buf.h
@@ -33,9 +33,23 @@ extern "C" {
 /* Alignment needed for various parts of the buffer definition */
 #define __net_buf_align __aligned(sizeof(int))
 
+/** Flag indicating that the buffer has associated fragments. Only used
+ * internally by the buffer handling code while the buffer is inside a
+ * FIFO, meaning this never needs to be explicitly set or unset by the
+ * net_buf API user. As long as the buffer is outside of a FIFO, i.e.
+ * in practice always for the user of this API, the buf->frags pointer
+ * should be used instead.
+ */
+#define NET_BUF_FRAGS BIT(0)
+
 struct net_buf {
-	/** FIFO uses first 4 bytes itself, reserve space */
-	int _unused;
+	union {
+		/** FIFO uses first 4 bytes itself, reserve space */
+		int _unused;
+
+		/** Fragments associated with this buffer. */
+		struct net_buf *frags;
+	};
 
 	/** Size of the user data associated with this buffer. */
 	const uint16_t user_data_size;
@@ -43,6 +57,9 @@ struct net_buf {
 	/** Reference count. */
 	uint8_t ref;
 
+	/** Bit-field of buffer flags. */
+	uint8_t flags;
+
 	/** Pointer to the start of data in the buffer. */
 	uint8_t *data;
 
@@ -115,10 +132,12 @@ struct net_buf {
 	} \
 	} while (0)
 
-/** @brief Get a new buffer from the pool.
+/** @brief Get a new buffer from a FIFO.
  *
- * Get buffer from the available buffers pool with specified type and
- * reserved headroom.
+ * Get buffer from a FIFO. The reserve_head parameter is only relevant
+ * if the FIFO in question is a free buffers pool, i.e. the buffer will
+ * end up being initialized upon return. If called for any other FIFO
+ * the reserve_head parameter will be ignored and should be set to 0.
  *
  * @param fifo Which FIFO to take the buffer from.
  * @param reserve_head How much headroom to reserve.
@@ -127,19 +146,21 @@ struct net_buf {
  *
  * @warning If there are no available buffers and the function is
  * called from a task or fiber the call will block until a buffer
- * becomes available in the pool. If you want to make sure no blocking
+ * becomes available in the FIFO. If you want to make sure no blocking
  * happens use net_buf_get_timeout() instead with TICKS_NONE.
  */
 struct net_buf *net_buf_get(struct nano_fifo *fifo, size_t reserve_head);
 
-/** @brief Get a new buffer from the pool.
+/** @brief Get a new buffer from a FIFO.
  *
- * Get buffer from the available buffers pool with specified type and
- * reserved headroom.
+ * Get buffer from a FIFO. The reserve_head parameter is only relevant
+ * if the FIFO in question is a free buffers pool, i.e. the buffer will
+ * end up being initialized upon return. If called for any other FIFO
+ * the reserve_head parameter will be ignored and should be set to 0.
  *
  * @param fifo Which FIFO to take the buffer from.
  * @param reserve_head How much headroom to reserve.
- * @param timeout Affects the action taken should the pool (FIFO) be empty.
+ * @param timeout Affects the action taken should the FIFO be empty.
  * If TICKS_NONE, then return immediately. If TICKS_UNLIMITED, then
  * wait as long as necessary. Otherwise, wait up to the specified
  * number of ticks before timing out.
@@ -149,6 +170,17 @@ struct net_buf *net_buf_get(struct nano_fifo *fifo, size_t reserve_head);
 struct net_buf *net_buf_get_timeout(struct nano_fifo *fifo,
 				    size_t reserve_head, int32_t timeout);
 
+/** @brief Put a buffer into a FIFO
+ *
+ * Put a buffer at the end of a FIFO. If the buffer contains follow-up
+ * fragments this function will take care of inserting them as well
+ * into the FIFO.
+ *
+ * @param fifo Which FIFO to put the buffer to.
+ * @param buf Buffer.
+ */
+void net_buf_put(struct nano_fifo *fifo, struct net_buf *buf);
+
 /** @brief Decrements the reference count of a buffer.
  *
  * Decrements the reference count of a buffer and puts it back into the
diff --git a/net/buf.c b/net/buf.c
index 6d5b0c23988..7ea5a863296 100644
--- a/net/buf.c
+++ b/net/buf.c
@@ -46,7 +46,7 @@ struct net_buf *net_buf_get_timeout(struct nano_fifo *fifo,
 				    size_t reserve_head, int32_t timeout)
 {
-	struct net_buf *buf;
+	struct net_buf *buf, *frag;
 
 	NET_BUF_DBG("fifo %p reserve %u timeout %d\n", fifo, reserve_head,
 		    timeout);
@@ -57,12 +57,34 @@ struct net_buf *net_buf_get_timeout(struct nano_fifo *fifo,
 		return NULL;
 	}
 
-	buf->ref = 1;
-	buf->data = buf->__buf + reserve_head;
-	buf->len = 0;
-
 	NET_BUF_DBG("buf %p fifo %p reserve %u\n", buf, fifo, reserve_head);
 
+	/* If this buffer is from the free buffers FIFO there won't be
+	 * any fragments and we can directly proceed with initializing
+	 * and returning it.
+	 */
+	if (buf->free == fifo) {
+		buf->ref = 1;
+		buf->data = buf->__buf + reserve_head;
+		buf->len = 0;
+		buf->flags = 0;
+		buf->frags = NULL;
+
+		return buf;
+	}
+
+	/* Get any fragments belonging to this buffer */
+	for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
+		frag->frags = nano_fifo_get(fifo, TICKS_NONE);
+		NET_BUF_ASSERT(frag->frags);
+
+		/* The fragments flag is only for FIFO-internal usage */
+		frag->flags &= ~NET_BUF_FRAGS;
+	}
+
+	/* Mark the end of the fragment list */
+	frag->frags = NULL;
+
 	return buf;
 }
 
@@ -82,6 +104,26 @@ struct net_buf *net_buf_get(struct nano_fifo *fifo, size_t reserve_head)
 	return net_buf_get_timeout(fifo, reserve_head, TICKS_UNLIMITED);
 }
 
+void net_buf_put(struct nano_fifo *fifo, struct net_buf *buf)
+{
+	int mask;
+
+	mask = irq_lock();
+
+	while (buf) {
+		struct net_buf *frag = buf->frags;
+
+		if (frag) {
+			buf->flags |= NET_BUF_FRAGS;
+		}
+
+		nano_fifo_put(fifo, buf);
+		buf = frag;
+	}
+
+	irq_unlock(mask);
+}
+
 void net_buf_unref(struct net_buf *buf)
 {
 	NET_BUF_DBG("buf %p ref %u fifo %p\n", buf, buf->ref, buf->free);
@@ -91,6 +133,11 @@ void net_buf_unref(struct net_buf *buf)
 		return;
 	}
 
+	if (buf->frags) {
+		net_buf_unref(buf->frags);
+		buf->frags = NULL;
+	}
+
 	if (buf->destroy) {
 		buf->destroy(buf);
 	} else {
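
Usage note (not part of the patch): the sketch below shows how a producer
might build a fragment chain with the new API. It is a minimal example
assuming a free-buffer FIFO (free_bufs) that is initialized and filled
elsewhere, a destination FIFO (tx_queue) that is drained by a single
reader, and an arbitrary per-fragment payload limit FRAG_PAYLOAD; these
names and the helper queue_fragmented() are hypothetical.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <nanokernel.h>
#include <net/buf.h>

/* Hypothetical FIFOs: free_bufs is a free-buffer pool populated elsewhere,
 * tx_queue is only ever read by a single fiber.
 */
static struct nano_fifo free_bufs;
static struct nano_fifo tx_queue;

/* Arbitrary per-fragment payload limit chosen for this example */
#define FRAG_PAYLOAD 32

static void queue_fragmented(const uint8_t *data, size_t len)
{
	struct net_buf *head = NULL, *prev = NULL;

	while (len) {
		size_t chunk = (len > FRAG_PAYLOAD) ? FRAG_PAYLOAD : len;
		/* Buffers taken from the free-buffer FIFO come back with
		 * frags == NULL and flags == 0, so the chain can simply be
		 * linked through the frags pointer while outside a FIFO.
		 */
		struct net_buf *frag = net_buf_get(&free_bufs, 0);

		memcpy(net_buf_add(frag, chunk), data, chunk);
		data += chunk;
		len -= chunk;

		if (prev) {
			prev->frags = frag;
		} else {
			head = frag;
		}
		prev = frag;
	}

	if (!head) {
		return;
	}

	/* net_buf_put() walks the frags chain, marks every buffer that has a
	 * follow-up fragment with NET_BUF_FRAGS and inserts the whole chain
	 * back-to-back into the FIFO under irq_lock()/irq_unlock().
	 */
	net_buf_put(&tx_queue, head);
}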
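
On the receiving side, net_buf_get() (or net_buf_get_timeout()) reassembles
the chain from the FIFO based on the NET_BUF_FRAGS flags, so the reader sees
a single head buffer with its fragments linked through buf->frags, and
dropping the head reference releases the whole chain. Again a hedged sketch:
tx_reader() and the hw_send() driver call are placeholders, not part of this
patch.

/* Placeholder for whatever actually consumes the data */
extern void hw_send(const uint8_t *data, uint16_t len);

/* Intended to run in the single fiber that reads tx_queue */
static void tx_reader(void)
{
	struct net_buf *buf, *frag;

	while (1) {
		/* reserve_head is 0 because tx_queue is not a free-buffer
		 * pool; the call blocks until a complete chain is available
		 * and returns it with the frags pointers restored.
		 */
		buf = net_buf_get(&tx_queue, 0);

		for (frag = buf; frag; frag = frag->frags) {
			hw_send(frag->data, frag->len);
		}

		/* Unreferencing the head also unreferences every fragment
		 * in the chain, returning the buffers to their free FIFO.
		 */
		net_buf_unref(buf);
	}
}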