Bluetooth: host: Introduce "view" buffer concept

Instead of allocating segments/fragments and copying data into them, we
allocate segments as "views" (or slices) into the original buffer.

The view also gives access to the headroom of the original buffer,
allowing lower layers to push their headers.

We choose not to allow multiple views into the same buffer, as the
headroom of a view would overlap with the data of the previous view.

We mark a buffer as locked (or "in-view") by temporarily setting its
headroom to zero. This effectively stops create_view because the
requested headroom is not available.

Each layer that does some kind of fragmentation and wants to use views
for that needs to maintain a buffer pool (bufsize 0, count = max views)
and a metadata array (size = max views) for the view mechanism to work.

Maximum number of views: the number of parallel buffers from the upper
layer, e.g. the number of L2CAP channels for L2CAP segmentation, or the
number of ACL connections for HCI fragmentation.

Reason for the change:
1. prevent deadlocks or (ATT/SMP) requests timing out
2. save time (zero-copy)
3. save memory (gets rid of frag pools)

L2CAP CoC: would either allocate from the `alloc_seg` application
callback, or worse _steal_ from the same pool, or allocate from the
global ACL pool.

Conn/HCI: would either allocate from `frag_pool` or the global ACL pool.

Signed-off-by: Jonathan Rico <jonathan.rico@nordicsemi.no>
Co-authored-by: Aleksander Wasaznik <aleksander.wasaznik@nordicsemi.no>
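[Editor's note] For concreteness, here is a minimal sketch of the pool-plus-metadata setup the message describes, modeled on the `fragments` pool this commit adds to conn.c. `MY_MAX_VIEWS` and the `my_*` names are hypothetical, not part of the change:

        #include <zephyr/net/buf.h>
        #include "buf_view.h"

        /* One view per parallel upper-layer buffer (hypothetical count). */
        #define MY_MAX_VIEWS 2

        static void my_view_destroy(struct net_buf *buf);

        /* Zero-length buffers: a view borrows the parent's bytes, so the
         * pool only provides the `struct net_buf` bookkeeping and the
         * destroy hook.
         */
        NET_BUF_POOL_FIXED_DEFINE(my_view_pool, MY_MAX_VIEWS, 0,
                                  CONFIG_BT_CONN_TX_USER_DATA_SIZE, my_view_destroy);

        /* One metadata slot per view, indexed by net_buf_id(). */
        static struct bt_buf_view_meta my_view_meta[MY_MAX_VIEWS];

        static void my_view_destroy(struct net_buf *buf)
        {
                /* Unlocks the parent and frees the metadata slot for re-use. */
                bt_buf_destroy_view(buf, &my_view_meta[net_buf_id(buf)]);
        }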
commit 1c8cae30a8 (parent 52dc64f0d9)
9 changed files with 408 additions and 212 deletions
@@ -597,6 +597,7 @@ int bt_l2cap_chan_disconnect(struct bt_l2cap_chan *chan);
 * @return -EINVAL if `buf` or `chan` is NULL.
 * @return -EINVAL if `chan` is not either BR/EDR or LE credit-based.
 * @return -EINVAL if buffer doesn't have enough bytes reserved to fit header.
 * @return -EINVAL if buffer's reference counter != 1.
 * @return -EMSGSIZE if `buf` is larger than `chan`'s MTU.
 * @return -ENOTCONN if underlying conn is disconnected.
 * @return -ESHUTDOWN if L2CAP channel is disconnected.
@@ -273,6 +273,14 @@ config BT_CONN_TX_USER_DATA_SIZE
          Necessary user_data size for allowing packet fragmentation when
          sending over HCI. See `struct tx_meta` in conn.c.

config BT_CONN_FRAG_COUNT
        int
        default BT_MAX_CONN if BT_CONN
        default BT_ISO_MAX_CHAN if BT_ISO
        help
          Internal kconfig that sets the maximum number of simultaneous data
          packets in flight. It should be equal to the number of connections.

if BT_CONN

config BT_CONN_TX_MAX
@@ -8,12 +8,16 @@
#include <zephyr/bluetooth/buf.h>
#include <zephyr/bluetooth/l2cap.h>

#include "buf_view.h"
#include "hci_core.h"
#include "conn_internal.h"
#include "iso_internal.h"

#include <zephyr/bluetooth/hci.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_buf, CONFIG_BT_LOG_LEVEL);

#if defined(CONFIG_BT_CONN)
#if defined(CONFIG_BT_ISO)
#define MAX_EVENT_COUNT CONFIG_BT_MAX_CONN + CONFIG_BT_ISO_MAX_CHAN
@@ -147,3 +151,67 @@ struct net_buf_pool *bt_buf_get_num_complete_pool(void)
}
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
#endif /* ZTEST_UNITTEST */

struct net_buf *bt_buf_make_view(struct net_buf *view,
                                 struct net_buf *parent,
                                 size_t len,
                                 struct bt_buf_view_meta *meta)
{
        __ASSERT_NO_MSG(len);
        __ASSERT_NO_MSG(view);
        /* The whole point of this API is to allow prepending data. If the
         * headroom is 0, that will not happen.
         */
        __ASSERT_NO_MSG(net_buf_headroom(parent) > 0);

        /* `parent` should have been just re-used instead of trying to make a
         * view into it.
         */
        __ASSERT_NO_MSG(len < parent->len);

        __ASSERT_NO_MSG(!bt_buf_has_view(parent));

        LOG_DBG("make-view %p viewsize %zu meta %p", view, len, meta);

        net_buf_simple_clone(&parent->b, &view->b);
        view->size = net_buf_headroom(parent) + len;
        view->len = len;
        view->flags = NET_BUF_EXTERNAL_DATA;

        /* we have a view, eat `len`'s worth of data from the parent */
        (void)net_buf_pull(parent, len);

        meta->backup.data = parent->data;
        parent->data = NULL;

        meta->backup.size = parent->size;
        parent->size = 0;

        /* The ref to `parent` is moved in by passing `parent` as argument. */
        /* save backup & "clip" the buffer so the next `make_view` will fail */
        meta->parent = parent;
        parent = NULL;

        return view;
}

void bt_buf_destroy_view(struct net_buf *view, struct bt_buf_view_meta *meta)
{
        LOG_DBG("destroy-view %p meta %p", view, meta);
        __ASSERT_NO_MSG(meta->parent);

        /* "unclip" the parent buf */
        meta->parent->data = meta->backup.data;
        meta->parent->size = meta->backup.size;

        net_buf_unref(meta->parent);

        memset(meta, 0, sizeof(*meta));
        net_buf_destroy(view);
}

bool bt_buf_has_view(const struct net_buf *parent)
{
        /* This is enforced by `make_view`. See the comment there. */
        return parent->size == 0 && parent->data == NULL;
}
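[Editor's note] An illustrative walk-through of the arithmetic above, with assumed numbers (not taken from the commit):

        /* Assume: parent->size == 128, headroom == 8, parent->len == 100.
         * bt_buf_make_view(view, parent, 27, &meta) then does:
         *   - net_buf_simple_clone(): view->data == parent->data
         *   - view->size = 8 (headroom) + 27 = 35, view->len = 27
         *     -> a lower layer can still push up to 8 bytes of headers
         *   - net_buf_pull(parent, 27): parent->len drops to 73
         *   - parent->data/size are saved in `meta` and zeroed ("clipped"),
         *     so bt_buf_has_view(parent) == true and a second view is refused
         * bt_buf_destroy_view(view, &meta) restores parent->data/size and
         * drops the parent reference that was moved into the view.
         */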
subsys/bluetooth/host/buf_view.h (new file, 85 lines)
@@ -0,0 +1,85 @@
/** @file
 * @brief Bluetooth "view" buffer abstraction
 */

/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_SUBSYS_BLUETOOTH_HOST_BUF_VIEW_H_
#define ZEPHYR_SUBSYS_BLUETOOTH_HOST_BUF_VIEW_H_

#include <zephyr/net/buf.h>

struct bt_buf_view_meta {
        struct net_buf *parent;
        /* saves the data pointers while the parent buffer is locked. */
        struct net_buf_simple backup;
};

/** @internal
 *
 * @brief Create a "view" or "window" into an existing buffer.
 * - enforces one active view at a time per-buffer
 *   -> this restriction enables prepending data (i.e. for headers)
 * - forbids appending data to the view
 * - pulls the size of the view from said buffer.
 *
 * The "virtual buffer" that is generated has to be allocated from a buffer
 * pool. This is to allow refcounting and attaching a destroy callback. The
 * configured size of the buffers in that pool should be zero-length.
 *
 * The user-data size is application-dependent, but should be minimized to save
 * memory. user_data is not used by the view API.
 *
 * The view mechanism needs to store extra metadata in order to unlock the
 * original buffer when the view is destroyed.
 *
 * The storage and allocation of the view buf pool and the view metadata is the
 * application's responsibility.
 *
 * @note The `headroom` param is only used for __ASSERT(). The idea is that
 * it's easier to debug a headroom assert failure at allocation time, rather
 * than later down the line when a lower layer tries to add its headers and
 * fails.
 *
 * @param view   Uninitialized "view" buffer
 * @param parent Buffer from which data is pulled into `view`
 * @param len    Amount to pull
 * @param meta   Uninitialized metadata storage
 *
 * @return view if the operation was successful. NULL on error.
 */
struct net_buf *bt_buf_make_view(struct net_buf *view,
                                 struct net_buf *parent,
                                 size_t len,
                                 struct bt_buf_view_meta *meta);

/** @internal
 *
 * @brief Check if `parent` has a view.
 *
 * Returns true if `parent` has been passed to @ref bt_buf_make_view() and the
 * resulting view buffer has not yet been destroyed.
 */
bool bt_buf_has_view(const struct net_buf *parent);

/** @internal
 *
 * @brief Destroy the view buffer
 *
 * Equivalent of @ref net_buf_destroy.
 * It is mandatory to call this from the view pool's `destroy` callback.
 *
 * This frees the parent buffer, and allows calling @ref bt_buf_make_view
 * again. The metadata is also freed for re-use.
 *
 * @param view View to destroy
 * @param meta Meta that was given to @ref bt_buf_make_view
 */
void bt_buf_destroy_view(struct net_buf *view, struct bt_buf_view_meta *meta);

#endif /* ZEPHYR_SUBSYS_BLUETOOTH_HOST_BUF_VIEW_H_ */
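[Editor's note] A usage sketch of the contract documented above, reusing the hypothetical `my_view_pool`/`my_view_meta` names from the sketch after the commit message; none of this is part of the change itself:

        static struct net_buf *my_take_window(struct net_buf *pdu, size_t winsize)
        {
                struct net_buf *view;

                if (bt_buf_has_view(pdu)) {
                        /* `pdu` stays locked until the previous view is destroyed. */
                        return NULL;
                }

                view = net_buf_alloc(&my_view_pool, K_NO_WAIT);
                if (!view) {
                        return NULL;
                }

                /* winsize must be strictly smaller than pdu->len
                 * (bt_buf_make_view() asserts this). The call moves one
                 * reference to `pdu` into the view.
                 */
                return bt_buf_make_view(view, net_buf_ref(pdu), winsize,
                                        &my_view_meta[net_buf_id(view)]);
        }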
@@ -19,6 +19,7 @@
#include <zephyr/bluetooth/conn.h>
#include <zephyr/drivers/bluetooth/hci_driver.h>

#include "host/buf_view.h"
#include "host/hci_core.h"
#include "host/conn_internal.h"
#include "l2cap_br_internal.h"
@@ -30,6 +30,7 @@
#include "common/assert.h"
#include "common/bt_str.h"

#include "buf_view.h"
#include "addr_internal.h"
#include "hci_core.h"
#include "id.h"
@@ -103,19 +104,6 @@ NET_BUF_POOL_DEFINE(acl_tx_pool, CONFIG_BT_L2CAP_TX_BUF_COUNT,
                    BT_L2CAP_BUF_SIZE(CONFIG_BT_L2CAP_TX_MTU),
                    CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);

#if CONFIG_BT_L2CAP_TX_FRAG_COUNT > 0
/* Dedicated pool for fragment buffers in case queued up TX buffers don't
 * fit the controller's buffer size. We can't use the acl_tx_pool for the
 * fragmentation, since it's possible that pool is empty and all buffers
 * are queued up in the TX queue. In such a situation, trying to allocate
 * another buffer from the acl_tx_pool would result in a deadlock.
 */
NET_BUF_POOL_FIXED_DEFINE(frag_pool, CONFIG_BT_L2CAP_TX_FRAG_COUNT,
                          BT_BUF_ACL_SIZE(CONFIG_BT_BUF_ACL_TX_SIZE),
                          CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);
#endif /* CONFIG_BT_L2CAP_TX_FRAG_COUNT > 0 */

#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
const struct bt_conn_auth_cb *bt_auth;
sys_slist_t bt_auth_info_cbs = SYS_SLIST_STATIC_INIT(&bt_auth_info_cbs);
@@ -132,6 +120,54 @@ static struct bt_conn sco_conns[CONFIG_BT_MAX_SCO_CONN];
#endif /* CONFIG_BT_CLASSIC */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CONN_TX)
void frag_destroy(struct net_buf *buf);

/* Storage for fragments (views) into the upper layers' PDUs. */
/* TODO: remove user-data requirements */
NET_BUF_POOL_FIXED_DEFINE(fragments, CONFIG_BT_CONN_FRAG_COUNT, 0,
                          CONFIG_BT_CONN_TX_USER_DATA_SIZE, frag_destroy);

struct frag_md {
        struct bt_buf_view_meta view_meta;
};
struct frag_md frag_md_pool[CONFIG_BT_CONN_FRAG_COUNT];

struct frag_md *get_frag_md(struct net_buf *fragment)
{
        return &frag_md_pool[net_buf_id(fragment)];
}

void bt_tx_irq_raise(void);
void frag_destroy(struct net_buf *frag)
{
        /* allow next view to be allocated (and unlock the parent buf) */
        bt_buf_destroy_view(frag, &get_frag_md(frag)->view_meta);
}

static struct net_buf *get_acl_frag(struct net_buf *outside, size_t winsize)
{
        struct net_buf *window;

        __ASSERT_NO_MSG(!bt_buf_has_view(outside));

        /* Keeping a ref is the caller's responsibility */
        window = net_buf_alloc_len(&fragments, 0, K_NO_WAIT);
        if (!window) {
                return window;
        }

        __ASSERT_NO_MSG(outside->ref == 1);

        window = bt_buf_make_view(window, net_buf_ref(outside),
                                  winsize, &get_frag_md(window)->view_meta);

        LOG_INF("get-acl-frag: outside %p window %p size %zu", outside, window, winsize);

        return window;
}
#endif /* CONFIG_BT_CONN_TX */

#if defined(CONFIG_BT_ISO)
extern struct bt_conn iso_conns[CONFIG_BT_ISO_MAX_CHAN];
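[Editor's note] Summarizing the fragment lifecycle the code above sets up; this is an illustrative note, not part of the diff:

        /* Lifecycle of one ACL fragment:
         * 1. get_acl_frag(buf, winsize)
         *      - allocates a zero-size `window` from the `fragments` pool
         *      - bt_buf_make_view() moves a ref of `buf` into the view and
         *        locks `buf` (bt_buf_has_view(buf) == true)
         * 2. the view is handed to the controller like any other net_buf
         * 3. when the driver unrefs the view, the pool's destroy callback
         *    (frag_destroy) runs bt_buf_destroy_view(), which unlocks `buf`
         *    and drops the moved ref
         * 4. send_buf() may now carve the next window out of `buf`
         */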
@@ -661,12 +697,21 @@ fail:
        return err;
}

static int send_frag(struct bt_conn *conn,
                     struct net_buf *buf, struct net_buf *frag,
                     uint8_t flags)
static bool fits_single_ctlr_buf(struct net_buf *buf, struct bt_conn *conn)
{
        return buf->len <= conn_mtu(conn);
}

static int send_frag(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
        struct net_buf *frag;
        struct bt_conn_tx *tx = NULL;

        if (bt_buf_has_view(buf)) {
                LOG_ERR("already have view");
                return -EWOULDBLOCK;
        }

        /* Check if the controller can accept ACL packets */
        if (k_sem_take(bt_conn_get_pkts(conn), K_NO_WAIT)) {
                LOG_DBG("no controller bufs");
@@ -681,12 +726,25 @@ static int send_frag(struct bt_conn *conn,
                return -ENOTCONN;
        }

        /* Add the data to the buffer */
        if (frag) {
                uint16_t frag_len = MIN(conn_mtu(conn), net_buf_tailroom(frag));
        if (!fits_single_ctlr_buf(buf, conn)) {
                uint16_t frag_len = MIN(conn_mtu(conn), buf->len);

                LOG_DBG("send frag: buf %p len %d", buf, frag_len);

                /* will also do a pull */
                frag = get_acl_frag(buf, frag_len);
                /* Fragments never have a TX completion callback */
                tx_data(frag)->cb = NULL;
                tx_data(frag)->is_cont = false;

                int err = do_send_frag(conn, frag, flags, tx);

                if (err == -EIO) {
                        net_buf_unref(frag);
                }

                return err;

                net_buf_add_mem(frag, buf->data, frag_len);
                net_buf_pull(buf, frag_len);
        } else {
                if (tx_data(buf)->cb) {
                        tx = conn_tx_alloc();
@@ -709,41 +767,9 @@
         */
        buf = net_buf_get(&conn->tx_queue, K_NO_WAIT);
        frag = buf;

        return do_send_frag(conn, frag, flags, tx);
}

        return do_send_frag(conn, frag, flags, tx);
}

static struct net_buf *create_frag(struct bt_conn *conn, struct net_buf *buf)
{
        struct net_buf *frag;

        switch (conn->type) {
#if defined(CONFIG_BT_ISO)
        case BT_CONN_TYPE_ISO:
                frag = bt_iso_create_frag(0);
                break;
#endif
        default:
#if defined(CONFIG_BT_CONN)
                frag = bt_conn_create_frag(0);
#else
                return NULL;
#endif /* CONFIG_BT_CONN */
        }

        if (conn->state != BT_CONN_CONNECTED) {
                net_buf_unref(frag);
                return NULL;
        }

        /* Fragments never have a TX completion callback */
        tx_data(frag)->cb = NULL;
        tx_data(frag)->is_cont = false;
        tx_data(frag)->iso_has_ts = tx_data(buf)->iso_has_ts;

        return frag;
}

/* Tentatively send a buffer to the HCI driver.
@@ -765,19 +791,22 @@
 */
static int send_buf(struct bt_conn *conn, struct net_buf *buf)
{
        struct net_buf *frag;
        uint8_t flags;
        int err;

        LOG_DBG("conn %p buf %p len %u", conn, buf, buf->len);

        if (bt_buf_has_view(buf)) {
                LOG_DBG("locked by existing view");
                return -EWOULDBLOCK;
        }

        /* Send directly if the packet fits the ACL MTU */
        if (buf->len <= conn_mtu(conn) && !tx_data(buf)->is_cont) {
                LOG_DBG("send single");
                return send_frag(conn, buf, NULL, FRAG_SINGLE);
                return send_frag(conn, buf, FRAG_SINGLE);
        }

        LOG_DBG("start fragmenting");
        /*
         * Send the fragments. For the last one simply use the original
         * buffer (which works since we've used net_buf_pull on it).
@@ -788,25 +817,22 @@ static int send_buf(struct bt_conn *conn, struct net_buf *buf)
        }

        while (buf->len > conn_mtu(conn)) {
                frag = create_frag(conn, buf);
                if (!frag) {
                        return -ENOMEM;
                }

                err = send_frag(conn, buf, frag, flags);
                tx_data(buf)->is_cont = true;
                err = send_frag(conn, buf, flags);
                if (err) {
                        LOG_DBG("%p failed, mark as existing frag", buf);
                        tx_data(buf)->is_cont = flags != FRAG_START;
                        net_buf_unref(frag);
                        return err;
                }

                flags = FRAG_CONT;
        }

        LOG_DBG("last frag");
        tx_data(buf)->is_cont = true;
        return send_frag(conn, buf, NULL, FRAG_END);
        bool single = flags == FRAG_START;

        LOG_DBG("send %s", single ? "single" : "last");

        /* Frag is either a direct-send or the last in the series. */
        return send_frag(conn, buf, single ? FRAG_SINGLE : FRAG_END);
}

static struct k_poll_signal conn_change =
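[Editor's note] A worked example of the resulting packet sequence; the MTU value is assumed for illustration:

        /* Assume conn_mtu(conn) == 27. A 100-byte ACL PDU leaves the host as:
         *   send_frag(FRAG_START, 27) -> send_frag(FRAG_CONT, 27)
         *   -> send_frag(FRAG_CONT, 27) -> send_frag(FRAG_END, 19)
         * where the END call reuses the original (pulled) buffer instead of
         * a view. A 20-byte PDU skips the loop and goes out as one
         * FRAG_SINGLE packet.
         */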
@@ -3372,28 +3398,6 @@ int bt_conn_le_conn_update(struct bt_conn *conn,
        return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CONN_UPDATE, buf, NULL);
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *bt_conn_create_frag_timeout_debug(size_t reserve,
                                                  k_timeout_t timeout,
                                                  const char *func, int line)
#else
struct net_buf *bt_conn_create_frag_timeout(size_t reserve, k_timeout_t timeout)
#endif
{
        struct net_buf_pool *pool = NULL;

#if CONFIG_BT_L2CAP_TX_FRAG_COUNT > 0
        pool = &frag_pool;
#endif

#if defined(CONFIG_NET_BUF_LOG)
        return bt_conn_create_pdu_timeout_debug(pool, reserve, timeout,
                                                func, line);
#else
        return bt_conn_create_pdu_timeout(pool, reserve, timeout);
#endif /* CONFIG_NET_BUF_LOG */
}

#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
int bt_conn_auth_cb_register(const struct bt_conn_auth_cb *cb)
{
@@ -48,11 +48,6 @@ NET_BUF_POOL_FIXED_DEFINE(iso_tx_pool, CONFIG_BT_ISO_TX_BUF_COUNT,
                          BT_ISO_SDU_BUF_SIZE(CONFIG_BT_ISO_TX_MTU),
                          CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);

#if CONFIG_BT_ISO_TX_FRAG_COUNT > 0
NET_BUF_POOL_FIXED_DEFINE(iso_frag_pool, CONFIG_BT_ISO_TX_FRAG_COUNT,
                          BT_ISO_SDU_BUF_SIZE(CONFIG_BT_ISO_TX_MTU),
                          CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);
#endif /* CONFIG_BT_ISO_TX_FRAG_COUNT > 0 */
#endif /* CONFIG_BT_ISO_UNICAST || CONFIG_BT_ISO_BROADCAST */

struct bt_conn iso_conns[CONFIG_BT_ISO_MAX_CHAN];
@@ -178,28 +173,6 @@ struct net_buf *bt_iso_create_pdu_timeout(struct net_buf_pool *pool,
#endif
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *bt_iso_create_frag_timeout_debug(size_t reserve,
                                                 k_timeout_t timeout,
                                                 const char *func, int line)
#else
struct net_buf *bt_iso_create_frag_timeout(size_t reserve, k_timeout_t timeout)
#endif
{
        struct net_buf_pool *pool = NULL;

#if CONFIG_BT_ISO_TX_FRAG_COUNT > 0
        pool = &iso_frag_pool;
#endif /* CONFIG_BT_ISO_TX_FRAG_COUNT > 0 */

#if defined(CONFIG_NET_BUF_LOG)
        return bt_conn_create_pdu_timeout_debug(pool, reserve, timeout, func,
                                                line);
#else
        return bt_conn_create_pdu_timeout(pool, reserve, timeout);
#endif
}

static int hci_le_setup_iso_data_path(const struct bt_conn *iso, uint8_t dir,
                                      const struct bt_iso_chan_path *path)
{
@@ -12,6 +12,7 @@
#include <errno.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/math_extras.h>
@@ -25,6 +26,7 @@

#define LOG_DBG_ENABLED IS_ENABLED(CONFIG_BT_L2CAP_LOG_LEVEL_DBG)

#include "buf_view.h"
#include "hci_core.h"
#include "conn_internal.h"
#include "l2cap_internal.h"
@@ -88,6 +90,86 @@ struct bt_l2cap {
static const struct bt_l2cap_ecred_cb *ecred_cb;
static struct bt_l2cap bt_l2cap_pool[CONFIG_BT_MAX_CONN];

static void seg_destroy(struct net_buf *buf);
#define SEGMENTS_COUNT CONFIG_BT_MAX_CONN
/* see equivalent struct in conn.c */
NET_BUF_POOL_FIXED_DEFINE(seg_pool, SEGMENTS_COUNT, 0,
                          CONFIG_BT_CONN_TX_USER_DATA_SIZE, seg_destroy);

struct seg_md {
        struct bt_l2cap_le_chan *lechan;
        struct bt_buf_view_meta view_meta;
};

struct seg_md seg_md_pool[SEGMENTS_COUNT];

struct seg_md *get_seg_md(struct net_buf *seg)
{
        return &seg_md_pool[net_buf_id(seg)];
}

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
static void l2cap_chan_tx_resume(struct bt_l2cap_le_chan *ch);
#endif

static void seg_destroy(struct net_buf *seg)
{
        /* Only relevant if there is segmentation going on. This is not possible
         * for LE ACL fixed channels, only for credit-based ones.
         */
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
        struct bt_l2cap_le_chan *lechan = get_seg_md(seg)->lechan;

        get_seg_md(seg)->lechan = NULL;

        LOG_INF("destroy %p (parent %p)", seg, lechan->tx_buf);

        /* allow next view to be allocated (and unlock the parent buf) */
        bt_buf_destroy_view(seg, &get_seg_md(seg)->view_meta);

        /* try to allocate and send next view PDU */
        l2cap_chan_tx_resume(lechan);
#endif
}

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
static struct net_buf *get_seg(struct net_buf *sdu,
                               size_t seg_size,
                               struct bt_l2cap_le_chan *lechan)
{
        struct net_buf *view;

        __ASSERT_NO_MSG(!bt_buf_has_view(sdu));

        /* optimization: don't allocate if we know `make_view` will return `sdu` */
        if ((seg_size >= sdu->len) &&
            (net_buf_headroom(sdu) >= BT_L2CAP_BUF_SIZE(0))) {

                LOG_INF("view >= bufsize, returning it");

                return sdu;
        }

        /* Keeping a ref is the caller's responsibility */
        view = net_buf_alloc(&seg_pool, K_NO_WAIT);
        if (!view) {
                /* This should not happen if the pool is properly configured. */
                __ASSERT_NO_MSG(view);
                return NULL;
        }

        __ASSERT_NO_MSG(sdu->ref == 1);

        get_seg_md(view)->lechan = lechan;
        view = bt_buf_make_view(view, net_buf_ref(sdu),
                                seg_size, &get_seg_md(view)->view_meta);

        LOG_INF("alloc-w-view: sdu %p view %p size %zu", sdu, view, seg_size);

        return view;
}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

void bt_l2cap_register_ecred_cb(const struct bt_l2cap_ecred_cb *cb)
{
        ecred_cb = cb;
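[Editor's note] A worked example of the `get_seg` fast path above; the MPS value is assumed:

        /* Assume ch->tx.mps == 64.
         *
         * 40-byte SDU with >= BT_L2CAP_BUF_SIZE(0) bytes of headroom:
         *   seg_size (64) >= sdu->len (40) and the headroom check passes
         *   -> get_seg() returns `sdu` itself: no view, no allocation.
         *
         * 200-byte SDU:
         *   -> get_seg() allocates from seg_pool and returns a 64-byte view;
         *      `sdu` stays locked until seg_destroy() runs on that view,
         *      which also calls l2cap_chan_tx_resume() to send the next
         *      segment.
         */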
@@ -648,6 +730,16 @@ int bt_l2cap_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
        hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
        hdr->cid = sys_cpu_to_le16(cid);

        if (buf->ref != 1) {
                /* The host may alter the buf contents when fragmenting. Higher
                 * layers cannot expect the buf contents to stay intact. Extra
                 * refs suggest a silent data corruption would occur if not for
                 * this error.
                 */
                LOG_ERR("Expecting 1 ref, got %d", buf->ref);
                return -EINVAL;
        }

        return bt_conn_send_cb(conn, buf, cb, user_data);
}
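[Editor's note] The same rule is enforced for the public bt_l2cap_chan_send() later in this commit. The practical consequence for callers, as a hypothetical sketch (`my_chan` and the allocation of `buf` are elided):

        struct net_buf *extra = net_buf_ref(buf);   /* ref count is now 2 */

        int err = bt_l2cap_chan_send(&my_chan->chan, buf);
        /* err == -EINVAL: the host refuses to fragment a buffer it does not
         * exclusively own, because it pulls/alters the data in place.
         */

        net_buf_unref(extra);                 /* back to 1; a retry is OK */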
@@ -924,6 +1016,8 @@ static void l2cap_chan_tx_process(struct k_work *work)

        ch = CONTAINER_OF(k_work_delayable_from_work(work), struct bt_l2cap_le_chan, tx_work);

        LOG_INF("%s: %p", __func__, ch);

        if (bt_l2cap_chan_get_state(&ch->chan) != BT_L2CAP_CONNECTED) {
                LOG_DBG("Cannot send on non-connected channel");
                return;
@@ -940,17 +1034,25 @@
                ret = l2cap_chan_le_send_sdu(ch, buf);
                if (ret < 0) {
                        if (ret == -EAGAIN) {
                                /* Out of credits or buffer already locked. Work
                                 * will be restarted upon receiving credits and
                                 * when a segment buffer is freed.
                                 */
                                LOG_INF("out of credits/windows");

                                ch->tx_buf = buf;
                                /* If we don't reschedule, and the app doesn't nudge l2cap (e.g. by
                                 * sending another SDU), the channel will be stuck in limbo. To
                                 * prevent this, we reschedule with a configurable delay.
                                 * FIXME: is this still necessary?
                                 */
                                k_work_schedule(&ch->tx_work, K_MSEC(CONFIG_BT_L2CAP_RESCHED_MS));
                        } else {
                                LOG_WRN("Failed to send (err %d), dropping buf %p", ret, buf);
                                l2cap_tx_buf_destroy(ch->chan.conn, buf, ret);
                        }
                        break;

                        return;
                }
        }
}
@@ -1820,28 +1922,6 @@ static void le_disconn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
        bt_l2cap_chan_del(&chan->chan);
}

static struct net_buf *l2cap_alloc_seg(struct bt_l2cap_le_chan *ch)
{
        struct net_buf *seg = NULL;

        /* Use the user-defined allocator */
        if (ch->chan.ops->alloc_seg) {
                seg = ch->chan.ops->alloc_seg(&ch->chan);
                __ASSERT_NO_MSG(seg);
        }

        /* Fallback to using global connection tx pool */
        if (!seg) {
                seg = bt_l2cap_create_pdu_timeout(NULL, 0, K_NO_WAIT);
        }

        if (seg) {
                net_buf_reserve(seg, BT_L2CAP_CHAN_SEND_RESERVE);
        }

        return seg;
}

static void l2cap_chan_tx_resume(struct bt_l2cap_le_chan *ch)
{
        if (!atomic_get(&ch->tx.credits) ||
@@ -1896,8 +1976,6 @@ static void l2cap_chan_seg_sent(struct bt_conn *conn, void *user_data, int err)
                /* Received segment sent callback for disconnected channel */
                return;
        }

        l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
}

static bool test_and_dec(atomic_t *target)
@@ -1926,14 +2004,18 @@
 * In all cases the original buffer is unaffected so it can be pushed back to
 * be sent later.
 */
static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch,
                              struct net_buf *buf, uint16_t sdu_hdr_len)
static int l2cap_chan_le_send_seg(struct bt_l2cap_le_chan *ch, struct net_buf *buf)
{
        struct net_buf *seg;
        struct net_buf_simple_state state;
        int len, err;
        bt_conn_tx_cb_t cb;

        if (bt_buf_has_view(buf)) {
                LOG_DBG("Already have TX inflight");
                return -EAGAIN;
        }

        if (!test_and_dec(&ch->tx.credits)) {
                LOG_DBG("No credits to transmit packet");
                return -EAGAIN;
@@ -1942,48 +2024,37 @@
        /* Save state so it can be restored if we failed to send */
        net_buf_simple_save(&buf->b, &state);

        if ((buf->len <= ch->tx.mps) &&
            (net_buf_headroom(buf) >= BT_L2CAP_BUF_SIZE(0))) {
                LOG_DBG("len <= MPS, not allocating seg for %p", buf);
                /* move `buf` to `seg`. `buf` now borrows `seg`. */
                seg = buf;
        seg = get_seg(buf, ch->tx.mps, ch);

                len = seg->len;
        } else {
                LOG_DBG("allocating segment for %p (%u bytes left)", buf, buf->len);
                seg = l2cap_alloc_seg(ch);
                if (!seg) {
                        LOG_DBG("failed to allocate seg for %p", buf);
                        atomic_inc(&ch->tx.credits);

                        return -EAGAIN;
                }

                /* Don't send more than TX MPS */
                len = MIN(net_buf_tailroom(seg), ch->tx.mps);

                /* Limit if original buffer is smaller than the segment */
                len = MIN(buf->len, len);

                net_buf_add_mem(seg, buf->data, len);
                net_buf_pull(buf, len);
        CHECKIF(!seg) {
                /* Future work: Give the channel a tx state
                 * machine, so that we remember that we took a
                 * credit and don't need to give it back here.
                 */
                LOG_WRN("Out of segment buffers.");
                atomic_inc(&ch->tx.credits);
                return -ENOBUFS;
        }

        LOG_DBG("ch %p cid 0x%04x len %u credits %lu", ch, ch->tx.cid, seg->len,
                atomic_get(&ch->tx.credits));

        len = seg->len - sdu_hdr_len;
        len = seg->len;

        /* SDU will be considered sent when there is no data left in the
         * buffer, or if there will be no data left, if we are sending `buf`
         * directly.
         */
        if (buf->len == 0 || (buf == seg && buf->len == len)) {
                LOG_INF("last PDU");
                cb = l2cap_chan_sdu_sent;
        } else {
                LOG_INF("send PDU left %u", buf->len);
                cb = l2cap_chan_seg_sent;
        }

        len = seg->len;

        /* Forward the PDU to the lower layer.
         *
         * Note: after this call, anything in buf->user_data should be
@@ -1993,8 +2064,11 @@ static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch,
        err = bt_l2cap_send_cb(ch->chan.conn, ch->tx.cid, seg,
                               cb, UINT_TO_POINTER(ch->tx.cid));

        /* The only possible error is -ENOTCONN; in that case the data will
         * be discarded anyway.
         */
        __ASSERT_NO_MSG(!err || err == -ENOTCONN);

        if (err) {
                LOG_DBG("Unable to send seg %d", err);
                LOG_INF("Unable to send seg %d", err);
                atomic_inc(&ch->tx.credits);

                /* The host takes ownership of the reference in seg when
@@ -2005,19 +2079,11 @@ static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch,
                        buf == seg ? "orig" : "seg");

                if (seg == buf) {
                        /* move `seg` to `buf` */
                        /* move `buf` back to caller */
                } else {
                        net_buf_unref(seg);
                }

                if (err == -ENOBUFS) {
                        /* Restore state since segment could not be sent */
                        net_buf_simple_restore(&buf->b, &state);
                        err = -EAGAIN;
                }

                /* move `buf` back to caller */

                return err;
        }
@@ -2049,7 +2115,10 @@ static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
        rem_len = buf->len;

        while (sent != rem_len) {
                ret = l2cap_chan_le_send(ch, buf, 0);
                /* `buf` is moved only when it's fully consumed
                 * (i.e. sent == rem_len).
                 */
                ret = l2cap_chan_le_send_seg(ch, buf);
                if (ret < 0) {
                        LOG_DBG("failed to send buf (ch %p cid 0x%04x sent %d)",
                                ch, ch->tx.cid, sent);
|
|||
|
||||
LOG_DBG("chan %p buf %p len %zu", chan, buf, buf->len);
|
||||
|
||||
if (buf->ref != 1) {
|
||||
LOG_DBG("Expecting 1 ref, got %d", buf->ref);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
|
||||
return -ENOTCONN;
|
||||
}
|
||||
|
|
|
@@ -20,7 +20,6 @@ CREATE_FLAG(flag_l2cap_connected);
#define L2CAP_CHANS NUM_PERIPHERALS
#define SDU_NUM 20
#define SDU_LEN 3000
#define NUM_SEGMENTS 100
#define RESCHEDULE_DELAY K_MSEC(100)

static void sdu_destroy(struct net_buf *buf)
@@ -30,13 +29,6 @@ static void sdu_destroy(struct net_buf *buf)
        net_buf_destroy(buf);
}

static void segment_destroy(struct net_buf *buf)
{
        LOG_DBG("%p", buf);

        net_buf_destroy(buf);
}

static void rx_destroy(struct net_buf *buf)
{
        LOG_DBG("%p", buf);
@@ -49,11 +41,6 @@ NET_BUF_POOL_DEFINE(sdu_tx_pool,
                    CONFIG_BT_MAX_CONN, BT_L2CAP_SDU_BUF_SIZE(SDU_LEN),
                    CONFIG_BT_CONN_TX_USER_DATA_SIZE, sdu_destroy);

NET_BUF_POOL_DEFINE(segment_pool,
                    /* MTU + 4 l2cap hdr + 4 ACL hdr */
                    NUM_SEGMENTS, BT_L2CAP_BUF_SIZE(CONFIG_BT_L2CAP_TX_MTU),
                    CONFIG_BT_CONN_TX_USER_DATA_SIZE, segment_destroy);

/* Only one SDU per link will be received at a time */
NET_BUF_POOL_DEFINE(sdu_rx_pool,
                    CONFIG_BT_MAX_CONN, BT_L2CAP_SDU_BUF_SIZE(SDU_LEN),
@@ -62,7 +49,6 @@ NET_BUF_POOL_DEFINE(sdu_rx_pool,
static uint8_t tx_data[SDU_LEN];
static uint16_t rx_cnt;
static uint8_t disconnect_counter;
static uint32_t max_seg_allocated;

struct test_ctx {
        struct k_work_delayable work_item;
@@ -113,19 +99,6 @@ int l2cap_chan_send(struct bt_l2cap_chan *chan, uint8_t *data, size_t len)
        return ret;
}

struct net_buf *alloc_seg_cb(struct bt_l2cap_chan *chan)
{
        struct net_buf *buf = net_buf_alloc(&segment_pool, K_NO_WAIT);

        if ((NUM_SEGMENTS - segment_pool.avail_count) > max_seg_allocated) {
                max_seg_allocated++;
        }

        ASSERT(buf, "Ran out of segment buffers");

        return buf;
}

struct net_buf *alloc_buf_cb(struct bt_l2cap_chan *chan)
{
        return net_buf_alloc(&sdu_rx_pool, K_NO_WAIT);
@@ -163,7 +136,19 @@ int recv_cb(struct bt_l2cap_chan *chan, struct net_buf *buf)
        rx_cnt++;

        /* Verify SDU data matches TX'd data. */
        ASSERT(memcmp(buf->data, tx_data, buf->len) == 0, "RX data doesn't match TX");
        /* Note: memcmp() returns a sign, not an offset; the loop below
         * pinpoints the first mismatching byte.
         */
        int pos = memcmp(buf->data, tx_data, buf->len);

        if (pos != 0) {
                LOG_ERR("RX data doesn't match TX: pos %d", pos);
                LOG_HEXDUMP_ERR(buf->data, buf->len, "RX data");
                LOG_HEXDUMP_INF(tx_data, buf->len, "TX data");

                for (uint16_t p = 0; p < buf->len; p++) {
                        __ASSERT(buf->data[p] == tx_data[p],
                                 "Failed rx[%d]=%x != expect[%d]=%x",
                                 p, buf->data[p], p, tx_data[p]);
                }
        }

        return 0;
}
@@ -192,7 +177,6 @@ static struct bt_l2cap_chan_ops ops = {
        .connected = l2cap_chan_connected_cb,
        .disconnected = l2cap_chan_disconnected_cb,
        .alloc_buf = alloc_buf_cb,
        .alloc_seg = alloc_seg_cb,
        .recv = recv_cb,
        .sent = sent_cb,
};
@@ -474,8 +458,6 @@ static void test_central_main(void)
        }
        LOG_DBG("All peripherals disconnected.");

        LOG_INF("Max segment pool usage: %u bufs", max_seg_allocated);

        PASS("L2CAP STRESS Central passed\n");
}