Bluetooth: ATT: Internalize ATT PDU TX pool

Why?
- metadata is easier to manage as an array + index (see the sketch below)
  - less error-prone -> fewer memory-management bugs
- we can, thanks to the previous refactor
- PDU allocations are more predictable
- the ATT buffer size can be tuned by the application
- isolates ATT from the rest of the ACL users
  - decouples the ATT PDU size from e.g. SMP w/ LESC

Drawbacks:
- higher memory usage
- Kconfig change (existing buffer-count settings need to be migrated)

The higher memory use is only temporary; it will be followed up with more
refactors that should bring it back down.

Signed-off-by: Jonathan Rico <jonathan.rico@nordicsemi.no>
Co-authored-by: Aleksander Wasaznik <aleksander.wasaznik@nordicsemi.no>
commit a05a47573a
Authored by Jonathan Rico on 2023-11-29 15:17:17 +01:00, committed by Johan Hedberg.
10 changed files with 66 additions and 95 deletions.
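The core of the change shows up in att.c below: a TX buffer's metadata now lives in a fixed array indexed by the buffer's position in a dedicated pool, so the buffer and its metadata share a lifetime and no separate alloc/free is needed. A minimal sketch of that array + index pattern, using made-up names (my_pool, my_meta) rather than the actual Zephyr symbols, with the header path as of this commit:

/* Sketch only: my_pool/my_meta are illustrative, not real Zephyr symbols. */
#include <zephyr/net/buf.h>

#define MY_TX_COUNT 3   /* plays the role of CONFIG_BT_ATT_TX_COUNT */
#define MY_BUF_SIZE 64

/* One metadata slot per buffer in the pool. */
NET_BUF_POOL_DEFINE(my_pool, MY_TX_COUNT, MY_BUF_SIZE, 0, NULL);

static struct my_meta {
	void *owner;
	uint16_t attr_count;
} my_meta_storage[MY_TX_COUNT];

static struct my_meta *my_get_meta(const struct net_buf *buf)
{
	/* net_buf_id() returns the buffer's index within its pool, which
	 * doubles as the index of its metadata slot.
	 */
	return &my_meta_storage[net_buf_id((struct net_buf *)buf)];
}

In the patch itself, the array is tx_meta_data_storage[CONFIG_BT_ATT_TX_COUNT], the pool is att_pool, and the lookup is bt_att_get_tx_meta_data().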


@@ -185,6 +185,9 @@ Bootloader
 Bluetooth
 =========
 
+* ATT now has its own TX buffer pool.
+  If extra ATT buffers were configured using :kconfig:option:`CONFIG_BT_L2CAP_TX_BUF_COUNT`,
+  they now instead should be configured through :kconfig:option:`CONFIG_BT_ATT_TX_COUNT`.
 * The HCI implementation for both the Host and the Controller sides has been
   renamed for the IPC transport. The ``CONFIG_BT_RPMSG`` Kconfig option is now
   :kconfig:option:`CONFIG_BT_HCI_IPC`, and the ``zephyr,bt-hci-rpmsg-ipc``
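For an application that had raised the shared L2CAP TX pool on behalf of ATT, the migration is a rename of the option in its prj.conf; the value 12 below is only an example, mirroring the test configurations further down in this commit:

# before: extra ATT buffers came out of the shared L2CAP TX pool
CONFIG_BT_L2CAP_TX_BUF_COUNT=12
# after: ATT PDUs are served from their own pool
CONFIG_BT_ATT_TX_COUNT=12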


@@ -1336,7 +1336,7 @@ struct bt_gatt_exchange_params {
  * @retval -ENOMEM ATT request queue is full and blocking would cause deadlock.
  *         Allow a pending request to resolve before retrying, or call this function
  *         outside the BT RX thread to get blocking behavior. Queue size is controlled
- *         by @kconfig{CONFIG_BT_L2CAP_TX_BUF_COUNT}.
+ *         by @kconfig{CONFIG_BT_ATT_TX_COUNT}.
  *
  * @retval -EALREADY The MTU exchange procedure has been already performed.
  */
@@ -1502,7 +1502,7 @@ struct bt_gatt_discover_params {
  * @retval -ENOMEM ATT request queue is full and blocking would cause deadlock.
  *         Allow a pending request to resolve before retrying, or call this function
  *         outside the BT RX thread to get blocking behavior. Queue size is controlled
- *         by @kconfig{CONFIG_BT_L2CAP_TX_BUF_COUNT}.
+ *         by @kconfig{CONFIG_BT_ATT_TX_COUNT}.
  */
 int bt_gatt_discover(struct bt_conn *conn,
		     struct bt_gatt_discover_params *params);
@@ -1617,7 +1617,7 @@ struct bt_gatt_read_params {
  * @retval -ENOMEM ATT request queue is full and blocking would cause deadlock.
  *         Allow a pending request to resolve before retrying, or call this function
  *         outside the BT RX thread to get blocking behavior. Queue size is controlled
- *         by @kconfig{CONFIG_BT_L2CAP_TX_BUF_COUNT}.
+ *         by @kconfig{CONFIG_BT_ATT_TX_COUNT}.
  */
 int bt_gatt_read(struct bt_conn *conn, struct bt_gatt_read_params *params);
@@ -1670,7 +1670,7 @@ struct bt_gatt_write_params {
  * @retval -ENOMEM ATT request queue is full and blocking would cause deadlock.
  *         Allow a pending request to resolve before retrying, or call this function
  *         outside Bluetooth event context to get blocking behavior. Queue size is
- *         controlled by @kconfig{CONFIG_BT_L2CAP_TX_BUF_COUNT}.
+ *         controlled by @kconfig{CONFIG_BT_ATT_TX_COUNT}.
  */
 int bt_gatt_write(struct bt_conn *conn, struct bt_gatt_write_params *params);
@@ -1707,7 +1707,7 @@ int bt_gatt_write(struct bt_conn *conn, struct bt_gatt_write_params *params);
  * @retval -ENOMEM ATT request queue is full and blocking would cause deadlock.
  *         Allow a pending request to resolve before retrying, or call this function
  *         outside the BT RX thread to get blocking behavior. Queue size is controlled
- *         by @kconfig{CONFIG_BT_L2CAP_TX_BUF_COUNT}.
+ *         by @kconfig{CONFIG_BT_ATT_TX_COUNT}.
  */
 int bt_gatt_write_without_response_cb(struct bt_conn *conn, uint16_t handle,
				      const void *data, uint16_t length,
@@ -1733,7 +1733,7 @@ int bt_gatt_write_without_response_cb(struct bt_conn *conn, uint16_t handle,
  * @retval -ENOMEM ATT request queue is full and blocking would cause deadlock.
  *         Allow a pending request to resolve before retrying, or call this function
  *         outside the BT RX thread to get blocking behavior. Queue size is controlled
- *         by @kconfig{CONFIG_BT_L2CAP_TX_BUF_COUNT}.
+ *         by @kconfig{CONFIG_BT_ATT_TX_COUNT}.
  */
 static inline int bt_gatt_write_without_response(struct bt_conn *conn,
						 uint16_t handle, const void *data,
@@ -1895,7 +1895,7 @@ struct bt_gatt_subscribe_params {
  * @retval -ENOMEM ATT request queue is full and blocking would cause deadlock.
  *         Allow a pending request to resolve before retrying, or call this function
  *         outside the BT RX thread to get blocking behavior. Queue size is controlled
- *         by @kconfig{CONFIG_BT_L2CAP_TX_BUF_COUNT}.
+ *         by @kconfig{CONFIG_BT_ATT_TX_COUNT}.
  */
 int bt_gatt_subscribe(struct bt_conn *conn,
		      struct bt_gatt_subscribe_params *params);
@@ -1941,7 +1941,7 @@ int bt_gatt_resubscribe(uint8_t id, const bt_addr_le_t *peer,
  * @retval -ENOMEM ATT request queue is full and blocking would cause deadlock.
  *         Allow a pending request to resolve before retrying, or call this function
  *         outside the BT RX thread to get blocking behavior. Queue size is controlled
- *         by @kconfig{CONFIG_BT_L2CAP_TX_BUF_COUNT}.
+ *         by @kconfig{CONFIG_BT_ATT_TX_COUNT}.
  */
 int bt_gatt_unsubscribe(struct bt_conn *conn,
			struct bt_gatt_subscribe_params *params);


@@ -8,7 +8,7 @@ CONFIG_UTF8=y
 CONFIG_BT_SMP=y
 CONFIG_BT_KEYS_OVERWRITE_OLDEST=y
-CONFIG_BT_L2CAP_TX_BUF_COUNT=20
+CONFIG_BT_ATT_TX_COUNT=20
 
 # TMAP support
 CONFIG_BT_TMAP=y


@@ -7,7 +7,7 @@ CONFIG_UTF8=y
 CONFIG_BT_SMP=y
 CONFIG_BT_KEYS_OVERWRITE_OLDEST=y
-CONFIG_BT_L2CAP_TX_BUF_COUNT=20
+CONFIG_BT_ATT_TX_COUNT=20
 
 # TMAP support
 CONFIG_BT_TMAP=y


@@ -41,7 +41,7 @@ LOG_MODULE_REGISTER(bt_tbs_client, CONFIG_BT_TBS_CLIENT_LOG_LEVEL);
	 IS_ENABLED(CONFIG_BT_TBS_CLIENT_CALL_FRIENDLY_NAME) + \
	 IS_ENABLED(CONFIG_BT_TBS_CLIENT_INCOMING_CALL))
-BUILD_ASSERT(CONFIG_BT_L2CAP_TX_BUF_COUNT >= TBS_CLIENT_BUF_COUNT, "Too few L2CAP buffers");
+BUILD_ASSERT(CONFIG_BT_ATT_TX_COUNT >= TBS_CLIENT_BUF_COUNT, "Too few ATT buffers");
 
 #include "common/bt_str.h"
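Code that needs a minimum number of TX buffers can keep the same compile-time guard, now against the new symbol, exactly as tbs_client.c does above. A hypothetical application-side variant (MY_APP_MAX_QUEUED_ATT_PDUS is made up):

/* Hypothetical example: fail the build if the ATT TX pool cannot hold the
 * application's worst case of simultaneously queued ATT PDUs.
 */
#include <zephyr/kernel.h>

#define MY_APP_MAX_QUEUED_ATT_PDUS 4

BUILD_ASSERT(CONFIG_BT_ATT_TX_COUNT >= MY_APP_MAX_QUEUED_ATT_PDUS,
	     "CONFIG_BT_ATT_TX_COUNT too small for this application");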


@@ -5,6 +5,15 @@
 
 menu "ATT and GATT Options"
 
+config BT_ATT_TX_COUNT
+	int "Number of ATT buffers"
+	default BT_BUF_ACL_TX_COUNT
+	default 3
+	range 1 255
+	help
+	  These buffers are only used for sending anything over ATT.
+	  Requests, responses, indications, confirmations, notifications.
+
 config BT_ATT_PREPARE_COUNT
	int "Number of ATT prepare write buffers"
	default 0


@@ -69,7 +69,7 @@ NET_BUF_POOL_DEFINE(prep_pool, CONFIG_BT_ATT_PREPARE_COUNT, BT_ATT_BUF_SIZE,
 #endif /* CONFIG_BT_ATT_PREPARE_COUNT */
 
 K_MEM_SLAB_DEFINE(req_slab, sizeof(struct bt_att_req),
-		  CONFIG_BT_L2CAP_TX_BUF_COUNT, __alignof__(struct bt_att_req));
+		  CONFIG_BT_ATT_TX_COUNT, __alignof__(struct bt_att_req));
 
 enum {
	ATT_CONNECTED,
@@ -176,30 +176,20 @@ static struct bt_att_req cancel;
  */
 static k_tid_t att_handle_rsp_thread;
 
-#define bt_att_tx_meta_data(buf) (((struct bt_att_tx_meta *)net_buf_user_data(buf))->data)
-
-static struct bt_att_tx_meta_data tx_meta_data_storage[CONFIG_BT_CONN_TX_MAX];
-K_FIFO_DEFINE(free_att_tx_meta_data);
-
-static struct bt_att_tx_meta_data *tx_meta_data_alloc(k_timeout_t timeout)
-{
-	/* The meta data always get freed in the system workqueue,
-	 * so if we're in the same workqueue but there are no immediate
-	 * contexts available, there's no chance we'll get one by waiting.
-	 */
-	if (k_current_get() == &k_sys_work_q.thread) {
-		return k_fifo_get(&free_att_tx_meta_data, K_NO_WAIT);
-	}
-
-	return k_fifo_get(&free_att_tx_meta_data, timeout);
-}
-
-static inline void tx_meta_data_free(struct bt_att_tx_meta_data *data)
-{
-	__ASSERT_NO_MSG(data);
-
-	(void)memset(data, 0, sizeof(*data));
-	k_fifo_put(&free_att_tx_meta_data, data);
-}
+static struct bt_att_tx_meta_data tx_meta_data_storage[CONFIG_BT_ATT_TX_COUNT];
+
+NET_BUF_POOL_DEFINE(att_pool, CONFIG_BT_ATT_TX_COUNT,
+		    BT_L2CAP_SDU_BUF_SIZE(BT_ATT_BUF_SIZE),
+		    CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);
+
+struct bt_att_tx_meta_data *bt_att_get_tx_meta_data(const struct net_buf *buf)
+{
+	__ASSERT_NO_MSG(net_buf_pool_get(buf->pool_id) == &att_pool);
+
+	/* Metadata lifetime is implicitly tied to the buffer lifetime.
+	 * Treat it as part of the buffer itself.
+	 */
+	return &tx_meta_data_storage[net_buf_id((struct net_buf *)buf)];
+}
 
 static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf);
@@ -266,7 +256,7 @@ static int chan_send(struct bt_att_chan *chan, struct net_buf *buf)
	struct bt_att_hdr *hdr;
	struct net_buf_simple_state state;
	int err;
-	struct bt_att_tx_meta_data *data = bt_att_tx_meta_data(buf);
+	struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);
	struct bt_att_chan *prev_chan = data->att_chan;
 
	hdr = (void *)buf->data;
@@ -325,7 +315,6 @@ static int chan_send(struct bt_att_chan *chan, struct net_buf *buf)
		err = bt_smp_sign(chan->att->conn, buf);
		if (err) {
			LOG_ERR("Error signing data");
-			tx_meta_data_free(bt_att_tx_meta_data(buf));
			net_buf_unref(buf);
			return err;
		}
@@ -370,12 +359,14 @@ static struct net_buf *get_first_buf_matching_chan(struct k_fifo *fifo, struct b
	struct k_fifo skipped;
	struct net_buf *buf;
	struct net_buf *ret = NULL;
+	struct bt_att_tx_meta_data *meta;
 
	k_fifo_init(&skipped);
 
	while ((buf = net_buf_get(fifo, K_NO_WAIT))) {
+		meta = bt_att_get_tx_meta_data(buf);
		if (!ret &&
-		    att_chan_matches_chan_opt(chan, bt_att_tx_meta_data(buf)->chan_opt)) {
+		    att_chan_matches_chan_opt(chan, meta->chan_opt)) {
			ret = buf;
		} else {
			net_buf_put(&skipped, buf);
@@ -400,10 +391,11 @@ static struct bt_att_req *get_first_req_matching_chan(sys_slist_t *reqs, struct
 {
	if (IS_ENABLED(CONFIG_BT_EATT)) {
		sys_snode_t *curr, *prev = NULL;
+		struct bt_att_tx_meta_data *meta = NULL;
 
		SYS_SLIST_FOR_EACH_NODE(reqs, curr) {
-			if (att_chan_matches_chan_opt(
-				    chan, bt_att_tx_meta_data(ATT_REQ(curr)->buf)->chan_opt)) {
+			meta = bt_att_get_tx_meta_data(ATT_REQ(curr)->buf);
+			if (att_chan_matches_chan_opt(chan, meta->chan_opt)) {
				break;
			}
@@ -546,7 +538,6 @@ static void chan_cfm_sent(struct bt_conn *conn, struct bt_att_tx_meta_data *user
	LOG_DBG("chan %p", chan);
 
-	tx_meta_data_free(data);
 }
 
 static void chan_rsp_sent(struct bt_conn *conn, struct bt_att_tx_meta_data *user_data)
@@ -556,7 +547,6 @@ static void chan_rsp_sent(struct bt_conn *conn, struct bt_att_tx_meta_data *user
	LOG_DBG("chan %p", chan);
 
-	tx_meta_data_free(data);
 }
 
 static void chan_req_sent(struct bt_conn *conn, struct bt_att_tx_meta_data *user_data)
@@ -571,7 +561,6 @@ static void chan_req_sent(struct bt_conn *conn, struct bt_att_tx_meta_data *user
		k_work_reschedule(&chan->timeout_work, BT_ATT_TIMEOUT);
	}
 
-	tx_meta_data_free(user_data);
 }
 
 static void chan_tx_complete(struct bt_conn *conn, struct bt_att_tx_meta_data *user_data)
@@ -584,7 +573,6 @@ static void chan_tx_complete(struct bt_conn *conn, struct bt_att_tx_meta_data *u
	LOG_DBG("TX Complete chan %p CID 0x%04X", chan, chan->chan.tx.cid);
 
-	tx_meta_data_free(data);
 
	if (func) {
		for (uint16_t i = 0; i < attr_count; i++) {
@@ -593,11 +581,6 @@ static void chan_tx_complete(struct bt_conn *conn, struct bt_att_tx_meta_data *u
	}
 }
 
-static void chan_unknown(struct bt_conn *conn, struct bt_att_tx_meta_data *user_data)
-{
-	tx_meta_data_free(user_data);
-}
-
 static bt_att_tx_cb_t chan_cb(const struct net_buf *buf)
 {
	const att_type_t op_type = att_op_get_type(buf->data[0]);
@@ -615,9 +598,8 @@ static bt_att_tx_cb_t chan_cb(const struct net_buf *buf)
		return chan_tx_complete;
	default:
		__ASSERT(false, "Unknown op type 0x%02X", op_type);
+		return NULL;
	}
-
-	return chan_unknown;
 }
 
 static void att_cfm_sent(struct bt_conn *conn, void *user_data, int err)
@@ -661,8 +643,6 @@ static void att_unknown(struct bt_conn *conn, void *user_data, int err)
	if (!err) {
		att_sent(conn, user_data);
	}
-
-	chan_unknown(conn, user_data);
 }
 
 static bt_conn_tx_cb_t att_cb(const struct net_buf *buf)
@@ -682,9 +662,11 @@ static bt_conn_tx_cb_t att_cb(const struct net_buf *buf)
		return att_tx_complete;
	default:
		__ASSERT(false, "Unknown op type 0x%02X", op_type);
+		/* This only ever runs if asserts are disabled.
+		 * In any case, all bets are off.
+		 */
+		return att_unknown;
	}
-
-	return att_unknown;
 }
 
 static struct net_buf *bt_att_chan_create_pdu(struct bt_att_chan *chan, uint8_t op, size_t len)
@@ -710,25 +692,24 @@ static struct net_buf *bt_att_chan_create_pdu(struct bt_att_chan *chan, uint8_t
		timeout = K_FOREVER;
	}
 
-	buf = bt_l2cap_create_pdu_timeout(NULL, 0, timeout);
+	/* This will reserve headspace for lower layers */
+	buf = bt_l2cap_create_pdu_timeout(&att_pool, 0, timeout);
	if (!buf) {
		LOG_ERR("Unable to allocate buffer for op 0x%02x", op);
		return NULL;
	}
 
-	data = tx_meta_data_alloc(timeout);
-	if (!data) {
-		LOG_WRN("Unable to allocate ATT TX meta");
-		net_buf_unref(buf);
-		return NULL;
-	}
+	data = bt_att_get_tx_meta_data(buf);
+	/* If we got a buf from `att_pool`, then the metadata slot at its index
+	 * is officially ours to use.
+	 */
+	memset(data, 0, sizeof(*data));
 
	if (IS_ENABLED(CONFIG_BT_EATT)) {
		net_buf_reserve(buf, BT_L2CAP_SDU_BUF_SIZE(0));
	}
 
	data->att_chan = chan;
-	bt_att_tx_meta_data(buf) = data;
+
	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->code = op;
@@ -742,7 +723,7 @@ static int bt_att_chan_send(struct bt_att_chan *chan, struct net_buf *buf)
		((struct bt_att_hdr *)buf->data)->code);
 
	if (IS_ENABLED(CONFIG_BT_EATT) &&
-	    !att_chan_matches_chan_opt(chan, bt_att_tx_meta_data(buf)->chan_opt)) {
+	    !att_chan_matches_chan_opt(chan, bt_att_get_tx_meta_data(buf)->chan_opt)) {
		return -EINVAL;
	}
@@ -1095,7 +1076,6 @@ static uint8_t att_find_info_rsp(struct bt_att_chan *chan, uint16_t start_handle
	bt_gatt_foreach_attr(start_handle, end_handle, find_info_cb, &data);
 
	if (!data.rsp) {
-		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Respond since handle is set */
		send_err_rsp(chan, BT_ATT_OP_FIND_INFO_REQ, start_handle,
@@ -1258,7 +1238,6 @@ static uint8_t att_find_type_rsp(struct bt_att_chan *chan, uint16_t start_handle
	/* If error has not been cleared, no service has been found */
	if (data.err) {
-		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Respond since handle is set */
		send_err_rsp(chan, BT_ATT_OP_FIND_TYPE_REQ, start_handle,
@@ -1489,7 +1468,6 @@ static uint8_t att_read_type_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid,
	bt_gatt_foreach_attr(start_handle, end_handle, read_type_cb, &data);
 
	if (data.err) {
-		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Response here since handle is set */
		send_err_rsp(chan, BT_ATT_OP_READ_TYPE_REQ, start_handle,
@@ -1611,7 +1589,6 @@ static uint8_t att_read_rsp(struct bt_att_chan *chan, uint8_t op, uint8_t rsp,
	/* In case of error discard data and respond with an error */
	if (data.err) {
-		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, op, handle, data.err);
@@ -1695,7 +1672,6 @@ static uint8_t att_read_mult_req(struct bt_att_chan *chan, struct net_buf *buf)
	/* Stop reading in case of error */
	if (data.err) {
-		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, BT_ATT_OP_READ_MULT_REQ, handle,
@@ -1790,7 +1766,6 @@ static uint8_t att_read_mult_vl_req(struct bt_att_chan *chan, struct net_buf *bu
	/* Stop reading in case of error */
	if (data.err) {
-		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, BT_ATT_OP_READ_MULT_VL_REQ, handle,
@@ -1907,7 +1882,6 @@ static uint8_t att_read_group_rsp(struct bt_att_chan *chan, struct bt_uuid *uuid
	bt_gatt_foreach_attr(start_handle, end_handle, read_group_cb, &data);
 
	if (!data.rsp->len) {
-		tx_meta_data_free(bt_att_tx_meta_data(data.buf));
		net_buf_unref(data.buf);
		/* Respond here since handle is set */
		send_err_rsp(chan, BT_ATT_OP_READ_GROUP_REQ, start_handle,
@@ -2057,7 +2031,6 @@ static uint8_t att_write_rsp(struct bt_att_chan *chan, uint8_t req, uint8_t rsp,
	if (data.err) {
		/* In case of error discard data and respond with an error */
		if (rsp) {
-			tx_meta_data_free(bt_att_tx_meta_data(data.buf));
			net_buf_unref(data.buf);
			/* Respond here since handle is set */
			send_err_rsp(chan, req, handle, data.err);
@@ -2988,7 +2961,6 @@ static void att_reset(struct bt_att *att)
 #if CONFIG_BT_ATT_PREPARE_COUNT > 0
	/* Discard queued buffers */
	while ((buf = net_buf_slist_get(&att->prep_queue))) {
-		tx_meta_data_free(bt_att_tx_meta_data(buf));
		net_buf_unref(buf);
	}
 #endif /* CONFIG_BT_ATT_PREPARE_COUNT > 0 */
@@ -3000,7 +2972,6 @@ static void att_reset(struct bt_att *att)
 #endif /* CONFIG_BT_EATT */
 
	while ((buf = net_buf_get(&att->tx_queue, K_NO_WAIT))) {
-		tx_meta_data_free(bt_att_tx_meta_data(buf));
		net_buf_unref(buf);
	}
@@ -3036,7 +3007,6 @@ static void att_chan_detach(struct bt_att_chan *chan)
	/* Release pending buffers */
	while ((buf = net_buf_get(&chan->tx_queue, K_NO_WAIT))) {
-		tx_meta_data_free(bt_att_tx_meta_data(buf));
		net_buf_unref(buf);
	}
@@ -3155,13 +3125,11 @@ static uint8_t att_req_retry(struct bt_att_chan *att_chan)
	}
 
	if (req->encode(buf, req->len, req->user_data)) {
-		tx_meta_data_free(bt_att_tx_meta_data(buf));
		net_buf_unref(buf);
		return BT_ATT_ERR_UNLIKELY;
	}
 
	if (chan_send(att_chan, buf)) {
-		tx_meta_data_free(bt_att_tx_meta_data(buf));
		net_buf_unref(buf);
		return BT_ATT_ERR_UNLIKELY;
	}
@@ -3257,13 +3225,9 @@ static void bt_att_released(struct bt_l2cap_chan *ch)
 {
	struct bt_att_chan *chan = ATT_CHAN(ch);
 
-	/* Traverse the ATT bearer's TX queue and free the metadata. */
+	/* Empty the ATT bearer's TX queue */
	while (!sys_slist_is_empty(&chan->tx_cb_queue)) {
-		sys_snode_t *tx_meta_data_node = sys_slist_get(&chan->tx_cb_queue);
-		struct bt_att_tx_meta_data *tx_meta_data = CONTAINER_OF(
-			tx_meta_data_node, struct bt_att_tx_meta_data, tx_cb_queue_node);
-
-		tx_meta_data_free(tx_meta_data);
+		(void)sys_slist_get(&chan->tx_cb_queue);
	}
 
	LOG_DBG("chan %p", chan);
@@ -3754,11 +3718,6 @@ static void bt_eatt_init(void)
 
 void bt_att_init(void)
 {
-	k_fifo_init(&free_att_tx_meta_data);
-	for (size_t i = 0; i < ARRAY_SIZE(tx_meta_data_storage); i++) {
-		k_fifo_put(&free_att_tx_meta_data, &tx_meta_data_storage[i]);
-	}
-
	bt_gatt_init();
 
	if (IS_ENABLED(CONFIG_BT_EATT)) {
@@ -3839,7 +3798,6 @@ void bt_att_req_free(struct bt_att_req *req)
	LOG_DBG("req %p", req);
 
	if (req->buf) {
-		tx_meta_data_free(bt_att_tx_meta_data(req->buf));
		net_buf_unref(req->buf);
		req->buf = NULL;
	}
@@ -3856,7 +3814,6 @@ int bt_att_send(struct bt_conn *conn, struct net_buf *buf)
	att = att_get(conn);
	if (!att) {
-		tx_meta_data_free(bt_att_tx_meta_data(buf));
		net_buf_unref(buf);
		return -ENOTCONN;
	}
@@ -3996,7 +3953,7 @@ bool bt_att_out_of_sync_sent_on_fixed(struct bt_conn *conn)
 void bt_att_set_tx_meta_data(struct net_buf *buf, bt_gatt_complete_func_t func, void *user_data,
			     enum bt_att_chan_opt chan_opt)
 {
-	struct bt_att_tx_meta_data *data = bt_att_tx_meta_data(buf);
+	struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);
 
	data->func = func;
	data->user_data = user_data;
@@ -4006,7 +3963,7 @@ void bt_att_set_tx_meta_data(struct net_buf *buf, bt_gatt_complete_func_t func,
 void bt_att_increment_tx_meta_data_attr_count(struct net_buf *buf, uint16_t attr_count)
 {
-	struct bt_att_tx_meta_data *data = bt_att_tx_meta_data(buf);
+	struct bt_att_tx_meta_data *data = bt_att_get_tx_meta_data(buf);
 
	data->attr_count += attr_count;
 }
@@ -4014,9 +3971,11 @@ void bt_att_increment_tx_meta_data_attr_count(struct net_buf *buf, uint16_t attr
 bool bt_att_tx_meta_data_match(const struct net_buf *buf, bt_gatt_complete_func_t func,
			       const void *user_data, enum bt_att_chan_opt chan_opt)
 {
-	return ((bt_att_tx_meta_data(buf)->func == func) &&
-		(bt_att_tx_meta_data(buf)->user_data == user_data) &&
-		(bt_att_tx_meta_data(buf)->chan_opt == chan_opt));
+	const struct bt_att_tx_meta_data *meta = bt_att_get_tx_meta_data(buf);
+
+	return ((meta->func == func) &&
+		(meta->user_data == user_data) &&
+		(meta->chan_opt == chan_opt));
 }
 
 bool bt_att_chan_opt_valid(struct bt_conn *conn, enum bt_att_chan_opt chan_opt)


@@ -20,7 +20,7 @@ CONFIG_BT_SHELL=y
 CONFIG_BT_DEVICE_NAME="audio test shell"
 CONFIG_BT_DEVICE_NAME_DYNAMIC=y
 # TBS Client may require up to 12 buffers
-CONFIG_BT_L2CAP_TX_BUF_COUNT=12
+CONFIG_BT_ATT_TX_COUNT=12
 CONFIG_BT_ID_MAX=2
 CONFIG_BT_FILTER_ACCEPT_LIST=y
 CONFIG_BT_REMOTE_INFO=y


@@ -94,7 +94,7 @@ CONFIG_BT_HAS_PRESET_NAME_DYNAMIC=y
 CONFIG_BT_CSIP_SET_MEMBER=y
 
 # CCP
-CONFIG_BT_L2CAP_TX_BUF_COUNT=12
+CONFIG_BT_ATT_TX_COUNT=12
 CONFIG_BT_TBS_CLIENT_GTBS=y
 CONFIG_BT_TBS_CLIENT_TBS=n


@@ -7,7 +7,7 @@ CONFIG_BT_CENTRAL=y
 CONFIG_BT_PERIPHERAL=y
 CONFIG_BT_DEVICE_NAME="bsim_test_audio"
 # TBS Client may require up to 12 buffers
-CONFIG_BT_L2CAP_TX_BUF_COUNT=12
+CONFIG_BT_ATT_TX_COUNT=12
 CONFIG_BT_ATT_PREPARE_COUNT=5
 CONFIG_BT_MAX_CONN=3
 CONFIG_BT_MAX_PAIRED=3