Bluetooth: buf: Add a callback for freed buffer in RX pool

The Bluetooth data buffer API currently lacks a mechanism to notify when
a buffer is freed in the RX pool. This limitation forces HCI drivers to
adopt inefficient workarounds to manage buffer allocation.

HCI drivers face two suboptimal options:

- Blocking calls: Use bt_buf_get_rx with K_FOREVER, which blocks the
  execution context until a buffer becomes available.
- Polling: Repeatedly call bt_buf_get_rx with K_NO_WAIT, which increases
  CPU load and reduces efficiency.

This commit introduces a callback mechanism that is triggered each time
a buffer is freed in the RX pool. With this feature, HCI drivers can:

- Call bt_buf_get_rx with K_NO_WAIT.
- Wait for the callback notification if a NULL buffer is returned,
  avoiding unnecessary polling.

The new callback improves efficiency by enabling event-driven behavior
for buffer management, reducing CPU overhead while maintaining
responsiveness.
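As a rough usage sketch (not part of this change), an HCI driver's RX path
could pair the new callback with a semaphore. All identifiers below
(rx_buf_sem, rx_buf_freed, acl_rx_buf_get, drv_init) are hypothetical
driver-side names:

#include <zephyr/kernel.h>
#include <zephyr/bluetooth/buf.h>

/* Hypothetical driver state: a semaphore the RX path waits on whenever
 * the ACL RX pool is exhausted.
 */
static K_SEM_DEFINE(rx_buf_sem, 0, 1);

static void rx_buf_freed(enum bt_buf_type type_mask)
{
        /* Runs with the scheduler locked: only a non-blocking wake-up. */
        if (type_mask & BT_BUF_ACL_IN) {
                k_sem_give(&rx_buf_sem);
        }
}

static struct net_buf *acl_rx_buf_get(void)
{
        struct net_buf *buf;

        /* Try without blocking; if the pool is empty, sleep until the
         * freed-buffer notification arrives instead of polling.
         */
        while ((buf = bt_buf_get_rx(BT_BUF_ACL_IN, K_NO_WAIT)) == NULL) {
                k_sem_take(&rx_buf_sem, K_FOREVER);
        }

        return buf;
}

static int drv_init(void)
{
        bt_buf_rx_freed_cb_set(rx_buf_freed);

        return 0;
}

Because the callback runs with the scheduler locked, it only gives the
semaphore; the allocation itself is retried in the driver's own thread
context.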

Signed-off-by: Pavel Vasilyev <pavel.vasilyev@nordicsemi.no>
commit c2488fdd30
Authored by Pavel Vasilyev on 2024-11-07 09:50:37 +01:00; committed by Benjamin Cabé.
5 changed files with 130 additions and 12 deletions.

@@ -138,6 +138,27 @@ BUILD_ASSERT(BT_BUF_ACL_RX_COUNT <= BT_BUF_ACL_RX_COUNT_MAX,
 */
struct net_buf *bt_buf_get_rx(enum bt_buf_type type, k_timeout_t timeout);

/** A callback to notify about a freed buffer in the incoming data pool.
 *
 * This callback is called when a buffer of a given type is freed and can be requested through the
 * @ref bt_buf_get_rx function. However, this callback is called from the context of the buffer
 * freeing operation and must not attempt to allocate a new buffer from the same pool.
 *
 * @warning When this callback is called, the scheduler is locked and the callee must not perform
 * any action that makes the current thread unready. This callback must only be used for very
 * short non-blocking operations (e.g. submitting a work item).
 *
 * @param type_mask A bit mask of buffer types that have been freed.
 */
typedef void (*bt_buf_rx_freed_cb_t)(enum bt_buf_type type_mask);

/** Set the callback to notify about a freed buffer in the incoming data pool.
 *
 * @param cb Callback to notify about a freed buffer in the incoming data pool. If NULL, the
 *           callback is disabled.
 */
void bt_buf_rx_freed_cb_set(bt_buf_rx_freed_cb_t cb);

/** Allocate a buffer for outgoing data
 *
 * This will set the buffer type so bt_buf_set_type() does not need to
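
To stay within the @warning above, a callback should do no more than hand off to another context.
A minimal sketch of the suggested work-item approach follows; the identifiers (rx_resume_work,
rx_resume_handler, rx_buf_freed) are illustrative and not part of this commit:

#include <zephyr/kernel.h>
#include <zephyr/bluetooth/buf.h>

static void rx_resume_handler(struct k_work *work)
{
        ARG_UNUSED(work);

        /* Workqueue context: it is now safe to retry the allocation and
         * resume reception in whatever way the driver requires.
         */
        struct net_buf *buf = bt_buf_get_rx(BT_BUF_EVT, K_NO_WAIT);

        if (buf != NULL) {
                /* ... hand the buffer to the driver's RX path ... */
        }
}

static K_WORK_DEFINE(rx_resume_work, rx_resume_handler);

static void rx_buf_freed(enum bt_buf_type type_mask)
{
        ARG_UNUSED(type_mask);

        /* Scheduler is locked here: submitting a work item is a short,
         * non-blocking operation, which is what the warning allows.
         */
        k_work_submit(&rx_resume_work);
}

The callback would then be registered once via bt_buf_rx_freed_cb_set(rx_buf_freed); the real
allocation happens later in the system workqueue.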

@@ -25,6 +25,26 @@ LOG_MODULE_REGISTER(bt_buf, CONFIG_BT_LOG_LEVEL);
 */
#define SYNC_EVT_SIZE (BT_BUF_RESERVE + BT_HCI_EVT_HDR_SIZE + 255)

static bt_buf_rx_freed_cb_t buf_rx_freed_cb;

static void buf_rx_freed_notify(enum bt_buf_type mask)
{
        k_sched_lock();

        if (buf_rx_freed_cb) {
                buf_rx_freed_cb(mask);
        }

        k_sched_unlock();
}

#if defined(CONFIG_BT_ISO_RX)
static void iso_rx_freed_cb(void)
{
        buf_rx_freed_notify(BT_BUF_ISO_IN);
}
#endif

/* Pool for RX HCI buffers that are always freed by `bt_recv`
 * before it returns.
 *
@@ -40,17 +60,37 @@ NET_BUF_POOL_FIXED_DEFINE(discardable_pool, CONFIG_BT_BUF_EVT_DISCARDABLE_COUNT,
                          sizeof(struct bt_buf_data), NULL);
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
NET_BUF_POOL_DEFINE(acl_in_pool, BT_BUF_ACL_RX_COUNT,
                    BT_BUF_ACL_SIZE(CONFIG_BT_BUF_ACL_RX_SIZE),
                    sizeof(struct acl_data), bt_hci_host_num_completed_packets);

static void acl_in_pool_destroy(struct net_buf *buf)
{
        bt_hci_host_num_completed_packets(buf);
        buf_rx_freed_notify(BT_BUF_ACL_IN);
}

NET_BUF_POOL_FIXED_DEFINE(evt_pool, CONFIG_BT_BUF_EVT_RX_COUNT,
                          BT_BUF_EVT_RX_SIZE, sizeof(struct bt_buf_data),
                          NULL);

static void evt_pool_destroy(struct net_buf *buf)
{
        net_buf_destroy(buf);
        buf_rx_freed_notify(BT_BUF_EVT);
}

NET_BUF_POOL_DEFINE(acl_in_pool, BT_BUF_ACL_RX_COUNT, BT_BUF_ACL_SIZE(CONFIG_BT_BUF_ACL_RX_SIZE),
                    sizeof(struct acl_data), acl_in_pool_destroy);
NET_BUF_POOL_FIXED_DEFINE(evt_pool, CONFIG_BT_BUF_EVT_RX_COUNT, BT_BUF_EVT_RX_SIZE,
                          sizeof(struct bt_buf_data), evt_pool_destroy);
#else
NET_BUF_POOL_FIXED_DEFINE(hci_rx_pool, BT_BUF_RX_COUNT,
                          BT_BUF_RX_SIZE, sizeof(struct acl_data),
                          NULL);

static void hci_rx_pool_destroy(struct net_buf *buf)
{
        net_buf_destroy(buf);

        /* When ACL Flow Control is disabled, a single pool is used for events and acl data.
         * Therefore the callback will always notify about both types of buffers, BT_BUF_EVT and
         * BT_BUF_ACL_IN.
         */
        buf_rx_freed_notify(BT_BUF_EVT | BT_BUF_ACL_IN);
}

NET_BUF_POOL_FIXED_DEFINE(hci_rx_pool, BT_BUF_RX_COUNT, BT_BUF_RX_SIZE, sizeof(struct acl_data),
                          hci_rx_pool_destroy);
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

struct net_buf *bt_buf_get_rx(enum bt_buf_type type, k_timeout_t timeout)
@@ -82,6 +122,19 @@ struct net_buf *bt_buf_get_rx(enum bt_buf_type type, k_timeout_t timeout)
        return buf;
}

void bt_buf_rx_freed_cb_set(bt_buf_rx_freed_cb_t cb)
{
        k_sched_lock();

        buf_rx_freed_cb = cb;

#if defined(CONFIG_BT_ISO_RX)
        bt_iso_buf_rx_freed_cb_set(cb != NULL ? iso_rx_freed_cb : NULL);
#endif

        k_sched_unlock();
}

struct net_buf *bt_buf_get_evt(uint8_t evt, bool discardable,
                               k_timeout_t timeout)
{

@@ -35,8 +35,20 @@ static uint8_t raw_mode = BT_HCI_RAW_MODE_H4;
static uint8_t raw_mode;
#endif

NET_BUF_POOL_FIXED_DEFINE(hci_rx_pool, BT_BUF_RX_COUNT,
                          BT_BUF_RX_SIZE, sizeof(struct bt_buf_data), NULL);

static bt_buf_rx_freed_cb_t buf_rx_freed_cb;

static void hci_rx_buf_destroy(struct net_buf *buf)
{
        net_buf_destroy(buf);

        if (buf_rx_freed_cb) {
                /* bt_buf_get_rx is used for all types of RX buffers */
                buf_rx_freed_cb(BT_BUF_EVT | BT_BUF_ACL_IN | BT_BUF_ISO_IN);
        }
}

NET_BUF_POOL_FIXED_DEFINE(hci_rx_pool, BT_BUF_RX_COUNT, BT_BUF_RX_SIZE, sizeof(struct bt_buf_data),
                          hci_rx_buf_destroy);

NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_BUF_CMD_TX_COUNT,
                          BT_BUF_CMD_SIZE(CONFIG_BT_BUF_CMD_TX_SIZE),
                          sizeof(struct bt_buf_data), NULL);
@@ -93,6 +105,11 @@ struct net_buf *bt_buf_get_rx(enum bt_buf_type type, k_timeout_t timeout)
        return buf;
}

void bt_buf_rx_freed_cb_set(bt_buf_rx_freed_cb_t cb)
{
        buf_rx_freed_cb = cb;
}

struct net_buf *bt_buf_get_tx(enum bt_buf_type type, k_timeout_t timeout,
                              const void *data, size_t size)
{

@@ -51,8 +51,20 @@ LOG_MODULE_REGISTER(bt_iso, CONFIG_BT_ISO_LOG_LEVEL);
#define iso_chan(_iso) ((_iso)->iso.chan);
#if defined(CONFIG_BT_ISO_RX)
static bt_iso_buf_rx_freed_cb_t buf_rx_freed_cb;

static void iso_rx_buf_destroy(struct net_buf *buf)
{
        net_buf_destroy(buf);

        if (buf_rx_freed_cb) {
                buf_rx_freed_cb();
        }
}

NET_BUF_POOL_FIXED_DEFINE(iso_rx_pool, CONFIG_BT_ISO_RX_BUF_COUNT,
                          BT_ISO_SDU_BUF_SIZE(CONFIG_BT_ISO_RX_MTU), sizeof(struct iso_data), NULL);
                          BT_ISO_SDU_BUF_SIZE(CONFIG_BT_ISO_RX_MTU), sizeof(struct iso_data),
                          iso_rx_buf_destroy);

static struct bt_iso_recv_info iso_info_data[CONFIG_BT_ISO_RX_BUF_COUNT];
#define iso_info(buf) (&iso_info_data[net_buf_id(buf)])
@@ -553,6 +565,11 @@ struct net_buf *bt_iso_get_rx(k_timeout_t timeout)
        return buf;
}

void bt_iso_buf_rx_freed_cb_set(bt_iso_buf_rx_freed_cb_t cb)
{
        buf_rx_freed_cb = cb;
}

void bt_iso_recv(struct bt_conn *iso, struct net_buf *buf, uint8_t flags)
{
        struct bt_hci_iso_sdu_hdr *hdr;

@@ -87,6 +87,16 @@ void hci_iso(struct net_buf *buf);
/* Allocates RX buffer */
struct net_buf *bt_iso_get_rx(k_timeout_t timeout);

/** A callback used to notify about a freed buffer in the ISO RX pool. */
typedef void (*bt_iso_buf_rx_freed_cb_t)(void);

/** Set a callback to notify about a freed buffer in the ISO RX pool.
 *
 * @param cb Callback to notify about a freed buffer in the ISO RX pool. If NULL, the callback is
 *           disabled.
 */
void bt_iso_buf_rx_freed_cb_set(bt_iso_buf_rx_freed_cb_t cb);

/* Process CIS Established event */
void hci_le_cis_established(struct net_buf *buf);