Bluetooth: host: make HCI fragmentation async
Make the ACL fragmentation asynchronous, freeing the TX thread to send commands while the ACL buffers are full.

Signed-off-by: Jonathan Rico <jonathan.rico@nordicsemi.no>
This commit is contained in:
parent
cbd1e67574
commit
c3e5fabbf1
1 changed file with 82 additions and 24 deletions
|
@@ -45,6 +45,11 @@ LOG_MODULE_REGISTER(bt_conn);
|
|||
|
||||
/* Metadata kept in the user-data area of each outgoing ACL net_buf. */
struct tx_meta {
	/* TX completion context; NULL when no callback is attached. */
	struct bt_conn_tx *tx;
	/* Set once part of this buffer has already been handed to the
	 * controller, meaning any remaining fragments must be sent as
	 * continuation packets rather than a new start fragment.
	 */
	bool is_cont;
};

/* Accessor for the tx_meta stored in a net_buf's user-data area. */
#define tx_data(buf) ((struct tx_meta *)net_buf_user_data(buf))
|
||||
|
@@ -429,6 +434,8 @@ int bt_conn_send_cb(struct bt_conn *conn, struct net_buf *buf,
|
|||
tx_data(buf)->tx = NULL;
|
||||
}
|
||||
|
||||
tx_data(buf)->is_cont = false;
|
||||
|
||||
net_buf_put(&conn->tx_queue, buf);
|
||||
return 0;
|
||||
}
|
||||
|
@@ -497,7 +504,7 @@ static int send_iso(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
|
|||
return bt_send(buf);
|
||||
}
|
||||
|
||||
static bool send_frag(struct bt_conn *conn, struct net_buf *buf, uint8_t flags,
|
||||
static int send_frag(struct bt_conn *conn, struct net_buf *buf, uint8_t flags,
|
||||
bool always_consume)
|
||||
{
|
||||
struct bt_conn_tx *tx = tx_data(buf)->tx;
|
||||
|
@@ -505,16 +512,34 @@ static bool send_frag(struct bt_conn *conn, struct net_buf *buf, uint8_t flags,
|
|||
unsigned int key;
|
||||
int err = 0;
|
||||
|
||||
LOG_DBG("conn %p buf %p len %u flags 0x%02x", conn, buf, buf->len, flags);
|
||||
/* Check if the controller can accept ACL packets */
|
||||
if (k_sem_take(bt_conn_get_pkts(conn), K_MSEC(100))) {
|
||||
/* not `goto fail`, we don't want to free the tx context: in the
|
||||
* case where it is the original buffer, it will contain the
|
||||
* callback ptr.
|
||||
*/
|
||||
LOG_DBG("no CTLR bufs");
|
||||
return -ENOBUFS;
|
||||
}
|
||||
|
||||
/* Wait until the controller can accept ACL packets */
|
||||
k_sem_take(bt_conn_get_pkts(conn), K_FOREVER);
|
||||
if (flags == FRAG_SINGLE || flags == FRAG_END) {
|
||||
/* De-queue the buffer now that we know we can send it.
|
||||
* Only applies if the buffer to be sent is the original buffer,
|
||||
* and not one of its fragments.
|
||||
* This buffer was fetched from the FIFO using a peek operation.
|
||||
*/
|
||||
buf = net_buf_get(&conn->tx_queue, K_NO_WAIT);
|
||||
}
|
||||
|
||||
/* Check for disconnection while waiting for pkts_sem */
|
||||
if (conn->state != BT_CONN_CONNECTED) {
|
||||
err = -ENOTCONN;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
LOG_DBG("conn %p buf %p len %u flags 0x%02x", conn, buf, buf->len,
|
||||
flags);
|
||||
|
||||
/* Add to pending, it must be done before bt_buf_set_type */
|
||||
key = irq_lock();
|
||||
if (tx) {
|
||||
|
@@ -552,12 +577,21 @@ static bool send_frag(struct bt_conn *conn, struct net_buf *buf, uint8_t flags,
|
|||
(*pending_no_cb)--;
|
||||
}
|
||||
irq_unlock(key);
|
||||
|
||||
/* We don't want to end up in a situation where send_acl/iso
|
||||
* returns the same error code as when we don't get a buffer in
|
||||
* time.
|
||||
*/
|
||||
err = -EIO;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
/* If we get here, something has seriously gone wrong:
|
||||
* We also need to destroy the `parent` buf.
|
||||
*/
|
||||
k_sem_give(bt_conn_get_pkts(conn));
|
||||
if (tx) {
|
||||
/* `buf` might not get destroyed, and its `tx` pointer will still be reachable.
|
||||
|
@@ -570,7 +604,7 @@ fail:
|
|||
if (always_consume) {
|
||||
net_buf_unref(buf);
|
||||
}
|
||||
return false;
|
||||
return err;
|
||||
}
|
||||
|
||||
static inline uint16_t conn_mtu(struct bt_conn *conn)
|
||||
|
@@ -619,6 +653,7 @@ static struct net_buf *create_frag(struct bt_conn *conn, struct net_buf *buf)
|
|||
|
||||
/* Fragments never have a TX completion callback */
|
||||
tx_data(frag)->tx = NULL;
|
||||
tx_data(frag)->is_cont = false;
|
||||
|
||||
frag_len = MIN(conn_mtu(conn), net_buf_tailroom(frag));
|
||||
|
||||
|
@@ -628,42 +663,51 @@ static struct net_buf *create_frag(struct bt_conn *conn, struct net_buf *buf)
|
|||
return frag;
|
||||
}
|
||||
|
||||
static bool send_buf(struct bt_conn *conn, struct net_buf *buf)
|
||||
static int send_buf(struct bt_conn *conn, struct net_buf *buf)
|
||||
{
|
||||
struct net_buf *frag;
|
||||
uint8_t flags;
|
||||
int err;
|
||||
|
||||
LOG_DBG("conn %p buf %p len %u", conn, buf, buf->len);
|
||||
|
||||
/* Send directly if the packet fits the ACL MTU */
|
||||
if (buf->len <= conn_mtu(conn)) {
|
||||
if (buf->len <= conn_mtu(conn) && !tx_data(buf)->is_cont) {
|
||||
LOG_DBG("send single");
|
||||
return send_frag(conn, buf, FRAG_SINGLE, false);
|
||||
}
|
||||
|
||||
/* Create & enqueue first fragment */
|
||||
frag = create_frag(conn, buf);
|
||||
if (!frag) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!send_frag(conn, frag, FRAG_START, true)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
LOG_DBG("start fragmenting");
|
||||
/*
|
||||
* Send the fragments. For the last one simply use the original
|
||||
* buffer (which works since we've used net_buf_pull on it.
|
||||
*/
|
||||
flags = FRAG_START;
|
||||
if (tx_data(buf)->is_cont) {
|
||||
flags = FRAG_CONT;
|
||||
}
|
||||
|
||||
while (buf->len > conn_mtu(conn)) {
|
||||
frag = create_frag(conn, buf);
|
||||
if (!frag) {
|
||||
return false;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (!send_frag(conn, frag, FRAG_CONT, true)) {
|
||||
return false;
|
||||
err = send_frag(conn, frag, flags, false);
|
||||
if (err) {
|
||||
LOG_DBG("%p failed, mark as existing frag", buf);
|
||||
tx_data(buf)->is_cont = flags != FRAG_START;
|
||||
/* Put the frag back into the original buffer */
|
||||
net_buf_push_mem(buf, frag->data, frag->len);
|
||||
net_buf_unref(frag);
|
||||
return err;
|
||||
}
|
||||
|
||||
flags = FRAG_CONT;
|
||||
}
|
||||
|
||||
LOG_DBG("last frag");
|
||||
tx_data(buf)->is_cont = true;
|
||||
return send_frag(conn, buf, FRAG_END, false);
|
||||
}
|
||||
|
||||
|
@@ -779,6 +823,7 @@ int bt_conn_prepare_events(struct k_poll_event events[])
|
|||
void bt_conn_process_tx(struct bt_conn *conn)
|
||||
{
|
||||
struct net_buf *buf;
|
||||
int err;
|
||||
|
||||
LOG_DBG("conn %p", conn);
|
||||
|
||||
|
@@ -789,10 +834,23 @@ void bt_conn_process_tx(struct bt_conn *conn)
|
|||
return;
|
||||
}
|
||||
|
||||
/* Get next ACL packet for connection */
|
||||
buf = net_buf_get(&conn->tx_queue, K_NO_WAIT);
|
||||
/* Get next ACL packet for connection. The buffer will only get dequeued
|
||||
* if there is a free controller buffer to put it in.
|
||||
*
|
||||
* Important: no operations should be done on `buf` until it is properly
|
||||
* dequeued from the FIFO, using the `net_buf_get()` API.
|
||||
*/
|
||||
buf = k_fifo_peek_head(&conn->tx_queue);
|
||||
BT_ASSERT(buf);
|
||||
if (!send_buf(conn, buf)) {
|
||||
|
||||
/* Since we used `peek`, the queue still owns the reference to the
|
||||
* buffer, so we need to take an explicit additional reference here.
|
||||
*/
|
||||
buf = net_buf_ref(buf);
|
||||
err = send_buf(conn, buf);
|
||||
net_buf_unref(buf);
|
||||
|
||||
if (err == -EIO) {
|
||||
struct bt_conn_tx *tx = tx_data(buf)->tx;
|
||||
|
||||
tx_data(buf)->tx = NULL;
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue