Bluetooth: host: Change TX pattern (push -> pull)
The current TX pattern in the host is to try to push a buffer through all the layers until it is ingested by the controller. Since sending can fail at any layer, we need error handling and separate retry logic in pretty much every layer. That logic obscures the "happy path" for people trying to understand the code.

This commit inverts the control, in a way that doesn't require changing the host or HCI driver API (yet): layers don't send buffers synchronously; instead they put the buffer in a private queue of their own and raise a TX flag on the lower layer. Think of it as a `READY` interrupt line that has to be serviced by the lower layer.

Sending is now non-blocking; the rate depends on the size of the buffer pools.

There is a single TX processing function. It can be thought of as the Interrupt Service Routine that handles the `READY` interrupt from the layers above. That `tx_processor()` then attempts to allocate enough resources to send the buffer through to the controller. This allocation logic does not block.

After acquiring all the resources, the TX processor attempts to pull data from the upper layer. The upper layer has to figure out which buffer to pass to the controller. This is a good spot to put scheduling or QoS logic in the upper layer.

Notes:

- A user-facing API for tuning QoS will be implemented in a future patch.
- This scheme could (and probably will) be extended to upper layers (e.g. ATT, L2CAP CoC segmentation).
- This patch removes the `pending_no_cb()` memory optimization for clarity/correctness. It might get re-implemented after a stabilization period, hopefully with more documentation.

Signed-off-by: Jonathan Rico <jonathan.rico@nordicsemi.no>
Co-authored-by: Aleksander Wasaznik <aleksander.wasaznik@nordicsemi.no>
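Below is a minimal, self-contained C sketch of the inverted control flow described above. It is a toy model with made-up names (`toy_chan`, `chan_data_pull`, `tx_processor_demo`), not the actual Zephyr API; the real host uses per-channel `k_fifo`s, `bt_conn_data_ready()` and the host TX processor in conn.c.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a per-channel PDU queue (a k_fifo in the real host). */
#define QUEUE_DEPTH 4

struct toy_chan {
	const char *pdu_queue[QUEUE_DEPTH];
	unsigned int head, tail;
	bool ready;		/* the "READY interrupt line" to the lower layer */
};

/* Upper layer: enqueue locally and raise the flag. Never blocks, never fails. */
static void chan_send(struct toy_chan *ch, const char *pdu)
{
	ch->pdu_queue[ch->tail++ % QUEUE_DEPTH] = pdu;
	ch->ready = true;	/* stand-in for bt_conn_data_ready() */
}

/* Lower layer pulls exactly one PDU; the upper layer decides which one. */
static const char *chan_data_pull(struct toy_chan *ch)
{
	if (ch->head == ch->tail) {
		return NULL;
	}

	const char *pdu = ch->pdu_queue[ch->head++ % QUEUE_DEPTH];

	if (ch->head == ch->tail) {
		ch->ready = false;	/* queue drained: lower the flag */
	}

	return pdu;
}

/* The "ISR": runs when READY is raised and the controller has free buffers. */
static void tx_processor_demo(struct toy_chan *ch, int controller_buffers)
{
	while (ch->ready && controller_buffers-- > 0) {
		const char *pdu = chan_data_pull(ch);

		if (pdu) {
			printf("to controller: %s\n", pdu);
		}
	}
}

int main(void)
{
	struct toy_chan ch = {0};

	chan_send(&ch, "PDU 1");
	chan_send(&ch, "PDU 2");
	tx_processor_demo(&ch, 2);	/* lower layer services the READY "interrupt" */

	return 0;
}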
parent 1c8cae30a8
commit 28535fe2f2

13 changed files with 968 additions and 534 deletions
@@ -191,7 +191,7 @@ struct bt_l2cap_le_chan {
	 */
	struct bt_l2cap_le_endpoint tx;
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
-	/** Channel Transmission queue */
+	/** Channel Transmission queue (for SDUs) */
	struct k_fifo tx_queue;
	/** Channel Pending Transmission buffer */
	struct net_buf *tx_buf;

@@ -218,6 +218,13 @@ struct bt_l2cap_le_chan {
	struct k_work_delayable rtx_work;
	struct k_work_sync rtx_sync;
#endif
+
+	/** @internal To be used with @ref bt_conn.upper_data_ready */
+	sys_snode_t _pdu_ready;
+	/** @internal To be used with @ref bt_conn.upper_data_ready */
+	atomic_t _pdu_ready_lock;
+	/** @internal Queue of net bufs not yet sent to lower layer */
+	struct k_fifo _pdu_tx_queue;
};

/**

@@ -260,6 +267,13 @@ struct bt_l2cap_br_chan {
	/* Response Timeout eXpired (RTX) timer */
	struct k_work_delayable rtx_work;
	struct k_work_sync rtx_sync;
+
+	/** @internal To be used with @ref bt_conn.upper_data_ready */
+	sys_snode_t _pdu_ready;
+	/** @internal To be used with @ref bt_conn.upper_data_ready */
+	atomic_t _pdu_ready_lock;
+	/** @internal Queue of net bufs not yet sent to lower layer */
+	struct k_fifo _pdu_tx_queue;
};

/** @brief L2CAP Channel operations structure. */
@@ -267,8 +267,8 @@ config BT_LIM_ADV_TIMEOUT

config BT_CONN_TX_USER_DATA_SIZE
	int
-	default 24 if 64BIT
-	default 12
+	default 32 if 64BIT
+	default 16
	help
	  Necessary user_data size for allowing packet fragmentation when
	  sending over HCI. See `struct tx_meta` in conn.c.

@@ -285,12 +285,12 @@ if BT_CONN

config BT_CONN_TX_MAX
	int "Maximum number of pending TX buffers with a callback"
-	default BT_L2CAP_TX_BUF_COUNT
-	range BT_L2CAP_TX_BUF_COUNT 255
+	default BT_BUF_ACL_TX_COUNT
+	range BT_BUF_ACL_TX_COUNT 255
	help
	  Maximum number of pending TX buffers that have an associated
	  callback. Normally this can be left to the default value, which
-	  is equal to the number of TX buffers in the stack-internal pool.
+	  is equal to the number of TX buffers in the controller.

config BT_CONN_PARAM_ANY
	bool "Accept any values for connection parameters"
@@ -214,6 +214,8 @@ static bool l2cap_br_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
		return false;
	}

+	k_fifo_init(&ch->_pdu_tx_queue);
+
	/* All dynamic channels have the destroy handler which makes sure that
	 * the RTX work structure is properly released with a cancel sync.
	 * The fixed signal channel is only removed when disconnected and the
@ -239,18 +241,71 @@ static uint8_t l2cap_br_get_ident(void)
|
|||
return ident;
|
||||
}
|
||||
|
||||
static void raise_data_ready(struct bt_l2cap_br_chan *br_chan)
|
||||
{
|
||||
if (!atomic_set(&br_chan->_pdu_ready_lock, 1)) {
|
||||
sys_slist_append(&br_chan->chan.conn->l2cap_data_ready,
|
||||
&br_chan->_pdu_ready);
|
||||
LOG_DBG("data ready raised");
|
||||
} else {
|
||||
LOG_DBG("data ready already");
|
||||
}
|
||||
|
||||
bt_conn_data_ready(br_chan->chan.conn);
|
||||
}
|
||||
|
||||
static void lower_data_ready(struct bt_l2cap_br_chan *br_chan)
|
||||
{
|
||||
struct bt_conn *conn = br_chan->chan.conn;
|
||||
sys_snode_t *s = sys_slist_get(&conn->l2cap_data_ready);
|
||||
|
||||
__ASSERT_NO_MSG(s == &br_chan->_pdu_ready);
|
||||
(void)s;
|
||||
|
||||
atomic_t old = atomic_set(&br_chan->_pdu_ready_lock, 0);
|
||||
|
||||
__ASSERT_NO_MSG(old);
|
||||
(void)old;
|
||||
}
|
||||
|
||||
static void cancel_data_ready(struct bt_l2cap_br_chan *br_chan)
|
||||
{
|
||||
struct bt_conn *conn = br_chan->chan.conn;
|
||||
|
||||
sys_slist_find_and_remove(&conn->l2cap_data_ready,
|
||||
&br_chan->_pdu_ready);
|
||||
|
||||
atomic_set(&br_chan->_pdu_ready_lock, 0);
|
||||
}
|
||||
|
||||
int bt_l2cap_br_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
|
||||
bt_conn_tx_cb_t cb, void *user_data)
|
||||
{
|
||||
struct bt_l2cap_hdr *hdr;
|
||||
struct bt_l2cap_chan *ch = bt_l2cap_br_lookup_tx_cid(conn, cid);
|
||||
struct bt_l2cap_br_chan *br_chan = CONTAINER_OF(ch, struct bt_l2cap_br_chan, chan);
|
||||
|
||||
LOG_DBG("conn %p cid %u len %zu", conn, cid, buf->len);
|
||||
LOG_DBG("chan %p buf %p len %zu", br_chan, buf, buf->len);
|
||||
|
||||
hdr = net_buf_push(buf, sizeof(*hdr));
|
||||
hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
|
||||
hdr->cid = sys_cpu_to_le16(cid);
|
||||
|
||||
return bt_conn_send_cb(conn, buf, cb, user_data);
|
||||
if (buf->user_data_size < sizeof(struct closure)) {
|
||||
LOG_DBG("not enough room in user_data %d < %d pool %u",
|
||||
buf->user_data_size,
|
||||
CONFIG_BT_CONN_TX_USER_DATA_SIZE,
|
||||
buf->pool_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
LOG_DBG("push PDU: cb %p userdata %p", cb, user_data);
|
||||
|
||||
make_closure(buf->user_data, cb, user_data);
|
||||
net_buf_put(&br_chan->_pdu_tx_queue, buf);
|
||||
raise_data_ready(br_chan);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Send the buffer and release it in case of failure.
|
||||
|
@ -287,6 +342,63 @@ static void l2cap_br_chan_send_req(struct bt_l2cap_br_chan *chan,
|
|||
k_work_reschedule(&chan->rtx_work, timeout);
|
||||
}
|
||||
|
||||
/* L2CAP channel wants to send a PDU */
|
||||
static bool chan_has_data(struct bt_l2cap_br_chan *br_chan)
|
||||
{
|
||||
return !k_fifo_is_empty(&br_chan->_pdu_tx_queue);
|
||||
}
|
||||
|
||||
struct net_buf *l2cap_br_data_pull(struct bt_conn *conn, size_t amount)
|
||||
{
|
||||
const sys_snode_t *pdu_ready = sys_slist_peek_head(&conn->l2cap_data_ready);
|
||||
|
||||
if (!pdu_ready) {
|
||||
LOG_DBG("nothing to send on this conn");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct bt_l2cap_br_chan *br_chan = CONTAINER_OF(pdu_ready,
|
||||
struct bt_l2cap_br_chan,
|
||||
_pdu_ready);
|
||||
|
||||
/* Leave the PDU buffer in the queue until we have sent all its
|
||||
* fragments.
|
||||
*/
|
||||
struct net_buf *pdu = k_fifo_peek_head(&br_chan->_pdu_tx_queue);
|
||||
|
||||
__ASSERT(pdu, "signaled ready but no PDUs in the TX queue");
|
||||
|
||||
if (bt_buf_has_view(pdu)) {
|
||||
LOG_ERR("already have view on %p", pdu);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* We can't interleave ACL fragments from different channels for the
|
||||
* same ACL conn -> we have to wait until a full L2 PDU is transferred
|
||||
* before switching channels.
|
||||
*/
|
||||
bool last_frag = amount >= pdu->len;
|
||||
|
||||
if (last_frag) {
|
||||
LOG_DBG("last frag, removing %p", pdu);
|
||||
struct net_buf *b = k_fifo_get(&br_chan->_pdu_tx_queue, K_NO_WAIT);
|
||||
|
||||
__ASSERT_NO_MSG(b == pdu);
|
||||
(void)b;
|
||||
|
||||
LOG_DBG("chan %p done", br_chan);
|
||||
lower_data_ready(br_chan);
|
||||
|
||||
/* Append channel to list if it still has data */
|
||||
if (chan_has_data(br_chan)) {
|
||||
LOG_DBG("chan %p ready", br_chan);
|
||||
raise_data_ready(br_chan);
|
||||
}
|
||||
}
|
||||
|
||||
return pdu;
|
||||
}
|
||||
|
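To make the `amount` contract of `l2cap_br_data_pull()` concrete, here is a tiny standalone C model (all names made up, not the Zephyr API): the same PDU keeps being returned on every pull until the caller's byte budget covers what is left of it, and only then is it dropped from the channel queue, mirroring the `last_frag` logic above.

#include <stdio.h>

/* Toy model of the `amount` contract; `toy_pdu` and friends are made up. */
struct toy_pdu {
	const char *data;
	size_t len;	/* bytes of the PDU left to send */
	int queued;	/* 1 while the PDU sits in the channel queue */
};

static struct toy_pdu *toy_data_pull(struct toy_pdu *pdu, size_t amount)
{
	if (!pdu->queued) {
		return NULL;
	}

	if (amount >= pdu->len) {
		pdu->queued = 0;	/* "last frag, removing": pop from the queue */
	}

	return pdu;
}

int main(void)
{
	struct toy_pdu pdu = { .data = "0123456789", .len = 10, .queued = 1 };
	const size_t acl_frag_size = 4;	/* bytes the controller accepts per call */
	struct toy_pdu *p;

	while ((p = toy_data_pull(&pdu, acl_frag_size)) != NULL) {
		size_t chunk = p->len < acl_frag_size ? p->len : acl_frag_size;

		printf("send %zu bytes: %.*s\n", chunk, (int)chunk, p->data);
		p->data += chunk;
		p->len -= chunk;
	}

	return 0;
}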
||||
static void l2cap_br_get_info(struct bt_l2cap_br *l2cap, uint16_t info_type)
|
||||
{
|
||||
struct bt_l2cap_info_req *info;
|
||||
|
@ -775,6 +887,7 @@ void bt_l2cap_br_chan_set_state(struct bt_l2cap_chan *chan,
|
|||
void bt_l2cap_br_chan_del(struct bt_l2cap_chan *chan)
|
||||
{
|
||||
const struct bt_l2cap_chan_ops *ops = chan->ops;
|
||||
struct bt_l2cap_br_chan *br_chan = CONTAINER_OF(chan, struct bt_l2cap_br_chan, chan);
|
||||
|
||||
LOG_DBG("conn %p chan %p", chan->conn, chan);
|
||||
|
||||
|
@ -782,6 +895,15 @@ void bt_l2cap_br_chan_del(struct bt_l2cap_chan *chan)
|
|||
goto destroy;
|
||||
}
|
||||
|
||||
cancel_data_ready(br_chan);
|
||||
|
||||
/* Remove buffers on the PDU TX queue. */
|
||||
while (chan_has_data(br_chan)) {
|
||||
struct net_buf *buf = net_buf_get(&br_chan->_pdu_tx_queue, K_NO_WAIT);
|
||||
|
||||
net_buf_unref(buf);
|
||||
}
|
||||
|
||||
if (ops->disconnected) {
|
||||
ops->disconnected(chan);
|
||||
}
|
||||
|
|
|
@@ -46,3 +46,6 @@ void l2cap_br_encrypt_change(struct bt_conn *conn, uint8_t hci_status);

/* Handle received data */
void bt_l2cap_br_recv(struct bt_conn *conn, struct net_buf *buf);
+
+/* Pull HCI fragments from buffers intended for `conn` */
+struct net_buf *l2cap_br_data_pull(struct bt_conn *conn, size_t amount);
(File diff suppressed because it is too large)
@ -80,8 +80,6 @@ enum {
|
|||
BT_CONN_CTE_REQ_ENABLED, /* CTE request procedure is enabled */
|
||||
BT_CONN_CTE_RSP_ENABLED, /* CTE response procedure is enabled */
|
||||
|
||||
BT_CONN_TX_WOULDBLOCK_FREE_TX, /** #bt_conn_process_tx wouldblock on #free_tx */
|
||||
|
||||
/* Total number of flags - must be at the end of the enum */
|
||||
BT_CONN_NUM_FLAGS,
|
||||
};
|
||||
|
@ -170,6 +168,9 @@ struct bt_conn_iso {
|
|||
|
||||
/** Stored information about the ISO stream */
|
||||
struct bt_iso_info info;
|
||||
|
||||
/** Queue from which conn will pull data */
|
||||
struct k_fifo txq;
|
||||
};
|
||||
|
||||
typedef void (*bt_conn_tx_cb_t)(struct bt_conn *conn, void *user_data, int err);
|
||||
|
@ -179,9 +180,6 @@ struct bt_conn_tx {
|
|||
|
||||
bt_conn_tx_cb_t cb;
|
||||
void *user_data;
|
||||
|
||||
/* Number of pending packets without a callback after this one */
|
||||
uint32_t pending_no_cb;
|
||||
};
|
||||
|
||||
struct acl_data {
|
||||
|
@ -227,12 +225,8 @@ struct bt_conn {
|
|||
uint16_t rx_len;
|
||||
struct net_buf *rx;
|
||||
|
||||
/* Sent but not acknowledged TX packets with a callback */
|
||||
/* Pending TX that are awaiting the NCP event. len(tx_pending) == in_ll */
|
||||
sys_slist_t tx_pending;
|
||||
/* Sent but not acknowledged TX packets without a callback before
|
||||
* the next packet (if any) in tx_pending.
|
||||
*/
|
||||
uint32_t pending_no_cb;
|
||||
|
||||
/* Completed TX for which we need to call the callback */
|
||||
sys_slist_t tx_complete;
|
||||
|
@ -240,9 +234,6 @@ struct bt_conn {
|
|||
struct k_work tx_complete_work;
|
||||
#endif /* CONFIG_BT_CONN_TX */
|
||||
|
||||
/* Queue for outgoing ACL data */
|
||||
struct k_fifo tx_queue;
|
||||
|
||||
/* Active L2CAP channels */
|
||||
sys_slist_t channels;
|
||||
|
||||
|
@ -271,12 +262,85 @@ struct bt_conn {
|
|||
uint16_t subversion;
|
||||
} rv;
|
||||
#endif
|
||||
|
||||
/* Callback into the higher-layers (L2CAP / ISO) to return a buffer for
|
||||
* sending `amount` of bytes to HCI.
|
||||
*
|
||||
* Scheduling from which channel to pull (e.g. for L2CAP) is done at the
|
||||
* upper layer's discretion.
|
||||
*/
|
||||
struct net_buf * (*tx_data_pull)(struct bt_conn *conn, size_t amount);
|
||||
|
||||
/* Get (and clears for ACL conns) callback and user-data for `buf`. */
|
||||
void (*get_and_clear_cb)(struct bt_conn *conn, struct net_buf *buf,
|
||||
bt_conn_tx_cb_t *cb, void **ud);
|
||||
|
||||
/* Return true if upper layer has data to send over HCI */
|
||||
bool (*has_data)(struct bt_conn *conn);
|
||||
|
||||
/* For ACL: List of data-ready L2 channels. Used by TX processor for
|
||||
* pulling HCI fragments. Channels are only ever removed from this list
|
||||
* when a whole PDU (ie all its frags) have been sent.
|
||||
*/
|
||||
sys_slist_t l2cap_data_ready;
|
||||
|
||||
/* Node for putting this connection in a data-ready mode for the bt_dev.
|
||||
* This will be used by the TX processor to then fetch HCI frags from it.
|
||||
*/
|
||||
sys_snode_t _conn_ready;
|
||||
atomic_t _conn_ready_lock;
|
||||
|
||||
/* Holds the number of packets that have been sent to the controller but
|
||||
* not yet ACKd (by receiving an Number of Completed Packets). This
|
||||
* variable can be used for deriving a QoS or waterlevel scheme in order
|
||||
* to maximize throughput/latency.
|
||||
* It's an optimization so we don't chase `tx_pending` all the time.
|
||||
*/
|
||||
atomic_t in_ll;
|
||||
|
||||
/* Next buffer should be an ACL/ISO HCI fragment */
|
||||
bool next_is_frag;
|
||||
|
||||
/* Must be at the end so that everything else in the structure can be
|
||||
* memset to zero without affecting the ref.
|
||||
*/
|
||||
atomic_t ref;
|
||||
};
|
||||
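The three function pointers above (`tx_data_pull`, `get_and_clear_cb`, `has_data`) are the whole contract between a transport and the TX processor. Below is a sketch of how a transport wires them up; it mirrors what `iso_new()` does further down in this diff, and the `my_*` names are made up for illustration.

/* Illustration only; mirrors iso_new() later in this diff. `my_*` are made-up names. */
static struct net_buf *my_data_pull(struct bt_conn *conn, size_t amount);
static bool my_has_data(struct bt_conn *conn);
static void my_get_and_clear_cb(struct bt_conn *conn, struct net_buf *buf,
				bt_conn_tx_cb_t *cb, void **ud);

static void my_transport_attach(struct bt_conn *conn)
{
	conn->tx_data_pull = my_data_pull;		/* hand out HCI fragments */
	conn->has_data = my_has_data;			/* "anything queued?" */
	conn->get_and_clear_cb = my_get_and_clear_cb;	/* completion callback */
}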
|
||||
/* Holds the callback and a user-data field for the upper layer. This callback
|
||||
* shall be called when the buffer is ACK'd by the controller (by a Num Complete
|
||||
* Packets event) or if the connection dies.
|
||||
*
|
||||
* Flow control in the spec be crazy, look it up. LL is allowed to choose
|
||||
* between sending NCP events always or not at all on disconnect.
|
||||
*
|
||||
* We pack the struct to make sure it fits in the net_buf user_data field.
|
||||
*/
|
||||
struct closure {
|
||||
void *cb;
|
||||
void *data;
|
||||
} __packed;
|
||||
|
||||
#if defined(CONFIG_BT_CONN_TX_USER_DATA_SIZE)
|
||||
BUILD_ASSERT(sizeof(struct closure) < CONFIG_BT_CONN_TX_USER_DATA_SIZE);
|
||||
#endif
|
||||
|
||||
static inline void make_closure(void *storage, void *cb, void *data)
|
||||
{
|
||||
((struct closure *)storage)->cb = cb;
|
||||
((struct closure *)storage)->data = data;
|
||||
}
|
||||
|
||||
static inline void *closure_cb(void *storage)
|
||||
{
|
||||
return ((struct closure *)storage)->cb;
|
||||
}
|
||||
|
||||
static inline void *closure_data(void *storage)
|
||||
{
|
||||
return ((struct closure *)storage)->data;
|
||||
}
|
||||
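A short sketch of how these closure helpers are meant to be used (the function names below are made up; the real call sites are `bt_l2cap_send_cb()` and the TX completion path): the sender packs the callback into the buffer's user_data before queuing, and whoever completes the buffer unpacks it.

/* Illustration only: pack on the way into the queue... */
static void queue_with_cb(struct k_fifo *txq, struct net_buf *buf,
			  bt_conn_tx_cb_t cb, void *user_data)
{
	make_closure(buf->user_data, cb, user_data);
	net_buf_put(txq, buf);
}

/* ...and unpack when the buffer is completed (ACK'd or the conn is torn down). */
static void complete_with_cb(struct bt_conn *conn, struct net_buf *buf, int err)
{
	bt_conn_tx_cb_t cb = (bt_conn_tx_cb_t)closure_cb(buf->user_data);
	void *user_data = closure_data(buf->user_data);

	if (cb) {
		cb(conn, user_data, err);
	}
}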
|
||||
void bt_conn_reset_rx_state(struct bt_conn *conn);
|
||||
|
||||
/* Process incoming data for a connection */
|
||||
|
@ -301,11 +365,6 @@ int bt_conn_send_cb(struct bt_conn *conn, struct net_buf *buf,
|
|||
int bt_conn_send_iso_cb(struct bt_conn *conn, struct net_buf *buf,
|
||||
bt_conn_tx_cb_t cb, bool has_ts);
|
||||
|
||||
static inline int bt_conn_send(struct bt_conn *conn, struct net_buf *buf)
|
||||
{
|
||||
return bt_conn_send_cb(conn, buf, NULL, NULL);
|
||||
}
|
||||
|
||||
/* Check if a connection object with the peer already exists */
|
||||
bool bt_conn_exists_le(uint8_t id, const bt_addr_le_t *peer);
|
||||
|
||||
|
@ -484,4 +543,8 @@ struct k_sem *bt_conn_get_pkts(struct bt_conn *conn);
|
|||
|
||||
/* k_poll related helpers for the TX thread */
|
||||
int bt_conn_prepare_events(struct k_poll_event events[]);
|
||||
void bt_conn_process_tx(struct bt_conn *conn);
|
||||
|
||||
/* To be called by upper layers when they want to send something.
|
||||
* Functions just like an IRQ.
|
||||
*/
|
||||
void bt_conn_data_ready(struct bt_conn *conn);
|
||||
|
|
|
@ -528,37 +528,29 @@ static void hci_num_completed_packets(struct net_buf *buf)
|
|||
}
|
||||
|
||||
while (count--) {
|
||||
struct bt_conn_tx *tx;
|
||||
sys_snode_t *node;
|
||||
unsigned int key;
|
||||
|
||||
key = irq_lock();
|
||||
|
||||
if (conn->pending_no_cb) {
|
||||
conn->pending_no_cb--;
|
||||
irq_unlock(key);
|
||||
k_sem_give(bt_conn_get_pkts(conn));
|
||||
continue;
|
||||
}
|
||||
k_sem_give(bt_conn_get_pkts(conn));
|
||||
|
||||
/* move the next TX context from the `pending` list to
|
||||
* the `complete` list.
|
||||
*/
|
||||
node = sys_slist_get(&conn->tx_pending);
|
||||
irq_unlock(key);
|
||||
|
||||
if (!node) {
|
||||
LOG_ERR("packets count mismatch");
|
||||
__ASSERT_NO_MSG(0);
|
||||
break;
|
||||
}
|
||||
|
||||
tx = CONTAINER_OF(node, struct bt_conn_tx, node);
|
||||
sys_slist_append(&conn->tx_complete, node);
|
||||
|
||||
key = irq_lock();
|
||||
conn->pending_no_cb = tx->pending_no_cb;
|
||||
tx->pending_no_cb = 0U;
|
||||
sys_slist_append(&conn->tx_complete, &tx->node);
|
||||
irq_unlock(key);
|
||||
/* align the `pending` value */
|
||||
__ASSERT_NO_MSG(atomic_get(&conn->in_ll));
|
||||
atomic_dec(&conn->in_ll);
|
||||
|
||||
/* TX context free + callback happens in there */
|
||||
k_work_submit(&conn->tx_complete_work);
|
||||
k_sem_give(bt_conn_get_pkts(conn));
|
||||
}
|
||||
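The `atomic_dec(&conn->in_ll)` above is the ACK half of a simple invariant; the increment half happens when a fragment is handed to the controller, in conn.c (not visible in this view), so the note below is an assumption rather than quoted code.

/* Assumed accounting (the increment side lives in conn.c, which is suppressed
 * in this view): atomic_inc(&conn->in_ll) for every fragment handed to the
 * controller, atomic_dec() here for every completed packet, so at any time
 * atomic_get(&conn->in_ll) equals the number of fragments the controller
 * still holds; that is what the `in_ll` comment in conn_internal.h promises.
 */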
|
||||
bt_conn_unref(conn);
|
||||
|
@ -2946,34 +2938,14 @@ static void process_events(struct k_poll_event *ev, int count)
|
|||
LOG_DBG("ev->state %u", ev->state);
|
||||
|
||||
switch (ev->state) {
|
||||
case K_POLL_STATE_SIGNALED:
|
||||
break;
|
||||
case K_POLL_STATE_SEM_AVAILABLE:
|
||||
/* After this fn is exec'd, `bt_conn_prepare_events()`
|
||||
* will be called once again, and this time buffers will
|
||||
* be available, so the FIFO will be added to the poll
|
||||
* list instead of the ctlr buffers semaphore.
|
||||
*/
|
||||
break;
|
||||
case K_POLL_STATE_FIFO_DATA_AVAILABLE:
|
||||
if (ev->tag == BT_EVENT_CMD_TX) {
|
||||
send_cmd();
|
||||
} else if (IS_ENABLED(CONFIG_BT_CONN) ||
|
||||
IS_ENABLED(CONFIG_BT_ISO)) {
|
||||
struct bt_conn *conn;
|
||||
|
||||
if (ev->tag == BT_EVENT_CONN_TX_QUEUE) {
|
||||
conn = CONTAINER_OF(ev->fifo,
|
||||
struct bt_conn,
|
||||
tx_queue);
|
||||
bt_conn_process_tx(conn);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case K_POLL_STATE_NOT_READY:
|
||||
break;
|
||||
default:
|
||||
LOG_WRN("Unexpected k_poll event state %u", ev->state);
|
||||
__ASSERT_NO_MSG(0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -3014,11 +2986,6 @@ static void hci_tx_thread(void *p1, void *p2, void *p3)
|
|||
events[0].state = K_POLL_STATE_NOT_READY;
|
||||
ev_count = 1;
|
||||
|
||||
/* This adds the FIFO per-connection */
|
||||
if (IS_ENABLED(CONFIG_BT_CONN) || IS_ENABLED(CONFIG_BT_ISO)) {
|
||||
ev_count += bt_conn_prepare_events(&events[1]);
|
||||
}
|
||||
|
||||
LOG_DBG("Calling k_poll with %d events", ev_count);
|
||||
|
||||
err = k_poll(events, ev_count, K_FOREVER);
|
||||
|
|
|
@@ -309,6 +309,10 @@ struct bt_dev_le {
	 */
	uint8_t rl_entries;
#endif /* CONFIG_BT_SMP */
+	/* List of `struct bt_conn` that have either pending data to send, or
+	 * something to process (e.g. a disconnection event).
+	 */
+	sys_slist_t conn_ready;
};

#if defined(CONFIG_BT_CLASSIC)
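The `conn_ready` list added above is what the TX processor walks. The real processor lives in conn.c (whose diff is suppressed in this view), so the sketch below is an assumption about its rough shape, not a copy of it.

/* Assumed shape of the TX processor; the authoritative version is in conn.c. */
static void tx_processor_sketch(size_t controller_frag_size)
{
	sys_snode_t *node = sys_slist_peek_head(&bt_dev.le.conn_ready);

	if (!node) {
		return;	/* no connection has raised "data ready" */
	}

	struct bt_conn *conn = CONTAINER_OF(node, struct bt_conn, _conn_ready);

	/* Ask the upper layer (L2CAP or ISO) for at most one HCI fragment. */
	struct net_buf *frag = conn->tx_data_pull(conn, controller_frag_size);

	if (frag) {
		/* hand `frag` to the HCI driver and account for it in `in_ll` */
	}
}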
@ -16,6 +16,7 @@
|
|||
#include <zephyr/bluetooth/conn.h>
|
||||
#include <zephyr/bluetooth/iso.h>
|
||||
|
||||
#include "host/buf_view.h"
|
||||
#include "host/hci_core.h"
|
||||
#include "host/conn_internal.h"
|
||||
#include "iso_internal.h"
|
||||
|
@ -75,9 +76,9 @@ struct bt_iso_big bigs[CONFIG_BT_ISO_MAX_BIG];
|
|||
static struct bt_iso_big *lookup_big_by_handle(uint8_t big_handle);
|
||||
#endif /* CONFIG_BT_ISO_BROADCAST */
|
||||
|
||||
#if defined(CONFIG_BT_ISO_TX)
|
||||
static void bt_iso_send_cb(struct bt_conn *iso, void *user_data, int err)
|
||||
static void bt_iso_sent_cb(struct bt_conn *iso, void *user_data, int err)
|
||||
{
|
||||
#if defined(CONFIG_BT_ISO_TX)
|
||||
struct bt_iso_chan *chan = iso->iso.chan;
|
||||
struct bt_iso_chan_ops *ops;
|
||||
|
||||
|
@ -88,8 +89,8 @@ static void bt_iso_send_cb(struct bt_conn *iso, void *user_data, int err)
|
|||
if (!err && ops != NULL && ops->sent != NULL) {
|
||||
ops->sent(chan);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_BT_ISO_TX */
|
||||
}
|
||||
|
||||
void hci_iso(struct net_buf *buf)
|
||||
{
|
||||
|
@ -136,12 +137,33 @@ void hci_iso(struct net_buf *buf)
|
|||
bt_conn_unref(iso);
|
||||
}
|
||||
|
||||
/* Pull data from the ISO layer */
|
||||
static struct net_buf *iso_data_pull(struct bt_conn *conn, size_t amount);
|
||||
|
||||
/* Returns true if the ISO layer has data to send on this conn */
|
||||
static bool iso_has_data(struct bt_conn *conn);
|
||||
|
||||
static void iso_get_and_clear_cb(struct bt_conn *conn, struct net_buf *buf,
|
||||
bt_conn_tx_cb_t *cb, void **ud)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_BT_ISO_TX)) {
|
||||
*cb = bt_iso_sent_cb;
|
||||
} else {
|
||||
*cb = NULL;
|
||||
}
|
||||
|
||||
*ud = NULL;
|
||||
}
|
||||
|
||||
static struct bt_conn *iso_new(void)
|
||||
{
|
||||
struct bt_conn *iso = bt_conn_new(iso_conns, ARRAY_SIZE(iso_conns));
|
||||
|
||||
if (iso) {
|
||||
iso->type = BT_CONN_TYPE_ISO;
|
||||
iso->tx_data_pull = iso_data_pull;
|
||||
iso->get_and_clear_cb = iso_get_and_clear_cb;
|
||||
iso->has_data = iso_has_data;
|
||||
} else {
|
||||
LOG_DBG("Could not create new ISO");
|
||||
}
|
||||
|
@ -230,6 +252,7 @@ static void bt_iso_chan_add(struct bt_conn *iso, struct bt_iso_chan *chan)
|
|||
/* Attach ISO channel to the connection */
|
||||
chan->iso = iso;
|
||||
iso->iso.chan = chan;
|
||||
k_fifo_init(&iso->iso.txq);
|
||||
|
||||
LOG_DBG("iso %p chan %p", iso, chan);
|
||||
}
|
||||
|
@ -702,6 +725,61 @@ void bt_iso_recv(struct bt_conn *iso, struct net_buf *buf, uint8_t flags)
|
|||
}
|
||||
#endif /* CONFIG_BT_ISO_RX */
|
||||
|
||||
static bool iso_has_data(struct bt_conn *conn)
|
||||
{
|
||||
#if defined(CONFIG_BT_ISO_TX)
|
||||
return !k_fifo_is_empty(&conn->iso.txq);
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
static struct net_buf *iso_data_pull(struct bt_conn *conn, size_t amount)
|
||||
{
|
||||
#if defined(CONFIG_BT_ISO_TX)
|
||||
LOG_DBG("conn %p amount %d", conn, amount);
|
||||
|
||||
/* Leave the PDU buffer in the queue until we have sent all its
|
||||
* fragments.
|
||||
*/
|
||||
struct net_buf *frag = k_fifo_peek_head(&conn->iso.txq);
|
||||
|
||||
if (!frag) {
|
||||
LOG_DBG("signaled ready but no frag available");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (conn->iso.chan->state != BT_ISO_STATE_CONNECTED) {
|
||||
LOG_DBG("channel has been disconnected");
|
||||
struct net_buf *b = k_fifo_get(&conn->iso.txq, K_NO_WAIT);
|
||||
(void)b;
|
||||
__ASSERT_NO_MSG(b == frag);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (bt_buf_has_view(frag)) {
|
||||
/* This should not happen. conn.c should wait until the view is
|
||||
* destroyed before requesting more data.
|
||||
*/
|
||||
LOG_DBG("already have view");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bool last_frag = amount >= frag->len;
|
||||
|
||||
if (last_frag) {
|
||||
LOG_DBG("last frag, pop buf");
|
||||
struct net_buf *b = k_fifo_get(&conn->iso.txq, K_NO_WAIT);
|
||||
(void)b;
|
||||
__ASSERT_NO_MSG(b == frag);
|
||||
}
|
||||
|
||||
return frag;
|
||||
#else
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BT_ISO_TX)
|
||||
static uint16_t iso_chan_max_data_len(const struct bt_iso_chan *chan)
|
||||
{
|
||||
|
@ -723,6 +801,30 @@ static uint16_t iso_chan_max_data_len(const struct bt_iso_chan *chan)
|
|||
return max_data_len;
|
||||
}
|
||||
|
||||
int conn_iso_send(struct bt_conn *conn, struct net_buf *buf, enum bt_iso_timestamp has_ts)
|
||||
{
|
||||
if (buf->user_data_size < CONFIG_BT_CONN_TX_USER_DATA_SIZE) {
|
||||
LOG_ERR("not enough room in user_data %d < %d pool %u",
|
||||
buf->user_data_size,
|
||||
CONFIG_BT_CONN_TX_USER_DATA_SIZE,
|
||||
buf->pool_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* push the TS flag on the buffer itself.
|
||||
* It will be popped and read back by conn before adding the ISO HCI header.
|
||||
*/
|
||||
net_buf_push_u8(buf, has_ts);
|
||||
|
||||
net_buf_put(&conn->iso.txq, buf);
|
||||
LOG_DBG("%p put on list", buf);
|
||||
|
||||
/* only one ISO channel per conn-object */
|
||||
bt_conn_data_ready(conn);
|
||||
|
||||
return 0;
|
||||
}
|
||||
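`conn_iso_send()` above smuggles the timestamp flag as a single byte pushed in front of the SDU. The consumer side is in conn.c (suppressed in this view); the sketch below is an assumption about what it has to do: pop the byte back off before building the HCI ISO header.

/* Assumed consumer side, for illustration; the real code is in conn.c. */
static void iso_tx_prepare_sketch(struct net_buf *buf)
{
	enum bt_iso_timestamp has_ts =
		(enum bt_iso_timestamp)net_buf_pull_u8(buf);

	if (has_ts == BT_ISO_TS_PRESENT) {
		/* first fragment carries the TS field in its HCI ISO header */
	} else {
		/* shorter header, no timestamp */
	}
}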
|
||||
static int validate_send(const struct bt_iso_chan *chan, const struct net_buf *buf,
|
||||
uint8_t hdr_size)
|
||||
{
|
||||
|
@ -783,7 +885,8 @@ int bt_iso_chan_send(struct bt_iso_chan *chan, struct net_buf *buf, uint16_t seq
|
|||
|
||||
iso_conn = chan->iso;
|
||||
|
||||
return bt_conn_send_iso_cb(iso_conn, buf, bt_iso_send_cb, false);
|
||||
LOG_DBG("send-iso (no ts)");
|
||||
return conn_iso_send(iso_conn, buf, BT_ISO_TS_ABSENT);
|
||||
}
|
||||
|
||||
int bt_iso_chan_send_ts(struct bt_iso_chan *chan, struct net_buf *buf, uint16_t seq_num,
|
||||
|
@ -808,7 +911,8 @@ int bt_iso_chan_send_ts(struct bt_iso_chan *chan, struct net_buf *buf, uint16_t
|
|||
|
||||
iso_conn = chan->iso;
|
||||
|
||||
return bt_conn_send_iso_cb(iso_conn, buf, bt_iso_send_cb, true);
|
||||
LOG_DBG("send-iso (ts)");
|
||||
return conn_iso_send(iso_conn, buf, BT_ISO_TS_PRESENT);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BT_ISO_CENTRAL) || defined(CONFIG_BT_ISO_BROADCASTER)
|
||||
|
|
|
@@ -158,3 +158,11 @@ void bt_iso_chan_set_state(struct bt_iso_chan *chan, enum bt_iso_state state);

/* Process incoming data for a connection */
void bt_iso_recv(struct bt_conn *iso, struct net_buf *buf, uint8_t flags);
+
+/* Whether the HCI ISO data packet contains a timestamp or not.
+ * Per spec, the TS flag can only be set for the first fragment.
+ */
+enum bt_iso_timestamp {
+	BT_ISO_TS_ABSENT = 0,
+	BT_ISO_TS_PRESENT,
+};
@ -122,7 +122,7 @@ static void seg_destroy(struct net_buf *seg)
|
|||
|
||||
get_seg_md(seg)->lechan = NULL;
|
||||
|
||||
LOG_INF("destroy %p (parent %p)", seg, lechan->tx_buf);
|
||||
LOG_DBG("destroy %p (parent %p)", seg, lechan->tx_buf);
|
||||
|
||||
/* allow next view to be allocated (and unlock the parent buf) */
|
||||
bt_buf_destroy_view(seg, &get_seg_md(seg)->view_meta);
|
||||
|
@ -145,7 +145,7 @@ static struct net_buf *get_seg(struct net_buf *sdu,
|
|||
if ((seg_size >= sdu->len) &&
|
||||
(net_buf_headroom(sdu) >= BT_L2CAP_BUF_SIZE(0))) {
|
||||
|
||||
LOG_INF("view >= bufsize, returning it");
|
||||
LOG_DBG("view >= bufsize, returning it");
|
||||
|
||||
return sdu;
|
||||
}
|
||||
|
@ -164,7 +164,7 @@ static struct net_buf *get_seg(struct net_buf *sdu,
|
|||
view = bt_buf_make_view(view, net_buf_ref(sdu),
|
||||
seg_size, &get_seg_md(view)->view_meta);
|
||||
|
||||
LOG_INF("alloc-w-view: sdu %p view %p size %d", sdu, view, seg_size);
|
||||
LOG_DBG("alloc-w-view: sdu %p view %p size %d", sdu, view, seg_size);
|
||||
|
||||
return view;
|
||||
}
|
||||
|
@ -322,9 +322,12 @@ void bt_l2cap_chan_set_state(struct bt_l2cap_chan *chan,
|
|||
#endif /* CONFIG_BT_L2CAP_LOG_LEVEL_DBG */
|
||||
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
|
||||
|
||||
static void cancel_data_ready(struct bt_l2cap_le_chan *lechan);
|
||||
static bool chan_has_data(struct bt_l2cap_le_chan *lechan);
|
||||
void bt_l2cap_chan_del(struct bt_l2cap_chan *chan)
|
||||
{
|
||||
const struct bt_l2cap_chan_ops *ops = chan->ops;
|
||||
struct bt_l2cap_le_chan *le_chan = BT_L2CAP_LE_CHAN(chan);
|
||||
|
||||
LOG_DBG("conn %p chan %p", chan->conn, chan);
|
||||
|
||||
|
@ -332,6 +335,17 @@ void bt_l2cap_chan_del(struct bt_l2cap_chan *chan)
|
|||
goto destroy;
|
||||
}
|
||||
|
||||
cancel_data_ready(le_chan);
|
||||
|
||||
/* Remove buffers on the PDU TX queue. We can't do that in
|
||||
* `l2cap_chan_destroy()` as it is not called for fixed channels.
|
||||
*/
|
||||
while (chan_has_data(le_chan)) {
|
||||
struct net_buf *buf = net_buf_get(&le_chan->_pdu_tx_queue, K_NO_WAIT);
|
||||
|
||||
net_buf_unref(buf);
|
||||
}
|
||||
|
||||
if (ops->disconnected) {
|
||||
ops->disconnected(chan);
|
||||
}
|
||||
|
@ -465,6 +479,8 @@ void bt_l2cap_connected(struct bt_conn *conn)
|
|||
return;
|
||||
}
|
||||
|
||||
k_fifo_init(&le_chan->_pdu_tx_queue);
|
||||
|
||||
if (chan->ops->connected) {
|
||||
chan->ops->connected(chan);
|
||||
}
|
||||
|
@ -719,18 +735,59 @@ struct net_buf *bt_l2cap_create_pdu_timeout(struct net_buf_pool *pool,
|
|||
timeout);
|
||||
}
|
||||
|
||||
int bt_l2cap_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
|
||||
static void raise_data_ready(struct bt_l2cap_le_chan *le_chan)
|
||||
{
|
||||
if (!atomic_set(&le_chan->_pdu_ready_lock, 1)) {
|
||||
sys_slist_append(&le_chan->chan.conn->l2cap_data_ready,
|
||||
&le_chan->_pdu_ready);
|
||||
LOG_DBG("data ready raised");
|
||||
} else {
|
||||
LOG_DBG("data ready already");
|
||||
}
|
||||
|
||||
bt_conn_data_ready(le_chan->chan.conn);
|
||||
}
|
||||
|
||||
static void lower_data_ready(struct bt_l2cap_le_chan *le_chan)
|
||||
{
|
||||
struct bt_conn *conn = le_chan->chan.conn;
|
||||
sys_snode_t *s = sys_slist_get(&conn->l2cap_data_ready);
|
||||
|
||||
__ASSERT_NO_MSG(s == &le_chan->_pdu_ready);
|
||||
(void)s;
|
||||
|
||||
atomic_t old = atomic_set(&le_chan->_pdu_ready_lock, 0);
|
||||
|
||||
__ASSERT_NO_MSG(old);
|
||||
(void)old;
|
||||
}
|
||||
|
||||
static void cancel_data_ready(struct bt_l2cap_le_chan *le_chan)
|
||||
{
|
||||
struct bt_conn *conn = le_chan->chan.conn;
|
||||
|
||||
sys_slist_find_and_remove(&conn->l2cap_data_ready,
|
||||
&le_chan->_pdu_ready);
|
||||
atomic_set(&le_chan->_pdu_ready_lock, 0);
|
||||
}
|
||||
|
||||
int bt_l2cap_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *seg,
|
||||
bt_conn_tx_cb_t cb, void *user_data)
|
||||
{
|
||||
struct bt_l2cap_hdr *hdr;
|
||||
|
||||
LOG_DBG("conn %p cid %u len %zu", conn, cid, buf->len);
|
||||
LOG_DBG("conn %p cid %u len %zu", conn, cid, seg->len);
|
||||
|
||||
hdr = net_buf_push(buf, sizeof(*hdr));
|
||||
hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
|
||||
hdr = net_buf_push(seg, sizeof(*hdr));
|
||||
hdr->len = sys_cpu_to_le16(seg->len - sizeof(*hdr));
|
||||
hdr->cid = sys_cpu_to_le16(cid);
|
||||
|
||||
if (buf->ref != 1) {
|
||||
/* TODO: un-foreach this: ATT, SMP & L2CAP CoC _know_ the channel */
|
||||
struct bt_l2cap_chan *ch = bt_l2cap_le_lookup_tx_cid(conn, cid);
|
||||
|
||||
struct bt_l2cap_le_chan *chan = CONTAINER_OF(ch, struct bt_l2cap_le_chan, chan);
|
||||
|
||||
if (seg->ref != 1) {
|
||||
/* The host may alter the buf contents when fragmenting. Higher
|
||||
* layers cannot expect the buf contents to stay intact. Extra
|
||||
* refs suggests a silent data corruption would occur if not for
|
||||
|
@ -740,7 +797,93 @@ int bt_l2cap_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
return bt_conn_send_cb(conn, buf, cb, user_data);
|
||||
if (seg->user_data_size < sizeof(struct closure)) {
|
||||
LOG_DBG("not enough room in user_data %d < %d pool %u",
|
||||
seg->user_data_size,
|
||||
CONFIG_BT_CONN_TX_USER_DATA_SIZE,
|
||||
seg->pool_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
make_closure(seg->user_data, cb, user_data);
|
||||
LOG_DBG("push: cb %p userdata %p", cb, user_data);
|
||||
|
||||
net_buf_put(&chan->_pdu_tx_queue, seg);
|
||||
|
||||
raise_data_ready(chan); /* tis just a flag */
|
||||
|
||||
return 0; /* look ma, no failures */
|
||||
}
|
||||
|
||||
/* L2CAP channel wants to send a PDU */
|
||||
static bool chan_has_data(struct bt_l2cap_le_chan *lechan)
|
||||
{
|
||||
return !k_fifo_is_empty(&lechan->_pdu_tx_queue);
|
||||
}
|
||||
|
||||
struct net_buf *l2cap_data_pull(struct bt_conn *conn, size_t amount)
|
||||
{
|
||||
sys_snode_t *pdu_ready = sys_slist_peek_head(&conn->l2cap_data_ready);
|
||||
|
||||
if (!pdu_ready) {
|
||||
LOG_DBG("nothing to send on this conn");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct bt_l2cap_le_chan *lechan = CONTAINER_OF(pdu_ready,
|
||||
struct bt_l2cap_le_chan,
|
||||
_pdu_ready);
|
||||
|
||||
/* For dynamic channels, we always have credits to send that segment/PDU
|
||||
* as the channel is only marked "ready" when a PDU is segmented out
|
||||
* from an SDU. We only do that segmentation when we know we have at
|
||||
* least one credit (ie can send >= 1 PDU).
|
||||
*/
|
||||
|
||||
/* Leave the PDU buffer in the queue until we have sent all its
|
||||
* fragments.
|
||||
*/
|
||||
struct net_buf *pdu = k_fifo_peek_head(&lechan->_pdu_tx_queue);
|
||||
|
||||
__ASSERT(pdu, "signaled ready but no PDUs in the TX queue");
|
||||
|
||||
if (bt_buf_has_view(pdu)) {
|
||||
LOG_ERR("already have view on %p", pdu);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* We can't interleave ACL fragments from different channels for the
|
||||
* same ACL conn -> we have to wait until a full L2 PDU is transferred
|
||||
* before switching channels.
|
||||
*/
|
||||
bool last_frag = amount >= pdu->len;
|
||||
|
||||
if (last_frag) {
|
||||
LOG_DBG("last frag, removing %p", pdu);
|
||||
struct net_buf *b = k_fifo_get(&lechan->_pdu_tx_queue, K_NO_WAIT);
|
||||
|
||||
__ASSERT_NO_MSG(b == pdu);
|
||||
(void)b;
|
||||
|
||||
/* Lowering the "request to send" and raising it again allows
|
||||
* fair scheduling of channels on an ACL link: the channel is
|
||||
* marked as "ready to send" by adding a reference to it on a
|
||||
* FIFO on `conn`. Adding it again will send it to the back of
|
||||
* the queue.
|
||||
*
|
||||
* TODO: add a user-controlled QoS function.
|
||||
*/
|
||||
LOG_DBG("chan %p done", lechan);
|
||||
lower_data_ready(lechan);
|
||||
|
||||
/* Append channel to list if it still has data */
|
||||
if (chan_has_data(lechan)) {
|
||||
LOG_DBG("chan %p ready", lechan);
|
||||
raise_data_ready(lechan);
|
||||
}
|
||||
}
|
||||
|
||||
return pdu;
|
||||
}
|
||||
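The lower-then-raise dance above is what gives per-channel fairness: a channel that still has PDUs queued goes to the back of `conn->l2cap_data_ready`, so channels on the same ACL link alternate one whole PDU at a time. A throwaway C illustration of that rotation (names made up, not the Zephyr slist API):

#include <stdio.h>

/* Toy illustration of lower_data_ready() followed by raise_data_ready():
 * the head channel is removed and appended again, i.e. rotated to the back.
 */
#define NUM_CHANS 3

int main(void)
{
	int ready[NUM_CHANS] = {0, 1, 2};	/* channel ids, head of the list first */

	for (int round = 0; round < 2 * NUM_CHANS; round++) {
		int chan = ready[0];

		printf("send one PDU from channel %d\n", chan);

		/* lower_data_ready() + raise_data_ready(): rotate to the back */
		for (int i = 0; i < NUM_CHANS - 1; i++) {
			ready[i] = ready[i + 1];
		}
		ready[NUM_CHANS - 1] = chan;
	}

	return 0;
}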
|
||||
static void l2cap_send_reject(struct bt_conn *conn, uint8_t ident,
|
||||
|
@ -1016,7 +1159,7 @@ static void l2cap_chan_tx_process(struct k_work *work)
|
|||
|
||||
ch = CONTAINER_OF(k_work_delayable_from_work(work), struct bt_l2cap_le_chan, tx_work);
|
||||
|
||||
LOG_INF("%s: %p", __func__, ch);
|
||||
LOG_DBG("%p", ch);
|
||||
|
||||
if (bt_l2cap_chan_get_state(&ch->chan) != BT_L2CAP_CONNECTED) {
|
||||
LOG_DBG("Cannot send on non-connected channel");
|
||||
|
@ -1038,7 +1181,7 @@ static void l2cap_chan_tx_process(struct k_work *work)
|
|||
* will be restarted upon receiving credits and
|
||||
* when a segment buffer is freed.
|
||||
*/
|
||||
LOG_INF("out of credits/windows");
|
||||
LOG_DBG("out of credits/windows");
|
||||
|
||||
ch->tx_buf = buf;
|
||||
/* If we don't reschedule, and the app doesn't nudge l2cap (e.g. by
|
||||
|
@ -1064,6 +1207,7 @@ static void l2cap_chan_tx_init(struct bt_l2cap_le_chan *chan)
|
|||
(void)memset(&chan->tx, 0, sizeof(chan->tx));
|
||||
atomic_set(&chan->tx.credits, 0);
|
||||
k_fifo_init(&chan->tx_queue);
|
||||
k_fifo_init(&chan->_pdu_tx_queue);
|
||||
k_work_init_delayable(&chan->tx_work, l2cap_chan_tx_process);
|
||||
}
|
||||
|
||||
|
@ -1109,12 +1253,12 @@ static void l2cap_chan_destroy(struct bt_l2cap_chan *chan)
|
|||
le_chan->tx_buf = NULL;
|
||||
}
|
||||
|
||||
/* Remove buffers on the TX queue */
|
||||
/* Remove buffers on the SDU TX queue */
|
||||
while ((buf = net_buf_get(&le_chan->tx_queue, K_NO_WAIT))) {
|
||||
l2cap_tx_buf_destroy(chan->conn, buf, -ESHUTDOWN);
|
||||
}
|
||||
|
||||
/* Remove buffers on the RX queue */
|
||||
/* Remove buffers on the SDU RX queue */
|
||||
while ((buf = net_buf_get(&le_chan->rx_queue, K_NO_WAIT))) {
|
||||
net_buf_unref(buf);
|
||||
}
|
||||
|
@ -1924,8 +2068,13 @@ static void le_disconn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
|
|||
|
||||
static void l2cap_chan_tx_resume(struct bt_l2cap_le_chan *ch)
|
||||
{
|
||||
/* Resume sending PDUs when both conditions are met:
|
||||
* - we have at least one credit
|
||||
* - there is at least one SDU waiting to be sent
|
||||
*/
|
||||
if (!atomic_get(&ch->tx.credits) ||
|
||||
(k_fifo_is_empty(&ch->tx_queue) && !ch->tx_buf)) {
|
||||
LOG_DBG("ch %p idle", ch);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2046,10 +2195,10 @@ static int l2cap_chan_le_send_seg(struct bt_l2cap_le_chan *ch, struct net_buf *b
|
|||
* directly.
|
||||
*/
|
||||
if (buf->len == 0 || (buf == seg && buf->len == len)) {
|
||||
LOG_INF("last PDU");
|
||||
LOG_DBG("last PDU");
|
||||
cb = l2cap_chan_sdu_sent;
|
||||
} else {
|
||||
LOG_INF("send PDU left %u", buf->len);
|
||||
LOG_DBG("send PDU left %u", buf->len);
|
||||
cb = l2cap_chan_seg_sent;
|
||||
}
|
||||
|
||||
|
@ -2068,7 +2217,7 @@ static int l2cap_chan_le_send_seg(struct bt_l2cap_le_chan *ch, struct net_buf *b
|
|||
__ASSERT_NO_MSG(!err || err == -ENOTCONN);
|
||||
|
||||
if (err) {
|
||||
LOG_INF("Unable to send seg %d", err);
|
||||
LOG_DBG("Unable to send seg %d", err);
|
||||
atomic_inc(&ch->tx.credits);
|
||||
|
||||
/* The host takes ownership of the reference in seg when
|
||||
|
|
|
@@ -252,3 +252,6 @@ void bt_l2cap_register_ecred_cb(const struct bt_l2cap_ecred_cb *cb);

/* Returns a server if it exists for given psm. */
struct bt_l2cap_server *bt_l2cap_server_lookup_psm(uint16_t psm);
+
+/* Pull data from the L2CAP layer */
+struct net_buf *l2cap_data_pull(struct bt_conn *conn, size_t amount);
@@ -153,7 +153,8 @@ bool ll_data_path_sink_create(uint16_t handle, struct ll_iso_datapath *datapath,

#define BUF_ALLOC_TIMEOUT_MS (30) /* milliseconds */
NET_BUF_POOL_FIXED_DEFINE(tx_pool, CONFIG_BT_ISO_TX_BUF_COUNT,
-			  BT_ISO_SDU_BUF_SIZE(CONFIG_BT_ISO_TX_MTU), 8, NULL);
+			  BT_ISO_SDU_BUF_SIZE(CONFIG_BT_ISO_TX_MTU),
+			  CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);

static struct k_work_delayable iso_send_work;