/* l2cap.c - L2CAP handling */

/*
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr.h>
#include <string.h>
#include <errno.h>
#include <sys/atomic.h>
#include <sys/byteorder.h>
#include <sys/util.h>

#include <bluetooth/hci.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/conn.h>
#include <bluetooth/l2cap.h>

#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_L2CAP)
#define LOG_MODULE_NAME bt_l2cap
#include "common/log.h"

#include "hci_core.h"
#include "conn_internal.h"
#include "l2cap_internal.h"
#include "keys.h"

#define LE_CHAN_RTX(_w) CONTAINER_OF(_w, struct bt_l2cap_le_chan, chan.rtx_work)
#define CHAN_RX(_w) CONTAINER_OF(_w, struct bt_l2cap_le_chan, rx_work)

#define L2CAP_LE_MIN_MTU		23
#define L2CAP_ECRED_MIN_MTU		64

#define L2CAP_LE_MAX_CREDITS		(CONFIG_BT_BUF_ACL_RX_COUNT - 1)

#define L2CAP_LE_CID_DYN_START	0x0040
#define L2CAP_LE_CID_DYN_END	0x007f
#define L2CAP_LE_CID_IS_DYN(_cid) \
	(_cid >= L2CAP_LE_CID_DYN_START && _cid <= L2CAP_LE_CID_DYN_END)

#define L2CAP_LE_PSM_FIXED_START 0x0001
#define L2CAP_LE_PSM_FIXED_END   0x007f
#define L2CAP_LE_PSM_DYN_START   0x0080
#define L2CAP_LE_PSM_DYN_END     0x00ff
#define L2CAP_LE_PSM_IS_DYN(_psm) \
	(_psm >= L2CAP_LE_PSM_DYN_START && _psm <= L2CAP_LE_PSM_DYN_END)

#define L2CAP_CONN_TIMEOUT	K_SECONDS(40)
#define L2CAP_DISC_TIMEOUT	K_SECONDS(2)
#define L2CAP_RTX_TIMEOUT	K_SECONDS(2)

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
/* Dedicated pool for disconnect buffers so they are guaranteed to be sent
 * even in case of data congestion due to flooding.
 */
NET_BUF_POOL_FIXED_DEFINE(disc_pool, 1,
			  BT_L2CAP_BUF_SIZE(
				sizeof(struct bt_l2cap_sig_hdr) +
				sizeof(struct bt_l2cap_disconn_req)),
			  NULL);

#define L2CAP_ECRED_CHAN_MAX	5

#define l2cap_lookup_ident(conn, ident) __l2cap_lookup_ident(conn, ident, false)
#define l2cap_remove_ident(conn, ident) __l2cap_lookup_ident(conn, ident, true)

struct data_sent {
	uint16_t len;
};

#define data_sent(buf) ((struct data_sent *)net_buf_user_data(buf))

static sys_slist_t servers;
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

/* L2CAP signalling channel specific context */
struct bt_l2cap {
	/* The channel this context is associated with */
	struct bt_l2cap_le_chan chan;
};

static struct bt_l2cap bt_l2cap_pool[CONFIG_BT_MAX_CONN];

static uint8_t get_ident(void)
{
	static uint8_t ident;

	ident++;
	/* handle integer overflow (0 is not valid) */
	if (!ident) {
		ident++;
	}

	return ident;
}

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
static struct bt_l2cap_le_chan *l2cap_chan_alloc_cid(struct bt_conn *conn,
						     struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
	uint16_t cid;

	/*
	 * No action needed if there's already a CID allocated, e.g. in
	 * the case of a fixed channel.
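	 * Otherwise, scan the LE dynamic CID range below and claim the
	 * first CID not already used by another channel on this conn.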
	 */
	if (ch->rx.cid > 0) {
		return ch;
	}

	for (cid = L2CAP_LE_CID_DYN_START; cid <= L2CAP_LE_CID_DYN_END; cid++) {
		if (!bt_l2cap_le_lookup_rx_cid(conn, cid)) {
			ch->rx.cid = cid;
			return ch;
		}
	}

	return NULL;
}

static struct bt_l2cap_le_chan *
__l2cap_lookup_ident(struct bt_conn *conn, uint16_t ident, bool remove)
{
	struct bt_l2cap_chan *chan;
	sys_snode_t *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (chan->ident == ident) {
			if (remove) {
				sys_slist_remove(&conn->channels, prev,
						 &chan->node);
			}
			return BT_L2CAP_LE_CHAN(chan);
		}

		prev = &chan->node;
	}

	return NULL;
}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

void bt_l2cap_chan_remove(struct bt_conn *conn, struct bt_l2cap_chan *ch)
{
	struct bt_l2cap_chan *chan;
	sys_snode_t *prev = NULL;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (chan == ch) {
			sys_slist_remove(&conn->channels, prev, &chan->node);
			return;
		}

		prev = &chan->node;
	}
}

const char *bt_l2cap_chan_state_str(bt_l2cap_chan_state_t state)
{
	switch (state) {
	case BT_L2CAP_DISCONNECTED:
		return "disconnected";
	case BT_L2CAP_CONNECT:
		return "connect";
	case BT_L2CAP_CONFIG:
		return "config";
	case BT_L2CAP_CONNECTED:
		return "connected";
	case BT_L2CAP_DISCONNECT:
		return "disconnect";
	default:
		return "unknown";
	}
}

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
#if defined(CONFIG_BT_DEBUG_L2CAP)
void bt_l2cap_chan_set_state_debug(struct bt_l2cap_chan *chan,
				   bt_l2cap_chan_state_t state,
				   const char *func, int line)
{
	BT_DBG("chan %p psm 0x%04x %s -> %s", chan, chan->psm,
	       bt_l2cap_chan_state_str(chan->state),
	       bt_l2cap_chan_state_str(state));

	/* validate the state transition */
	switch (state) {
	case BT_L2CAP_DISCONNECTED:
		/* this state is allowed regardless of the old state */
		break;
	case BT_L2CAP_CONNECT:
		if (chan->state != BT_L2CAP_DISCONNECTED) {
			BT_WARN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_CONFIG:
		if (chan->state != BT_L2CAP_CONNECT) {
			BT_WARN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_CONNECTED:
		if (chan->state != BT_L2CAP_CONFIG &&
		    chan->state != BT_L2CAP_CONNECT) {
			BT_WARN("%s()%d: invalid transition", func, line);
		}
		break;
	case BT_L2CAP_DISCONNECT:
		if (chan->state != BT_L2CAP_CONFIG &&
		    chan->state != BT_L2CAP_CONNECTED) {
			BT_WARN("%s()%d: invalid transition", func, line);
		}
		break;
	default:
		BT_ERR("%s()%d: unknown (%u) state was set", func, line, state);
		return;
	}

	chan->state = state;
}
#else
void bt_l2cap_chan_set_state(struct bt_l2cap_chan *chan,
			     bt_l2cap_chan_state_t state)
{
	chan->state = state;
}
#endif /* CONFIG_BT_DEBUG_L2CAP */
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

void bt_l2cap_chan_del(struct bt_l2cap_chan *chan)
{
	const struct bt_l2cap_chan_ops *ops = chan->ops;

	BT_DBG("conn %p chan %p", chan->conn, chan);

	if (!chan->conn) {
		goto destroy;
	}

	if (ops->disconnected) {
		ops->disconnected(chan);
	}

	chan->conn = NULL;

destroy:
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Reset internal members of common channel */
	bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECTED);
	chan->psm = 0U;
#endif
	if (chan->destroy) {
		chan->destroy(chan);
	}

	if (ops->released) {
		ops->released(chan);
	}
}

static void l2cap_rtx_timeout(struct k_work *work)
{
	struct bt_l2cap_le_chan *chan = LE_CHAN_RTX(work);
	struct bt_conn *conn = chan->chan.conn;

	BT_ERR("chan %p timeout", chan);

	bt_l2cap_chan_remove(conn, &chan->chan);
	bt_l2cap_chan_del(&chan->chan);

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	/* Remove other channels if pending on the same ident */
	while ((chan = l2cap_remove_ident(conn, chan->chan.ident))) {
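		/* Channels opened by one ecred request share the ident, so
		 * all of them time out together.
		 */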
		bt_l2cap_chan_del(&chan->chan);
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
}

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
			       struct net_buf *buf);

static void l2cap_rx_process(struct k_work *work)
{
	struct bt_l2cap_le_chan *ch = CHAN_RX(work);
	struct net_buf *buf;

	while ((buf = net_buf_get(&ch->rx_queue, K_NO_WAIT))) {
		BT_DBG("ch %p buf %p", ch, buf);
		l2cap_chan_le_recv(ch, buf);
		net_buf_unref(buf);
	}
}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

void bt_l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
		       bt_l2cap_chan_destroy_t destroy)
{
	/* Attach channel to the connection */
	sys_slist_append(&conn->channels, &chan->node);
	chan->conn = conn;
	chan->destroy = destroy;

	BT_DBG("conn %p chan %p", conn, chan);
}

static bool l2cap_chan_add(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			   bt_l2cap_chan_destroy_t destroy)
{
	struct bt_l2cap_le_chan *ch;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	ch = l2cap_chan_alloc_cid(conn, chan);
#else
	ch = BT_L2CAP_LE_CHAN(chan);
#endif

	if (!ch) {
		BT_ERR("Unable to allocate L2CAP channel ID");
		return false;
	}

	/* All dynamic channels have the destroy handler which makes sure that
	 * the RTX work structure is properly released with a cancel sync.
	 * The fixed signal channel is only removed when disconnected and the
	 * disconnected handler is always called from the workqueue itself so
	 * canceling from there should always succeed.
	 */
	k_work_init_delayable(&chan->rtx_work, l2cap_rtx_timeout);
	atomic_clear(chan->status);

	bt_l2cap_chan_add(conn, chan, destroy);

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	if (L2CAP_LE_CID_IS_DYN(ch->rx.cid)) {
		k_work_init(&ch->rx_work, l2cap_rx_process);
		k_fifo_init(&ch->rx_queue);
		bt_l2cap_chan_set_state(chan, BT_L2CAP_CONNECT);
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

	return true;
}

void bt_l2cap_connected(struct bt_conn *conn)
{
	struct bt_l2cap_chan *chan;

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_connected(conn);
		return;
	}

	STRUCT_SECTION_FOREACH(bt_l2cap_fixed_chan, fchan) {
		struct bt_l2cap_le_chan *ch;

		if (fchan->accept(conn, &chan) < 0) {
			continue;
		}

		ch = BT_L2CAP_LE_CHAN(chan);

		/* Fill up remaining fixed channel context attached in
		 * fchan->accept()
		 */
		ch->rx.cid = fchan->cid;
		ch->tx.cid = fchan->cid;

		if (!l2cap_chan_add(conn, chan, fchan->destroy)) {
			return;
		}

		if (chan->ops->connected) {
			chan->ops->connected(chan);
		}

		/* Always set output status for fixed channels */
		atomic_set_bit(chan->status, BT_L2CAP_STATUS_OUT);

		if (chan->ops->status) {
			chan->ops->status(chan, chan->status);
		}
	}
}

void bt_l2cap_disconnected(struct bt_conn *conn)
{
	struct bt_l2cap_chan *chan, *next;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
		bt_l2cap_chan_del(chan);
	}
}

static struct net_buf *l2cap_create_le_sig_pdu(struct net_buf *buf,
					       uint8_t code, uint8_t ident,
					       uint16_t len)
{
	struct bt_l2cap_sig_hdr *hdr;
	struct net_buf_pool *pool = NULL;

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	if (code == BT_L2CAP_DISCONN_REQ) {
		pool = &disc_pool;
	}
#endif
	/* Don't wait more than the minimum RTX timeout of 2 seconds */
	buf = bt_l2cap_create_pdu_timeout(pool, 0, L2CAP_RTX_TIMEOUT);
	if (!buf) {
		/* If it was not possible to allocate a buffer within the
		 * timeout, return NULL.
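		 * Callers treat a NULL return as an allocation failure and
		 * abort the signaling operation.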
*/ BT_ERR("Unable to allocate buffer for op 0x%02x", code); return NULL; } hdr = net_buf_add(buf, sizeof(*hdr)); hdr->code = code; hdr->ident = ident; hdr->len = sys_cpu_to_le16(len); return buf; } /* Send the buffer and release it in case of failure. * Any other cleanup in failure to send should be handled by the disconnected * handler. */ static inline void l2cap_send(struct bt_conn *conn, uint16_t cid, struct net_buf *buf) { if (bt_l2cap_send(conn, cid, buf)) { net_buf_unref(buf); } } #if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL) static void l2cap_chan_send_req(struct bt_l2cap_chan *chan, struct net_buf *buf, k_timeout_t timeout) { if (bt_l2cap_send(chan->conn, BT_L2CAP_CID_LE_SIG, buf)) { net_buf_unref(buf); return; } /* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part A] page 126: * * The value of this timer is implementation-dependent but the minimum * initial value is 1 second and the maximum initial value is 60 * seconds. One RTX timer shall exist for each outstanding signaling * request, including each Echo Request. The timer disappears on the * final expiration, when the response is received, or the physical * link is lost. */ k_work_reschedule(&chan->rtx_work, timeout); } static int l2cap_le_conn_req(struct bt_l2cap_le_chan *ch) { struct net_buf *buf; struct bt_l2cap_le_conn_req *req; ch->chan.ident = get_ident(); buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_LE_CONN_REQ, ch->chan.ident, sizeof(*req)); if (!buf) { return -ENOMEM; } req = net_buf_add(buf, sizeof(*req)); req->psm = sys_cpu_to_le16(ch->chan.psm); req->scid = sys_cpu_to_le16(ch->rx.cid); req->mtu = sys_cpu_to_le16(ch->rx.mtu); req->mps = sys_cpu_to_le16(ch->rx.mps); req->credits = sys_cpu_to_le16(ch->rx.init_credits); l2cap_chan_send_req(&ch->chan, buf, L2CAP_CONN_TIMEOUT); return 0; } #if defined(CONFIG_BT_L2CAP_ECRED) static int l2cap_ecred_conn_req(struct bt_l2cap_chan **chan, int channels) { struct net_buf *buf; struct bt_l2cap_ecred_conn_req *req; struct bt_l2cap_le_chan *ch; int i; uint8_t ident; if (!chan || !channels) { return -EINVAL; } ident = get_ident(); buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_ECRED_CONN_REQ, ident, sizeof(*req) + (channels * sizeof(uint16_t))); req = net_buf_add(buf, sizeof(*req)); ch = BT_L2CAP_LE_CHAN(chan[0]); /* Init common parameters */ req->psm = sys_cpu_to_le16(ch->chan.psm); req->mtu = sys_cpu_to_le16(ch->rx.mtu); req->mps = sys_cpu_to_le16(ch->rx.mps); req->credits = sys_cpu_to_le16(ch->rx.init_credits); for (i = 0; i < channels; i++) { ch = BT_L2CAP_LE_CHAN(chan[i]); ch->chan.ident = ident; net_buf_add_le16(buf, ch->rx.cid); } l2cap_chan_send_req(*chan, buf, L2CAP_CONN_TIMEOUT); return 0; } #endif /* defined(CONFIG_BT_L2CAP_ECRED) */ static void l2cap_le_encrypt_change(struct bt_l2cap_chan *chan, uint8_t status) { int err; /* Skip channels that are not pending waiting for encryption */ if (!atomic_test_and_clear_bit(chan->status, BT_L2CAP_STATUS_ENCRYPT_PENDING)) { return; } if (status) { goto fail; } #if defined(CONFIG_BT_L2CAP_ECRED) if (chan->ident) { struct bt_l2cap_chan *echan[L2CAP_ECRED_CHAN_MAX]; struct bt_l2cap_le_chan *ch; int i = 0; while ((ch = l2cap_remove_ident(chan->conn, chan->ident))) { echan[i++] = &ch->chan; } /* Retry ecred connect */ l2cap_ecred_conn_req(echan, i); return; } #endif /* defined(CONFIG_BT_L2CAP_ECRED) */ /* Retry to connect */ err = l2cap_le_conn_req(BT_L2CAP_LE_CHAN(chan)); if (err) { goto fail; } return; fail: bt_l2cap_chan_remove(chan->conn, chan); bt_l2cap_chan_del(chan); } #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */ void 
void bt_l2cap_security_changed(struct bt_conn *conn, uint8_t hci_status)
{
	struct bt_l2cap_chan *chan, *next;

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		l2cap_br_encrypt_change(conn, hci_status);
		return;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->channels, chan, next, node) {
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
		l2cap_le_encrypt_change(chan, hci_status);
#endif
		if (chan->ops->encrypt_change) {
			chan->ops->encrypt_change(chan, hci_status);
		}
	}
}

struct net_buf *bt_l2cap_create_pdu_timeout(struct net_buf_pool *pool,
					    size_t reserve,
					    k_timeout_t timeout)
{
	return bt_conn_create_pdu_timeout(pool,
					  sizeof(struct bt_l2cap_hdr) + reserve,
					  timeout);
}

int bt_l2cap_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
		     bt_conn_tx_cb_t cb, void *user_data)
{
	struct bt_l2cap_hdr *hdr;

	BT_DBG("conn %p cid %u len %zu", conn, cid, net_buf_frags_len(buf));

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));
	hdr->cid = sys_cpu_to_le16(cid);

	return bt_conn_send_cb(conn, buf, cb, user_data);
}

static void l2cap_send_reject(struct bt_conn *conn, uint8_t ident,
			      uint16_t reason, void *data, uint8_t data_len)
{
	struct bt_l2cap_cmd_reject *rej;
	struct net_buf *buf;

	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_CMD_REJECT, ident,
				      sizeof(*rej) + data_len);
	if (!buf) {
		return;
	}

	rej = net_buf_add(buf, sizeof(*rej));
	rej->reason = sys_cpu_to_le16(reason);

	if (data) {
		net_buf_add_mem(buf, data, data_len);
	}

	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
}

static void le_conn_param_rsp(struct bt_l2cap *l2cap, struct net_buf *buf)
{
	struct bt_l2cap_conn_param_rsp *rsp = (void *)buf->data;

	if (buf->len < sizeof(*rsp)) {
		BT_ERR("Too small LE conn param rsp");
		return;
	}

	BT_DBG("LE conn param rsp result %u", sys_le16_to_cpu(rsp->result));
}

static void le_conn_param_update_req(struct bt_l2cap *l2cap, uint8_t ident,
				     struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_le_conn_param param;
	struct bt_l2cap_conn_param_rsp *rsp;
	struct bt_l2cap_conn_param_req *req = (void *)buf->data;
	bool accepted;

	if (buf->len < sizeof(*req)) {
		BT_ERR("Too small LE conn update param req");
		return;
	}

	if (conn->role != BT_HCI_ROLE_CENTRAL) {
		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_NOT_UNDERSTOOD,
				  NULL, 0);
		return;
	}

	param.interval_min = sys_le16_to_cpu(req->min_interval);
	param.interval_max = sys_le16_to_cpu(req->max_interval);
	param.latency = sys_le16_to_cpu(req->latency);
	param.timeout = sys_le16_to_cpu(req->timeout);

	BT_DBG("min 0x%04x max 0x%04x latency: 0x%04x timeout: 0x%04x",
	       param.interval_min, param.interval_max, param.latency,
	       param.timeout);

	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_CONN_PARAM_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	accepted = le_param_req(conn, &param);

	rsp = net_buf_add(buf, sizeof(*rsp));
	if (accepted) {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_ACCEPTED);
	} else {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_CONN_PARAM_REJECTED);
	}

	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);

	if (accepted) {
		bt_conn_le_conn_update(conn, &param);
	}
}

struct bt_l2cap_chan *bt_l2cap_le_lookup_tx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *chan;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (BT_L2CAP_LE_CHAN(chan)->tx.cid == cid) {
			return chan;
		}
	}

	return NULL;
}

struct bt_l2cap_chan *bt_l2cap_le_lookup_rx_cid(struct bt_conn *conn,
						uint16_t cid)
{
	struct bt_l2cap_chan *chan;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (BT_L2CAP_LE_CHAN(chan)->rx.cid == cid) {
			return chan;
		}
	}
	return NULL;
}

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
static struct bt_l2cap_server *l2cap_server_lookup_psm(uint16_t psm)
{
	struct bt_l2cap_server *server;

	SYS_SLIST_FOR_EACH_CONTAINER(&servers, server, node) {
		if (server->psm == psm) {
			return server;
		}
	}

	return NULL;
}

int bt_l2cap_server_register(struct bt_l2cap_server *server)
{
	if (!server->accept) {
		return -EINVAL;
	}

	if (server->psm) {
		if (server->psm < L2CAP_LE_PSM_FIXED_START ||
		    server->psm > L2CAP_LE_PSM_DYN_END) {
			return -EINVAL;
		}

		/* Check if given PSM is already in use */
		if (l2cap_server_lookup_psm(server->psm)) {
			BT_DBG("PSM already registered");
			return -EADDRINUSE;
		}
	} else {
		uint16_t psm;

		for (psm = L2CAP_LE_PSM_DYN_START;
		     psm <= L2CAP_LE_PSM_DYN_END; psm++) {
			if (!l2cap_server_lookup_psm(psm)) {
				break;
			}
		}

		if (psm > L2CAP_LE_PSM_DYN_END) {
			BT_WARN("No free dynamic PSMs available");
			return -EADDRNOTAVAIL;
		}

		BT_DBG("Allocated PSM 0x%04x for new server", psm);
		server->psm = psm;
	}

	if (server->sec_level > BT_SECURITY_L4) {
		return -EINVAL;
	} else if (server->sec_level < BT_SECURITY_L1) {
		/* Level 0 is only applicable for BR/EDR */
		server->sec_level = BT_SECURITY_L1;
	}

	BT_DBG("PSM 0x%04x", server->psm);

	sys_slist_append(&servers, &server->node);

	return 0;
}

static void l2cap_chan_rx_init(struct bt_l2cap_le_chan *chan)
{
	BT_DBG("chan %p", chan);

	/* Use existing MTU if defined */
	if (!chan->rx.mtu) {
		/* If the application has not provided the incoming L2CAP SDU
		 * MTU, use an MTU that does not require segmentation.
		 */
		chan->rx.mtu = BT_L2CAP_SDU_RX_MTU;
	}

	/* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE as the
	 * remaining bytes cannot be used.
	 */
	chan->rx.mps = MIN(chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE,
			   BT_L2CAP_RX_MTU);

	/* Truncate MTU if the channel has disabled segmentation but still
	 * has set an MTU which requires it.
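	 * Without an alloc_buf callback the channel cannot reassemble
	 * segments, so a single PDU must be able to carry the whole SDU.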
	 */
	if (!chan->chan.ops->alloc_buf &&
	    (chan->rx.mps < chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE)) {
		BT_WARN("Segmentation disabled but MTU > MPS, truncating MTU");
		chan->rx.mtu = chan->rx.mps - BT_L2CAP_SDU_HDR_SIZE;
	}

	/* Use existing credits if defined */
	if (!chan->rx.init_credits) {
		if (chan->chan.ops->alloc_buf) {
			/* Auto tune credits to receive a full packet */
			chan->rx.init_credits =
				ceiling_fraction(chan->rx.mtu,
						 BT_L2CAP_RX_MTU);
		} else {
			chan->rx.init_credits = L2CAP_LE_MAX_CREDITS;
		}
	}

	atomic_set(&chan->rx.credits, 0);

	if (BT_DBG_ENABLED &&
	    chan->rx.init_credits * chan->rx.mps <
	    chan->rx.mtu + BT_L2CAP_SDU_HDR_SIZE) {
		BT_WARN("Not enough credits for a full packet");
	}
}

static struct net_buf *l2cap_chan_le_get_tx_buf(struct bt_l2cap_le_chan *ch)
{
	struct net_buf *buf;

	/* Return current buffer */
	if (ch->tx_buf) {
		buf = ch->tx_buf;
		ch->tx_buf = NULL;
		return buf;
	}

	return net_buf_get(&ch->tx_queue, K_NO_WAIT);
}

static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
				  struct net_buf **buf, uint16_t sent);

static void l2cap_chan_tx_process(struct k_work *work)
{
	struct bt_l2cap_le_chan *ch;
	struct net_buf *buf;

	ch = CONTAINER_OF(work, struct bt_l2cap_le_chan, tx_work);

	/* Resume tx in case there are buffers in the queue */
	while ((buf = l2cap_chan_le_get_tx_buf(ch))) {
		int sent = data_sent(buf)->len;

		BT_DBG("buf %p sent %u", buf, sent);

		sent = l2cap_chan_le_send_sdu(ch, &buf, sent);
		if (sent < 0) {
			if (sent == -EAGAIN) {
				ch->tx_buf = buf;
			} else {
				net_buf_unref(buf);
			}
			break;
		}
	}
}

static void l2cap_chan_tx_init(struct bt_l2cap_le_chan *chan)
{
	BT_DBG("chan %p", chan);

	(void)memset(&chan->tx, 0, sizeof(chan->tx));
	atomic_set(&chan->tx.credits, 0);
	k_fifo_init(&chan->tx_queue);
	k_work_init(&chan->tx_work, l2cap_chan_tx_process);
}

static void l2cap_chan_tx_give_credits(struct bt_l2cap_le_chan *chan,
				       uint16_t credits)
{
	BT_DBG("chan %p credits %u", chan, credits);

	atomic_add(&chan->tx.credits, credits);

	if (!atomic_test_and_set_bit(chan->chan.status, BT_L2CAP_STATUS_OUT) &&
	    chan->chan.ops->status) {
		chan->chan.ops->status(&chan->chan, chan->chan.status);
	}
}

static void l2cap_chan_rx_give_credits(struct bt_l2cap_le_chan *chan,
				       uint16_t credits)
{
	BT_DBG("chan %p credits %u", chan, credits);

	atomic_add(&chan->rx.credits, credits);
}

static void l2cap_chan_destroy(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
	struct net_buf *buf;

	BT_DBG("chan %p cid 0x%04x", ch, ch->rx.cid);

	/* Cancel ongoing work. Since the channel can be re-used after this
	 * we need to sync to make sure that the kernel does not have it
	 * in its queue anymore.
	 */
	k_work_cancel_delayable_sync(&chan->rtx_work, &chan->rtx_sync);

	if (ch->tx_buf) {
		net_buf_unref(ch->tx_buf);
		ch->tx_buf = NULL;
	}

	/* Remove buffers on the TX queue */
	while ((buf = net_buf_get(&ch->tx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Remove buffers on the RX queue */
	while ((buf = net_buf_get(&ch->rx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Destroy segmented SDU if it exists */
	if (ch->_sdu) {
		net_buf_unref(ch->_sdu);
		ch->_sdu = NULL;
		ch->_sdu_len = 0U;
	}
}

static uint16_t le_err_to_result(int err)
{
	switch (err) {
	case -ENOMEM:
		return BT_L2CAP_LE_ERR_NO_RESOURCES;
	case -EACCES:
		return BT_L2CAP_LE_ERR_AUTHORIZATION;
	case -EPERM:
		return BT_L2CAP_LE_ERR_KEY_SIZE;
	case -ENOTSUP:
		/* This handles the cases where a fixed channel is registered
		 * but for some reason (e.g. controller not supporting a
		 * feature) cannot be used.
		 */
		return BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
	default:
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}
}

static uint16_t l2cap_chan_accept(struct bt_conn *conn,
				  struct bt_l2cap_server *server, uint16_t scid,
				  uint16_t mtu, uint16_t mps, uint16_t credits,
				  struct bt_l2cap_chan **chan)
{
	struct bt_l2cap_le_chan *ch;
	int err;

	BT_DBG("conn %p scid 0x%04x chan %p", conn, scid, chan);

	if (!L2CAP_LE_CID_IS_DYN(scid)) {
		return BT_L2CAP_LE_ERR_INVALID_SCID;
	}

	*chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
	if (*chan) {
		return BT_L2CAP_LE_ERR_SCID_IN_USE;
	}

	/* Request server to accept the new connection and allocate the
	 * channel.
	 */
	err = server->accept(conn, chan);
	if (err < 0) {
		return le_err_to_result(err);
	}

	if (!(*chan)->ops->recv) {
		BT_ERR("Mandatory callback 'recv' missing");
		return BT_L2CAP_LE_ERR_UNACCEPT_PARAMS;
	}

	(*chan)->required_sec_level = server->sec_level;

	if (!l2cap_chan_add(conn, *chan, l2cap_chan_destroy)) {
		return BT_L2CAP_LE_ERR_NO_RESOURCES;
	}

	ch = BT_L2CAP_LE_CHAN(*chan);

	/* Init TX parameters */
	l2cap_chan_tx_init(ch);
	ch->tx.cid = scid;
	ch->tx.mps = mps;
	ch->tx.mtu = mtu;
	ch->tx.init_credits = credits;
	l2cap_chan_tx_give_credits(ch, credits);

	/* Init RX parameters */
	l2cap_chan_rx_init(ch);
	l2cap_chan_rx_give_credits(ch, ch->rx.init_credits);

	/* Set channel PSM */
	(*chan)->psm = server->psm;

	/* Update state */
	bt_l2cap_chan_set_state(*chan, BT_L2CAP_CONNECTED);

	if ((*chan)->ops->connected) {
		(*chan)->ops->connected(*chan);
	}

	return BT_L2CAP_LE_SUCCESS;
}

static uint16_t l2cap_check_security(struct bt_conn *conn,
				     struct bt_l2cap_server *server)
{
	const struct bt_keys *keys = bt_keys_find_addr(conn->id, &conn->le.dst);
	bool ltk_present;

	if (IS_ENABLED(CONFIG_BT_CONN_DISABLE_SECURITY)) {
		return BT_L2CAP_LE_SUCCESS;
	}

	if (conn->sec_level >= server->sec_level) {
		return BT_L2CAP_LE_SUCCESS;
	}

	if (conn->sec_level > BT_SECURITY_L1) {
		return BT_L2CAP_LE_ERR_AUTHENTICATION;
	}

	if (keys) {
		if (conn->role == BT_HCI_ROLE_CENTRAL) {
			ltk_present = keys->keys &
				      (BT_KEYS_LTK_P256 | BT_KEYS_PERIPH_LTK);
		} else {
			ltk_present = keys->keys &
				      (BT_KEYS_LTK_P256 | BT_KEYS_LTK);
		}
	} else {
		ltk_present = false;
	}

	/* If an LTK or an STK is available and encryption is required
	 * (LE security mode 1) but encryption is not enabled, the
	 * service request shall be rejected with the error code
	 * "Insufficient Encryption".
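	 * Without such a key the request is instead rejected with
	 * "Insufficient Authentication", since pairing is needed first.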
	 */
	if (ltk_present) {
		return BT_L2CAP_LE_ERR_ENCRYPTION;
	}

	return BT_L2CAP_LE_ERR_AUTHENTICATION;
}

static void le_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_le_chan *ch;
	struct bt_l2cap_server *server;
	struct bt_l2cap_le_conn_req *req = (void *)buf->data;
	struct bt_l2cap_le_conn_rsp *rsp;
	uint16_t psm, scid, mtu, mps, credits;
	uint16_t result;

	if (buf->len < sizeof(*req)) {
		BT_ERR("Too small LE conn req packet size");
		return;
	}

	psm = sys_le16_to_cpu(req->psm);
	scid = sys_le16_to_cpu(req->scid);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	BT_DBG("psm 0x%02x scid 0x%04x mtu %u mps %u credits %u", psm, scid,
	       mtu, mps, credits);

	if (mtu < L2CAP_LE_MIN_MTU || mps < L2CAP_LE_MIN_MTU) {
		BT_ERR("Invalid LE-Conn Req params");
		return;
	}

	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_LE_CONN_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));

	/* Check if there is a server registered */
	server = l2cap_server_lookup_psm(psm);
	if (!server) {
		rsp->result = sys_cpu_to_le16(BT_L2CAP_LE_ERR_PSM_NOT_SUPP);
		goto rsp;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		rsp->result = sys_cpu_to_le16(result);
		goto rsp;
	}

	result = l2cap_chan_accept(conn, server, scid, mtu, mps, credits,
				   &chan);
	if (result != BT_L2CAP_LE_SUCCESS) {
		rsp->result = sys_cpu_to_le16(result);
		goto rsp;
	}

	ch = BT_L2CAP_LE_CHAN(chan);

	/* Prepare response protocol data */
	rsp->dcid = sys_cpu_to_le16(ch->rx.cid);
	rsp->mps = sys_cpu_to_le16(ch->rx.mps);
	rsp->mtu = sys_cpu_to_le16(ch->rx.mtu);
	rsp->credits = sys_cpu_to_le16(ch->rx.init_credits);
	rsp->result = BT_L2CAP_LE_SUCCESS;

rsp:
	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
}

#if defined(CONFIG_BT_L2CAP_ECRED)
static void le_ecred_conn_req(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan[L2CAP_ECRED_CHAN_MAX];
	struct bt_l2cap_le_chan *ch = NULL;
	struct bt_l2cap_server *server;
	struct bt_l2cap_ecred_conn_req *req;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t psm, mtu, mps, credits, result = BT_L2CAP_LE_SUCCESS;
	uint16_t scid, dcid[L2CAP_ECRED_CHAN_MAX];
	int i = 0;
	uint8_t req_cid_count;

	/* set dcid to zeros here, in case of all connections refused error */
	memset(dcid, 0, sizeof(dcid));

	if (buf->len < sizeof(*req)) {
		BT_ERR("Too small LE ecred conn req packet size");
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		req_cid_count = 0;
		goto response;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));
	req_cid_count = buf->len / sizeof(scid);

	if (buf->len > sizeof(dcid)) {
		BT_ERR("Too large LE ecred conn req packet size");
		req_cid_count = L2CAP_ECRED_CHAN_MAX;
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	psm = sys_le16_to_cpu(req->psm);
	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);
	credits = sys_le16_to_cpu(req->credits);

	BT_DBG("psm 0x%02x mtu %u mps %u credits %u", psm, mtu, mps, credits);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MTU) {
		BT_ERR("Invalid ecred conn req params");
		result = BT_L2CAP_LE_ERR_INVALID_PARAMS;
		goto response;
	}

	/* Check if there is a server registered */
	server = l2cap_server_lookup_psm(psm);
	if (!server) {
		result = BT_L2CAP_LE_ERR_PSM_NOT_SUPP;
		goto response;
	}

	/* Check if connection has minimum required security level */
	result = l2cap_check_security(conn, server);
	if (result != BT_L2CAP_LE_SUCCESS) {
		goto response;
	}

	while (buf->len >= sizeof(scid)) {
		uint16_t rc;

		scid = net_buf_pull_le16(buf);

		rc = l2cap_chan_accept(conn, server, scid, mtu, mps,
				       credits, &chan[i]);
		if (rc != BT_L2CAP_LE_SUCCESS) {
			result = rc;
		}

		switch (rc) {
		case BT_L2CAP_LE_SUCCESS:
			ch = BT_L2CAP_LE_CHAN(chan[i]);
			dcid[i++] = sys_cpu_to_le16(ch->rx.cid);
			continue;
		/* Some connections refused – invalid Source CID */
		case BT_L2CAP_LE_ERR_INVALID_SCID:
		/* Some connections refused – Source CID already allocated */
		case BT_L2CAP_LE_ERR_SCID_IN_USE:
		/* Some connections refused – not enough resources
		 * available.
		 */
		case BT_L2CAP_LE_ERR_NO_RESOURCES:
		default:
			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			dcid[i++] = 0x0000;
			continue;
		}
	}

response:
	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_ECRED_CONN_RSP, ident,
				      sizeof(*rsp) +
				      (sizeof(scid) * req_cid_count));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	(void)memset(rsp, 0, sizeof(*rsp));

	if (ch) {
		rsp->mps = sys_cpu_to_le16(ch->rx.mps);
		rsp->mtu = sys_cpu_to_le16(ch->rx.mtu);
		rsp->credits = sys_cpu_to_le16(ch->rx.init_credits);
	}

	rsp->result = sys_cpu_to_le16(result);

	net_buf_add_mem(buf, dcid, sizeof(scid) * req_cid_count);

	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
}

static void le_ecred_reconf_req(struct bt_l2cap *l2cap, uint8_t ident,
				struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chans[L2CAP_ECRED_CHAN_MAX];
	struct bt_l2cap_ecred_reconf_req *req;
	struct bt_l2cap_ecred_reconf_rsp *rsp;
	uint16_t mtu, mps;
	uint16_t scid, result = BT_L2CAP_RECONF_SUCCESS;
	int chan_count = 0;
	bool mps_reduced = false;

	if (buf->len < sizeof(*req)) {
		BT_ERR("Too small ecred reconf req packet size");
		return;
	}

	req = net_buf_pull_mem(buf, sizeof(*req));

	mtu = sys_le16_to_cpu(req->mtu);
	mps = sys_le16_to_cpu(req->mps);

	if (mps < L2CAP_ECRED_MIN_MTU) {
		result = BT_L2CAP_RECONF_OTHER_UNACCEPT;
		goto response;
	}

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = BT_L2CAP_RECONF_INVALID_MTU;
		goto response;
	}

	while (buf->len >= sizeof(scid)) {
		struct bt_l2cap_chan *chan;

		scid = net_buf_pull_le16(buf);

		chan = bt_l2cap_le_lookup_tx_cid(conn, scid);
		if (!chan) {
			result = BT_L2CAP_RECONF_INVALID_CID;
			continue;
		}

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (BT_L2CAP_LE_CHAN(chan)->tx.mtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       BT_L2CAP_LE_CHAN(chan)->tx.mtu, mtu);
			result = BT_L2CAP_RECONF_INVALID_MTU;
			bt_l2cap_chan_disconnect(chan);
			goto response;
		}

		if (BT_L2CAP_LE_CHAN(chan)->tx.mps > mps) {
			mps_reduced = true;
		}

		chans[chan_count] = chan;
		chan_count++;
	}

	/* As per BT Core Spec V5.2 Vol. 3, Part A, section 7.11
	 * The request (...) shall not decrease the MPS of a channel
	 * if more than one channel is specified.
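	 * A reduced MPS with multiple channels in the request is therefore
	 * rejected below with BT_L2CAP_RECONF_INVALID_MPS.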
	 */
	if (mps_reduced && chan_count > 1) {
		result = BT_L2CAP_RECONF_INVALID_MPS;
		goto response;
	}

	for (int i = 0; i < chan_count; i++) {
		BT_L2CAP_LE_CHAN(chans[i])->tx.mtu = mtu;
		BT_L2CAP_LE_CHAN(chans[i])->tx.mps = mps;

		if (chans[i]->ops->reconfigured) {
			chans[i]->ops->reconfigured(chans[i]);
		}
	}

	BT_DBG("mtu %u mps %u", mtu, mps);

response:
	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_ECRED_RECONF_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->result = sys_cpu_to_le16(result);

	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
}

static void le_ecred_reconf_rsp(struct bt_l2cap *l2cap, uint8_t ident,
				struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_ecred_reconf_rsp *rsp;
	struct bt_l2cap_le_chan *ch;
	uint16_t result;

	if (buf->len < sizeof(*rsp)) {
		BT_ERR("Too small ecred reconf rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	result = sys_le16_to_cpu(rsp->result);

	while ((ch = l2cap_lookup_ident(conn, ident))) {
		if (result == BT_L2CAP_LE_SUCCESS) {
			ch->rx.mtu = ch->pending_rx_mtu;
		}

		ch->pending_rx_mtu = 0;
		ch->chan.ident = 0U;

		if (ch->chan.ops->reconfigured) {
			ch->chan.ops->reconfigured(&ch->chan);
		}
	}
}
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */

static struct bt_l2cap_le_chan *l2cap_remove_rx_cid(struct bt_conn *conn,
						    uint16_t cid)
{
	struct bt_l2cap_chan *chan;
	sys_snode_t *prev = NULL;

	/* Protect fixed channels against accidental removal */
	if (!L2CAP_LE_CID_IS_DYN(cid)) {
		return NULL;
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
		if (BT_L2CAP_LE_CHAN(chan)->rx.cid == cid) {
			sys_slist_remove(&conn->channels, prev, &chan->node);
			return BT_L2CAP_LE_CHAN(chan);
		}

		prev = &chan->node;
	}

	return NULL;
}

static void le_disconn_req(struct bt_l2cap *l2cap, uint8_t ident,
			   struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_disconn_req *req = (void *)buf->data;
	struct bt_l2cap_disconn_rsp *rsp;
	uint16_t dcid;

	if (buf->len < sizeof(*req)) {
		BT_ERR("Too small LE disconn req packet size");
		return;
	}

	dcid = sys_le16_to_cpu(req->dcid);

	BT_DBG("dcid 0x%04x scid 0x%04x", dcid, sys_le16_to_cpu(req->scid));

	chan = l2cap_remove_rx_cid(conn, dcid);
	if (!chan) {
		struct bt_l2cap_cmd_reject_cid_data data;

		data.scid = req->scid;
		data.dcid = req->dcid;

		l2cap_send_reject(conn, ident, BT_L2CAP_REJ_INVALID_CID,
				  &data, sizeof(data));
		return;
	}

	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_DISCONN_RSP, ident,
				      sizeof(*rsp));
	if (!buf) {
		return;
	}

	rsp = net_buf_add(buf, sizeof(*rsp));
	rsp->dcid = sys_cpu_to_le16(chan->rx.cid);
	rsp->scid = sys_cpu_to_le16(chan->tx.cid);

	bt_l2cap_chan_del(&chan->chan);

	l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
}

static int l2cap_change_security(struct bt_l2cap_le_chan *chan, uint16_t err)
{
	struct bt_conn *conn = chan->chan.conn;
	bt_security_t sec;
	int ret;

	if (atomic_test_bit(chan->chan.status,
			    BT_L2CAP_STATUS_ENCRYPT_PENDING)) {
		return -EINPROGRESS;
	}

	switch (err) {
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		if (conn->sec_level >= BT_SECURITY_L2) {
			return -EALREADY;
		}

		sec = BT_SECURITY_L2;
		break;
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
		if (conn->sec_level < BT_SECURITY_L2) {
			sec = BT_SECURITY_L2;
		} else if (conn->sec_level < BT_SECURITY_L3) {
			sec = BT_SECURITY_L3;
		} else if (conn->sec_level < BT_SECURITY_L4) {
			sec = BT_SECURITY_L4;
		} else {
			return -EALREADY;
		}
		break;
	default:
		return -EINVAL;
	}

	ret = bt_conn_set_security(chan->chan.conn, sec);
	if (ret < 0) {
		return ret;
	}

	atomic_set_bit(chan->chan.status, BT_L2CAP_STATUS_ENCRYPT_PENDING);

	return 0;
}
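/* The ecred response handler below mirrors le_conn_rsp() but must walk
 * every channel that shares the request ident.
 */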
#if defined(CONFIG_BT_L2CAP_ECRED)
static void le_ecred_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
			      struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;
	struct bt_l2cap_ecred_conn_rsp *rsp;
	uint16_t dcid, mtu, mps, credits, result;

	if (buf->len < sizeof(*rsp)) {
		BT_ERR("Too small ecred conn rsp packet size");
		return;
	}

	rsp = net_buf_pull_mem(buf, sizeof(*rsp));
	mtu = sys_le16_to_cpu(rsp->mtu);
	mps = sys_le16_to_cpu(rsp->mps);
	credits = sys_le16_to_cpu(rsp->credits);
	result = sys_le16_to_cpu(rsp->result);

	BT_DBG("mtu 0x%04x mps 0x%04x credits 0x%04x result %u", mtu,
	       mps, credits, result);

	switch (result) {
	case BT_L2CAP_LE_ERR_AUTHENTICATION:
	case BT_L2CAP_LE_ERR_ENCRYPTION:
		while ((chan = l2cap_lookup_ident(conn, ident))) {
			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->chan.rtx_work);

			/* If security needs changing, wait for it to
			 * complete.
			 */
			if (!l2cap_change_security(chan, result)) {
				return;
			}

			bt_l2cap_chan_remove(conn, &chan->chan);
			bt_l2cap_chan_del(&chan->chan);
		}
		break;
	case BT_L2CAP_LE_SUCCESS:
	/* Some connections refused – invalid Source CID */
	case BT_L2CAP_LE_ERR_INVALID_SCID:
	/* Some connections refused – Source CID already allocated */
	case BT_L2CAP_LE_ERR_SCID_IN_USE:
	/* Some connections refused – not enough resources available */
	case BT_L2CAP_LE_ERR_NO_RESOURCES:
		while ((chan = l2cap_lookup_ident(conn, ident))) {
			struct bt_l2cap_chan *c;

			/* Cancel RTX work */
			k_work_cancel_delayable(&chan->chan.rtx_work);

			dcid = net_buf_pull_le16(buf);

			BT_DBG("dcid 0x%04x", dcid);

			/* If a Destination CID is 0x0000, the channel was not
			 * established.
			 */
			if (!dcid) {
				bt_l2cap_chan_remove(conn, &chan->chan);
				bt_l2cap_chan_del(&chan->chan);
				continue;
			}

			c = bt_l2cap_le_lookup_tx_cid(conn, dcid);
			if (c) {
				/* If a device receives a
				 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet
				 * with an already assigned Destination CID,
				 * then both the original channel and the new
				 * channel shall be immediately discarded and
				 * not used.
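				 * The new channel is dropped and a disconnect
				 * is initiated for the pre-existing one below.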
*/ bt_l2cap_chan_remove(conn, &chan->chan); bt_l2cap_chan_del(&chan->chan); bt_l2cap_chan_disconnect(c); continue; } chan->tx.cid = dcid; chan->chan.ident = 0U; chan->tx.mtu = mtu; chan->tx.mps = mps; /* Update state */ bt_l2cap_chan_set_state(&chan->chan, BT_L2CAP_CONNECTED); if (chan->chan.ops->connected) { chan->chan.ops->connected(&chan->chan); } /* Give credits */ l2cap_chan_tx_give_credits(chan, credits); l2cap_chan_rx_give_credits(chan, chan->rx.init_credits); } break; case BT_L2CAP_LE_ERR_PSM_NOT_SUPP: default: while ((chan = l2cap_remove_ident(conn, ident))) { bt_l2cap_chan_del(&chan->chan); } break; } } #endif /* CONFIG_BT_L2CAP_ECRED */ static void le_conn_rsp(struct bt_l2cap *l2cap, uint8_t ident, struct net_buf *buf) { struct bt_conn *conn = l2cap->chan.chan.conn; struct bt_l2cap_le_chan *chan; struct bt_l2cap_le_conn_rsp *rsp = (void *)buf->data; uint16_t dcid, mtu, mps, credits, result; if (buf->len < sizeof(*rsp)) { BT_ERR("Too small LE conn rsp packet size"); return; } dcid = sys_le16_to_cpu(rsp->dcid); mtu = sys_le16_to_cpu(rsp->mtu); mps = sys_le16_to_cpu(rsp->mps); credits = sys_le16_to_cpu(rsp->credits); result = sys_le16_to_cpu(rsp->result); BT_DBG("dcid 0x%04x mtu %u mps %u credits %u result 0x%04x", dcid, mtu, mps, credits, result); /* Keep the channel in case of security errors */ if (result == BT_L2CAP_LE_SUCCESS || result == BT_L2CAP_LE_ERR_AUTHENTICATION || result == BT_L2CAP_LE_ERR_ENCRYPTION) { chan = l2cap_lookup_ident(conn, ident); } else { chan = l2cap_remove_ident(conn, ident); } if (!chan) { BT_ERR("Cannot find channel for ident %u", ident); return; } /* Cancel RTX work */ k_work_cancel_delayable(&chan->chan.rtx_work); /* Reset ident since it got a response */ chan->chan.ident = 0U; switch (result) { case BT_L2CAP_LE_SUCCESS: chan->tx.cid = dcid; chan->tx.mtu = mtu; chan->tx.mps = mps; /* Update state */ bt_l2cap_chan_set_state(&chan->chan, BT_L2CAP_CONNECTED); if (chan->chan.ops->connected) { chan->chan.ops->connected(&chan->chan); } /* Give credits */ l2cap_chan_tx_give_credits(chan, credits); l2cap_chan_rx_give_credits(chan, chan->rx.init_credits); break; case BT_L2CAP_LE_ERR_AUTHENTICATION: case BT_L2CAP_LE_ERR_ENCRYPTION: /* If security needs changing wait it to be completed */ if (l2cap_change_security(chan, result) == 0) { return; } bt_l2cap_chan_remove(conn, &chan->chan); __fallthrough; default: bt_l2cap_chan_del(&chan->chan); } } static void le_disconn_rsp(struct bt_l2cap *l2cap, uint8_t ident, struct net_buf *buf) { struct bt_conn *conn = l2cap->chan.chan.conn; struct bt_l2cap_le_chan *chan; struct bt_l2cap_disconn_rsp *rsp = (void *)buf->data; uint16_t scid; if (buf->len < sizeof(*rsp)) { BT_ERR("Too small LE disconn rsp packet size"); return; } scid = sys_le16_to_cpu(rsp->scid); BT_DBG("dcid 0x%04x scid 0x%04x", sys_le16_to_cpu(rsp->dcid), scid); chan = l2cap_remove_rx_cid(conn, scid); if (!chan) { return; } bt_l2cap_chan_del(&chan->chan); } static inline struct net_buf *l2cap_alloc_seg(struct net_buf *buf) { struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id); struct net_buf *seg; /* Try to use original pool if possible */ seg = net_buf_alloc(pool, K_NO_WAIT); if (seg) { net_buf_reserve(seg, BT_L2CAP_CHAN_SEND_RESERVE); return seg; } /* Fallback to using global connection tx pool */ return bt_l2cap_create_pdu_timeout(NULL, 0, K_NO_WAIT); } static struct net_buf *l2cap_chan_create_seg(struct bt_l2cap_le_chan *ch, struct net_buf *buf, size_t sdu_hdr_len) { struct net_buf *seg; uint16_t headroom; uint16_t len; /* Segment if data (+ data 
	/* Segment if data (+ data headroom) is bigger than MPS */
	if (buf->len + sdu_hdr_len > ch->tx.mps) {
		goto segment;
	}

	headroom = BT_L2CAP_CHAN_SEND_RESERVE + sdu_hdr_len;

	/* Check if the original buffer has enough headroom and doesn't have
	 * any fragments.
	 */
	if (net_buf_headroom(buf) >= headroom && !buf->frags) {
		if (sdu_hdr_len) {
			/* Push SDU length if set */
			net_buf_push_le16(buf, net_buf_frags_len(buf));
		}
		return net_buf_ref(buf);
	}

segment:
	seg = l2cap_alloc_seg(buf);
	if (!seg) {
		return NULL;
	}

	if (sdu_hdr_len) {
		net_buf_add_le16(seg, net_buf_frags_len(buf));
	}

	/* Don't send more than the TX MPS, including the SDU length */
	len = MIN(net_buf_tailroom(seg), ch->tx.mps - sdu_hdr_len);
	/* Limit if original buffer is smaller than the segment */
	len = MIN(buf->len, len);
	net_buf_add_mem(seg, buf->data, len);
	net_buf_pull(buf, len);

	BT_DBG("ch %p seg %p len %u", ch, seg, seg->len);

	return seg;
}

static void l2cap_chan_tx_resume(struct bt_l2cap_le_chan *ch)
{
	if (!atomic_get(&ch->tx.credits) ||
	    (k_fifo_is_empty(&ch->tx_queue) && !ch->tx_buf)) {
		return;
	}

	k_work_submit(&ch->tx_work);
}

static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data)
{
	uint16_t cid = POINTER_TO_UINT(user_data);
	struct bt_l2cap_chan *chan;

	BT_DBG("conn %p CID 0x%04x", conn, cid);

	chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
	if (!chan) {
		/* Received SDU sent callback for disconnected channel */
		return;
	}

	if (chan->ops->sent) {
		chan->ops->sent(chan);
	}

	l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
}

static void l2cap_chan_seg_sent(struct bt_conn *conn, void *user_data)
{
	uint16_t cid = POINTER_TO_UINT(user_data);
	struct bt_l2cap_chan *chan;

	BT_DBG("conn %p CID 0x%04x", conn, cid);

	chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
	if (!chan) {
		/* Received segment sent callback for disconnected channel */
		return;
	}

	l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
}

static bool test_and_dec(atomic_t *target)
{
	atomic_t old_value, new_value;

	do {
		old_value = atomic_get(target);
		if (!old_value) {
			return false;
		}

		new_value = old_value - 1;
	} while (atomic_cas(target, old_value, new_value) == 0);

	return true;
}

/* This returns -EAGAIN whenever a segment cannot be sent immediately, which
 * can happen under the following circumstances:
 *
 * 1. There are no credits
 * 2. There are no buffers
 * 3. There are no TX contexts
 *
 * In all cases the original buffer is unaffected so it can be pushed back to
 * be sent later.
 */
static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch,
			      struct net_buf *buf, uint16_t sdu_hdr_len)
{
	struct net_buf *seg;
	struct net_buf_simple_state state;
	int len, err;

	if (!test_and_dec(&ch->tx.credits)) {
		BT_WARN("No credits to transmit packet");
		return -EAGAIN;
	}

	/* Save state so it can be restored if we failed to send */
	net_buf_simple_save(&buf->b, &state);

	seg = l2cap_chan_create_seg(ch, buf, sdu_hdr_len);
	if (!seg) {
		atomic_inc(&ch->tx.credits);
		return -EAGAIN;
	}

	BT_DBG("ch %p cid 0x%04x len %u credits %lu", ch, ch->tx.cid,
	       seg->len, atomic_get(&ch->tx.credits));

	len = seg->len - sdu_hdr_len;

	/* Set a callback if there is no data left in the buffer and sent
	 * callback has been set.
	 */
	if ((buf == seg || !buf->len) && ch->chan.ops->sent) {
		err = bt_l2cap_send_cb(ch->chan.conn, ch->tx.cid, seg,
				       l2cap_chan_sdu_sent,
				       UINT_TO_POINTER(ch->tx.cid));
	} else {
		err = bt_l2cap_send_cb(ch->chan.conn, ch->tx.cid, seg,
				       l2cap_chan_seg_sent,
				       UINT_TO_POINTER(ch->tx.cid));
	}

	if (err) {
		BT_WARN("Unable to send seg %d", err);
		atomic_inc(&ch->tx.credits);

		/* If the segment is not the original buffer release it since
		 * it won't be needed anymore.
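		 * (The caller still holds its own reference to the original
		 * buffer, so only the extra segment reference is dropped.)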
		 */
		if (seg != buf) {
			net_buf_unref(seg);
		}

		if (err == -ENOBUFS) {
			/* Restore state since segment could not be sent */
			net_buf_simple_restore(&buf->b, &state);
			return -EAGAIN;
		}

		return err;
	}

	/* If there are no credits left, clear the output status and notify
	 * the change.
	 */
	if (!atomic_get(&ch->tx.credits)) {
		atomic_clear_bit(ch->chan.status, BT_L2CAP_STATUS_OUT);
		if (ch->chan.ops->status) {
			ch->chan.ops->status(&ch->chan, ch->chan.status);
		}
	}

	return len;
}

static int l2cap_chan_le_send_sdu(struct bt_l2cap_le_chan *ch,
				  struct net_buf **buf, uint16_t sent)
{
	int ret, total_len;
	struct net_buf *frag;

	total_len = net_buf_frags_len(*buf) + sent;

	if (total_len > ch->tx.mtu) {
		return -EMSGSIZE;
	}

	frag = *buf;
	if (!frag->len && frag->frags) {
		frag = frag->frags;
	}

	if (!sent) {
		/* Add SDU length for the first segment */
		ret = l2cap_chan_le_send(ch, frag, BT_L2CAP_SDU_HDR_SIZE);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/* Store sent data into user_data */
				data_sent(frag)->len = sent;
			}
			*buf = frag;
			return ret;
		}
		sent = ret;
	}

	/* Send remaining segments */
	for (ret = 0; sent < total_len; sent += ret) {
		/* Proceed to next fragment */
		if (!frag->len) {
			frag = net_buf_frag_del(NULL, frag);
		}

		ret = l2cap_chan_le_send(ch, frag, 0);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/* Store sent data into user_data */
				data_sent(frag)->len = sent;
			}
			*buf = frag;
			return ret;
		}
	}

	BT_DBG("ch %p cid 0x%04x sent %u total_len %u", ch, ch->tx.cid, sent,
	       total_len);

	net_buf_unref(frag);

	return ret;
}

static void le_credits(struct bt_l2cap *l2cap, uint8_t ident,
		       struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_chan *chan;
	struct bt_l2cap_le_credits *ev = (void *)buf->data;
	struct bt_l2cap_le_chan *ch;
	uint16_t credits, cid;

	if (buf->len < sizeof(*ev)) {
		BT_ERR("Too small LE Credits packet size");
		return;
	}

	cid = sys_le16_to_cpu(ev->cid);
	credits = sys_le16_to_cpu(ev->credits);

	BT_DBG("cid 0x%04x credits %u", cid, credits);

	chan = bt_l2cap_le_lookup_tx_cid(conn, cid);
	if (!chan) {
		BT_ERR("Unable to find channel of LE Credits packet");
		return;
	}

	ch = BT_L2CAP_LE_CHAN(chan);

	if (atomic_get(&ch->tx.credits) + credits > UINT16_MAX) {
		BT_ERR("Credits overflow");
		bt_l2cap_chan_disconnect(chan);
		return;
	}

	l2cap_chan_tx_give_credits(ch, credits);

	BT_DBG("chan %p total credits %lu", ch, atomic_get(&ch->tx.credits));

	l2cap_chan_tx_resume(ch);
}

static void reject_cmd(struct bt_l2cap *l2cap, uint8_t ident,
		       struct net_buf *buf)
{
	struct bt_conn *conn = l2cap->chan.chan.conn;
	struct bt_l2cap_le_chan *chan;

	/* Check if there is an outstanding channel */
	chan = l2cap_remove_ident(conn, ident);
	if (!chan) {
		return;
	}

	bt_l2cap_chan_del(&chan->chan);
}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

static int l2cap_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	struct bt_l2cap *l2cap = CONTAINER_OF(chan, struct bt_l2cap, chan.chan);
	struct bt_l2cap_sig_hdr *hdr;
	uint16_t len;

	if (buf->len < sizeof(*hdr)) {
		BT_ERR("Too small L2CAP signaling PDU");
		return 0;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	len = sys_le16_to_cpu(hdr->len);

	BT_DBG("Signaling code 0x%02x ident %u len %u", hdr->code,
	       hdr->ident, len);

	if (buf->len != len) {
		BT_ERR("L2CAP length mismatch (%u != %u)", buf->len, len);
		return 0;
	}

	if (!hdr->ident) {
		BT_ERR("Invalid ident value in L2CAP PDU");
		return 0;
	}

	switch (hdr->code) {
	case BT_L2CAP_CONN_PARAM_RSP:
		le_conn_param_rsp(l2cap, buf);
		break;
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	case BT_L2CAP_LE_CONN_REQ:
		le_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_LE_CONN_RSP:
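		/* Response to a connection request sent by this host */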
		le_conn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_REQ:
		le_disconn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_DISCONN_RSP:
		le_disconn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_LE_CREDITS:
		le_credits(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_CMD_REJECT:
		reject_cmd(l2cap, hdr->ident, buf);
		break;
#if defined(CONFIG_BT_L2CAP_ECRED)
	case BT_L2CAP_ECRED_CONN_REQ:
		le_ecred_conn_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_CONN_RSP:
		le_ecred_conn_rsp(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_RECONF_REQ:
		le_ecred_reconf_req(l2cap, hdr->ident, buf);
		break;
	case BT_L2CAP_ECRED_RECONF_RSP:
		le_ecred_reconf_rsp(l2cap, hdr->ident, buf);
		break;
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */
#else
	case BT_L2CAP_CMD_REJECT:
		/* Ignored */
		break;
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */
	case BT_L2CAP_CONN_PARAM_REQ:
		if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
			le_conn_param_update_req(l2cap, hdr->ident, buf);
			break;
		}

		__fallthrough;
	default:
		BT_WARN("Unknown L2CAP PDU code 0x%02x", hdr->code);
		l2cap_send_reject(chan->conn, hdr->ident,
				  BT_L2CAP_REJ_NOT_UNDERSTOOD, NULL, 0);
		break;
	}

	return 0;
}

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
static void l2cap_chan_shutdown(struct bt_l2cap_chan *chan)
{
	struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
	struct net_buf *buf;

	BT_DBG("chan %p", chan);

	atomic_set_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN);

	/* Destroy segmented SDU if it exists */
	if (ch->_sdu) {
		net_buf_unref(ch->_sdu);
		ch->_sdu = NULL;
		ch->_sdu_len = 0U;
	}

	/* Cleanup outstanding request */
	if (ch->tx_buf) {
		net_buf_unref(ch->tx_buf);
		ch->tx_buf = NULL;
	}

	/* Remove buffers on the TX queue */
	while ((buf = net_buf_get(&ch->tx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Remove buffers on the RX queue */
	while ((buf = net_buf_get(&ch->rx_queue, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	/* Update status */
	if (chan->ops->status) {
		chan->ops->status(chan, chan->status);
	}
}

static void l2cap_chan_send_credits(struct bt_l2cap_le_chan *chan,
				    struct net_buf *buf, uint16_t credits)
{
	struct bt_l2cap_le_credits *ev;

	/* Cap the number of credits given */
	if (credits > chan->rx.init_credits) {
		credits = chan->rx.init_credits;
	}

	buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_LE_CREDITS, get_ident(),
				      sizeof(*ev));
	if (!buf) {
		BT_ERR("Unable to send credits update");
		/* Disconnect would probably not work either so the only
		 * option left is to shut down the channel.
		 */
		l2cap_chan_shutdown(&chan->chan);
		return;
	}

	l2cap_chan_rx_give_credits(chan, credits);

	ev = net_buf_add(buf, sizeof(*ev));
	ev->cid = sys_cpu_to_le16(chan->rx.cid);
	ev->credits = sys_cpu_to_le16(credits);

	l2cap_send(chan->chan.conn, BT_L2CAP_CID_LE_SIG, buf);

	BT_DBG("chan %p credits %lu", chan, atomic_get(&chan->rx.credits));
}

static void l2cap_chan_update_credits(struct bt_l2cap_le_chan *chan,
				      struct net_buf *buf)
{
	uint16_t credits;
	atomic_val_t old_credits = atomic_get(&chan->rx.credits);

	/* Restore enough credits to complete the SDU */
	credits = ((chan->_sdu_len - net_buf_frags_len(buf)) +
		   (chan->rx.mps - 1)) / chan->rx.mps;

	if (credits < old_credits) {
		return;
	}

	credits -= old_credits;

	l2cap_chan_send_credits(chan, buf, credits);
}

int bt_l2cap_chan_recv_complete(struct bt_l2cap_chan *chan,
				struct net_buf *buf)
{
	struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
	struct bt_conn *conn = chan->conn;
	uint16_t credits;

	__ASSERT_NO_MSG(chan);
	__ASSERT_NO_MSG(buf);

	if (!conn) {
		return -ENOTCONN;
	}

	if (conn->type != BT_CONN_TYPE_LE) {
		return -ENOTSUP;
	}

	BT_DBG("chan %p buf %p", chan, buf);

	/* Restore credits used by packet */
	memcpy(&credits, net_buf_user_data(buf), sizeof(credits));

	l2cap_chan_send_credits(ch, buf, credits);

	net_buf_unref(buf);

	return 0;
}

static struct net_buf *l2cap_alloc_frag(k_timeout_t timeout, void *user_data)
{
	struct bt_l2cap_le_chan *chan = user_data;
	struct net_buf *frag = NULL;

	frag = chan->chan.ops->alloc_buf(&chan->chan);
	if (!frag) {
		return NULL;
	}

	BT_DBG("frag %p tailroom %zu", frag, net_buf_tailroom(frag));

	return frag;
}

static void l2cap_chan_le_recv_sdu(struct bt_l2cap_le_chan *chan,
				   struct net_buf *buf, uint16_t seg)
{
	int err;

	BT_DBG("chan %p len %zu", chan, net_buf_frags_len(buf));

	/* Receiving complete SDU, notify channel and reset SDU buf */
	err = chan->chan.ops->recv(&chan->chan, buf);
	if (err < 0) {
		if (err != -EINPROGRESS) {
			BT_ERR("err %d", err);
			bt_l2cap_chan_disconnect(&chan->chan);
			net_buf_unref(buf);
		}
		return;
	}

	l2cap_chan_send_credits(chan, buf, seg);
	net_buf_unref(buf);
}

static void l2cap_chan_le_recv_seg(struct bt_l2cap_le_chan *chan,
				   struct net_buf *buf)
{
	uint16_t len;
	uint16_t seg = 0U;

	len = net_buf_frags_len(chan->_sdu);
	if (len) {
		memcpy(&seg, net_buf_user_data(chan->_sdu), sizeof(seg));
	}

	if (len + buf->len > chan->_sdu_len) {
		BT_ERR("SDU length mismatch");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	seg++;
	/* Store received segments in user_data */
	memcpy(net_buf_user_data(chan->_sdu), &seg, sizeof(seg));

	BT_DBG("chan %p seg %d len %zu", chan, seg, net_buf_frags_len(buf));

	/* Append received segment to SDU */
	len = net_buf_append_bytes(chan->_sdu, buf->len, buf->data, K_NO_WAIT,
				   l2cap_alloc_frag, chan);
	if (len != buf->len) {
		BT_ERR("Unable to store SDU");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	if (net_buf_frags_len(chan->_sdu) < chan->_sdu_len) {
		/* Give more credits if the remote has run out of them; this
		 * should only happen if the remote cannot fully utilize the
		 * MPS for some reason.
		 */
		if (!atomic_get(&chan->rx.credits) &&
		    seg == chan->rx.init_credits) {
			l2cap_chan_update_credits(chan, buf);
		}
		return;
	}

	buf = chan->_sdu;
	chan->_sdu = NULL;
	chan->_sdu_len = 0U;

	l2cap_chan_le_recv_sdu(chan, buf, seg);
}

static void l2cap_chan_le_recv(struct bt_l2cap_le_chan *chan,
			       struct net_buf *buf)
{
	uint16_t sdu_len;
	int err;

	if (!test_and_dec(&chan->rx.credits)) {
		BT_ERR("No credits to receive packet");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Check if segments already exist */
	if (chan->_sdu) {
		l2cap_chan_le_recv_seg(chan, buf);
		return;
	}

	if (buf->len < 2) {
		BT_WARN("Too short data packet");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	sdu_len = net_buf_pull_le16(buf);

	BT_DBG("chan %p len %u sdu_len %u", chan, buf->len, sdu_len);

	if (sdu_len > chan->rx.mtu) {
		BT_ERR("Invalid SDU length");
		bt_l2cap_chan_disconnect(&chan->chan);
		return;
	}

	/* Always allocate buffer from the channel if supported. */
	if (chan->chan.ops->alloc_buf) {
		chan->_sdu = chan->chan.ops->alloc_buf(&chan->chan);
		if (!chan->_sdu) {
			BT_ERR("Unable to allocate buffer for SDU");
			bt_l2cap_chan_disconnect(&chan->chan);
			return;
		}
		chan->_sdu_len = sdu_len;
		l2cap_chan_le_recv_seg(chan, buf);
		return;
	}

	err = chan->chan.ops->recv(&chan->chan, buf);
	if (err) {
		if (err != -EINPROGRESS) {
			BT_ERR("err %d", err);
			bt_l2cap_chan_disconnect(&chan->chan);
		}
		return;
	}

	l2cap_chan_send_credits(chan, buf, 1);
}

static void l2cap_chan_recv_queue(struct bt_l2cap_le_chan *chan,
				  struct net_buf *buf)
{
	if (chan->chan.state == BT_L2CAP_DISCONNECT) {
		BT_WARN("Ignoring data received while disconnecting");
		net_buf_unref(buf);
		return;
	}

	if (atomic_test_bit(chan->chan.status, BT_L2CAP_STATUS_SHUTDOWN)) {
		BT_WARN("Ignoring data received while channel has shutdown");
		net_buf_unref(buf);
		return;
	}

	if (!L2CAP_LE_PSM_IS_DYN(chan->chan.psm)) {
		l2cap_chan_le_recv(chan, buf);
		net_buf_unref(buf);
		return;
	}

	net_buf_put(&chan->rx_queue, buf);
	k_work_submit(&chan->rx_work);
}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

static void l2cap_chan_recv(struct bt_l2cap_chan *chan, struct net_buf *buf,
			    bool complete)
{
#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
	struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);

	if (L2CAP_LE_CID_IS_DYN(ch->rx.cid)) {
		if (complete) {
			l2cap_chan_recv_queue(ch, buf);
		} else {
			/* If the packet was not complete, the peer device
			 * overflowed our RX and the channel shall be
			 * disconnected.
			 */
			bt_l2cap_chan_disconnect(chan);
			net_buf_unref(buf);
		}

		return;
	}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

	BT_DBG("chan %p len %u", chan, buf->len);

	chan->ops->recv(chan, buf);
	net_buf_unref(buf);
}

void bt_l2cap_recv(struct bt_conn *conn, struct net_buf *buf, bool complete)
{
	struct bt_l2cap_hdr *hdr;
	struct bt_l2cap_chan *chan;
	uint16_t cid;

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		bt_l2cap_br_recv(conn, buf);
		return;
	}

	if (buf->len < sizeof(*hdr)) {
		BT_ERR("Too small L2CAP PDU received");
		net_buf_unref(buf);
		return;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	cid = sys_le16_to_cpu(hdr->cid);

	BT_DBG("Packet for CID %u len %u", cid, buf->len);

	chan = bt_l2cap_le_lookup_rx_cid(conn, cid);
	if (!chan) {
		BT_WARN("Ignoring data for unknown channel ID 0x%04x", cid);
		net_buf_unref(buf);
		return;
	}

	l2cap_chan_recv(chan, buf, complete);
}

int bt_l2cap_update_conn_param(struct bt_conn *conn,
			       const struct bt_le_conn_param *param)
{
	struct bt_l2cap_conn_param_req *req;
	struct net_buf *buf;
	int err;

	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_CONN_PARAM_REQ,
				      get_ident(), sizeof(*req));
	if (!buf) {
		return -ENOMEM;
	}
	req = net_buf_add(buf, sizeof(*req));
	req->min_interval = sys_cpu_to_le16(param->interval_min);
	req->max_interval = sys_cpu_to_le16(param->interval_max);
	req->latency = sys_cpu_to_le16(param->latency);
	req->timeout = sys_cpu_to_le16(param->timeout);

	err = bt_l2cap_send(conn, BT_L2CAP_CID_LE_SIG, buf);
	if (err) {
		net_buf_unref(buf);
		return err;
	}

	return 0;
}

static void l2cap_connected(struct bt_l2cap_chan *chan)
{
	BT_DBG("ch %p cid 0x%04x", BT_L2CAP_LE_CHAN(chan),
	       BT_L2CAP_LE_CHAN(chan)->rx.cid);
}

static void l2cap_disconnected(struct bt_l2cap_chan *chan)
{
	BT_DBG("ch %p cid 0x%04x", BT_L2CAP_LE_CHAN(chan),
	       BT_L2CAP_LE_CHAN(chan)->rx.cid);

	/* Cancel RTX work on signal channel.
	 * The disconnected callback is always called from the system
	 * workqueue so this should always succeed.
	 */
	(void)k_work_cancel_delayable(&chan->rtx_work);
}

static int l2cap_accept(struct bt_conn *conn, struct bt_l2cap_chan **chan)
{
	int i;
	static const struct bt_l2cap_chan_ops ops = {
		.connected = l2cap_connected,
		.disconnected = l2cap_disconnected,
		.recv = l2cap_recv,
	};

	BT_DBG("conn %p handle %u", conn, conn->handle);

	for (i = 0; i < ARRAY_SIZE(bt_l2cap_pool); i++) {
		struct bt_l2cap *l2cap = &bt_l2cap_pool[i];

		if (l2cap->chan.chan.conn) {
			continue;
		}

		l2cap->chan.chan.ops = &ops;
		*chan = &l2cap->chan.chan;

		return 0;
	}

	BT_ERR("No available L2CAP context for conn %p", conn);

	return -ENOMEM;
}

BT_L2CAP_CHANNEL_DEFINE(le_fixed_chan, BT_L2CAP_CID_LE_SIG, l2cap_accept, NULL);

void bt_l2cap_init(void)
{
	if (IS_ENABLED(CONFIG_BT_BREDR)) {
		bt_l2cap_br_init();
	}
}

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
static int l2cap_le_connect(struct bt_conn *conn, struct bt_l2cap_le_chan *ch,
			    uint16_t psm)
{
	int err;

	if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
		return -EINVAL;
	}

	l2cap_chan_tx_init(ch);
	l2cap_chan_rx_init(ch);

	if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
		return -ENOMEM;
	}

	ch->chan.psm = psm;

	if (conn->sec_level < ch->chan.required_sec_level) {
		err = bt_conn_set_security(conn, ch->chan.required_sec_level);
		if (err) {
			goto fail;
		}

		atomic_set_bit(ch->chan.status,
			       BT_L2CAP_STATUS_ENCRYPT_PENDING);

		return 0;
	}

	err = l2cap_le_conn_req(ch);
	if (err) {
		goto fail;
	}

	return 0;

fail:
	bt_l2cap_chan_remove(conn, &ch->chan);
	bt_l2cap_chan_del(&ch->chan);
	return err;
}

#if defined(CONFIG_BT_L2CAP_ECRED)
static int l2cap_ecred_init(struct bt_conn *conn,
			    struct bt_l2cap_le_chan *ch, uint16_t psm)
{
	if (psm < L2CAP_LE_PSM_FIXED_START || psm > L2CAP_LE_PSM_DYN_END) {
		return -EINVAL;
	}

	l2cap_chan_tx_init(ch);
	l2cap_chan_rx_init(ch);

	if (!l2cap_chan_add(conn, &ch->chan, l2cap_chan_destroy)) {
		return -ENOMEM;
	}

	ch->chan.psm = psm;

	BT_DBG("ch %p psm 0x%02x mtu %u mps %u credits %u", ch, ch->chan.psm,
	       ch->rx.mtu, ch->rx.mps, ch->rx.init_credits);

	return 0;
}

int bt_l2cap_ecred_chan_connect(struct bt_conn *conn,
				struct bt_l2cap_chan **chan, uint16_t psm)
{
	int i, err;

	BT_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);

	if (!conn || !chan) {
		return -EINVAL;
	}

	/* Init non-null channels */
	for (i = 0; i < L2CAP_ECRED_CHAN_MAX; i++) {
		if (!chan[i]) {
			break;
		}

		err = l2cap_ecred_init(conn, BT_L2CAP_LE_CHAN(chan[i]), psm);
		if (err < 0) {
			i--;
			goto fail;
		}
	}

	return l2cap_ecred_conn_req(chan, i);

fail:
	/* Remove channels added */
	for (; i >= 0; i--) {
		if (!chan[i]) {
			continue;
		}

		bt_l2cap_chan_remove(conn, chan[i]);
	}

	return err;
}

static struct bt_l2cap_le_chan *l2cap_find_pending_reconf(struct bt_conn *conn)
{
	struct bt_l2cap_chan *chan;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
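		/* A non-zero pending_rx_mtu marks an in-flight reconfigure
		 * request on this channel.
		 */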
		if (BT_L2CAP_LE_CHAN(chan)->pending_rx_mtu) {
			return BT_L2CAP_LE_CHAN(chan);
		}
	}

	return NULL;
}

int bt_l2cap_ecred_chan_reconfigure(struct bt_l2cap_chan **chans, uint16_t mtu)
{
	struct bt_l2cap_ecred_reconf_req *req;
	struct bt_conn *conn = NULL;
	struct bt_l2cap_le_chan *ch;
	struct net_buf *buf;
	uint8_t ident;
	int i;

	BT_DBG("chans %p mtu 0x%04x", chans, mtu);

	if (!chans) {
		return -EINVAL;
	}

	for (i = 0; i < L2CAP_ECRED_CHAN_MAX; i++) {
		if (!chans[i]) {
			break;
		}

		/* validate that all channels are from the same connection */
		if (conn) {
			if (conn != chans[i]->conn) {
				return -EINVAL;
			}
		} else {
			conn = chans[i]->conn;
		}

		/* validate that the MTU is not decreased */
		if (mtu < BT_L2CAP_LE_CHAN(chans[i])->rx.mtu) {
			return -EINVAL;
		}
	}

	if (i == 0) {
		return -EINVAL;
	}

	if (!conn) {
		return -ENOTCONN;
	}

	if (conn->type != BT_CONN_TYPE_LE) {
		return -EINVAL;
	}

	/* allow only 1 request at a time */
	if (l2cap_find_pending_reconf(conn)) {
		return -EBUSY;
	}

	ident = get_ident();

	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_ECRED_RECONF_REQ,
				      ident,
				      sizeof(*req) + (i * sizeof(uint16_t)));
	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));
	req->mtu = sys_cpu_to_le16(mtu);

	/* MPS shall not be bigger than MTU + BT_L2CAP_SDU_HDR_SIZE
	 * as the remaining bytes cannot be used.
	 */
	req->mps = sys_cpu_to_le16(MIN(mtu + BT_L2CAP_SDU_HDR_SIZE,
				       BT_L2CAP_RX_MTU));

	for (int j = 0; j < i; j++) {
		ch = BT_L2CAP_LE_CHAN(chans[j]);

		ch->chan.ident = ident;
		ch->pending_rx_mtu = mtu;

		net_buf_add_le16(buf, ch->rx.cid);
	}

	/* Use the first channel for sending and for the RTX timeout */
	l2cap_chan_send_req(chans[0], buf, L2CAP_CONN_TIMEOUT);

	return 0;
}
#endif /* defined(CONFIG_BT_L2CAP_ECRED) */

int bt_l2cap_chan_connect(struct bt_conn *conn, struct bt_l2cap_chan *chan,
			  uint16_t psm)
{
	BT_DBG("conn %p chan %p psm 0x%04x", conn, chan, psm);

	if (!conn || conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	if (!chan) {
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		return bt_l2cap_br_chan_connect(conn, chan, psm);
	}

	if (chan->required_sec_level > BT_SECURITY_L4) {
		return -EINVAL;
	} else if (chan->required_sec_level == BT_SECURITY_L0) {
		chan->required_sec_level = BT_SECURITY_L1;
	}

	return l2cap_le_connect(conn, BT_L2CAP_LE_CHAN(chan), psm);
}

int bt_l2cap_chan_disconnect(struct bt_l2cap_chan *chan)
{
	struct bt_conn *conn = chan->conn;
	struct net_buf *buf;
	struct bt_l2cap_disconn_req *req;
	struct bt_l2cap_le_chan *ch;

	if (!conn) {
		return -ENOTCONN;
	}

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    conn->type == BT_CONN_TYPE_BR) {
		return bt_l2cap_br_chan_disconnect(chan);
	}

	ch = BT_L2CAP_LE_CHAN(chan);

	BT_DBG("chan %p scid 0x%04x dcid 0x%04x", chan, ch->rx.cid,
	       ch->tx.cid);

	ch->chan.ident = get_ident();

	buf = l2cap_create_le_sig_pdu(NULL, BT_L2CAP_DISCONN_REQ,
				      ch->chan.ident, sizeof(*req));
	if (!buf) {
		return -ENOMEM;
	}

	req = net_buf_add(buf, sizeof(*req));
	req->dcid = sys_cpu_to_le16(ch->tx.cid);
	req->scid = sys_cpu_to_le16(ch->rx.cid);

	l2cap_chan_send_req(chan, buf, L2CAP_DISC_TIMEOUT);
	bt_l2cap_chan_set_state(chan, BT_L2CAP_DISCONNECT);

	return 0;
}

int bt_l2cap_chan_send(struct bt_l2cap_chan *chan, struct net_buf *buf)
{
	struct bt_l2cap_le_chan *ch = BT_L2CAP_LE_CHAN(chan);
	int err;

	if (!buf) {
		return -EINVAL;
	}

	BT_DBG("chan %p buf %p len %zu", chan, buf, net_buf_frags_len(buf));

	if (!chan->conn || chan->conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	if (atomic_test_bit(chan->status, BT_L2CAP_STATUS_SHUTDOWN)) {
		return -ESHUTDOWN;
	}

	if (IS_ENABLED(CONFIG_BT_BREDR) &&
	    chan->conn->type == BT_CONN_TYPE_BR) {
		return bt_l2cap_br_chan_send(chan, buf);
	}

	/* Queue if there are pending segments left from previous packet or
	 * there are no credits available.
	 */
	if (ch->tx_buf || !k_fifo_is_empty(&ch->tx_queue) ||
	    !atomic_get(&ch->tx.credits)) {
		data_sent(buf)->len = 0;
		net_buf_put(&ch->tx_queue, buf);
		k_work_submit(&ch->tx_work);
		return 0;
	}

	err = l2cap_chan_le_send_sdu(ch, &buf, 0);
	if (err < 0) {
		if (err == -EAGAIN && data_sent(buf)->len) {
			/* Queue buffer if at least one segment could be
			 * sent.
			 */
			net_buf_put(&ch->tx_queue, buf);
			return data_sent(buf)->len;
		}

		BT_ERR("failed to send message %d", err);
	}

	return err;
}
#endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */