Bluetooth: controller: ISO TX data path including ISOAL

- ISO TX data path for HCI and support for vendor path
- ISO-AL segmentation of framed PDUs
- Insertion of segment headers
- Reconstruction and storing of CIG reference point in ULL
- Calculation and insertion of Time-Offset
- Exit error spooling in ISO-AL on detecting start
- ISO-AL TX unframed fragmentation

Signed-off-by: Nirosharn Amarasinghe <niag@demant.com>
Signed-off-by: Morten Priess <mtpr@oticon.com>
Morten Priess 2022-04-20 14:11:00 +02:00 committed by Carles Cufí
commit e3342fe01e
19 changed files with 1273 additions and 368 deletions

View file

@ -31,6 +31,9 @@
#include "hal/ecb.h"
#include "hal/ccm.h"
#include "hal/ticker.h"
#include "ticker/ticker.h"
#include "ll_sw/pdu.h"
@ -58,6 +61,7 @@
#include "ll_sw/ull_conn_types.h"
#include "ll_sw/ull_iso_types.h"
#include "ll_sw/ull_conn_iso_types.h"
#include "ll_sw/ull_conn_iso_internal.h"
#include "ll_sw/ull_df_types.h"
#include "ll_sw/ull_adv_internal.h"
@ -5160,15 +5164,21 @@ int hci_acl_handle(struct net_buf *buf, struct net_buf **evt)
int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
{
struct bt_hci_iso_data_hdr *iso_data_hdr;
struct isoal_sdu_tx sdu_frag_tx;
struct bt_hci_iso_hdr *iso_hdr;
uint16_t stream_handle;
struct node_tx_iso *tx;
struct ll_iso_datapath *dp_in;
struct ll_iso_stream_hdr *hdr;
uint32_t *time_stamp;
uint16_t handle;
uint8_t pb_flag;
uint8_t ts_flag;
uint8_t flags;
uint16_t slen;
uint16_t len;
iso_data_hdr = NULL;
*evt = NULL;
hdr = NULL;
dp_in = NULL;
if (buf->len < sizeof(*iso_hdr)) {
BT_ERR("No HCI ISO header");
@ -5184,113 +5194,201 @@ int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
return -EINVAL;
}
/* Assigning flags first because handle will be overwritten */
flags = bt_iso_flags(handle);
if (bt_iso_flags_ts(flags)) {
struct bt_hci_iso_ts_data_hdr *iso_ts_data_hdr;
iso_ts_data_hdr = net_buf_pull_mem(buf,
sizeof(*iso_ts_data_hdr));
}
iso_data_hdr = net_buf_pull_mem(buf, sizeof(*iso_data_hdr));
slen = iso_data_hdr->slen;
#if defined(CONFIG_BT_CTLR_ADV_ISO)
/* Check invalid BIS PDU length */
if (slen > LL_BIS_OCTETS_TX_MAX) {
BT_ERR("Invalid HCI ISO Data length");
return -EINVAL;
}
/* Get BIS stream handle and stream context */
pb_flag = bt_iso_flags_pb(flags);
ts_flag = bt_iso_flags_ts(flags);
handle = bt_iso_handle(handle);
if (handle < BT_CTLR_ADV_ISO_STREAM_HANDLE_BASE) {
return -EINVAL;
}
stream_handle = handle - BT_CTLR_ADV_ISO_STREAM_HANDLE_BASE;
struct lll_adv_iso_stream *stream;
stream = ull_adv_iso_stream_get(stream_handle);
if (!stream) {
BT_ERR("Invalid BIS stream");
return -EINVAL;
}
struct ll_adv_iso_set *adv_iso;
adv_iso = ull_adv_iso_by_stream_get(stream_handle);
if (!adv_iso) {
BT_ERR("No BIG associated with stream handle");
return -EINVAL;
}
/* Get free node tx */
tx = ll_iso_tx_mem_acquire();
if (!tx) {
BT_ERR("ISO Tx Buffer Overflow");
data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
return -ENOBUFS;
}
struct pdu_bis *pdu = (void *)tx->pdu;
/* FIXME: Update to use correct LLID for BIS and CIS */
switch (bt_iso_flags_pb(flags)) {
case BT_ISO_SINGLE:
pdu->ll_id = PDU_BIS_LLID_COMPLETE_END;
break;
default:
ll_iso_tx_mem_release(tx);
return -EINVAL;
}
pdu->len = slen;
memcpy(pdu->payload, buf->data, slen);
struct lll_adv_iso *lll_iso;
lll_iso = &adv_iso->lll;
uint64_t pkt_seq_num;
pkt_seq_num = lll_iso->payload_count / lll_iso->bn;
if (((pkt_seq_num - stream->pkt_seq_num) & BIT64_MASK(39)) <=
BIT64_MASK(38)) {
stream->pkt_seq_num = pkt_seq_num;
} else {
pkt_seq_num = stream->pkt_seq_num;
}
tx->payload_count = pkt_seq_num * lll_iso->bn;
stream->pkt_seq_num++;
#else /* CONFIG_BT_CTLR_CONN_ISO */
/* FIXME: Add Connected ISO implementation */
stream_handle = 0U;
/* NOTE: Keeping the code below to pass compilation until Connected ISO
* integration.
*/
/* Extract time stamp */
/* Set default to current time
* BT Core V5.3 : Vol 6 Low Energy Controller : Part G ISO-AL:
* 3.1 Time_Offset in framed PDUs :
* The Controller transmitting a SDU may use any of the following
* methods to determine the value of the SDU reference time:
* -- A captured time stamp of the SDU
* -- A time stamp provided by the higher layer
* -- A computed time stamp based on a sequence counter provided by the
* higher layer (Not implemented)
* -- Any other method of determining Time_Offset (Not implemented)
*/
/* Get free node tx */
tx = ll_iso_tx_mem_acquire();
if (!tx) {
BT_ERR("ISO Tx Buffer Overflow");
data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
return -ENOBUFS;
if (ts_flag) {
/* Overwrite time stamp with HCI provided time stamp */
time_stamp = net_buf_pull_mem(buf, sizeof(*time_stamp));
len -= sizeof(*time_stamp);
sdu_frag_tx.time_stamp = *time_stamp;
} else {
sdu_frag_tx.time_stamp =
HAL_TICKER_TICKS_TO_US(ticker_ticks_now_get());
}
/* Extract ISO data header if included (PB_Flag 0b00 or 0b10) */
if ((pb_flag & 0x01) == 0) {
iso_data_hdr = net_buf_pull_mem(buf, sizeof(*iso_data_hdr));
len -= sizeof(*iso_data_hdr);
sdu_frag_tx.packet_sn = iso_data_hdr->sn;
sdu_frag_tx.iso_sdu_length = iso_data_hdr->slen;
} else {
sdu_frag_tx.packet_sn = 0;
sdu_frag_tx.iso_sdu_length = 0;
}
/* Packet boundary flags should be bitwise identical to the SDU state
* 0b00 BT_ISO_START
* 0b01 BT_ISO_CONT
* 0b10 BT_ISO_SINGLE
* 0b11 BT_ISO_END
*/
sdu_frag_tx.sdu_state = pb_flag;
/* Fill in SDU buffer fields */
sdu_frag_tx.dbuf = buf->data;
sdu_frag_tx.size = len;
#if defined(CONFIG_BT_CTLR_CONN_ISO)
/* Extract source handle from CIS or BIS handle by way of header and
* data path
*/
if (IS_CIS_HANDLE(handle)) {
struct ll_conn_iso_stream *cis =
ll_iso_stream_connected_get(handle);
if (!cis) {
return -EINVAL;
}
struct ll_conn_iso_group *cig = cis->group;
hdr = &(cis->hdr);
/* Set target event as the current event. This might cause some
* misalignment between SDU interval and ISO interval in the
* case of a burst from the application or late release. However
* according to the specifications:
* BT Core V5.3 : Vol 6 Low Energy Controller : Part B LL Spec:
* 4.5.13.3 Connected Isochronous Data:
* This burst is associated with the corresponding CIS event but
* the payloads may be transmitted in later events as well.
* If flush timeout is greater than one, use the current event,
* otherwise postpone to the next.
*
* TODO: Calculate the best possible target event based on CIS
* reference, FT and event_count.
*/
sdu_frag_tx.target_event = cis->lll.event_count +
(cis->lll.tx.flush_timeout > 1 ? 0 : 1);
sdu_frag_tx.cig_ref_point = cig->cig_ref_point;
/* Get controller's input data path for CIS */
dp_in = hdr->datapath_in;
if (!dp_in || dp_in->path_id != BT_HCI_DATAPATH_ID_HCI) {
BT_ERR("Input data path not set for HCI");
return -EINVAL;
}
/* Get input data path's source handle */
isoal_source_handle_t source = dp_in->source_hdl;
/* Start Fragmentation */
if (isoal_tx_sdu_fragment(source, &sdu_frag_tx)) {
return -EINVAL;
}
/* TODO: Assign *evt if an immediate response is required */
return 0;
}
#endif /* CONFIG_BT_CTLR_CONN_ISO */
if (ll_iso_tx_mem_enqueue(stream_handle, tx)) {
BT_ERR("Invalid ISO Tx Enqueue");
ll_iso_tx_mem_release(tx);
return -EINVAL;
}
#if defined(CONFIG_BT_CTLR_ADV_ISO)
if (IS_ADV_ISO_HANDLE(handle)) {
/* FIXME: Use ISOAL */
struct node_tx_iso *tx;
uint16_t stream_handle;
uint16_t slen;
return 0;
/* FIXME: Code only expects header present */
slen = iso_data_hdr ? iso_data_hdr->slen : 0;
/* Check invalid BIS PDU length */
if (slen > LL_BIS_OCTETS_TX_MAX) {
BT_ERR("Invalid HCI ISO Data length");
return -EINVAL;
}
/* Get BIS stream handle and stream context */
handle = bt_iso_handle(handle);
if (handle < BT_CTLR_ADV_ISO_STREAM_HANDLE_BASE) {
return -EINVAL;
}
stream_handle = handle - BT_CTLR_ADV_ISO_STREAM_HANDLE_BASE;
struct lll_adv_iso_stream *stream;
stream = ull_adv_iso_stream_get(stream_handle);
if (!stream) {
BT_ERR("Invalid BIS stream");
return -EINVAL;
}
struct ll_adv_iso_set *adv_iso;
adv_iso = ull_adv_iso_by_stream_get(stream_handle);
if (!adv_iso) {
BT_ERR("No BIG associated with stream handle");
return -EINVAL;
}
/* Get free node tx */
tx = ll_iso_tx_mem_acquire();
if (!tx) {
BT_ERR("ISO Tx Buffer Overflow");
data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO);
return -ENOBUFS;
}
struct pdu_bis *pdu = (void *)tx->pdu;
/* FIXME: Update to use correct LLID for BIS and CIS */
switch (bt_iso_flags_pb(flags)) {
case BT_ISO_SINGLE:
pdu->ll_id = PDU_BIS_LLID_COMPLETE_END;
break;
default:
ll_iso_tx_mem_release(tx);
return -EINVAL;
}
pdu->len = slen;
memcpy(pdu->payload, buf->data, slen);
struct lll_adv_iso *lll_iso;
lll_iso = &adv_iso->lll;
uint64_t pkt_seq_num;
pkt_seq_num = lll_iso->payload_count / lll_iso->bn;
if (((pkt_seq_num - stream->pkt_seq_num) & BIT64_MASK(39)) <=
BIT64_MASK(38)) {
stream->pkt_seq_num = pkt_seq_num;
} else {
pkt_seq_num = stream->pkt_seq_num;
}
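/* Worked example (illustrative): the 39-bit wrap-around test above adopts
 * the computed pkt_seq_num only when it is ahead of, or equal to, the one
 * stored for the stream. E.g. stored 100, computed 103:
 * (103 - 100) & BIT64_MASK(39) = 3, which is <= BIT64_MASK(38), so 103 is
 * adopted. Stored 100, computed 98: (98 - 100) wraps to 2^39 - 2, which is
 * greater than BIT64_MASK(38), so the stored value is kept and the payload
 * counter never moves backwards.
 */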
tx->payload_count = pkt_seq_num * lll_iso->bn;
stream->pkt_seq_num++;
if (ll_iso_tx_mem_enqueue(stream_handle, tx, NULL)) {
BT_ERR("Invalid ISO Tx Enqueue");
ll_iso_tx_mem_release(tx);
return -EINVAL;
}
return 0;
}
#endif /* CONFIG_BT_CTLR_ADV_ISO */
return -EINVAL;
}
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
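Taken together, the CIS branch above boils down to filling an isoal_sdu_tx descriptor from the HCI ISO header fields and handing it to the ISO-AL source bound to the input data path. A minimal sketch of that flow, condensed from the code above (buffer management and error handling omitted):

/* Illustrative condensation of the HCI to ISO-AL TX hand-off above */
struct isoal_sdu_tx sdu_frag_tx;

sdu_frag_tx.sdu_state = pb_flag; /* PB_Flag maps 1:1 to the SDU state */
sdu_frag_tx.time_stamp = ts_flag ? *time_stamp :
			 HAL_TICKER_TICKS_TO_US(ticker_ticks_now_get());
sdu_frag_tx.packet_sn = iso_data_hdr ? iso_data_hdr->sn : 0;
sdu_frag_tx.iso_sdu_length = iso_data_hdr ? iso_data_hdr->slen : 0;
sdu_frag_tx.dbuf = buf->data;
sdu_frag_tx.size = len;
sdu_frag_tx.target_event = cis->lll.event_count +
			   (cis->lll.tx.flush_timeout > 1 ? 0 : 1);
sdu_frag_tx.cig_ref_point = cig->cig_ref_point;

/* Fragment (unframed) or segment (framed) into PDUs via the data path's source */
(void)isoal_tx_sdu_fragment(dp_in->source_hdl, &sdu_frag_tx);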

View file

@ -111,9 +111,9 @@ isoal_status_t sink_sdu_emit_hci(const struct isoal_sink *sink_ctx,
{
struct bt_hci_iso_ts_data_hdr *data_hdr;
uint16_t packet_status_flag;
uint16_t slen, slen_packed;
struct bt_hci_iso_hdr *hdr;
uint16_t handle_packed;
uint16_t slen_packed;
struct net_buf *buf;
uint16_t handle;
uint8_t ts, pb;
@ -131,12 +131,8 @@ isoal_status_t sink_sdu_emit_hci(const struct isoal_sink *sink_ctx,
return ISOAL_STATUS_OK;
}
#endif /* CONFIG_BT_CTLR_CONN_ISO_HCI_DATAPATH_SKIP_INVALID_DATA */
data_hdr = net_buf_push(buf, BT_HCI_ISO_TS_DATA_HDR_SIZE);
hdr = net_buf_push(buf, BT_HCI_ISO_HDR_SIZE);
handle = sink_ctx->session.handle;
pb = sink_ctx->sdu_production.sdu_state;
len = sink_ctx->sdu_production.sdu_written;
/*
* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
@ -154,29 +150,37 @@ isoal_status_t sink_sdu_emit_hci(const struct isoal_sink *sink_ctx,
* Time_Stamp field. This bit shall only be set if the PB_Flag field equals 0b00 or
* 0b10.
*/
ts = !(pb & 1);
ts = (pb & 0x1) == 0x0;
if (ts) {
data_hdr = net_buf_push(buf, BT_HCI_ISO_TS_DATA_HDR_SIZE);
packet_status_flag = valid_sdu->status;
/* TODO: Validity of length might need to be reconsidered here. Not handled
* in ISO-AL.
* BT Core V5.3 : Vol 4 HCI I/F : Part G HCI Func. Spec.:
* 5.4.5 HCI ISO Data packets
* If Packet_Status_Flag equals 0b10 then PB_Flag shall equal 0b10.
* When Packet_Status_Flag is set to 0b10 in packets from the Controller to
* the Host, there is no data and ISO_SDU_Length shall be set to zero.
*/
slen_packed = bt_iso_pkt_len_pack(len, packet_status_flag);
data_hdr->ts = sys_cpu_to_le32((uint32_t) valid_sdu->timestamp);
data_hdr->data.sn = sys_cpu_to_le16((uint16_t) valid_sdu->seqn);
data_hdr->data.slen = sys_cpu_to_le16(slen_packed);
len += BT_HCI_ISO_TS_DATA_HDR_SIZE;
}
hdr = net_buf_push(buf, BT_HCI_ISO_HDR_SIZE);
handle = sink_ctx->session.handle;
handle_packed = bt_iso_handle_pack(handle, pb, ts);
len = sink_ctx->sdu_production.sdu_written + BT_HCI_ISO_TS_DATA_HDR_SIZE;
hdr->handle = sys_cpu_to_le16(handle_packed);
hdr->len = sys_cpu_to_le16(len);
packet_status_flag = valid_sdu->status;
/* TODO: Validity of length might need to be reconsidered here. Not handled in
* ISO-AL.
* BT Core V5.3 : Vol 4 HCI I/F : Part G HCI Func. Spec.:
* 5.4.5 HCI ISO Data packets
* If Packet_Status_Flag equals 0b10 then PB_Flag shall equal 0b10.
* When Packet_Status_Flag is set to 0b10 in packets from the Controller to the
* Host, there is no data and ISO_SDU_Length shall be set to zero.
*/
slen = sink_ctx->sdu_production.sdu_written;
slen_packed = bt_iso_pkt_len_pack(slen, packet_status_flag);
data_hdr->ts = sys_cpu_to_le32((uint32_t) valid_sdu->timestamp);
data_hdr->data.sn = sys_cpu_to_le16((uint16_t) valid_sdu->seqn);
data_hdr->data.slen = sys_cpu_to_le16(slen_packed);
/* send fragment up the chain */
bt_recv(buf);
}
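For reference, the handle and length words written above follow the HCI ISO Data packet layout in BT Core v5.3, Vol 4, Part E, Section 5.4.5; a sketch of the bit packing the pack macros are assumed to perform (not taken from this change):

/* Assumed field layout per the spec:
 * handle_packed: Connection_Handle in bits 0..11, PB_Flag in bits 12..13,
 *                TS_Flag in bit 14
 * slen_packed:   ISO_SDU_Length in bits 0..11, Packet_Status_Flag in
 *                bits 14..15
 */
handle_packed = (handle & 0x0FFF) | ((pb & 0x3) << 12) | ((ts & 0x1) << 14);
slen_packed = (len & 0x0FFF) | ((packet_status_flag & 0x3) << 14);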

View file

@ -321,7 +321,8 @@ void ll_iso_rx_mem_release(void **node_rx);
/* Downstream - ISO Data */
void *ll_iso_tx_mem_acquire(void);
void ll_iso_tx_mem_release(void *tx);
int ll_iso_tx_mem_enqueue(uint16_t handle, void *tx);
int ll_iso_tx_mem_enqueue(uint16_t handle, void *tx, void *link);
void ll_iso_link_tx_release(void *link);
/* External co-operation */
void ll_timeslice_ticker_id_get(uint8_t * const instance_index,

View file

@ -117,6 +117,7 @@ static void isoal_sink_deallocate(isoal_sink_handle_t hdl)
*
* @param handle[in] Connection handle
* @param role[in] Peripheral, Central or Broadcast
* @param framed[in] Framed case
* @param burst_number[in] Burst Number
* @param flush_timeout[in] Flush timeout
* @param sdu_interval[in] SDU interval
@ -133,6 +134,7 @@ static void isoal_sink_deallocate(isoal_sink_handle_t hdl)
isoal_status_t isoal_sink_create(
uint16_t handle,
uint8_t role,
uint8_t framed,
uint8_t burst_number,
uint8_t flush_timeout,
uint32_t sdu_interval,
@ -155,6 +157,7 @@ isoal_status_t isoal_sink_create(
struct isoal_sink_session *session = &isoal_global.sink_state[*hdl].session;
session->handle = handle;
session->framed = framed;
/* Todo: Next section computing various constants, should potentially be a
* function in itself as a number of the dependencies could be changed while
@ -193,6 +196,11 @@ isoal_status_t isoal_sink_create(
* BIG reference anchor point +
* BIG_Sync_Delay + SDU_interval + ISO_Interval - Time_Offset.
*/
/* TODO: This needs to be rechecked.
* Latency should be in us but flush_timeout and iso_interval are
* integers.
* (i.e. Correct calculation should require iso_interval x 1250us)
*/
if (role == BT_CONN_ROLE_PERIPHERAL) {
isoal_global.sink_state[*hdl].session.latency_unframed =
stream_sync_delay + ((flush_timeout - 1) * iso_interval);
@ -479,6 +487,29 @@ static isoal_status_t isoal_rx_unframed_consume(struct isoal_sink *sink,
*/
pdu_padding = (length == 0) && (llid == PDU_BIS_LLID_START_CONTINUE) &&
(!pdu_err || sp->fsm == ISOAL_ERR_SPOOL);
seq_err = (meta->payload_number != (sp->prev_pdu_id+1));
/* If there are no buffers available, the PDUs received by the ISO-AL
* may not be in sequence even though this is expected for unframed rx.
* It would be necessary to exit the ISOAL_ERR_SPOOL state as the PDU
* count, and as a result the last_pdu detection, is no longer reliable.
*/
if (sp->fsm == ISOAL_ERR_SPOOL && !pdu_err && !seq_err &&
/* Previous sequence error should have moved to the
* ISOAL_ERR_SPOOL state and emitted the SDU in production. No
* PDU error so LLID and length are reliable and no sequence
* error so this PDU is the next in order.
*/
((sp->prev_pdu_is_end || sp->prev_pdu_is_padding) &&
((llid == PDU_BIS_LLID_START_CONTINUE && length > 0) ||
(llid == PDU_BIS_LLID_COMPLETE_END && length == 0)))) {
/* Detected a start of a new SDU as the last PDU was an end
* fragment or padding and the current is the start of a new SDU
* (either filled or zero length). Move to ISOAL_START
* immediately.
*/
sp->fsm = ISOAL_START;
}
if (sp->fsm == ISOAL_START) {
struct isoal_sdu_produced *sdu;
@ -499,7 +530,6 @@ static isoal_status_t isoal_rx_unframed_consume(struct isoal_sink *sink,
sdu->timestamp = anchorpoint + latency;
} else {
sp->pdu_cnt++;
seq_err = (meta->payload_number != (sp->prev_pdu_id+1));
}
last_pdu = (sp->pdu_cnt == session->pdus_per_sdu);
@ -564,6 +594,20 @@ static isoal_status_t isoal_rx_unframed_consume(struct isoal_sink *sink,
sp->sdu_status |= ISOAL_SDU_STATUS_LOST_DATA;
}
/* BT Core V5.3 : Vol 4 HCI I/F : Part G HCI Func. Spec.:
* 5.4.5 HCI ISO Data packets
* If Packet_Status_Flag equals 0b10 then PB_Flag shall equal 0b10.
* When Packet_Status_Flag is set to 0b10 in packets from the Controller to the
* Host, there is no data and ISO_SDU_Length shall be set to zero.
*
* TODO: Move to hci_driver to allow vendor path to have dedicated handling.
*/
if (sp->sdu_status == ISOAL_SDU_STATUS_LOST_DATA) {
sp->sdu_written = 0;
end_of_packet = 1;
length = 0;
}
/* Append valid PDU to SDU */
if (!pdu_padding) {
err |= isoal_rx_append_to_sdu(sink, pdu_meta, 0,
@ -573,6 +617,8 @@ static isoal_status_t isoal_rx_unframed_consume(struct isoal_sink *sink,
/* Update next state */
sp->fsm = next_state;
sp->prev_pdu_id = meta->payload_number;
sp->prev_pdu_is_end = !pdu_err && llid == PDU_BIS_LLID_COMPLETE_END;
sp->prev_pdu_is_padding = !pdu_err && pdu_padding;
return err;
}
@ -814,12 +860,10 @@ isoal_status_t isoal_rx_pdu_recombine(isoal_sink_handle_t sink_hdl,
const struct isoal_pdu_rx *pdu_meta)
{
struct isoal_sink *sink = &isoal_global.sink_state[sink_hdl];
isoal_status_t err = ISOAL_STATUS_ERR_SDU_ALLOC;
isoal_status_t err = ISOAL_STATUS_OK;
if (sink->sdu_production.mode != ISOAL_PRODUCTION_MODE_DISABLED) {
bool pdu_framed = (pdu_meta->pdu->ll_id == PDU_BIS_LLID_FRAMED);
if (pdu_framed) {
if (sink && sink->sdu_production.mode != ISOAL_PRODUCTION_MODE_DISABLED) {
if (sink->session.framed) {
err = isoal_rx_framed_consume(sink, pdu_meta);
} else {
err = isoal_rx_unframed_consume(sink, pdu_meta);
@ -868,6 +912,7 @@ static void isoal_source_deallocate(isoal_source_handle_t hdl)
*
* @param handle[in] Connection handle
* @param role[in] Peripheral, Central or Broadcast
* @param framed[in] Framed case
* @param burst_number[in] Burst Number
* @param flush_timeout[in] Flush timeout
* @param max_octets[in] Maximum PDU size (Max_PDU_C_To_P / Max_PDU_P_To_C)
@ -886,6 +931,7 @@ static void isoal_source_deallocate(isoal_source_handle_t hdl)
isoal_status_t isoal_source_create(
uint16_t handle,
uint8_t role,
uint8_t framed,
uint8_t burst_number,
uint8_t flush_timeout,
uint8_t max_octets,
@ -910,6 +956,9 @@ isoal_status_t isoal_source_create(
struct isoal_source_session *session = &isoal_global.source_state[*hdl].session;
session->handle = handle;
session->framed = framed;
session->burst_number = burst_number;
session->iso_interval = iso_interval;
/* Todo: Next section computing various constants, should potentially be a
* function in itself as a number of the dependencies could be changed while
@ -990,15 +1039,19 @@ void isoal_source_destroy(isoal_source_handle_t hdl)
* Queue the PDU in production in the relevant LL transmit queue. If the
* attempt to release the PDU fails, the buffer linked to the PDU will be released
* and it will not be possible to retry the emit operation on the same PDU.
* @param[in] source_ctx ISO-AL source reference for this CIS / BIS
* @param[in] produced_pdu PDU in production
* @param[in] pdu_ll_id LLID to be set indicating the type of fragment
* @param[in] payload_size Length of the data written to the PDU
* @param[in] source_ctx ISO-AL source reference for this CIS / BIS
* @param[in] produced_pdu PDU in production
* @param[in] pdu_ll_id LLID to be set indicating the type of fragment
* @param[in] sdu_fragments Number of SDU HCI fragments consumed
* @param[in] payload_number CIS / BIS payload number
* @param[in] payload_size Length of the data written to the PDU
* @return Error status of the operation
*/
static isoal_status_t isoal_tx_pdu_emit(const struct isoal_source *source_ctx,
const struct isoal_pdu_produced *produced_pdu,
const uint8_t pdu_ll_id,
const uint8_t sdu_fragments,
const uint64_t payload_number,
const isoal_pdu_len_t payload_size)
{
struct node_tx_iso *node_tx;
@ -1010,6 +1063,9 @@ static isoal_status_t isoal_tx_pdu_emit(const struct isoal_source *source_ctx,
/* Retrieve Node handle */
node_tx = produced_pdu->contents.handle;
/* Set payload number */
node_tx->payload_count = payload_number & 0x7fffffffff;
node_tx->sdu_fragments = sdu_fragments;
/* Set PDU LLID */
produced_pdu->contents.pdu->ll_id = pdu_ll_id;
/* Set PDU length */
@ -1077,17 +1133,17 @@ static isoal_status_t isoal_tx_allocate_pdu(struct isoal_source *source,
/**
* Attempt to emit the PDU in production if it is complete.
* @param[in] source ISO-AL source reference
* @param[in] end_of_sdu SDU end has been reached
* @param[in] pdu_ll_id LLID / PDU fragment type as Start, Cont, End, Single (Unframed) or Framed
* @param[in] source ISO-AL source reference
* @param[in] force_emit Request PDU emit
* @param[in] pdu_ll_id LLID / PDU fragment type as Start, Cont, End, Single (Unframed) or Framed
* @return Error status of operation
*/
static isoal_status_t isoal_tx_try_emit_pdu(struct isoal_source *source,
bool end_of_sdu,
bool force_emit,
uint8_t pdu_ll_id)
{
struct isoal_pdu_production *pp;
struct isoal_pdu_produced *pdu;
isoal_status_t err;
err = ISOAL_STATUS_OK;
@ -1095,14 +1151,19 @@ static isoal_status_t isoal_tx_try_emit_pdu(struct isoal_source *source,
pdu = &pp->pdu;
/* Emit a PDU */
const bool pdu_complete = (pp->pdu_available == 0) || end_of_sdu;
const bool pdu_complete = (pp->pdu_available == 0) || force_emit;
if (end_of_sdu) {
if (force_emit) {
pp->pdu_available = 0;
}
if (pdu_complete) {
err = isoal_tx_pdu_emit(source, pdu, pdu_ll_id, pp->pdu_written);
/* Emit PDU and increment the payload number */
err = isoal_tx_pdu_emit(source, pdu, pdu_ll_id,
pp->sdu_fragments,
pp->payload_number,
pp->pdu_written);
pp->payload_number++;
}
return err;
@ -1160,10 +1221,25 @@ static isoal_status_t isoal_tx_unframed_produce(struct isoal_source *source,
*/
session->seqn++;
/* Update payload counter in case time has passed since last
* SDU. This should mean that event count * burst number should
* be greater than the current payload number. In the event of
* an SDU interval smaller than the ISO interval, multiple SDUs
* will be sent in the same event. As such the current payload
* number should be retained. Payload numbers are indexed at 0
* and valid until the PDU is emitted.
*/
pp->payload_number = MAX(pp->payload_number,
(tx_sdu->target_event * session->burst_number));
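/* Example (illustrative): with burst_number = 2 and target_event = 50 the
 * floor is 100; if a second SDU arrives within the same ISO interval, the
 * already advanced payload_number (e.g. 101 after the first PDU emit) is
 * retained by the MAX().
 */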
/* Reset PDU fragmentation count for this SDU */
pp->pdu_cnt = 0;
pp->sdu_fragments = 0;
}
pp->sdu_fragments++;
/* PDUs should be created until the SDU fragment has been fragmented or
* if this is the last fragment of the SDU, until the required padding
* PDU(s) are sent.
@ -1172,7 +1248,6 @@ static isoal_status_t isoal_tx_unframed_produce(struct isoal_source *source,
((packet_available > 0) || padding_pdu || zero_length_sdu)) {
const isoal_status_t err_alloc = isoal_tx_allocate_pdu(source, tx_sdu);
struct isoal_pdu_produced *pdu = &pp->pdu;
err |= err_alloc;
/*
@ -1246,6 +1321,316 @@ static isoal_status_t isoal_tx_unframed_produce(struct isoal_source *source,
return err;
}
/**
* @brief Inserts a segmentation header at the current write point in the PDU
* under production.
* @param source source handle
* @param sc start / continuation bit value to be written
* @param cmplt complete bit value to be written
* @param time_offset value of time offset to be written
* @return status
*/
static isoal_status_t isoal_insert_seg_header_timeoffset(struct isoal_source *source,
const bool sc,
const bool cmplt,
const uint32_t time_offset)
{
struct isoal_source_session *session;
struct isoal_pdu_production *pp;
struct isoal_pdu_produced *pdu;
struct pdu_iso_sdu_sh seg_hdr;
isoal_status_t err;
uint8_t write_size;
session = &source->session;
pp = &source->pdu_production;
pdu = &pp->pdu;
write_size = PDU_ISO_SEG_HDR_SIZE + (sc ? 0 : PDU_ISO_SEG_TIMEOFFSET_SIZE);
memset(&seg_hdr, 0, sizeof(seg_hdr));
/* Check if there is enough space left in the PDU. This should not fail
* as the caller should also check before requesting insertion of a
* new header.
*/
if (pp->pdu_available < write_size) {
return ISOAL_STATUS_ERR_UNSPECIFIED;
}
seg_hdr.sc = sc;
seg_hdr.cmplt = cmplt;
seg_hdr.length = sc ? 0 : PDU_ISO_SEG_TIMEOFFSET_SIZE;
if (!sc) {
seg_hdr.timeoffset = time_offset;
}
/* Store header */
pp->seg_hdr_sc = seg_hdr.sc;
pp->seg_hdr_length = seg_hdr.length;
/* Save location of last segmentation header so that it can be updated
* as data is written.
*/
pp->last_seg_hdr_loc = pp->pdu_written;
/* Write to PDU */
err = session->pdu_write(&pdu->contents,
pp->pdu_written,
(uint8_t *) &seg_hdr,
write_size);
pp->pdu_written += write_size;
pp->pdu_available -= write_size;
return err;
}
/**
* @brief Updates the cmplt flag and length in the last segmentation header written
* @param source source handle
* @param cmplt new value for the complete flag
* @param add_length length to add
* @return status
*/
static isoal_status_t isoal_update_seg_header_cmplt_length(struct isoal_source *source,
const bool cmplt,
const uint8_t add_length)
{
struct isoal_source_session *session;
struct isoal_pdu_production *pp;
struct isoal_pdu_produced *pdu;
struct pdu_iso_sdu_sh seg_hdr;
session = &source->session;
pp = &source->pdu_production;
pdu = &pp->pdu;
memset(&seg_hdr, 0, sizeof(seg_hdr));
seg_hdr.sc = pp->seg_hdr_sc;
/* Update the complete flag and length */
seg_hdr.cmplt = cmplt;
pp->seg_hdr_length += add_length;
seg_hdr.length = pp->seg_hdr_length;
/* Re-write the segmentation header at the same location */
return session->pdu_write(&pdu->contents,
pp->last_seg_hdr_loc,
(uint8_t *) &seg_hdr,
PDU_ISO_SEG_HDR_SIZE);
}
/**
* @brief Fragment received SDU and produce framed PDUs
* @details Destination source may have an already partially built PDU
*
* @param source[in,out] Destination source with bookkeeping state
* @param tx_sdu[in] SDU with packet boundary information
*
* @return Status
*/
static isoal_status_t isoal_tx_framed_produce(struct isoal_source *source,
const struct isoal_sdu_tx *tx_sdu)
{
struct isoal_source_session *session;
struct isoal_pdu_production *pp;
isoal_sdu_len_t packet_available;
const uint8_t *sdu_payload;
uint32_t time_offset;
bool zero_length_sdu;
isoal_status_t err;
bool padding_pdu;
uint8_t ll_id;
session = &source->session;
pp = &source->pdu_production;
padding_pdu = false;
err = ISOAL_STATUS_OK;
time_offset = 0;
packet_available = tx_sdu->size;
sdu_payload = tx_sdu->dbuf;
LL_ASSERT(sdu_payload);
zero_length_sdu = (packet_available == 0 &&
tx_sdu->sdu_state == BT_ISO_SINGLE);
if (tx_sdu->sdu_state == BT_ISO_START ||
tx_sdu->sdu_state == BT_ISO_SINGLE) {
/* Start of a new SDU */
/* Initialize to info provided in SDU */
uint32_t actual_cig_ref_point = tx_sdu->cig_ref_point;
uint64_t actual_event = tx_sdu->target_event;
/* Update sequence number for received SDU
*
* BT Core V5.3 : Vol 6 Low Energy Controller : Part G ISO-AL:
* 2 ISOAL Features :
* SDUs received by the ISOAL from the upper layer shall be
* given a sequence number which is initialized to 0 when the
* CIS or BIS is created.
*
* NOTE: The upper layer may synchronize its sequence number
* with the sequence number in the ISOAL once the Datapath is
* configured and the link is established.
*/
session->seqn++;
/* Reset PDU production state */
pp->pdu_state = BT_ISO_START;
/* Update payload counter in case time has passed since the last
* SDU. This should mean that event count * burst number should
* be greater than the current payload number. In the event of
* an SDU interval smaller than the ISO interval, multiple SDUs
* will be sent in the same event. As such the current payload
* number should be retained. Payload numbers are indexed at 0
* and valid until the PDU is emitted.
*/
pp->payload_number = MAX(pp->payload_number,
(tx_sdu->target_event * session->burst_number));
/* Get actual event for this payload number */
actual_event = pp->payload_number / session->burst_number;
/* Get cig reference point for this PDU based on the actual
* event being set. This might introduce some errors as the cig
* reference point for future events could drift. However, as the
* time offset calculation requires an absolute value, this
* seems to be the best candidate.
*/
if (actual_event > tx_sdu->target_event) {
actual_cig_ref_point = tx_sdu->cig_ref_point +
((actual_event - tx_sdu->target_event) * session->iso_interval *
CONN_INT_UNIT_US);
}
/* Check if time stamp on packet is later than the CIG reference
* point and adjust targets. This could happen if the SDU has
* been time-stamped at the controller when received via HCI.
*
* BT Core V5.3 : Vol 6 Low Energy Controller : Part G ISO-AL:
* 3.1 Time_Offset in framed PDUs :
* The Time_Offset shall be a positive value.
*/
if (actual_cig_ref_point <= tx_sdu->time_stamp) {
/* Advance target to next event */
actual_event++;
actual_cig_ref_point += session->iso_interval * CONN_INT_UNIT_US;
/* Set payload number */
pp->payload_number = actual_event * session->burst_number;
}
/* Calculate the time offset */
LL_ASSERT(actual_cig_ref_point > tx_sdu->time_stamp);
time_offset = actual_cig_ref_point - tx_sdu->time_stamp;
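/* Numeric example (illustrative): with iso_interval = 8 (8 * 1250 us =
 * 10 ms, assuming CONN_INT_UNIT_US is 1250 us), actual_cig_ref_point =
 * 1 000 000 us and an SDU time stamp of 1 003 000 us, the reference point
 * is not later than the time stamp, so the target advances one event:
 * actual_cig_ref_point becomes 1 010 000 us and
 * time_offset = 1 010 000 - 1 003 000 = 7 000 us.
 */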
/* Reset PDU fragmentation count for this SDU */
pp->pdu_cnt = 0;
pp->sdu_fragments = 0;
}
pp->sdu_fragments++;
/* PDUs should be created until the SDU fragment has been fragmented or if
* this is the last fragment of the SDU, until the required padding PDU(s)
* are sent.
*/
while ((err == ISOAL_STATUS_OK) &&
((packet_available > 0) || padding_pdu || zero_length_sdu)) {
const isoal_status_t err_alloc = isoal_tx_allocate_pdu(source, tx_sdu);
struct isoal_pdu_produced *pdu = &pp->pdu;
err |= err_alloc;
if (pp->pdu_state == BT_ISO_START) {
/* Start of a new SDU. Segmentation header and time-offset
* should be inserted.
*/
err |= isoal_insert_seg_header_timeoffset(source,
false, false,
time_offset);
pp->pdu_state = BT_ISO_CONT;
} else if (!padding_pdu && pp->pdu_state == BT_ISO_CONT && pp->pdu_written == 0) {
/* Continuing an SDU in a new PDU. Segmentation header
* alone should be inserted.
*/
err |= isoal_insert_seg_header_timeoffset(source,
true, false,
0);
}
/*
* For this PDU we can only consume part of the packet, bounded by:
* - What can fit in the destination PDU.
* - What remains of the packet.
*/
const size_t consume_len = MIN(
packet_available,
pp->pdu_available
);
if (consume_len > 0) {
err |= session->pdu_write(&pdu->contents,
pp->pdu_written,
sdu_payload,
consume_len);
sdu_payload += consume_len;
pp->pdu_written += consume_len;
pp->pdu_available -= consume_len;
packet_available -= consume_len;
}
/* End of the SDU is reached at the end of the last SDU fragment
* or if this is a single fragment SDU
*/
bool end_of_sdu = (packet_available == 0) &&
((tx_sdu->sdu_state == BT_ISO_SINGLE) ||
(tx_sdu->sdu_state == BT_ISO_END));
/* Update complete flag in last segmentation header */
err |= isoal_update_seg_header_cmplt_length(source, end_of_sdu, consume_len);
/* LLID is fixed for framed PDUs */
ll_id = PDU_BIS_LLID_FRAMED;
/* NOTE: Ideally even if the end of the SDU is reached, the PDU
* should not be emitted as long as there is space left. If the
* PDU is not released, it might require a flush timeout to
* trigger the release as receiving an SDU per SDU interval is
* not guaranteed. As there is no trigger for this in the
* ISO-AL, the PDU is released. This does mean that the
* bandwidth of this implementation will be less than the ideal
* supported by framed PDUs. Ideally ISOAL_SEGMENT_MIN_SIZE
* should be used to assess if there is sufficient usable space
* left in the PDU.
*/
bool release_pdu = end_of_sdu;
const isoal_status_t err_emit = isoal_tx_try_emit_pdu(source, release_pdu, ll_id);
err |= err_emit;
/* TODO: Send padding PDU(s) if required
*
* BT Core V5.3 : Vol 6 Low Energy Controller : Part G ISO-AL:
* 2 ISOAL Features :
* Padding is required when the data does not add up to the
* configured number of PDUs that are specified in the BN
* parameter per CIS or BIS event.
*
* It is not clear when padding PDUs, as opposed to null PDUs, are
* required for framed production.
*/
padding_pdu = false;
zero_length_sdu = false;
}
return err;
}
/**
* @brief Deep copy a SDU, fragment into PDU(s)
@ -1256,14 +1641,16 @@ static isoal_status_t isoal_tx_unframed_produce(struct isoal_source *source,
* @return Status
*/
isoal_status_t isoal_tx_sdu_fragment(isoal_source_handle_t source_hdl,
const struct isoal_sdu_tx *tx_sdu)
struct isoal_sdu_tx *tx_sdu)
{
struct isoal_source *source = &isoal_global.source_state[source_hdl];
isoal_status_t err = ISOAL_STATUS_ERR_PDU_ALLOC;
struct isoal_source *source;
isoal_status_t err;
source = &isoal_global.source_state[source_hdl];
err = ISOAL_STATUS_ERR_PDU_ALLOC;
if (source->pdu_production.mode != ISOAL_PRODUCTION_MODE_DISABLED) {
/* TODO: consider how to separate framed and unframed production
* BT Core V5.3 : Vol 6 Low Energy Controller : Part G IS0-AL:
/* BT Core V5.3 : Vol 6 Low Energy Controller : Part G ISO-AL:
* 2 ISOAL Features :
* (1) Unframed PDUs shall only be used when the ISO_Interval
* is equal to or an integer multiple of the SDU_Interval
@ -1273,10 +1660,8 @@ isoal_status_t isoal_tx_sdu_fragment(isoal_source_handle_t source_hdl,
* (2) When the Host requests the use of framed PDUs, the
* Controller shall use framed PDUs.
*/
bool pdu_framed = false;
if (pdu_framed) {
/* TODO: add framed handling */
if (source->session.framed) {
err = isoal_tx_framed_produce(source, tx_sdu);
} else {
err = isoal_tx_unframed_produce(source, tx_sdu);
}

View file

@ -171,6 +171,18 @@ struct isoal_sdu_tx {
* can be directly assigned to the SDU state
*/
uint8_t sdu_state;
/** Packet sequence number from HCI ISO Data Header (ISO Data Load
* Field)
*/
uint16_t packet_sn;
/** ISO SDU length from HCI ISO Data Header (ISO Data Load Field) */
uint16_t iso_sdu_length;
/** Time stamp from HCI or vendor specific path (us) */
uint32_t time_stamp;
/** CIG Reference of target event (us, compensated for drift) */
uint32_t cig_ref_point;
/** Target Event of SDU */
uint64_t target_event:39;
};
@ -236,6 +248,7 @@ struct isoal_sink_session {
isoal_sdu_cnt_t seqn;
uint16_t handle;
uint8_t pdus_per_sdu;
uint8_t framed;
uint32_t latency_unframed;
uint32_t latency_framed;
};
@ -247,6 +260,9 @@ struct isoal_sdu_production {
struct isoal_sdu_produced sdu;
/* Bookkeeping */
isoal_pdu_cnt_t prev_pdu_id : 39;
/* Assumes that isoal_pdu_cnt_t is a uint64_t bit field */
uint64_t prev_pdu_is_end:1;
uint64_t prev_pdu_is_padding:1;
enum {
ISOAL_START,
ISOAL_CONTINUE,
@ -341,6 +357,9 @@ struct isoal_source_session {
struct isoal_source_config param;
isoal_sdu_cnt_t seqn;
uint16_t handle;
uint16_t iso_interval;
uint8_t framed;
uint8_t burst_number;
uint8_t pdus_per_sdu;
uint8_t max_pdu_size;
uint32_t latency_unframed;
@ -350,12 +369,19 @@ struct isoal_source_session {
struct isoal_pdu_production {
/* Permit atomic enable/disable of PDU production */
volatile isoal_production_mode_t mode;
/* We are constructing a PDU from {<1 or =1 or >1} SDUs */
struct isoal_pdu_produced pdu;
uint8_t pdu_state;
/* PDUs produced for current SDU */
uint8_t pdu_cnt;
uint64_t payload_number:39;
uint64_t seg_hdr_sc:1;
uint64_t seg_hdr_length:8;
uint64_t sdu_fragments:8;
isoal_pdu_len_t pdu_written;
isoal_pdu_len_t pdu_available;
/* Location (byte index) of last segmentation header */
isoal_pdu_len_t last_seg_hdr_loc;
};
struct isoal_source {
@ -372,6 +398,7 @@ isoal_status_t isoal_reset(void);
isoal_status_t isoal_sink_create(uint16_t handle,
uint8_t role,
uint8_t framed,
uint8_t burst_number,
uint8_t flush_timeout,
uint32_t sdu_interval,
@ -406,6 +433,7 @@ isoal_status_t sink_sdu_write_hci(void *dbuf,
isoal_status_t isoal_source_create(uint16_t handle,
uint8_t role,
uint8_t framed,
uint8_t burst_number,
uint8_t flush_timeout,
uint8_t max_octets,
@ -428,7 +456,7 @@ void isoal_source_disable(isoal_source_handle_t hdl);
void isoal_source_destroy(isoal_source_handle_t hdl);
isoal_status_t isoal_tx_sdu_fragment(isoal_source_handle_t source_hdl,
const struct isoal_sdu_tx *tx_sdu);
struct isoal_sdu_tx *tx_sdu);
void isoal_tx_pdu_release(isoal_source_handle_t source_hdl,
struct node_tx_iso *node_tx);

View file

@ -376,9 +376,9 @@ struct node_rx_ftr {
/* Meta-information for isochronous PDUs in node_rx_hdr */
struct node_rx_iso_meta {
uint64_t payload_number : 39; /* cisPayloadNumber */
uint32_t timestamp; /* Time of reception */
uint8_t status; /* Status of reception (OK/not OK) */
uint64_t payload_number:39; /* cisPayloadNumber */
uint64_t status:8; /* Status of reception (OK/not OK) */
uint32_t timestamp; /* Time of reception */
};
/* Define invalid/unassigned Controller state/role instance handle */
@ -532,6 +532,8 @@ static inline void lll_hdr_init(void *lll, void *parent)
#define iso_rx_sched() ll_rx_sched()
#endif /* CONFIG_BT_CTLR_ISO_VENDOR_DATA_PATH */
struct node_tx_iso;
void lll_done_score(void *param, uint8_t result);
int lll_init(void);
@ -570,6 +572,8 @@ void ull_rx_sched(void);
void ull_rx_sched_done(void);
void ull_iso_rx_put(memq_link_t *link, void *rx);
void ull_iso_rx_sched(void);
void *ull_iso_tx_ack_dequeue(void);
void ull_iso_lll_ack_enqueue(uint16_t handle, struct node_tx_iso *tx);
struct event_done_extra *ull_event_done_extra_get(void);
struct event_done_extra *ull_done_extra_type_set(uint8_t type);
void *ull_event_done(void *param);

View file

@ -5,10 +5,11 @@
*/
struct lll_conn_iso_stream_rxtx {
uint8_t phy; /* PHY */
uint8_t burst_number; /* Burst number (BN) */
uint8_t flush_timeout; /* Flush timeout (FT) */
uint8_t max_octets; /* Maximum PDU size */
uint64_t payload_number:39; /* cisPayloadNumber */
uint8_t phy; /* PHY */
uint8_t burst_number; /* Burst number (BN) */
uint8_t flush_timeout; /* Flush timeout (FT) */
uint8_t max_octets; /* Maximum PDU size */
};
struct lll_conn_iso_stream {
@ -27,12 +28,13 @@ struct lll_conn_iso_stream {
/* Event and payload counters */
uint64_t event_count : 39; /* cisEventCount */
uint64_t rx_payload_number : 39; /* cisPayloadNumber */
/* Acknowledgment and flow control */
uint8_t sn:1; /* Sequence number */
uint8_t nesn:1; /* Next expected sequence number */
uint8_t cie:1; /* Close isochronous event */
uint8_t flushed:1; /* 1 if CIS LLL has been flushed */
uint8_t datapath_ready_rx:1;/* 1 if datapath for RX is ready */
/* Resumption information */
uint8_t next_subevent; /* Next subevent to schedule */

View file

@ -12,5 +12,6 @@ struct node_tx_iso {
};
uint64_t payload_count:39; /* bisPayloadCounter/cisPayloadCounter */
uint64_t sdu_fragments : 8;
uint8_t pdu[];
};

View file

@ -688,25 +688,23 @@ struct pdu_data_llctrl_cte_rsp {
} __packed;
struct pdu_data_llctrl_cis_req {
uint8_t cig_id;
uint8_t cis_id;
uint8_t c_phy;
uint8_t p_phy;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
uint16_t c_max_sdu:12;
uint16_t rfu0:3;
uint16_t framed:1;
uint16_t p_max_sdu:12;
uint16_t rfu1:4;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
uint16_t framed:1;
uint16_t rfu0:3;
uint16_t c_max_sdu:12;
uint16_t rfu1:4;
uint16_t p_max_sdu:12;
#else
#error "Unsupported endianness"
#endif
uint8_t cig_id;
uint8_t cis_id;
uint8_t c_phy;
uint8_t p_phy;
/* c_max_sdu:12
* rfu:3
* framed:1
* NOTE: This layout as bitfields is not portable for BE using
* endianness conversion macros.
*/
uint8_t c_max_sdu_packed[2];
/* p_max_sdu:12
* rfu:4
* NOTE: This layout as bitfields is not portable for BE using
* endianness conversion macros.
*/
uint8_t p_max_sdu[2];
uint8_t c_sdu_interval[3];
uint8_t p_sdu_interval[3];
uint16_t c_max_pdu;
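/* Illustrative (not part of this change): without bitfields the 12-bit
 * values above can be read as, e.g.,
 * c_max_sdu = ((uint16_t)(c_max_sdu_packed[1] & 0x0F) << 8) |
 *             c_max_sdu_packed[0];
 * which is how cis_req_recv() unpacks them further down in this change;
 * the framed flag would be bit 7 of c_max_sdu_packed[1], assuming the
 * little-endian air-interface layout noted in the comments above.
 */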
@ -904,6 +902,15 @@ struct pdu_iso_sdu_sh {
#endif /* __BYTE_ORDER__ */
} __packed;
enum pdu_cis_llid {
/** Unframed complete or end fragment */
PDU_CIS_LLID_COMPLETE_END = 0x00,
/** Unframed start or continuation fragment */
PDU_CIS_LLID_START_CONTINUE = 0x01,
/** Framed; one or more segments of a SDU */
PDU_CIS_LLID_FRAMED = 0x02
};
struct pdu_cis {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
uint8_t ll_id:2;

View file

@ -40,6 +40,7 @@
#include "lll/lll_df_types.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"
#include "lll_iso_tx.h"
#include "lll_conn.h"
#include "lll_df.h"
@ -468,12 +469,25 @@ static MEMQ_DECLARE(ull_done);
#if defined(CONFIG_BT_CONN)
static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
static MFIFO_DEFINE(tx_ack, sizeof(struct lll_tx),
CONFIG_BT_BUF_ACL_TX_COUNT);
static void *mark_update;
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_CONN_ISO)
#if defined(CONFIG_BT_CONN)
#define BT_BUF_ACL_TX_COUNT CONFIG_BT_BUF_ACL_TX_COUNT
#else
#define BT_BUF_ACL_TX_COUNT 0
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_CTLR_CONN_ISO)
#define BT_CTLR_ISO_TX_BUFFERS CONFIG_BT_CTLR_ISO_TX_BUFFERS
#else
#define BT_CTLR_ISO_TX_BUFFERS 0
#endif /* CONFIG_BT_CTLR_CONN_ISO */
static MFIFO_DEFINE(tx_ack, sizeof(struct lll_tx),
BT_BUF_ACL_TX_COUNT + BT_CTLR_ISO_TX_BUFFERS);
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_CONN_ISO */
static void *mark_disable;
static inline int init_reset(void);
@ -861,13 +875,6 @@ ll_rx_get_again:
*node_rx = NULL;
#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
cmplt = ull_iso_tx_ack_get(handle);
if (cmplt) {
return cmplt;
}
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
link = memq_peek(memq_ll_rx.head, memq_ll_rx.tail, (void **)&rx);
if (link) {
#if defined(CONFIG_BT_CONN)
@ -2392,13 +2399,43 @@ static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last)
*handle = tx->handle;
cmplt = 0U;
do {
struct node_tx *node_tx;
struct pdu_data *p;
struct node_tx *tx_node;
node_tx = tx->node;
p = (void *)node_tx->pdu;
if (!node_tx || (node_tx == (void *)1) ||
(((uint32_t)node_tx & ~3) &&
#if defined(CONFIG_BT_CTLR_BROADCAST_ISO) || \
defined(CONFIG_BT_CTLR_CONN_ISO)
if (IS_CIS_HANDLE(tx->handle) ||
IS_ADV_ISO_HANDLE(tx->handle)) {
struct node_tx_iso *tx_node_iso;
uint8_t fragments;
tx_node_iso = tx->node;
p = (void *)tx_node_iso->pdu;
/* TODO: We may need something more advanced for framed */
if (p->ll_id == PDU_CIS_LLID_COMPLETE_END ||
p->ll_id == PDU_BIS_LLID_COMPLETE_END) {
/* We must count each SDU HCI fragment */
fragments = tx_node_iso->sdu_fragments;
if (fragments == 0) {
/* FIXME: If ISOAL is not used for TX,
* sdu_fragments is not incremented. In
* that case we assume unfragmented for
* now.
*/
fragments = 1;
}
cmplt += fragments;
}
ll_iso_link_tx_release(tx_node_iso->link);
ll_iso_tx_mem_release(tx_node_iso);
goto next_ack;
}
#endif /* CONFIG_BT_CTLR_BROADCAST_ISO || CONFIG_BT_CTLR_CONN_ISO */
tx_node = tx->node;
p = (void *)tx_node->pdu;
if (!tx_node || (tx_node == (void *)1) ||
(((uint32_t)tx_node & ~3) &&
(p->ll_id == PDU_DATA_LLID_DATA_START ||
p->ll_id == PDU_DATA_LLID_DATA_CONTINUE))) {
/* data packet, hence count num cmplt */
@ -2409,10 +2446,15 @@ static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last)
tx->node = (void *)2;
}
if (((uint32_t)node_tx & ~3)) {
ll_tx_mem_release(node_tx);
if (((uint32_t)tx_node & ~3)) {
ll_tx_mem_release(tx_node);
}
#if defined(CONFIG_BT_CTLR_BROADCAST_ISO) || \
defined(CONFIG_BT_CTLR_CONN_ISO)
next_ack:
#endif /* CONFIG_BT_CTLR_BROADCAST_ISO || CONFIG_BT_CTLR_CONN_ISO */
tx = mfifo_dequeue_iter_get(mfifo_tx_ack.m, mfifo_tx_ack.s,
mfifo_tx_ack.n, mfifo_tx_ack.f,
last, first);

View file

@ -145,7 +145,8 @@ static inline void event_phy_upd_ind_prep(struct ll_conn *conn,
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
static inline void event_send_cis_rsp(struct ll_conn *conn);
static inline void event_send_cis_rsp(struct ll_conn *conn,
uint16_t event_counter);
static inline void event_peripheral_iso_prep(struct ll_conn *conn,
uint16_t event_counter,
uint32_t ticks_at_expire);
@ -531,7 +532,12 @@ uint8_t ll_terminate_ind_send(uint16_t handle, uint8_t reason)
{
struct ll_conn *conn;
if (!IS_ACL_HANDLE(handle)) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
@ -561,7 +567,6 @@ uint8_t ll_terminate_ind_send(uint16_t handle, uint8_t reason)
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
ull_periph_latency_cancel(conn, handle);
}
return 0;
}
@ -1140,18 +1145,18 @@ int ull_conn_llcp(struct ll_conn *conn, uint32_t ticks_at_expire, uint16_t lazy)
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
} else if (conn->llcp_cis.req != conn->llcp_cis.ack) {
struct lll_conn *lll = &conn->lll;
uint16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
if (conn->llcp_cis.state == LLCP_CIS_STATE_RSP_WAIT) {
/* Handle CIS response */
event_send_cis_rsp(conn);
event_send_cis_rsp(conn, event_counter);
} else if (conn->llcp_cis.state ==
LLCP_CIS_STATE_INST_WAIT) {
struct lll_conn *lll = &conn->lll;
uint16_t event_counter;
/* Calculate current event counter */
event_counter = lll->event_counter +
lll->latency_prepare + lazy;
/* Start CIS peripheral */
event_peripheral_iso_prep(conn,
event_counter,
@ -6108,30 +6113,56 @@ static inline uint8_t phy_upd_ind_recv(struct ll_conn *conn, memq_link_t *link,
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
void event_send_cis_rsp(struct ll_conn *conn)
void event_send_cis_rsp(struct ll_conn *conn, uint16_t event_counter)
{
struct node_tx *tx;
/* If waiting for accept/reject from host, do nothing */
if (((conn->llcp_cis.req - conn->llcp_cis.ack) & 0xFF) == CIS_REQUEST_AWAIT_HOST) {
if (((conn->llcp_cis.req - conn->llcp_cis.ack) & 0xFF) ==
CIS_REQUEST_AWAIT_HOST) {
return;
}
tx = mem_acquire(&mem_conn_tx_ctrl.free);
if (tx) {
struct pdu_data *pdu = (void *)tx->pdu;
uint16_t conn_event_count;
ull_pdu_data_init(pdu);
pdu->ll_id = PDU_DATA_LLID_CTRL;
pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CIS_RSP;
/* Try to request extra time to setup the CIS. If central's
* CIS_IND is delayed, or it decides to do differently, this
* still might not be possible. Only applies if instance is
* less than two events in the future.
*
* In the example below it is shown how the CIS_IND is adjusted
* by peripheral increasing the event_counter in the CIS_RSP.
* This improves the peripheral's chances of setting up the CIS
* in due time. Current event counter is left most column.
*
* Without correction (LATE) With correction (OK)
* --------------------------------------------------------
* 10 ==> CIS_REQ E=15 10 ==> CIS_REQ E=15
* 14 <== CIS_RSP E=15 14 <== CIS_RSP E=16 (14+2)
* 15 ==> CIS_IND E=16 15 ==> CIS_IND E=17
* 16 ==> (+ offset) First PDU 16 Peripheral setup
* 16 Peripheral setup 17 ==> (+ offset) First PDU
* 17 Peripheral ready
*
* TODO: Port to new LLCP procedures
*/
conn_event_count = MAX(conn->llcp_cis.conn_event_count,
event_counter + 2);
sys_put_le24(conn->llcp_cis.cis_offset_min,
pdu->llctrl.cis_rsp.cis_offset_min);
sys_put_le24(conn->llcp_cis.cis_offset_max,
pdu->llctrl.cis_rsp.cis_offset_max);
pdu->llctrl.cis_rsp.conn_event_count =
sys_cpu_to_le16(conn->llcp_cis.conn_event_count);
sys_cpu_to_le16(conn_event_count);
pdu->len = offsetof(struct pdu_data_llctrl, cis_rsp) +
sizeof(struct pdu_data_llctrl_cis_rsp);
@ -6145,8 +6176,26 @@ void event_send_cis_rsp(struct ll_conn *conn)
void event_peripheral_iso_prep(struct ll_conn *conn, uint16_t event_counter,
uint32_t ticks_at_expire)
{
if (event_counter == conn->llcp_cis.conn_event_count) {
ull_peripheral_iso_start(conn, ticks_at_expire);
struct ll_conn_iso_group *cig;
uint16_t start_event_count;
start_event_count = conn->llcp_cis.conn_event_count;
cig = ll_conn_iso_group_get_by_id(conn->llcp_cis.cig_id);
LL_ASSERT(cig);
if (!cig->started) {
/* Start ISO peripheral one event before the requested instant
* for first CIS. This is done to be able to accept small CIS
* offsets.
*/
start_event_count--;
}
/* Start ISO peripheral one event before the requested instant */
if (event_counter == start_event_count) {
/* Start CIS peripheral */
ull_peripheral_iso_start(conn, ticks_at_expire, conn->llcp_cis.cis_handle);
conn->llcp_cis.state = LLCP_CIS_STATE_REQ;
conn->llcp_cis.ack = conn->llcp_cis.req;
@ -6163,17 +6212,17 @@ static uint8_t cis_req_recv(struct ll_conn *conn, memq_link_t *link,
void *node;
conn->llcp_cis.cig_id = req->cig_id;
conn->llcp_cis.framed = req->framed;
conn->llcp_cis.c_max_sdu = sys_le16_to_cpu(req->c_max_sdu);
conn->llcp_cis.p_max_sdu = sys_le16_to_cpu(req->p_max_sdu);
conn->llcp_cis.c_max_sdu = (uint16_t)(req->c_max_sdu_packed[1] & 0x0F) << 8 |
req->c_max_sdu_packed[0];
conn->llcp_cis.p_max_sdu = (uint16_t)(req->p_max_sdu[1] & 0x0F) << 8 | req->p_max_sdu[0];
conn->llcp_cis.cis_offset_min = sys_get_le24(req->cis_offset_min);
conn->llcp_cis.cis_offset_max = sys_get_le24(req->cis_offset_max);
conn->llcp_cis.conn_event_count =
sys_le16_to_cpu(req->conn_event_count);
conn->llcp_cis.conn_event_count = sys_le16_to_cpu(req->conn_event_count);
/* Acquire resources for new CIS */
err = ull_peripheral_iso_acquire(conn, &pdu->llctrl.cis_req, &cis_handle);
if (err) {
(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
return err;
}
@ -6192,7 +6241,7 @@ static uint8_t cis_req_recv(struct ll_conn *conn, memq_link_t *link,
conn_iso_req = node;
conn_iso_req->cig_id = req->cig_id;
conn_iso_req->cis_id = req->cis_id;
conn_iso_req->cis_handle = sys_le16_to_cpu(cis_handle);
conn_iso_req->cis_handle = cis_handle;
return 0;
}
@ -7518,7 +7567,13 @@ static inline int ctrl_rx(memq_link_t *link, struct node_rx_pdu **rx,
err = cis_req_recv(conn, link, rx, pdu_rx);
if (err) {
conn->llcp_terminate.reason_final = err;
if (err == BT_HCI_ERR_INVALID_LL_PARAM) {
nack = reject_ext_ind_send(conn, *rx,
PDU_DATA_LLCTRL_TYPE_CIS_REQ,
BT_HCI_ERR_UNSUPP_LL_PARAM_VAL);
} else {
conn->llcp_terminate.reason_final = err;
}
}
break;
}

View file

@ -6,6 +6,7 @@
#include <zephyr.h>
#include <sys/byteorder.h>
#include <bluetooth/bluetooth.h>
#include "util/mem.h"
#include "util/memq.h"
@ -27,11 +28,23 @@
#include "ull_internal.h"
#include "lll/lll_vendor.h"
#include "ll.h"
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#define LOG_MODULE_NAME bt_ctlr_ull_conn_iso
#include "common/log.h"
#include "hal/debug.h"
/* Used by LISTIFY */
#define _INIT_MAYFLY_ARRAY(_i, _l, _fp) \
{ ._link = &_l[_i], .fp = _fp },
/* Declare static initialized array of mayflies with associated link element */
#define DECLARE_MAYFLY_ARRAY(_name, _fp, _cnt) \
static memq_link_t _links[_cnt]; \
static struct mayfly _name[_cnt] = \
{ LISTIFY(_cnt, _INIT_MAYFLY_ARRAY, (), _links, _fp) }
static int init_reset(void);
static void ticker_update_cig_op_cb(uint32_t status, void *param);
@ -43,6 +56,8 @@ static void cis_disabled_cb(void *param);
static void ticker_stop_op_cb(uint32_t status, void *param);
static void cig_disable(void *param);
static void cig_disabled_cb(void *param);
static void disable(uint16_t handle);
static void cis_tx_lll_flush(void *param);
static struct ll_conn_iso_stream cis_pool[CONFIG_BT_CTLR_CONN_ISO_STREAMS];
static void *cis_free;
@ -88,8 +103,11 @@ struct ll_conn_iso_stream *ll_conn_iso_stream_acquire(void)
{
struct ll_conn_iso_stream *cis = mem_acquire(&cis_free);
cis->hdr.datapath_in = NULL;
cis->hdr.datapath_out = NULL;
if (cis) {
cis->hdr.datapath_in = NULL;
cis->hdr.datapath_out = NULL;
}
return cis;
}
@ -124,7 +142,8 @@ struct ll_conn_iso_stream *ll_iso_stream_connected_get(uint16_t handle)
}
cis = ll_conn_iso_stream_get(handle);
if (cis->lll.handle != handle) {
if ((cis->group == NULL) || (cis->lll.handle != handle)) {
/* CIS does not belong to a group or has inconsistent handle */
return NULL;
}
@ -217,7 +236,7 @@ void ull_conn_iso_cis_established(struct ll_conn_iso_stream *cis)
est = (void *)node_rx->pdu;
est->status = 0;
est->cis_handle = sys_le16_to_cpu(cis->lll.handle);
est->cis_handle = cis->lll.handle;
ll_rx_put(node_rx->hdr.link, node_rx);
ll_rx_sched();
@ -279,7 +298,7 @@ void ull_conn_iso_done(struct node_rx_event_done *done)
* has completed and the stream is released and callback is provided, the
* cis_released_cb callback is invoked.
*
* @param cis Pointer to connected ISO stream to stop
* @param cis Pointer to connected ISO stream to stop
* @param cis_released_cb Callback to invoke when the CIS has been released.
* NULL to ignore.
* @param reason Termination reason
@ -352,6 +371,11 @@ void ull_conn_iso_resume_ticker_start(struct lll_event *resume_event,
cig = resume_event->prepare_param.param;
ticker_id = TICKER_ID_CONN_ISO_RESUME_BASE + cig->handle;
if (cig->resume_cis != LLL_HANDLE_INVALID) {
/* Restarting resume ticker - must be stopped first */
(void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_LLL,
ticker_id, NULL, NULL);
}
cig->resume_cis = cis_handle;
if (0) {
@ -407,8 +431,16 @@ int ull_conn_iso_reset(void)
static int init_reset(void)
{
struct ll_conn_iso_stream *cis;
struct ll_conn_iso_group *cig;
uint16_t handle;
int err;
/* Disable all active CIGs (uses blocking ull_ticker_stop_with_mark) */
for (handle = 0U; handle < CONFIG_BT_CTLR_CONN_ISO_GROUPS; handle++) {
disable(handle);
}
/* Initialize CIS pool */
mem_init(cis_pool, sizeof(struct ll_conn_iso_stream),
sizeof(cis_pool) / sizeof(struct ll_conn_iso_stream),
@ -419,8 +451,17 @@ static int init_reset(void)
sizeof(cig_pool) / sizeof(struct ll_conn_iso_group),
&cig_free);
for (int h = 0; h < CONFIG_BT_CTLR_CONN_ISO_GROUPS; h++) {
ll_conn_iso_group_get(h)->cig_id = 0xFF;
for (handle = 0; handle < CONFIG_BT_CTLR_CONN_ISO_GROUPS; handle++) {
cig = ll_conn_iso_group_get(handle);
cig->cig_id = 0xFF;
cig->started = 0;
cig->lll.num_cis = 0;
}
for (handle = LL_CIS_HANDLE_BASE; handle <= LAST_VALID_CIS_HANDLE; handle++) {
cis = ll_conn_iso_stream_get(handle);
cis->cis_id = 0;
cis->group = NULL;
}
/* Initialize LLL */
@ -456,6 +497,7 @@ static void ticker_resume_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
{
static memq_link_t link;
static struct mayfly mfy = {0, 0, &link, NULL, lll_resume};
struct lll_conn_iso_group *cig;
struct lll_event *resume_event;
uint32_t ret;
@ -471,6 +513,10 @@ static void ticker_resume_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
resume_event->prepare_param.force = force;
mfy.param = resume_event;
/* Mark resume as done */
cig = resume_event->prepare_param.param;
cig->resume_cis = LLL_HANDLE_INVALID;
/* Kick LLL resume */
ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
0, &mfy);
@ -496,15 +542,38 @@ static void cis_disabled_cb(void *param)
cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
LL_ASSERT(cis);
if (cis->teardown) {
if (cis->lll.flushed) {
ll_iso_stream_released_cb_t cis_released_cb;
struct node_rx_pdu *node_terminate;
struct ll_conn *conn;
conn = ll_conn_get(cis->lll.acl_handle);
cis_released_cb = cis->released_cb;
/* Create and enqueue termination node */
/* Remove data path and ISOAL sink/source associated with this CIS
* for both directions.
*/
ll_remove_iso_path(cis->lll.handle, BT_HCI_DATAPATH_DIR_CTLR_TO_HOST);
ll_remove_iso_path(cis->lll.handle, BT_HCI_DATAPATH_DIR_HOST_TO_CTLR);
ll_conn_iso_stream_release(cis);
cig->lll.num_cis--;
/* Check if removed CIS has an ACL disassociation callback. Invoke
* the callback to allow cleanup.
*/
if (cis_released_cb) {
/* CIS removed - notify caller */
cis_released_cb(conn);
}
} else if (cis->teardown) {
DECLARE_MAYFLY_ARRAY(mfys, cis_tx_lll_flush,
CONFIG_BT_CTLR_CONN_ISO_GROUPS);
struct node_rx_pdu *node_terminate;
uint32_t ret;
/* Create and enqueue termination node. This shall prevent
* further enqueuing of TX nodes for terminating CIS.
*/
node_terminate = ull_pdu_rx_alloc();
LL_ASSERT(node_terminate);
node_terminate->hdr.handle = cis->lll.handle;
@ -514,16 +583,27 @@ static void cis_disabled_cb(void *param)
ll_rx_put(node_terminate->hdr.link, node_terminate);
ll_rx_sched();
ll_conn_iso_stream_release(cis);
cig->lll.num_cis--;
if (cig->lll.resume_cis == cis->lll.handle) {
/* Resume pending for terminating CIS - stop ticker */
(void)ticker_stop(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_ULL_HIGH,
TICKER_ID_CONN_ISO_RESUME_BASE +
ll_conn_iso_group_handle_get(cig),
NULL, NULL);
/* Check if removed CIS had an ACL disassociation callback. Invoke
* the callback to allow cleanup.
*/
if (cis_released_cb) {
/* CIS removed - notify caller */
cis_released_cb(conn);
cig->lll.resume_cis = LLL_HANDLE_INVALID;
}
/* We need to flush TX nodes in LLL before releasing the stream.
* More than one CIG may be terminating at the same time, so
* enqueue a mayfly instance for this CIG.
*/
mfys[cig->lll.handle].param = &cis->lll;
ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
TICKER_USER_ID_LLL, 1, &mfys[cig->lll.handle]);
LL_ASSERT(!ret);
return;
}
}
@ -543,6 +623,44 @@ static void cis_disabled_cb(void *param)
}
}
static void cis_tx_lll_flush(void *param)
{
DECLARE_MAYFLY_ARRAY(mfys, cis_disabled_cb, CONFIG_BT_CTLR_CONN_ISO_GROUPS);
struct lll_conn_iso_stream *lll;
struct ll_conn_iso_stream *cis;
struct ll_conn_iso_group *cig;
struct node_tx *tx;
memq_link_t *link;
uint32_t ret;
lll = param;
lll->flushed = 1;
cis = ll_conn_iso_stream_get(lll->handle);
cig = cis->group;
/* Flush in LLL - may return TX nodes to ack queue */
lll_conn_iso_flush(lll->handle, lll);
link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head, (void **)&tx);
while (link) {
/* Create instant NACK */
ll_tx_ack_put(lll->handle, tx);
link->next = tx->next;
tx->next = link;
link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
(void **)&tx);
}
/* Resume CIS teardown in ULL_HIGH context */
mfys[cig->lll.handle].param = &cig->lll;
ret = mayfly_enqueue(TICKER_USER_ID_LLL,
TICKER_USER_ID_ULL_HIGH, 1, &mfys[cig->lll.handle]);
LL_ASSERT(!ret);
}
static void ticker_stop_op_cb(uint32_t status, void *param)
{
static memq_link_t link;
@ -596,8 +714,27 @@ static void cig_disabled_cb(void *param)
struct ll_conn_iso_group *cig;
cig = HDR_LLL2ULL(param);
cig->cig_id = 0xFF;
cig->started = 0;
ll_conn_iso_group_release(cig);
/* TODO: Flush pending TX in LLL */
}
static void disable(uint16_t handle)
{
struct ll_conn_iso_group *cig;
int err;
cig = ll_conn_iso_group_get(handle);
(void)ull_ticker_stop_with_mark(TICKER_ID_CONN_ISO_RESUME_BASE + handle,
cig, &cig->lll);
err = ull_ticker_stop_with_mark(TICKER_ID_CONN_ISO_BASE + handle,
cig, &cig->lll);
LL_ASSERT(err == 0 || err == -EALREADY);
cig->lll.handle = LLL_HANDLE_INVALID;
cig->lll.resume_cis = LLL_HANDLE_INVALID;
}

View file

@ -18,11 +18,12 @@ struct ll_conn_iso_stream {
uint8_t terminate_reason;
uint32_t offset; /* Offset of CIS from ACL event in us */
ll_iso_stream_released_cb_t released_cb; /* CIS release callback */
uint8_t established : 1; /* 0 if CIS has not yet been established.
uint8_t framed:1;
uint8_t established:1; /* 0 if CIS has not yet been established.
* 1 if CIS has been established and host
* notified.
*/
uint8_t teardown : 1; /* 1 if CIS teardown has been initiated */
uint8_t teardown:1; /* 1 if CIS teardown has been initiated */
};
struct ll_conn_iso_group {
@ -40,8 +41,12 @@ struct ll_conn_iso_group {
*/
uint32_t c_sdu_interval;
uint32_t p_sdu_interval;
uint32_t cig_ref_point; /* CIG reference point timestamp (us) based on
* controller's clock.
*/
uint16_t iso_interval;
uint8_t cig_id;
uint8_t started:1; /* 1 if CIG started and ticker is running */
};
struct node_rx_conn_iso_req {

View file

@ -335,7 +335,6 @@ struct ll_conn {
uint8_t cis_id;
uint32_t c_max_sdu:12;
uint32_t p_max_sdu:12;
uint32_t framed:1;
uint32_t cis_offset_min;
uint32_t cis_offset_max;
uint16_t conn_event_count;

View file

@ -17,6 +17,7 @@
#include "util/mayfly.h"
#include "pdu.h"
#include "hal/ticker.h"
#include "lll.h"
#include "lll/lll_adv_types.h"
@ -109,21 +110,20 @@ static void iso_rx_demux(void *param);
#endif /* CONFIG_BT_CTLR_SYNC_ISO) || CONFIG_BT_CTLR_CONN_ISO */
#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
#define ISO_TX_BUF_SIZE MROUND(offsetof(struct node_tx_iso, pdu) + \
offsetof(struct pdu_iso, payload) + \
CONFIG_BT_CTLR_ISO_TX_BUFFER_SIZE)
#define NODE_TX_BUFFER_SIZE MROUND(offsetof(struct node_tx_iso, pdu) + \
offsetof(struct pdu_iso, payload) + \
CONFIG_BT_CTLR_ISO_TX_BUFFER_SIZE)
static struct {
void *free;
uint8_t pool[ISO_TX_BUF_SIZE * CONFIG_BT_CTLR_ISO_TX_BUFFERS];
uint8_t pool[NODE_TX_BUFFER_SIZE * CONFIG_BT_CTLR_ISO_TX_BUFFERS];
} mem_iso_tx;
static struct {
void *free;
uint8_t pool[sizeof(memq_link_t) * CONFIG_BT_CTLR_ISO_TX_BUFFERS];
} mem_link_tx;
} mem_link_iso_tx;
static MFIFO_DEFINE(iso_ack, sizeof(struct lll_tx),
CONFIG_BT_CTLR_ISO_TX_BUFFERS);
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
/* Must be implemented by vendor */
@ -177,12 +177,14 @@ __weak bool ll_data_path_sink_create(struct ll_iso_datapath *datapath,
}
/* Could be implemented by vendor */
__weak bool ll_data_path_source_create(struct ll_iso_datapath *datapath,
__weak bool ll_data_path_source_create(uint16_t handle,
struct ll_iso_datapath *datapath,
isoal_source_pdu_alloc_cb *pdu_alloc,
isoal_source_pdu_write_cb *pdu_write,
isoal_source_pdu_emit_cb *pdu_emit,
isoal_source_pdu_release_cb *pdu_release)
{
ARG_UNUSED(handle);
ARG_UNUSED(datapath);
ARG_UNUSED(pdu_alloc);
ARG_UNUSED(pdu_write);
@ -234,6 +236,7 @@ uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
uint32_t sdu_interval;
uint8_t burst_number;
isoal_status_t err;
uint8_t framed;
uint8_t role;
#if defined(CONFIG_BT_CTLR_CONN_ISO)
@ -244,7 +247,30 @@ uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
struct ll_conn_iso_group *cig = NULL;
if (IS_CIS_HANDLE(handle)) {
struct ll_conn *conn;
cis = ll_conn_iso_stream_get(handle);
if (!cis->group) {
/* CIS does not belong to a CIG */
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
conn = ll_connected_get(cis->lll.acl_handle);
if (conn) {
/* If we're still waiting for an accept/response from the
* host, path setup is premature and we must return a
* disallowed status.
*/
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
const uint8_t cis_waiting = (conn->llcp_cis.state ==
LLCP_CIS_STATE_RSP_WAIT);
if (cis_waiting) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
}
cig = cis->group;
dp_in = cis->hdr.datapath_in;
dp_out = cis->hdr.datapath_out;
@ -316,6 +342,7 @@ uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
iso_interval = cig->iso_interval;
stream_sync_delay = cis->sync_delay;
group_sync_delay = cig->sync_delay;
framed = cis->framed;
if (path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST) {
/* Create sink for RX data path */
@ -335,7 +362,7 @@ uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
if (path_id == BT_HCI_DATAPATH_ID_HCI) {
/* Not vendor specific, thus alloc and emit functions known */
err = isoal_sink_create(handle, role,
err = isoal_sink_create(handle, role, framed,
burst_number, flush_timeout,
sdu_interval, iso_interval,
stream_sync_delay, group_sync_delay,
@ -349,7 +376,7 @@ uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
/* Request vendor sink callbacks for path */
if (ll_data_path_sink_create(dp, &sdu_alloc, &sdu_emit, &sdu_write)) {
err = isoal_sink_create(handle, role,
err = isoal_sink_create(handle, role, framed,
burst_number, flush_timeout,
sdu_interval, iso_interval,
stream_sync_delay, group_sync_delay,
@ -397,13 +424,14 @@ uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
pdu_release = ll_iso_pdu_release;
if (path_is_vendor_specific(path_id)) {
if (!ll_data_path_source_create(dp, &pdu_alloc, &pdu_write,
if (!ll_data_path_source_create(handle, dp,
&pdu_alloc, &pdu_write,
&pdu_emit, &pdu_release)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
}
err = isoal_source_create(handle, role,
err = isoal_source_create(handle, role, framed,
burst_number, flush_timeout, max_octets,
sdu_interval, iso_interval,
stream_sync_delay, group_sync_delay,
@ -427,13 +455,14 @@ uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
lll_iso = &sync_iso->lll;
role = 1U; /* FIXME: Set role from LLL struct */
framed = 0;
burst_number = lll_iso->bn;
sdu_interval = lll_iso->sdu_interval;
iso_interval = lll_iso->iso_interval;
if (path_id == BT_HCI_DATAPATH_ID_HCI) {
/* Not vendor specific, thus alloc and emit functions known */
err = isoal_sink_create(handle, role,
err = isoal_sink_create(handle, role, framed,
burst_number, flush_timeout,
sdu_interval, iso_interval,
stream_sync_delay, group_sync_delay,
@ -447,7 +476,7 @@ uint8_t ll_setup_iso_path(uint16_t handle, uint8_t path_dir, uint8_t path_id,
/* Request vendor sink callbacks for path */
if (ll_data_path_sink_create(dp, &sdu_alloc, &sdu_emit, &sdu_write)) {
err = isoal_sink_create(handle, role,
err = isoal_sink_create(handle, role, framed,
burst_number, flush_timeout,
sdu_interval, iso_interval,
stream_sync_delay, group_sync_delay,
@ -500,12 +529,16 @@ uint8_t ll_remove_iso_path(uint16_t handle, uint8_t path_dir)
if (path_dir == BT_HCI_DATAPATH_DIR_HOST_TO_CTLR) {
dp = hdr->datapath_in;
if (dp) {
isoal_source_destroy(dp->source_hdl);
hdr->datapath_in = NULL;
ull_iso_datapath_release(dp);
}
} else if (path_dir == BT_HCI_DATAPATH_DIR_CTLR_TO_HOST) {
dp = hdr->datapath_out;
if (dp) {
isoal_sink_destroy(dp->sink_hdl);
hdr->datapath_out = NULL;
ull_iso_datapath_release(dp);
}
@ -535,6 +568,8 @@ uint8_t ll_remove_iso_path(uint16_t handle, uint8_t path_dir)
dp = stream->dp;
if (dp) {
isoal_sink_destroy(dp->sink_hdl);
stream->dp = NULL;
isoal_sink_destroy(dp->sink_hdl);
ull_iso_datapath_release(dp);
@ -627,30 +662,32 @@ void ll_iso_tx_mem_release(void *node_tx)
mem_release(node_tx, &mem_iso_tx.free);
}
int ll_iso_tx_mem_enqueue(uint16_t handle, void *node_tx)
int ll_iso_tx_mem_enqueue(uint16_t handle, void *node_tx, void *link)
{
struct lll_adv_iso_stream *stream;
memq_link_t *link;
if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) &&
IS_CIS_HANDLE(handle)) {
struct ll_conn_iso_stream *cis;
/* FIXME: Translate to CIS or BIS handle
*/
cis = ll_conn_iso_stream_get(handle);
memq_enqueue(link, node_tx, &cis->lll.memq_tx.tail);
} else if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) &&
IS_ADV_ISO_HANDLE(handle)) {
struct lll_adv_iso_stream *stream;
/* FIXME: When hci_iso_handle uses ISOAL, link is provided and
* this code should be removed.
*/
link = mem_acquire(&mem_link_iso_tx.free);
LL_ASSERT(link);
if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO)) {
stream = ull_adv_iso_stream_get(handle);
} else {
/* FIXME: Get connected ISO stream instance */
stream = NULL;
}
memq_enqueue(link, node_tx, &stream->memq_tx.tail);
if (!stream) {
} else {
return -EINVAL;
}
link = mem_acquire(&mem_link_tx.free);
LL_ASSERT(link);
memq_enqueue(link, node_tx, &stream->memq_tx.tail);
return 0;
}
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
@ -671,11 +708,6 @@ int ull_iso_reset(void)
{
int err;
#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
/* Re-initialize the Tx Ack mfifo */
MFIFO_INIT(iso_ack);
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
err = init_reset();
if (err) {
return err;
@ -687,46 +719,24 @@ int ull_iso_reset(void)
#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
void ull_iso_lll_ack_enqueue(uint16_t handle, struct node_tx_iso *node_tx)
{
struct lll_tx *tx;
uint8_t idx;
struct ll_iso_datapath *dp = NULL;
idx = MFIFO_ENQUEUE_GET(iso_ack, (void **)&tx);
LL_ASSERT(tx);
if (IS_ENABLED(CONFIG_BT_CTLR_CONN_ISO) && IS_CIS_HANDLE(handle)) {
struct ll_conn_iso_stream *cis;
tx->handle = handle;
tx->node = node_tx;
cis = ll_conn_iso_stream_get(handle);
dp = cis->hdr.datapath_in;
MFIFO_ENQUEUE(iso_ack, idx);
ll_rx_sched();
}
uint8_t ull_iso_tx_ack_get(uint16_t *handle)
{
struct lll_tx *tx;
uint8_t cmplt = 0U;
tx = MFIFO_DEQUEUE_GET(iso_ack);
if (tx) {
*handle = tx->handle;
do {
struct node_tx_iso *node_tx;
cmplt++;
node_tx = tx->node;
MFIFO_DEQUEUE(iso_ack);
mem_release(node_tx->link, &mem_link_tx.free);
mem_release(node_tx, &mem_iso_tx.free);
tx = MFIFO_DEQUEUE_GET(iso_ack);
} while (tx && (tx->handle == *handle));
if (dp) {
isoal_tx_pdu_release(dp->source_hdl, node_tx);
}
} else if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO) && IS_ADV_ISO_HANDLE(handle)) {
/* Process as TX ack. TODO: Can be unified with CIS and use
* ISOAL.
*/
ll_tx_ack_put(handle, (void *)node_tx);
ll_rx_sched();
}
return cmplt;
}
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
@ -761,12 +771,50 @@ void ull_iso_rx_sched(void)
mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
}
#if defined(CONFIG_BT_CTLR_CONN_ISO)
static void iso_rx_cig_ref_point_update(struct ll_conn_iso_group *cig,
const struct ll_conn_iso_stream *cis,
const struct node_rx_iso_meta *meta)
{
uint32_t cig_sync_delay;
uint32_t cis_sync_delay;
uint64_t event_count;
uint8_t burst_number;
uint8_t role;
role = cig->lll.role;
cig_sync_delay = cig->sync_delay;
cis_sync_delay = cis->sync_delay;
burst_number = cis->lll.rx.burst_number;
event_count = cis->lll.event_count;
if (role) {
/* Peripheral */
/* Check if this is the first payload received for this CIS in
* this event.
*/
if (meta->payload_number == (burst_number * event_count)) {
/* Update the CIG reference point based on the CIS
* anchor point
*/
/* TODO: It is not clear that the timestamp is in ticks.
* Upstream expectations might be different and this
* might have to be updated.
*/
cig->cig_ref_point = HAL_TICKER_TICKS_TO_US(meta->timestamp) +
cis_sync_delay - cig_sync_delay;
}
}
}
#endif /* CONFIG_BT_CTLR_CONN_ISO */
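The correction applied in iso_rx_cig_ref_point_update() can be read as two small helpers. Below is a minimal sketch with plain integer types and hypothetical names; the real code operates on the controller structures shown above.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: payload numbering restarts every BN payloads per event,
 * so event N starts at payload number BN * N.
 */
static inline bool cis_first_payload_of_event(uint64_t payload_number,
					      uint8_t burst_number,
					      uint64_t event_count)
{
	return payload_number == ((uint64_t)burst_number * event_count);
}

/* Sketch only: the CIS anchor point lies (CIG_Sync_Delay - CIS_Sync_Delay)
 * after the CIG reference point, so the reference point is recovered by
 * adding the CIS sync delay to the anchor and subtracting the CIG sync
 * delay.
 */
static inline uint32_t cig_ref_point_from_cis_anchor(uint32_t cis_anchor_us,
						     uint32_t cis_sync_delay_us,
						     uint32_t cig_sync_delay_us)
{
	return cis_anchor_us + cis_sync_delay_us - cig_sync_delay_us;
}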
static void iso_rx_demux(void *param)
{
struct ll_conn_iso_stream *cis;
struct ll_conn_iso_group *cig;
struct ll_iso_datapath *dp;
struct node_rx_pdu *rx_pdu;
isoal_sink_handle_t sink;
struct node_rx_hdr *rx;
memq_link_t *link;
@ -791,10 +839,12 @@ static void iso_rx_demux(void *param)
#if defined(CONFIG_BT_CTLR_CONN_ISO)
rx_pdu = (struct node_rx_pdu *)rx;
cis = ll_conn_iso_stream_get(rx_pdu->hdr.handle);
dp = cis->hdr.datapath_out;
sink = dp->sink_hdl;
cig = cis->group;
dp = cis->hdr.datapath_out;
if (dp->path_id != BT_HCI_DATAPATH_ID_HCI) {
iso_rx_cig_ref_point_update(cig, cis, &rx_pdu->hdr.rx_iso_meta);
if (dp && dp->path_id != BT_HCI_DATAPATH_ID_HCI) {
/* If the data path is vendor specific, pass the PDU to the
* ISO-AL here; for an HCI destination it will be passed in
* the HCI context.
@ -806,7 +856,7 @@ static void iso_rx_demux(void *param)
/* Pass the ISO PDU through ISO-AL */
const isoal_status_t err =
isoal_rx_pdu_recombine(sink, &pckt_meta);
isoal_rx_pdu_recombine(dp->sink_hdl, &pckt_meta);
LL_ASSERT(err == ISOAL_STATUS_OK); /* TODO handle err */
}
@ -895,7 +945,9 @@ void ll_iso_rx_mem_release(void **node_rx)
mem_release(rx_free, &mem_iso_rx.free);
break;
default:
LL_ASSERT(0);
/* Ignore other types, as the node may have been initialized due to
* a race with HCI reset.
*/
break;
}
}
@ -911,6 +963,13 @@ void ull_iso_datapath_release(struct ll_iso_datapath *dp)
mem_release(dp, &datapath_free);
}
#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
void ll_iso_link_tx_release(void *link)
{
mem_release(link, &mem_link_iso_tx.free);
}
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
#if defined(CONFIG_BT_CTLR_CONN_ISO)
/**
* Allocate a PDU from the LL and store the details in the given buffer. Allocation
@ -921,11 +980,27 @@ void ull_iso_datapath_release(struct ll_iso_datapath *dp)
*/
static isoal_status_t ll_iso_pdu_alloc(struct isoal_pdu_buffer *pdu_buffer)
{
ARG_UNUSED(pdu_buffer);
struct node_tx_iso *node_tx;
/* TODO: Function will be populated along with the data-path
* implementation
*/
node_tx = ll_iso_tx_mem_acquire();
if (!node_tx) {
BT_ERR("Tx Buffer Overflow");
/* TODO: Report overflow to HCI and remove assert
* data_buf_overflow(evt, BT_OVERFLOW_LINK_ISO)
*/
LL_ASSERT(0);
return ISOAL_STATUS_ERR_PDU_ALLOC;
}
/* node_tx handle will be required to emit the PDU later */
pdu_buffer->handle = (void *)node_tx;
pdu_buffer->pdu = (void *)node_tx->pdu;
/* Use TX buffer size as the limit here. Actual size will be decided in
* the ISOAL based on the minimum of the buffer size and the respective
* Max_PDU_C_To_P or Max_PDU_P_To_C.
*/
pdu_buffer->size = CONFIG_BT_CTLR_ISO_TX_BUFFER_SIZE;
return ISOAL_STATUS_OK;
}
@ -950,9 +1025,13 @@ static isoal_status_t ll_iso_pdu_write(struct isoal_pdu_buffer *pdu_buffer,
LL_ASSERT(pdu_buffer->pdu);
LL_ASSERT(sdu_payload);
/* TODO: Function will be populated along with the data-path
* implementation
*/
if ((pdu_offset + consume_len) > pdu_buffer->size) {
/* Exceeded PDU buffer */
return ISOAL_STATUS_ERR_UNSPECIFIED;
}
/* Copy source to destination at given offset */
memcpy(&pdu_buffer->pdu->payload[pdu_offset], sdu_payload, consume_len);
return ISOAL_STATUS_OK;
}
@ -966,12 +1045,14 @@ static isoal_status_t ll_iso_pdu_write(struct isoal_pdu_buffer *pdu_buffer,
static isoal_status_t ll_iso_pdu_emit(struct node_tx_iso *node_tx,
const uint16_t handle)
{
ARG_UNUSED(node_tx);
ARG_UNUSED(handle);
memq_link_t *link;
/* TODO: Function will be populated along with the data-path
* implementation
*/
link = mem_acquire(&mem_link_iso_tx.free);
LL_ASSERT(link);
if (ll_iso_tx_mem_enqueue(handle, node_tx, link)) {
return ISOAL_STATUS_ERR_PDU_EMIT;
}
return ISOAL_STATUS_OK;
}
@ -987,13 +1068,14 @@ static isoal_status_t ll_iso_pdu_release(struct node_tx_iso *node_tx,
const uint16_t handle,
const isoal_status_t status)
{
ARG_UNUSED(node_tx);
ARG_UNUSED(handle);
ARG_UNUSED(status);
/* TODO: Function will be populated along with the data-path
* implementation
*/
if (status == ISOAL_STATUS_OK) {
/* Process as TX ack */
ll_tx_ack_put(handle, (void *)node_tx);
ll_rx_sched();
} else {
/* Release back to memory pool */
ll_iso_tx_mem_release(node_tx);
}
return ISOAL_STATUS_OK;
}
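Taken together, ll_iso_pdu_alloc(), ll_iso_pdu_write(), ll_iso_pdu_emit() and ll_iso_pdu_release() form the life-cycle of a TX PDU driven by the ISO-AL source: allocate, fill, hand over towards the LLL queue, then acknowledge or recycle on release. The sketch below illustrates only that ordering (release is invoked later, on TX ack or error, and is omitted); the struct and typedef names are hypothetical stand-ins, not the isoal API.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-ins for the controller types; illustration only. */
struct tx_pdu_buf {
	void *handle;     /* opaque node handle, needed again at emit time */
	uint8_t *payload;
	size_t size;      /* capacity, cf. CONFIG_BT_CTLR_ISO_TX_BUFFER_SIZE */
};

typedef int (*pdu_alloc_fn)(struct tx_pdu_buf *buf);
typedef int (*pdu_write_fn)(struct tx_pdu_buf *buf, size_t offset,
			    const uint8_t *data, size_t len);
typedef int (*pdu_emit_fn)(void *handle, uint16_t stream_handle);

/* One SDU fragment copied into one PDU and handed towards the LLL. */
static int tx_one_fragment(pdu_alloc_fn alloc, pdu_write_fn write_cb,
			   pdu_emit_fn emit, uint16_t stream_handle,
			   const uint8_t *frag, size_t frag_len)
{
	struct tx_pdu_buf buf;

	if (alloc(&buf) || (frag_len > buf.size)) {
		return -1;	/* allocation failure or fragment too large */
	}
	if (write_cb(&buf, 0, frag, frag_len)) {
		return -1;	/* write rejected by the producer */
	}
	return emit(buf.handle, stream_handle);
}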
@ -1027,12 +1109,13 @@ static int init_reset(void)
#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
/* Initialize tx pool. */
mem_init(mem_iso_tx.pool, ISO_TX_BUF_SIZE,
mem_init(mem_iso_tx.pool, NODE_TX_BUFFER_SIZE,
CONFIG_BT_CTLR_ISO_TX_BUFFERS, &mem_iso_tx.free);
/* Initialize tx link pool. */
mem_init(mem_link_tx.pool, sizeof(memq_link_t),
CONFIG_BT_CTLR_ISO_TX_BUFFERS, &mem_link_tx.free);
mem_init(mem_link_iso_tx.pool, sizeof(memq_link_t),
CONFIG_BT_CTLR_ISO_TX_BUFFERS,
&mem_link_iso_tx.free);
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
#if BT_CTLR_ISO_STREAMS
@ -1041,5 +1124,8 @@ static int init_reset(void)
sizeof(datapath_pool) / sizeof(struct ll_iso_datapath), &datapath_free);
#endif /* BT_CTLR_ISO_STREAMS */
/* Initialize ISO Adaptation Layer */
isoal_init();
return 0;
}

View file

@ -7,7 +7,6 @@
int ull_iso_init(void);
int ull_iso_reset(void);
void ull_iso_datapath_release(struct ll_iso_datapath *dp);
uint8_t ull_iso_tx_ack_get(uint16_t *handle);
void ll_iso_rx_put(memq_link_t *link, void *rx);
void *ll_iso_rx_get(void);
void ll_iso_rx_dequeue(void);

View file

@ -25,6 +25,15 @@
#define IS_CIS_HANDLE(_handle) 0
#endif /* CONFIG_BT_CTLR_CONN_ISO */
#if defined(CONFIG_BT_CTLR_ADV_ISO)
#define IS_ADV_ISO_HANDLE(_handle) \
(((_handle) >= BT_CTLR_ADV_ISO_STREAM_HANDLE_BASE) && \
((_handle) <= (BT_CTLR_ADV_ISO_STREAM_HANDLE_BASE + BT_CTLR_ADV_ISO_STREAM_MAX - 1)))
#else
#define IS_ADV_ISO_HANDLE(_handle) 0
#endif /* CONFIG_BT_CTLR_ADV_ISO */
/* Common members for ll_conn_iso_stream and ll_broadcast_iso_stream */
struct ll_iso_stream_hdr {
struct ll_iso_datapath *datapath_in;

View file

@ -92,6 +92,7 @@ uint8_t ull_peripheral_iso_acquire(struct ll_conn *acl,
{
struct ll_conn_iso_group *cig;
struct ll_conn_iso_stream *cis;
uint16_t handle;
/* Get CIG by id */
cig = ll_conn_iso_group_get_by_id(req->cig_id);
@ -106,8 +107,9 @@ uint8_t ull_peripheral_iso_acquire(struct ll_conn *acl,
memset(&cig->lll, 0, sizeof(cig->lll));
cig->cig_id = req->cig_id;
cig->lll.handle = 0xFFFF;
cig->lll.handle = LLL_HANDLE_INVALID;
cig->lll.role = acl->lll.role;
cig->lll.resume_cis = LLL_HANDLE_INVALID;
ull_hdr_init(&cig->ull);
lll_hdr_init(&cig->lll, cig);
@ -118,6 +120,14 @@ uint8_t ull_peripheral_iso_acquire(struct ll_conn *acl,
return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
}
for (handle = LL_CIS_HANDLE_BASE; handle <= LAST_VALID_CIS_HANDLE; handle++) {
cis = ll_iso_stream_connected_get(handle);
if (cis && cis->cis_id == req->cis_id) {
/* CIS ID already in use */
return BT_HCI_ERR_INVALID_LL_PARAM;
}
}
/* Acquire new CIS */
cis = ll_conn_iso_stream_acquire();
if (cis == NULL) {
@ -125,11 +135,13 @@ uint8_t ull_peripheral_iso_acquire(struct ll_conn *acl,
return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
}
cig->iso_interval = sys_le16_to_cpu(req->iso_interval);
cig->c_sdu_interval = sys_get_le24(req->c_sdu_interval);
cig->p_sdu_interval = sys_get_le24(req->p_sdu_interval);
cig->iso_interval = sys_le16_to_cpu(req->iso_interval);
/* Read 20-bit SDU intervals (mask away RFU bits) */
cig->c_sdu_interval = sys_get_le24(req->c_sdu_interval) & 0x0FFFFF;
cig->p_sdu_interval = sys_get_le24(req->p_sdu_interval) & 0x0FFFFF;
cis->cis_id = req->cis_id;
cis->framed = (req->c_max_sdu_packed[1] & BIT(7)) >> 7;
cis->established = 0;
cis->group = cig;
cis->teardown = 0;
@ -143,16 +155,28 @@ uint8_t ull_peripheral_iso_acquire(struct ll_conn *acl,
cis->lll.sn = 0;
cis->lll.nesn = 0;
cis->lll.cie = 0;
cis->lll.flushed = 0;
cis->lll.datapath_ready_rx = 0;
cis->lll.rx.phy = req->c_phy;
cis->lll.rx.burst_number = req->c_bn;
cis->lll.rx.flush_timeout = req->c_ft;
cis->lll.rx.max_octets = sys_le16_to_cpu(req->c_max_pdu);
cis->lll.rx.payload_number = 0;
cis->lll.tx.phy = req->p_phy;
cis->lll.tx.burst_number = req->p_bn;
cis->lll.tx.flush_timeout = req->p_ft;
cis->lll.tx.max_octets = sys_le16_to_cpu(req->p_max_pdu);
cis->lll.tx.payload_number = 0;
if (!cis->lll.link_tx_free) {
cis->lll.link_tx_free = &cis->lll.link_tx;
}
memq_init(cis->lll.link_tx_free, &cis->lll.memq_tx.head,
&cis->lll.memq_tx.tail);
cis->lll.link_tx_free = NULL;
*cis_handle = ll_conn_iso_stream_handle_get(cis);
cig->lll.num_cis++;
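In the hunk above, the 20-bit SDU_Interval values are read with the four RFU bits masked away, and the framed flag is taken from the most significant bit of the packed 16-bit Max_SDU field. A standalone sketch of that unpacking follows; the helper names are hypothetical, and the 12-bit Max_SDU extraction is an assumption based on the usual LL_CIS_REQ layout rather than something this change relies on.

#include <stdint.h>

/* Framed flag: bit 15 of the little-endian packed Max_SDU field. */
static inline uint8_t cis_req_framed(const uint8_t max_sdu_packed[2])
{
	return (max_sdu_packed[1] & 0x80) >> 7;
}

/* Assumed layout: Max_SDU occupies the 12 least significant bits. */
static inline uint16_t cis_req_max_sdu(const uint8_t max_sdu_packed[2])
{
	return (uint16_t)(((max_sdu_packed[1] & 0x0F) << 8) |
			  max_sdu_packed[0]);
}

/* SDU_Interval: 20 valid bits out of the 24-bit little-endian field. */
static inline uint32_t cis_req_sdu_interval(const uint8_t sdu_interval[3])
{
	return ((uint32_t)sdu_interval[0] |
		((uint32_t)sdu_interval[1] << 8) |
		((uint32_t)sdu_interval[2] << 16)) & 0x0FFFFF;
}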
@ -198,11 +222,13 @@ static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
static struct lll_prepare_param p;
struct ll_conn_iso_group *cig;
struct ll_conn_iso_stream *cis;
uint64_t leading_event_count;
uint16_t handle_iter;
uint32_t err;
uint8_t ref;
cig = param;
leading_event_count = 0;
/* Check if stopping ticker (on disconnection, race with ticker expiry)
*/
@ -225,7 +251,25 @@ static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
*/
if (cis->lll.handle != 0xFFFF) {
cis->lll.event_count++;
leading_event_count = MAX(leading_event_count,
cis->lll.event_count);
}
/* Latch datapath validity when entering the event */
cis->lll.datapath_ready_rx = cis->hdr.datapath_out != NULL;
}
/* Update the CIG reference point for this event. Event 0 for the
* leading CIS in the CIG would have had its reference point set in
* ull_peripheral_iso_start(). The reference point should only be
* updated from event 1 onwards. Although the CIG reference point set
* this way is not accurate, it is the best possible until the anchor
* point for the leading CIS is available for this event.
*/
if (leading_event_count > 0) {
cig->cig_ref_point += (cig->iso_interval * CONN_INT_UNIT_US);
}
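The advance above is simply the nominal ISO interval converted to microseconds. A quick illustration, assuming the usual 1.25 ms connection interval unit for CONN_INT_UNIT_US:

#include <stdint.h>

/* Illustration only: with ISO_Interval = 8 units, the estimated CIG
 * reference point advances by 8 * 1250 us = 10 ms per CIG event until it
 * can be re-anchored from the next received CIS anchor point.
 */
#define SKETCH_CONN_INT_UNIT_US 1250U

static inline uint32_t cig_ref_point_advance(uint32_t cig_ref_point_us,
					     uint16_t iso_interval_units)
{
	return cig_ref_point_us +
	       ((uint32_t)iso_interval_units * SKETCH_CONN_INT_UNIT_US);
}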
/* Increment prepare reference count */
@ -252,7 +296,8 @@ static void ticker_op_cb(uint32_t status, void *param)
LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}
void ull_peripheral_iso_start(struct ll_conn *acl, uint32_t ticks_at_expire)
void ull_peripheral_iso_start(struct ll_conn *acl, uint32_t ticks_at_expire,
uint16_t cis_handle)
{
struct ll_conn_iso_group *cig;
struct ll_conn_iso_stream *cis;
@ -262,17 +307,12 @@ void ull_peripheral_iso_start(struct ll_conn *acl, uint32_t ticks_at_expire)
uint32_t ticks_interval;
uint32_t ticker_status;
int32_t cig_offset_us;
uint16_t handle_iter;
uint16_t cis_handle;
uint8_t ticker_id;
cis_handle = acl->llcp_cis.cis_handle;
cig = ll_conn_iso_group_get_by_id(acl->llcp_cis.cig_id);
cis = ll_conn_iso_stream_get(cis_handle);
cig = cis->group;
cis_offs_to_cig_ref = cig->sync_delay - cis->sync_delay;
handle_iter = UINT16_MAX;
cis->lll.offset = cis_offs_to_cig_ref;
cis->lll.handle = cis_handle;
@ -281,17 +321,7 @@ void ull_peripheral_iso_start(struct ll_conn *acl, uint32_t ticks_at_expire)
* running. If so, we just return with updated offset and
* validated handle.
*/
for (int i = 0; i < cig->lll.num_cis; i++) {
struct ll_conn_iso_stream *other_cis;
other_cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
LL_ASSERT(other_cis);
if (other_cis == cis || other_cis->lll.handle == 0xFFFF) {
/* Same CIS or not valid - skip */
continue;
}
if (cig->started) {
/* We're done */
return;
}
@ -312,7 +342,9 @@ void ull_peripheral_iso_start(struct ll_conn *acl, uint32_t ticks_at_expire)
ready_delay_us = lll_radio_rx_ready_delay_get(0, 0);
#endif
/* Calculate initial ticker offset - we're one ACL interval early */
cig_offset_us = acl_to_cig_ref_point;
cig_offset_us += (acl->lll.interval * CONN_INT_UNIT_US);
cig_offset_us -= EVENT_OVERHEAD_START_US;
cig_offset_us -= EVENT_TICKER_RES_MARGIN_US;
cig_offset_us -= EVENT_JITTER_US;
@ -323,6 +355,14 @@ void ull_peripheral_iso_start(struct ll_conn *acl, uint32_t ticks_at_expire)
*/
LL_ASSERT(cig_offset_us > 0);
/* Calculate the CIG reference point of the first CIG event. This
* calculation is inaccurate. However, it is the best estimate available
* until the first anchor point for the leading CIS is available.
*/
cig->cig_ref_point = HAL_TICKER_TICKS_TO_US(ticks_at_expire);
cig->cig_ref_point += acl_to_cig_ref_point;
cig->cig_ref_point += (acl->lll.interval * CONN_INT_UNIT_US);
/* Start CIS peripheral CIG ticker */
ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_ULL_HIGH,
@ -338,4 +378,6 @@ void ull_peripheral_iso_start(struct ll_conn *acl, uint32_t ticks_at_expire)
LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
(ticker_status == TICKER_STATUS_BUSY));
cig->started = 1;
}
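The initial reference point estimate above is the ACL anchor at ticks_at_expire plus the distance from the ACL anchor to the CIG reference point, pushed out by one ACL interval since the first CIG event is scheduled one interval later. A minimal sketch of that arithmetic (hypothetical helper, all times in microseconds):

#include <stdint.h>

static inline uint32_t initial_cig_ref_point_us(uint32_t acl_anchor_us,
						uint32_t acl_to_cig_ref_point_us,
						uint32_t acl_interval_us)
{
	/* One ACL interval early: the first CIG event lands in the next ACL
	 * interval, offset by the ACL-anchor-to-CIG-reference-point distance.
	 */
	return acl_anchor_us + acl_to_cig_ref_point_us + acl_interval_us;
}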

View file

@ -14,4 +14,5 @@ uint8_t ull_peripheral_iso_acquire(struct ll_conn *acl,
uint8_t ull_peripheral_iso_setup(struct pdu_data_llctrl_cis_ind *ind,
uint8_t cig_id,
uint16_t cis_handle);
void ull_peripheral_iso_start(struct ll_conn *acl, uint32_t ticks_at_expire);
void ull_peripheral_iso_start(struct ll_conn *acl, uint32_t ticks_at_expire,
uint16_t cis_handle);