Bluetooth: Controller: Fix BT_CTLR_ISOAL_PSN_IGNORE for event overlap
Fix the BT_CTLR_ISOAL_PSN_IGNORE implementation when adjusting the Sequence Number during overlap with the current CIG event. Reuse the event_offset calculation from the non-PSN-ignore implementation.

Signed-off-by: Vinayak Kariappa Chettimada <vich@nordicsemi.no>
Signed-off-by: Troels Nilsson <trnn@demant.com>
Parent: 4835658b46
Commit: fe89b0ddfb
1 changed file with 58 additions and 50 deletions
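In essence, the fix makes the PSN-ignore path reuse the offset-adjusted target event that the non-PSN-ignore path already computes. A minimal standalone sketch of the corrected CIS calculation, based on the inputs visible in the diff below (the helper name and its freestanding form are illustrative, not the Zephyr API):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: derive the HCI ISO packet sequence number for a CIS SDU.
 * event_count   - current CIS event counter (cis->lll.event_count)
 * in_event      - true when inside the subevents of the current ISO
 *                 event (ull_ref_get() returned non-zero)
 * flush_timeout - CIS flush timeout, FT (cis->lll.tx.ft)
 */
static uint64_t cis_tx_pkt_seq_num(uint64_t event_count, bool in_event,
                                   uint8_t flush_timeout)
{
        uint8_t event_offset;

        /* Inside the event the next ISO event is reachable; otherwise
         * keep one extra event of margin for ISO-AL fragmentation.
         */
        event_offset = in_event ? 1U : 2U;

        /* FT > 1: a late payload is not flushed immediately, so an
         * earlier target event is acceptable.
         */
        if (flush_timeout > 1U) {
                event_offset -= 1U;
        }

        /* The pre-fix code derived the PSN from event_count alone; the
         * fix adds event_offset so the PSN tracks the targeted event.
         */
        return (event_count + event_offset) + 1U;
}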
@@ -5789,6 +5789,7 @@ int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
         struct ll_conn_iso_group *cig;
         struct ll_iso_stream_hdr *hdr;
         struct ll_iso_datapath *dp_in;
+        uint8_t event_offset;

         cis = ll_iso_stream_connected_get(handle);
         if (!cis) {
@@ -5797,13 +5798,35 @@ int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
         cig = cis->group;

+        /* We must ensure sufficient time for ISO-AL to fragment SDU and
+         * deliver PDUs to the TX queue. By checking ull_ref_get, we
+         * know if we are within the subevents of an ISO event. If so,
+         * we can assume that we have enough time to deliver in the next
+         * ISO event. If we're not active within the ISO event, we don't
+         * know if there is enough time to deliver in the next event,
+         * and for safety we set the target to current event + 2.
+         *
+         * For FT > 1, we have the opportunity to retransmit in later
+         * event(s), in which case we have the option to target an
+         * earlier event (this or next) because being late does not
+         * instantly flush the payload.
+         */
+        event_offset = ull_ref_get(&cig->ull) ? 1 : 2;
+
+        if (cis->lll.tx.ft > 1) {
+                /* FT > 1, target an earlier event */
+                event_offset -= 1;
+        }
+
 #if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
         uint64_t event_count;
         uint64_t pkt_seq_num;

         /* Catch up local pkt_seq_num with internal pkt_seq_num */
-        event_count = cis->lll.event_count;
+        event_count = cis->lll.event_count + event_offset;
         pkt_seq_num = event_count + 1U;

         /* If pb_flag is BT_ISO_START (0b00) or BT_ISO_SINGLE (0b10)
          * then we simply check that the pb_flag is an even value, and
          * then pkt_seq_num is a future sequence number value compare
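The trailing comment of this hunk refers to a pb_flag check that continues past the shown context: only fragments that start a new SDU may pull the locally tracked sequence number forward. A hedged sketch of that gating idea, with an illustrative helper name and a simplified comparison (the real code tolerates sequence-number wrap-around):

#include <stdbool.h>
#include <stdint.h>

#define BT_ISO_START  0x00 /* pb_flag: first fragment of an SDU  */
#define BT_ISO_SINGLE 0x02 /* pb_flag: complete SDU in one frame */

/* Sketch: only fragments that begin a new SDU (pb_flag even, i.e. bit 0
 * clear, which covers BT_ISO_START and BT_ISO_SINGLE) may catch the
 * locally tracked PSN up, and only to a value that lies in the future.
 */
static bool cis_psn_catch_up(uint8_t pb_flag, uint64_t candidate_psn,
                             uint64_t *local_psn)
{
        if ((pb_flag & 0x01U) != 0U) {
                /* Continuation or end fragment: keep the PSN of the
                 * SDU that is already in progress.
                 */
                return false;
        }

        /* Simplified "future value" test; the controller compares
         * modulo the sequence-number space to handle wrap-around.
         */
        if (candidate_psn > *local_psn) {
                *local_psn = candidate_psn;
                return true;
        }

        return false;
}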
@@ -5844,29 +5867,6 @@ int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
                                                   ISO_INT_UNIT_US));

 #else /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
-        uint8_t event_offset;
-
-        /* We must ensure sufficient time for ISO-AL to fragment SDU and
-         * deliver PDUs to the TX queue. By checking ull_ref_get, we
-         * know if we are within the subevents of an ISO event. If so,
-         * we can assume that we have enough time to deliver in the next
-         * ISO event. If we're not active within the ISO event, we don't
-         * know if there is enough time to deliver in the next event,
-         * and for safety we set the target to current event + 2.
-         *
-         * For FT > 1, we have the opportunity to retransmit in later
-         * event(s), in which case we have the option to target an
-         * earlier event (this or next) because being late does not
-         * instantly flush the payload.
-         */
-        event_offset = ull_ref_get(&cig->ull) ? 1 : 2;
-
-        if (cis->lll.tx.ft > 1) {
-                /* FT > 1, target an earlier event */
-                event_offset -= 1;
-        }
-
         sdu_frag_tx.target_event = cis->lll.event_count + event_offset;
         sdu_frag_tx.grp_ref_point =
                 isoal_get_wrapped_time_us(cig->cig_ref_point,
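In the non-PSN-ignore branch the group reference point handed to ISO-AL is the CIG anchor advanced by whole ISO intervals, using wrap-tolerant microsecond arithmetic; the exact operand is truncated in the hunk above. A simplified sketch of that intent, assuming the usual 1.25 ms ISO interval unit (this is an illustration, not the signature of isoal_get_wrapped_time_us):

#include <stdint.h>

/* Illustrative only: advance a microsecond reference point by a whole
 * number of ISO intervals, letting the 32-bit value wrap naturally.
 * The controller's real helper also clamps to its own wrapping point;
 * this sketch only shows the intent of the truncated call above.
 */
static uint32_t grp_ref_point_advance(uint32_t grp_ref_point_us,
                                      uint8_t event_offset,
                                      uint16_t iso_interval)
{
        const uint32_t iso_int_unit_us = 1250U; /* 1.25 ms per unit */
        uint32_t advance_us;

        advance_us = (uint32_t)event_offset * iso_interval *
                     iso_int_unit_us;

        return grp_ref_point_us + advance_us; /* wraps modulo 2^32 */
}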
@@ -5908,7 +5908,10 @@ int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
         struct lll_adv_iso_stream *stream;
         struct ll_adv_iso_set *adv_iso;
         struct lll_adv_iso *lll_iso;
+        uint16_t latency_prepare;
         uint16_t stream_handle;
+        uint64_t target_event;
+        uint8_t event_offset;

         /* Get BIS stream handle and stream context */
         stream_handle = LL_BIS_ADV_IDX_FROM_HANDLE(handle);
@@ -5926,13 +5929,42 @@ int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
         lll_iso = &adv_iso->lll;

+        /* Determine the target event and the first event offset after
+         * datapath setup.
+         * event_offset mitigates the possibility of first SDU being
+         * late on the datapath and avoid all subsequent SDUs being
+         * dropped for a said SDU interval. i.e. upper layer is not
+         * drifting, say first SDU dropped, hence subsequent SDUs all
+         * dropped, is mitigated by offsetting the grp_ref_point.
+         *
+         * It is ok to do the below for every received ISO data, ISOAL
+         * will not consider subsequent skewed target_event after the
+         * first use of target_event value.
+         *
+         * In BIG implementation in LLL, payload_count corresponds to
+         * the next BIG event, hence calculate grp_ref_point for next
+         * BIG event by incrementing the previous elapsed big_ref_point
+         * by one additional ISO interval.
+         */
+        target_event = lll_iso->payload_count / lll_iso->bn;
+        latency_prepare = lll_iso->latency_prepare;
+        if (latency_prepare) {
+                /* big_ref_point has been updated, but payload_count
+                 * hasn't been updated yet - increment target_event to
+                 * compensate
+                 */
+                target_event += latency_prepare;
+        }
+        event_offset = ull_ref_get(&adv_iso->ull) ? 0U : 1U;

 #if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
         uint64_t event_count;
         uint64_t pkt_seq_num;

         /* Catch up local pkt_seq_num with internal pkt_seq_num */
-        event_count = lll_iso->payload_count / lll_iso->bn;
-        pkt_seq_num = event_count;
+        event_count = target_event + event_offset;
+        pkt_seq_num = event_count + 1U;

         /* If pb_flag is BT_ISO_START (0b00) or BT_ISO_SINGLE (0b10)
          * then we simply check that the pb_flag is an even value, and
          * then pkt_seq_num is a future sequence number value compare
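For the broadcaster path, the target event is derived from the payload counter, compensated when the reference point has already advanced ahead of it, and then offset by whether we are inside the current BIG event. A standalone sketch of that derivation and the resulting PSN, mirroring the hunk above (the helper name and parameter list are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: derive the target BIG event and PSN for a broadcast SDU.
 * payload_count   - lll_iso->payload_count, advances bn times per event
 * bn              - burst number, PDUs per BIG event (at least 1)
 * latency_prepare - events for which big_ref_point already advanced
 *                   while payload_count has not yet been updated
 * in_event        - true when inside the subevents of the BIG event
 */
static uint64_t bis_tx_pkt_seq_num(uint64_t payload_count, uint8_t bn,
                                   uint16_t latency_prepare, bool in_event)
{
        uint64_t target_event;
        uint8_t event_offset;

        /* payload_count refers to the next BIG event */
        target_event = payload_count / bn;

        /* Compensate so the target and the reference point describe
         * the same BIG event.
         */
        if (latency_prepare) {
                target_event += latency_prepare;
        }

        event_offset = in_event ? 0U : 1U;

        return (target_event + event_offset) + 1U;
}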
@@ -5985,30 +6017,6 @@ int hci_iso_handle(struct net_buf *buf, struct net_buf **evt)
                                                   ISO_INT_UNIT_US));

 #else /* !CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
-        uint64_t target_event;
-        uint8_t event_offset;
-
-        /* Determine the target event and the first event offset after
-         * datapath setup.
-         * event_offset mitigates the possibility of first SDU being
-         * late on the datapath and avoid all subsequent SDUs being
-         * dropped for a said SDU interval. i.e. upper layer is not
-         * drifting, say first SDU dropped, hence subsequent SDUs all
-         * dropped, is mitigated by offsetting the grp_ref_point.
-         *
-         * It is ok to do the below for every received ISO data, ISOAL
-         * will not consider subsequent skewed target_event after the
-         * first use of target_event value.
-         *
-         * In BIG implementation in LLL, payload_count corresponds to
-         * the next BIG event, hence calculate grp_ref_point for next
-         * BIG event by incrementing the previous elapsed big_ref_point
-         * by one additional ISO interval.
-         */
-        target_event = lll_iso->payload_count / lll_iso->bn;
-        event_offset = ull_ref_get(&adv_iso->ull) ? 0U : 1U;
-        event_offset += lll_iso->latency_prepare;
-
         sdu_frag_tx.target_event = target_event + event_offset;
         sdu_frag_tx.grp_ref_point =
                 isoal_get_wrapped_time_us(adv_iso->big_ref_point,
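After this cleanup, both the PSN-ignore and the regular configuration feed the same offset-adjusted target event into the SDU fragment descriptor; only the sequence-number handling differs. A condensed, illustrative sketch of that shared shape (the struct and helper below are stand-ins; only the field names target_event and grp_ref_point come from the diff):

#include <stdint.h>

/* Illustrative reduction of the shared BIS TX path after the change:
 * one place computes target_event and event_offset, and both branches
 * consume the same values. The struct is a stand-in for the
 * controller's SDU fragment descriptor.
 */
struct sdu_frag_meta {
        uint64_t target_event;  /* BIG event the SDU is aimed at      */
        uint32_t grp_ref_point; /* anchor of that event, microseconds */
};

static struct sdu_frag_meta bis_sdu_meta(uint64_t target_event,
                                         uint8_t event_offset,
                                         uint32_t next_big_ref_point_us)
{
        struct sdu_frag_meta meta;

        meta.target_event = target_event + event_offset;

        /* The caller supplies a reference point already advanced to
         * the targeted BIG event; the controller computes it with a
         * wrap-aware helper that is truncated in the hunk above.
         */
        meta.grp_ref_point = next_big_ref_point_us;

        return meta;
}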