Bluetooth: Controller: Fix incorrect elapsed events value
Fix incorrect elapsed events value when event prepares are aborted in the pipeline. The incorrect value caused premature supervision timeouts.

Signed-off-by: Vinayak Kariappa Chettimada <vich@nordicsemi.no>
parent e21ff7067d
commit 247037bd3e
11 changed files with 98 additions and 68 deletions
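
The gist of the change: done-event processing now derives the number of elapsed events from a new lazy_prepare field, which records the lazy count of the prepare that produced the event, instead of from the accumulated latency_prepare value that also grows when prepares are aborted in the pipeline. Below is a minimal before/after sketch of that arithmetic, using stand-alone variables rather than the controller's real lll structures (purely illustrative, not controller code); the actual changes follow in the hunks.

#include <stdint.h>

/* Illustrative sketch of the elapsed-events arithmetic only. */
static uint16_t elapsed_old(uint16_t latency_event, uint16_t latency_prepare)
{
	/* Old scheme: use the accumulated prepare latency, which also
	 * counts prepares aborted while still in the pipeline.
	 */
	if (latency_prepare) {
		return latency_event + latency_prepare;
	}

	return latency_event + 1U;
}

static uint16_t elapsed_new(uint16_t latency_event, uint16_t lazy_prepare)
{
	/* New scheme: use the lazy count of the prepare that produced
	 * this done event, plus one for the event itself.
	 */
	return latency_event + lazy_prepare + 1U;
}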

@@ -48,6 +48,7 @@ struct lll_conn {
 	uint16_t latency;
 
 	uint16_t latency_prepare;
+	uint16_t lazy_prepare;
 	uint16_t latency_event;
 	uint16_t event_counter;
 

@@ -93,6 +93,7 @@ struct lll_conn_iso_group {
 
 	/* Accumulates LLL prepare callback latencies */
 	uint16_t latency_prepare;
+	uint16_t lazy_prepare;
 	uint16_t latency_event;
 
 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)

@@ -42,6 +42,7 @@ struct lll_sync {
 #endif /* CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */
 
 	uint16_t skip_prepare;
+	uint16_t lazy_prepare;
 	uint16_t skip_event;
 	uint16_t event_counter;
 

@@ -120,7 +120,8 @@ static int prepare_cb(struct lll_prepare_param *p)
 	lll_conn_prepare_reset();
 
 	/* Calculate the current event latency */
-	lll->latency_event = lll->latency_prepare + p->lazy;
+	lll->lazy_prepare = p->lazy;
+	lll->latency_event = lll->latency_prepare + lll->lazy_prepare;
 
 	/* Calculate the current event counter value */
 	event_counter = lll->event_counter + lll->latency_event;

@@ -200,7 +200,22 @@ void lll_conn_abort_cb(struct lll_prepare_param *prepare_param, void *param)
 	lll = prepare_param->param;
 
 	/* Accumulate the latency as event is aborted while being in pipeline */
-	lll->latency_prepare += (prepare_param->lazy + 1);
+	lll->lazy_prepare = prepare_param->lazy;
+	lll->latency_prepare += (lll->lazy_prepare + 1U);
 
+#if defined(CONFIG_BT_PERIPHERAL)
+	if (lll->role == BT_HCI_ROLE_PERIPHERAL) {
+		/* Accumulate window widening */
+		lll->periph.window_widening_prepare_us +=
+			lll->periph.window_widening_periodic_us *
+			(prepare_param->lazy + 1);
+		if (lll->periph.window_widening_prepare_us >
+		    lll->periph.window_widening_max_us) {
+			lll->periph.window_widening_prepare_us =
+				lll->periph.window_widening_max_us;
+		}
+	}
+#endif /* CONFIG_BT_PERIPHERAL */
+
 	/* Extra done event, to check supervision timeout */
 	e = ull_event_done_extra_get();

@@ -79,15 +79,6 @@ void lll_periph_prepare(void *param)
 
 	lll = p->param;
 
-	/* Accumulate window widening */
-	lll->periph.window_widening_prepare_us +=
-		lll->periph.window_widening_periodic_us * (p->lazy + 1);
-	if (lll->periph.window_widening_prepare_us >
-	    lll->periph.window_widening_max_us) {
-		lll->periph.window_widening_prepare_us =
-			lll->periph.window_widening_max_us;
-	}
-
 	/* Invoke common pipeline handling of prepare */
 	err = lll_prepare(lll_conn_is_abort_cb, lll_conn_abort_cb, prepare_cb,
 			  0U, p);

@@ -133,7 +124,8 @@ static int prepare_cb(struct lll_prepare_param *p)
 	lll_conn_prepare_reset();
 
 	/* Calculate the current event latency */
-	lll->latency_event = lll->latency_prepare + p->lazy;
+	lll->lazy_prepare = p->lazy;
+	lll->latency_event = lll->latency_prepare + lll->lazy_prepare;
 
 	/* Calculate the current event counter value */
 	event_counter = lll->event_counter + lll->latency_event;

@@ -161,6 +153,15 @@ static int prepare_cb(struct lll_prepare_param *p)
 					   lll->data_chan_count);
 	}
 
+	/* Accumulate window widening */
+	lll->periph.window_widening_prepare_us +=
+		lll->periph.window_widening_periodic_us * (lll->lazy_prepare + 1U);
+	if (lll->periph.window_widening_prepare_us >
+	    lll->periph.window_widening_max_us) {
+		lll->periph.window_widening_prepare_us =
+			lll->periph.window_widening_max_us;
+	}
+
 	/* current window widening */
 	lll->periph.window_widening_event_us +=
 		lll->periph.window_widening_prepare_us;

@@ -95,29 +95,15 @@ void lll_peripheral_iso_prepare(void *param)
 {
 	struct lll_conn_iso_group *cig_lll;
 	struct lll_prepare_param *p;
-	uint16_t elapsed;
 	int err;
 
 	/* Initiate HF clock start up */
 	err = lll_hfclock_on();
 	LL_ASSERT(err >= 0);
 
-	/* Instants elapsed */
 	p = param;
-	elapsed = p->lazy + 1U;
 
-	/* Save the (latency + 1) for use in event and/or supervision timeout */
 	cig_lll = p->param;
-	cig_lll->latency_prepare += elapsed;
-
-	/* Accumulate window widening */
-	cig_lll->window_widening_prepare_us_frac +=
-		cig_lll->window_widening_periodic_us_frac * elapsed;
-	if (cig_lll->window_widening_prepare_us_frac >
-	    EVENT_US_TO_US_FRAC(cig_lll->window_widening_max_us)) {
-		cig_lll->window_widening_prepare_us_frac =
-			EVENT_US_TO_US_FRAC(cig_lll->window_widening_max_us);
-	}
 
 	/* Invoke common pipeline handling of prepare */
 	err = lll_prepare(lll_is_abort_cb, abort_cb, prepare_cb, 0U, param);

@@ -152,7 +138,6 @@ static int prepare_cb(struct lll_prepare_param *p)
 	memq_link_t *link;
 	uint32_t start_us;
 	uint32_t hcto;
-	uint16_t lazy;
 	uint32_t ret;
 	uint8_t phy;
 	int err = 0;

@@ -190,14 +175,23 @@ static int prepare_cb(struct lll_prepare_param *p)
 					  &data_chan_prn_s,
 					  &data_chan_remap_idx);
 
-	/* Store the current event latency */
-	cig_lll->latency_event = cig_lll->latency_prepare;
-	lazy = cig_lll->latency_prepare - 1U;
+	/* Calculate the current event latency */
+	cig_lll->lazy_prepare = p->lazy;
+	cig_lll->latency_event = cig_lll->latency_prepare + cig_lll->lazy_prepare;
 
 	/* Reset accumulated latencies */
 	cig_lll->latency_prepare = 0U;
 
-	/* current window widening */
+	/* Accumulate window widening */
+	cig_lll->window_widening_prepare_us_frac +=
+		cig_lll->window_widening_periodic_us_frac * (cig_lll->lazy_prepare + 1U);
+	if (cig_lll->window_widening_prepare_us_frac >
+	    EVENT_US_TO_US_FRAC(cig_lll->window_widening_max_us)) {
+		cig_lll->window_widening_prepare_us_frac =
+			EVENT_US_TO_US_FRAC(cig_lll->window_widening_max_us);
+	}
+
+	/* Current window widening */
 	cig_lll->window_widening_event_us_frac +=
 		cig_lll->window_widening_prepare_us_frac;
 	cig_lll->window_widening_prepare_us_frac = 0;

@@ -210,7 +204,7 @@ static int prepare_cb(struct lll_prepare_param *p)
 	se_curr = 1U;
 
 	/* Adjust sn and nesn for skipped CIG events */
-	payload_count_lazy(cis_lll, lazy);
+	payload_count_lazy(cis_lll, cig_lll->lazy_prepare);
 
 	/* Start setting up of Radio h/w */
 	radio_reset();

@@ -381,7 +375,7 @@ static int prepare_cb(struct lll_prepare_param *p)
 	}
 
 	/* Adjust sn and nesn for skipped CIG events */
-	payload_count_lazy(cis_lll, lazy);
+	payload_count_lazy(cis_lll, cig_lll->lazy_prepare);
 
 	/* Adjust sn and nesn for canceled events */
 	if (err) {

@@ -405,13 +399,13 @@ static int prepare_cb(struct lll_prepare_param *p)
 
 static void abort_cb(struct lll_prepare_param *prepare_param, void *param)
 {
+	struct lll_conn_iso_group *cig_lll;
 	int err;
 
 	/* NOTE: This is not a prepare being cancelled */
 	if (!prepare_param) {
 		struct lll_conn_iso_stream *next_cis_lll;
 		struct lll_conn_iso_stream *cis_lll;
-		struct lll_conn_iso_group *cig_lll;
 
 		cis_lll = ull_conn_iso_lll_stream_get(cis_handle_curr);
 		cig_lll = param;

@@ -442,6 +436,22 @@ static void abort_cb(struct lll_prepare_param *prepare_param, void *param)
 	err = lll_hfclock_off();
 	LL_ASSERT(err >= 0);
 
+	/* Get reference to CIG LLL context */
+	cig_lll = prepare_param->param;
+
+	/* Accumulate the latency as event is aborted while being in pipeline */
+	cig_lll->lazy_prepare = prepare_param->lazy;
+	cig_lll->latency_prepare += (cig_lll->lazy_prepare + 1U);
+
+	/* Accumulate window widening */
+	cig_lll->window_widening_prepare_us_frac +=
+		cig_lll->window_widening_periodic_us_frac * (cig_lll->lazy_prepare + 1U);
+	if (cig_lll->window_widening_prepare_us_frac >
+	    EVENT_US_TO_US_FRAC(cig_lll->window_widening_max_us)) {
+		cig_lll->window_widening_prepare_us_frac =
+			EVENT_US_TO_US_FRAC(cig_lll->window_widening_max_us);
+	}
+
 	lll_done(param);
 }
 

@@ -132,9 +132,11 @@ static void prepare(void *param)
 
 	lll = p->param;
 
+	lll->lazy_prepare = p->lazy;
+
 	/* Accumulate window widening */
 	lll->window_widening_prepare_us += lll->window_widening_periodic_us *
-					   (p->lazy + 1U);
+					   (lll->lazy_prepare + 1U);
 	if (lll->window_widening_prepare_us > lll->window_widening_max_us) {
 		lll->window_widening_prepare_us = lll->window_widening_max_us;
 	}

@@ -272,7 +274,7 @@ static int create_prepare_cb(struct lll_prepare_param *p)
 	lll = p->param;
 
 	/* Calculate the current event latency */
-	lll->skip_event = lll->skip_prepare + p->lazy;
+	lll->skip_event = lll->skip_prepare + lll->lazy_prepare;
 
 	/* Calculate the current event counter value */
 	event_counter = lll->event_counter + lll->skip_event;

@@ -360,7 +362,7 @@ static int prepare_cb(struct lll_prepare_param *p)
 	lll = p->param;
 
 	/* Calculate the current event latency */
-	lll->skip_event = lll->skip_prepare + p->lazy;
+	lll->skip_event = lll->skip_prepare + lll->lazy_prepare;
 
 	/* Calculate the current event counter value */
 	event_counter = lll->event_counter + lll->skip_event;

@@ -631,7 +633,7 @@ static void abort_cb(struct lll_prepare_param *prepare_param, void *param)
 
 	/* Accumulate the latency as event is aborted while being in pipeline */
 	lll = prepare_param->param;
-	lll->skip_prepare += (prepare_param->lazy + 1U);
+	lll->skip_prepare += (lll->lazy_prepare + 1U);
 
 	/* Extra done event, to check sync lost */
 	e = ull_event_done_extra_get();

@@ -1015,11 +1015,6 @@ void ull_conn_done(struct node_rx_event_done *done)
 #else
 	latency_event = lll->latency_event;
 #endif
-	if (lll->latency_prepare) {
-		elapsed_event = latency_event + lll->latency_prepare;
-	} else {
-		elapsed_event = latency_event + 1U;
-	}
 
 	/* Peripheral drift compensation calc and new latency or
 	 * central terminate acked

@@ -1054,6 +1049,8 @@ void ull_conn_done(struct node_rx_event_done *done)
 		conn->connect_expire = 0U;
 	}
 
+	elapsed_event = latency_event + lll->lazy_prepare + 1U;
+
 	/* Reset supervision countdown */
 	if (done->extra.crc_valid && !done->extra.is_aborted) {
 		conn->supervision_expire = 0U;

@@ -489,22 +489,26 @@ void ull_conn_iso_done(struct node_rx_event_done *done)
 				conn->supervision_timeout * 10U * 1000U,
 				cig->iso_interval * CONN_INT_UNIT_US);
 
-		} else if (cis->event_expire > cig->lll.latency_event) {
-			cis->event_expire -= cig->lll.latency_event;
-
 		} else {
-			cis->event_expire = 0U;
+			uint16_t event_elapsed;
 
-			/* Stop CIS and defer cleanup to after teardown. This will
-			 * only generate a terminate event to the host if CIS has
-			 * been established. If CIS was not established, the
-			 * teardown will send CIS_ESTABLISHED with failure.
-			 */
-			ull_conn_iso_cis_stop(cis, NULL,
-					      cis->established ?
-					      BT_HCI_ERR_CONN_TIMEOUT :
-					      BT_HCI_ERR_CONN_FAIL_TO_ESTAB);
+			event_elapsed = cig->lll.latency_event +
+					cig->lll.lazy_prepare + 1U;
+			if (cis->event_expire > event_elapsed) {
+				cis->event_expire -= event_elapsed;
+			} else {
+				cis->event_expire = 0U;
 
+				/* Stop CIS and defer cleanup to after teardown.
+				 * This will only generate a terminate event to the
+				 * host if CIS has been established. If CIS was not
+				 * established, the teardown will send
+				 * CIS_ESTABLISHED with failure.
+				 */
+				ull_conn_iso_cis_stop(cis, NULL, cis->established ?
+						      BT_HCI_ERR_CONN_TIMEOUT :
+						      BT_HCI_ERR_CONN_FAIL_TO_ESTAB);
+			}
 		}
 	}
 }

@@ -1016,14 +1016,7 @@ void ull_sync_established_report(memq_link_t *link, struct node_rx_pdu *rx)
 
 void ull_sync_done(struct node_rx_event_done *done)
 {
-	uint32_t ticks_drift_minus;
-	uint32_t ticks_drift_plus;
 	struct ll_sync_set *sync;
-	uint16_t elapsed_event;
-	uint16_t skip_event;
-	uint8_t force_lll;
-	uint16_t lazy;
-	uint8_t force;
 
 	/* Get reference to ULL context */
 	sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);

@@ -1053,17 +1046,19 @@ void ull_sync_done(struct node_rx_event_done *done)
 	} else
 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
 	{
+		uint32_t ticks_drift_minus;
+		uint32_t ticks_drift_plus;
+		uint16_t elapsed_event;
 		struct lll_sync *lll;
+		uint16_t skip_event;
+		uint8_t force_lll;
+		uint16_t lazy;
+		uint8_t force;
 
 		lll = &sync->lll;
 
 		/* Events elapsed used in timeout checks below */
 		skip_event = lll->skip_event;
-		if (lll->skip_prepare) {
-			elapsed_event = skip_event + lll->skip_prepare;
-		} else {
-			elapsed_event = skip_event + 1U;
-		}
 
 		/* Sync drift compensation and new skip calculation */
 		ticks_drift_plus = 0U;

@@ -1079,6 +1074,8 @@ void ull_sync_done(struct node_rx_event_done *done)
 				sync->sync_expire = 0U;
 			}
 
+			elapsed_event = skip_event + lll->lazy_prepare + 1U;
+
 			/* Reset supervision countdown */
 			if (done->extra.crc_valid) {
 				sync->timeout_expire = 0U;