2018-12-18 05:48:20 +01:00
|
|
|
/*
|
2021-02-23 13:32:28 +05:30
|
|
|
* Copyright (c) 2017-2021 Nordic Semiconductor ASA
|
2018-12-18 05:48:20 +01:00
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdbool.h>
|
|
|
|
#include <errno.h>
|
|
|
|
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/zephyr.h>
|
2020-08-19 10:09:58 +05:30
|
|
|
#include <soc.h>
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/device.h>
|
|
|
|
#include <zephyr/drivers/entropy.h>
|
|
|
|
#include <zephyr/bluetooth/hci.h>
|
2018-12-18 05:48:20 +01:00
|
|
|
|
2020-08-19 10:09:58 +05:30
|
|
|
#include "hal/cpu.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "hal/ccm.h"
|
2020-08-19 10:09:58 +05:30
|
|
|
#include "hal/cntr.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "hal/ticker.h"
|
|
|
|
|
|
|
|
#include "util/util.h"
|
|
|
|
#include "util/mem.h"
|
|
|
|
#include "util/mfifo.h"
|
|
|
|
#include "util/memq.h"
|
|
|
|
#include "util/mayfly.h"
|
2021-10-30 23:40:52 +02:00
|
|
|
#include "util/dbuf.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
#include "ticker/ticker.h"
|
|
|
|
|
|
|
|
#include "pdu.h"
|
2021-02-24 16:42:02 +05:30
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "lll.h"
|
2021-02-24 16:42:02 +05:30
|
|
|
#include "lll/lll_vendor.h"
|
|
|
|
#include "lll/lll_adv_types.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "lll_adv.h"
|
2021-02-24 16:42:02 +05:30
|
|
|
#include "lll/lll_adv_pdu.h"
|
2021-03-09 10:50:44 +05:30
|
|
|
#include "lll_chan.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "lll_scan.h"
|
2021-04-01 11:44:33 +02:00
|
|
|
#include "lll/lll_df_types.h"
|
2020-08-03 16:28:58 +05:30
|
|
|
#include "lll_sync.h"
|
2020-11-09 16:31:01 +01:00
|
|
|
#include "lll_sync_iso.h"
|
2022-04-20 14:11:00 +02:00
|
|
|
#include "lll_iso_tx.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "lll_conn.h"
|
2020-11-19 02:12:54 -08:00
|
|
|
#include "lll_df.h"
|
2021-02-24 16:42:02 +05:30
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "ull_adv_types.h"
|
|
|
|
#include "ull_scan_types.h"
|
2020-08-11 10:08:13 +05:30
|
|
|
#include "ull_sync_types.h"
|
2021-11-15 09:41:12 +01:00
|
|
|
#if !defined(CONFIG_BT_LL_SW_LLCP_LEGACY)
|
|
|
|
#include "ull_tx_queue.h"
|
|
|
|
#endif
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "ull_conn_types.h"
|
2019-05-09 18:39:26 +05:30
|
|
|
#include "ull_filter.h"
|
2021-05-05 21:30:40 +02:00
|
|
|
#include "ull_df_types.h"
|
|
|
|
#include "ull_df_internal.h"
|
2019-05-09 18:39:26 +05:30
|
|
|
|
2022-05-12 04:48:32 -07:00
|
|
|
#if defined(CONFIG_BT_CTLR_USER_EXT)
|
|
|
|
#include "ull_vendor.h"
|
|
|
|
#endif /* CONFIG_BT_CTLR_USER_EXT */
|
|
|
|
|
2021-02-04 16:11:06 +01:00
|
|
|
#include "isoal.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "ull_internal.h"
|
2020-11-26 15:47:39 +01:00
|
|
|
#include "ull_iso_internal.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "ull_adv_internal.h"
|
|
|
|
#include "ull_scan_internal.h"
|
2020-08-03 16:28:58 +05:30
|
|
|
#include "ull_sync_internal.h"
|
2020-11-09 16:31:01 +01:00
|
|
|
#include "ull_sync_iso_internal.h"
|
2021-09-20 16:25:06 +02:00
|
|
|
#include "ull_central_internal.h"
|
2021-11-23 22:50:42 +01:00
|
|
|
#include "ull_iso_types.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "ull_conn_internal.h"
|
2021-02-04 16:11:06 +01:00
|
|
|
#include "lll_conn_iso.h"
|
|
|
|
#include "ull_conn_iso_types.h"
|
2021-01-25 09:00:56 +01:00
|
|
|
#include "ull_central_iso_internal.h"
|
2021-05-05 09:14:07 +02:00
|
|
|
|
|
|
|
#include "ull_conn_iso_internal.h"
|
2021-01-25 09:00:56 +01:00
|
|
|
#include "ull_peripheral_iso_internal.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
|
2021-02-24 16:42:02 +05:30
|
|
|
#include "ll.h"
|
|
|
|
#include "ll_feat.h"
|
2021-11-15 06:02:16 +05:30
|
|
|
#include "ll_test.h"
|
2021-02-24 16:42:02 +05:30
|
|
|
#include "ll_settings.h"
|
|
|
|
|
2019-11-07 11:39:45 +05:30
|
|
|
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
|
|
|
|
#define LOG_MODULE_NAME bt_ctlr_ull
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "common/log.h"
|
|
|
|
#include "hal/debug.h"
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_BROADCASTER)
|
|
|
|
#define BT_ADV_TICKER_NODES ((TICKER_ID_ADV_LAST) - (TICKER_ID_ADV_STOP) + 1)
|
2020-02-12 08:55:57 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
|
|
|
|
#define BT_ADV_AUX_TICKER_NODES ((TICKER_ID_ADV_AUX_LAST) - \
|
|
|
|
(TICKER_ID_ADV_AUX_BASE) + 1)
|
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
|
|
|
|
#define BT_ADV_SYNC_TICKER_NODES ((TICKER_ID_ADV_SYNC_LAST) - \
|
|
|
|
(TICKER_ID_ADV_SYNC_BASE) + 1)
|
2021-03-09 13:50:02 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_ISO)
|
|
|
|
#define BT_ADV_ISO_TICKER_NODES ((TICKER_ID_ADV_ISO_LAST) - \
|
|
|
|
(TICKER_ID_ADV_ISO_BASE) + 1)
|
|
|
|
#else /* !CONFIG_BT_CTLR_ADV_ISO */
|
|
|
|
#define BT_ADV_ISO_TICKER_NODES 0
|
|
|
|
#endif /* !CONFIG_BT_CTLR_ADV_ISO */
|
2020-02-12 08:55:57 +05:30
|
|
|
#else /* !CONFIG_BT_CTLR_ADV_PERIODIC */
|
|
|
|
#define BT_ADV_SYNC_TICKER_NODES 0
|
2021-03-09 13:50:02 +05:30
|
|
|
#define BT_ADV_ISO_TICKER_NODES 0
|
2020-02-12 08:55:57 +05:30
|
|
|
#endif /* !CONFIG_BT_CTLR_ADV_PERIODIC */
|
|
|
|
#else /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
|
|
|
|
#define BT_ADV_AUX_TICKER_NODES 0
|
|
|
|
#define BT_ADV_SYNC_TICKER_NODES 0
|
2021-03-09 13:50:02 +05:30
|
|
|
#define BT_ADV_ISO_TICKER_NODES 0
|
2020-02-12 08:55:57 +05:30
|
|
|
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
|
|
|
|
#else /* !CONFIG_BT_BROADCASTER */
|
2018-12-18 05:48:20 +01:00
|
|
|
#define BT_ADV_TICKER_NODES 0
|
2020-02-12 08:55:57 +05:30
|
|
|
#define BT_ADV_AUX_TICKER_NODES 0
|
|
|
|
#define BT_ADV_SYNC_TICKER_NODES 0
|
2021-03-09 13:50:02 +05:30
|
|
|
#define BT_ADV_ISO_TICKER_NODES 0
|
2020-02-12 08:55:57 +05:30
|
|
|
#endif /* !CONFIG_BT_BROADCASTER */
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
#if defined(CONFIG_BT_OBSERVER)
|
|
|
|
#define BT_SCAN_TICKER_NODES ((TICKER_ID_SCAN_LAST) - (TICKER_ID_SCAN_STOP) + 1)
|
2020-04-21 11:12:41 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_EXT)
|
|
|
|
#define BT_SCAN_AUX_TICKER_NODES ((TICKER_ID_SCAN_AUX_LAST) - \
|
|
|
|
(TICKER_ID_SCAN_AUX_BASE) + 1)
|
2020-10-05 10:42:39 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
|
2020-08-21 16:17:51 +05:30
|
|
|
#define BT_SCAN_SYNC_TICKER_NODES ((TICKER_ID_SCAN_SYNC_LAST) - \
|
|
|
|
(TICKER_ID_SCAN_SYNC_BASE) + 1)
|
2021-03-09 13:50:02 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
|
|
|
|
#define BT_SCAN_SYNC_ISO_TICKER_NODES ((TICKER_ID_SCAN_SYNC_ISO_LAST) - \
|
|
|
|
(TICKER_ID_SCAN_SYNC_ISO_BASE) + 1)
|
|
|
|
#else /* !CONFIG_BT_CTLR_SYNC_ISO */
|
|
|
|
#define BT_SCAN_SYNC_ISO_TICKER_NODES 0
|
|
|
|
#endif /* !CONFIG_BT_CTLR_SYNC_ISO */
|
2020-10-08 14:40:43 +05:30
|
|
|
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
|
|
|
|
#define BT_SCAN_SYNC_TICKER_NODES 0
|
2021-03-09 13:50:02 +05:30
|
|
|
#define BT_SCAN_SYNC_ISO_TICKER_NODES 0
|
2020-10-08 14:40:43 +05:30
|
|
|
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
|
2020-04-21 11:12:41 +05:30
|
|
|
#else /* !CONFIG_BT_CTLR_ADV_EXT */
|
|
|
|
#define BT_SCAN_AUX_TICKER_NODES 0
|
2020-08-21 16:17:51 +05:30
|
|
|
#define BT_SCAN_SYNC_TICKER_NODES 0
|
2021-03-09 13:50:02 +05:30
|
|
|
#define BT_SCAN_SYNC_ISO_TICKER_NODES 0
|
2020-04-21 11:12:41 +05:30
|
|
|
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
|
2018-12-18 05:48:20 +01:00
|
|
|
#else
|
|
|
|
#define BT_SCAN_TICKER_NODES 0
|
2020-04-21 11:12:41 +05:30
|
|
|
#define BT_SCAN_AUX_TICKER_NODES 0
|
2020-08-21 16:17:51 +05:30
|
|
|
#define BT_SCAN_SYNC_TICKER_NODES 0
|
2021-03-09 13:50:02 +05:30
|
|
|
#define BT_SCAN_SYNC_ISO_TICKER_NODES 0
|
2018-12-18 05:48:20 +01:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
#define BT_CONN_TICKER_NODES ((TICKER_ID_CONN_LAST) - (TICKER_ID_CONN_BASE) + 1)
|
|
|
|
#else
|
|
|
|
#define BT_CONN_TICKER_NODES 0
|
|
|
|
#endif
|
|
|
|
|
2021-04-14 10:17:20 +02:00
|
|
|
#if defined(CONFIG_BT_CTLR_CONN_ISO)
|
2021-03-29 12:49:36 +02:00
|
|
|
#define BT_CIG_TICKER_NODES ((TICKER_ID_CONN_ISO_LAST) - \
|
2021-03-29 13:22:25 +02:00
|
|
|
(TICKER_ID_CONN_ISO_BASE) + 1 + \
|
|
|
|
(TICKER_ID_CONN_ISO_RESUME_LAST) - \
|
|
|
|
(TICKER_ID_CONN_ISO_RESUME_BASE) + 1)
|
|
|
|
|
2021-02-19 13:26:32 +01:00
|
|
|
#else
|
|
|
|
#define BT_CIG_TICKER_NODES 0
|
|
|
|
#endif
|
|
|
|
|
2020-05-07 11:45:33 +02:00
|
|
|
#if defined(CONFIG_BT_CTLR_USER_EXT)
|
|
|
|
#define USER_TICKER_NODES CONFIG_BT_CTLR_USER_TICKER_ID_RANGE
|
|
|
|
#else
|
|
|
|
#define USER_TICKER_NODES 0
|
|
|
|
#endif
|
|
|
|
|
2021-03-29 16:00:42 +05:30
|
|
|
#if defined(CONFIG_SOC_FLASH_NRF_RADIO_SYNC_TICKER)
|
|
|
|
#define FLASH_TICKER_NODES 2 /* No. of tickers reserved for flash
|
|
|
|
* driver
|
|
|
|
*/
|
|
|
|
#define TICKER_USER_ULL_HIGH_FLASH_OPS 1 /* No. of additional ticker ULL_HIGH
|
|
|
|
* context operations
|
|
|
|
*/
|
|
|
|
#define TICKER_USER_THREAD_FLASH_OPS 1 /* No. of additional ticker thread
|
|
|
|
* context operations
|
|
|
|
*/
|
|
|
|
#else
|
|
|
|
#define FLASH_TICKER_NODES 0
|
|
|
|
#define TICKER_USER_ULL_HIGH_FLASH_OPS 0
|
|
|
|
#define TICKER_USER_THREAD_FLASH_OPS 0
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Define ticker nodes */
|
|
|
|
/* NOTE: FLASH_TICKER_NODES shall be after Link Layer's list of ticker id
|
|
|
|
* allocations, refer to ll_timeslice_ticker_id_get on how ticker id
|
|
|
|
* used by flash driver is returned.
|
|
|
|
*/
|
2018-12-18 05:48:20 +01:00
|
|
|
#define TICKER_NODES (TICKER_ID_ULL_BASE + \
|
|
|
|
BT_ADV_TICKER_NODES + \
|
2020-02-12 08:55:57 +05:30
|
|
|
BT_ADV_AUX_TICKER_NODES + \
|
|
|
|
BT_ADV_SYNC_TICKER_NODES + \
|
2021-03-09 13:50:02 +05:30
|
|
|
BT_ADV_ISO_TICKER_NODES + \
|
2018-12-18 05:48:20 +01:00
|
|
|
BT_SCAN_TICKER_NODES + \
|
2020-04-21 11:12:41 +05:30
|
|
|
BT_SCAN_AUX_TICKER_NODES + \
|
2020-08-21 16:17:51 +05:30
|
|
|
BT_SCAN_SYNC_TICKER_NODES + \
|
2021-03-09 13:50:02 +05:30
|
|
|
BT_SCAN_SYNC_ISO_TICKER_NODES + \
|
2018-12-18 05:48:20 +01:00
|
|
|
BT_CONN_TICKER_NODES + \
|
2021-02-19 13:26:32 +01:00
|
|
|
BT_CIG_TICKER_NODES + \
|
2021-03-29 16:00:42 +05:30
|
|
|
USER_TICKER_NODES + \
|
|
|
|
FLASH_TICKER_NODES)
|
|
|
|
|
|
|
|
/* When both central and peripheral are supported, one each Rx node will be
|
|
|
|
* needed by connectable advertising and the initiator to generate connection
|
|
|
|
* complete event, hence conditionally set the count.
|
|
|
|
*/
|
|
|
|
#if defined(CONFIG_BT_MAX_CONN)
|
|
|
|
#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_PERIPHERAL)
|
|
|
|
#define BT_CTLR_MAX_CONNECTABLE 2
|
|
|
|
#else
|
|
|
|
#define BT_CTLR_MAX_CONNECTABLE 1
|
|
|
|
#endif
|
|
|
|
#define BT_CTLR_MAX_CONN CONFIG_BT_MAX_CONN
|
2021-08-27 11:51:29 +02:00
|
|
|
#else
|
|
|
|
#define BT_CTLR_MAX_CONNECTABLE 0
|
|
|
|
#define BT_CTLR_MAX_CONN 0
|
|
|
|
#endif
|
|
|
|
|
2021-07-27 21:05:07 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
|
2022-02-10 14:19:51 +01:00
|
|
|
#if defined(CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX)
|
2021-08-27 11:51:29 +02:00
|
|
|
/* Note: Need node for PDU and CTE sample */
|
|
|
|
#define BT_CTLR_ADV_EXT_RX_CNT (CONFIG_BT_CTLR_SCAN_AUX_SET * \
|
|
|
|
CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX * 2)
|
2021-12-19 06:42:52 +05:30
|
|
|
#else /* !CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX */
|
|
|
|
#define BT_CTLR_ADV_EXT_RX_CNT 1
|
|
|
|
#endif /* !CONFIG_BT_CTLR_DF_PER_SCAN_CTE_NUM_MAX */
|
|
|
|
#else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
|
2021-04-27 12:31:23 +05:30
|
|
|
#define BT_CTLR_ADV_EXT_RX_CNT 0
|
2021-12-19 06:42:52 +05:30
|
|
|
#endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
|
2021-03-29 16:00:42 +05:30
|
|
|
|
|
|
|
#if !defined(TICKER_USER_LLL_VENDOR_OPS)
|
|
|
|
#define TICKER_USER_LLL_VENDOR_OPS 0
|
|
|
|
#endif /* TICKER_USER_LLL_VENDOR_OPS */
|
|
|
|
|
|
|
|
#if !defined(TICKER_USER_ULL_HIGH_VENDOR_OPS)
|
|
|
|
#define TICKER_USER_ULL_HIGH_VENDOR_OPS 0
|
|
|
|
#endif /* TICKER_USER_ULL_HIGH_VENDOR_OPS */
|
|
|
|
|
|
|
|
#if !defined(TICKER_USER_THREAD_VENDOR_OPS)
|
|
|
|
#define TICKER_USER_THREAD_VENDOR_OPS 0
|
|
|
|
#endif /* TICKER_USER_THREAD_VENDOR_OPS */
|
|
|
|
|
|
|
|
/* Define ticker user operations */
|
|
|
|
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
|
|
|
|
(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
|
|
|
|
/* NOTE: When ticker job is disabled inside radio events then all advertising,
|
2021-09-20 16:25:06 +02:00
|
|
|
* scanning, and peripheral latency cancel ticker operations will be deferred,
|
2021-03-29 16:00:42 +05:30
|
|
|
* requiring increased ticker thread context operation queue count.
|
|
|
|
*/
|
|
|
|
#define TICKER_USER_THREAD_OPS (BT_CTLR_ADV_SET + BT_CTLR_SCAN_SET + \
|
|
|
|
BT_CTLR_MAX_CONN + \
|
|
|
|
TICKER_USER_THREAD_VENDOR_OPS + \
|
|
|
|
TICKER_USER_THREAD_FLASH_OPS + \
|
|
|
|
1)
|
|
|
|
#else /* !CONFIG_BT_CTLR_LOW_LAT */
|
|
|
|
/* NOTE: As ticker job is not disabled inside radio events, no need for extra
|
|
|
|
* thread operations queue element for flash driver.
|
|
|
|
*/
|
|
|
|
#define TICKER_USER_THREAD_OPS (1 + TICKER_USER_THREAD_VENDOR_OPS + 1)
|
|
|
|
#endif /* !CONFIG_BT_CTLR_LOW_LAT */
|
|
|
|
|
2021-03-28 07:02:57 +05:30
|
|
|
#define TICKER_USER_ULL_LOW_OPS (1 + 1)
|
|
|
|
|
2021-03-29 16:00:42 +05:30
|
|
|
/* NOTE: When ULL_LOW priority is configured to lower than ULL_HIGH, then extra
|
|
|
|
* ULL_HIGH operations queue elements are required to buffer the
|
|
|
|
* requested ticker operations.
|
|
|
|
*/
|
2021-06-17 11:03:26 +05:30
|
|
|
#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_ADV_EXT) && \
|
|
|
|
defined(CONFIG_BT_CTLR_PHY_CODED)
|
|
|
|
#define TICKER_USER_ULL_HIGH_OPS (4 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
|
|
|
|
TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
|
|
|
|
#else /* !CONFIG_BT_CENTRAL || !CONFIG_BT_CTLR_ADV_EXT ||
|
|
|
|
* !CONFIG_BT_CTLR_PHY_CODED
|
|
|
|
*/
|
2021-03-29 16:00:42 +05:30
|
|
|
#define TICKER_USER_ULL_HIGH_OPS (3 + TICKER_USER_ULL_HIGH_VENDOR_OPS + \
|
|
|
|
TICKER_USER_ULL_HIGH_FLASH_OPS + 1)
|
2021-06-17 11:03:26 +05:30
|
|
|
#endif /* !CONFIG_BT_CENTRAL || !CONFIG_BT_CTLR_ADV_EXT ||
|
|
|
|
* !CONFIG_BT_CTLR_PHY_CODED
|
|
|
|
*/
|
2021-03-29 16:00:42 +05:30
|
|
|
|
2021-03-28 07:02:57 +05:30
|
|
|
#define TICKER_USER_LLL_OPS (3 + TICKER_USER_LLL_VENDOR_OPS + 1)
|
2021-03-29 16:00:42 +05:30
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
#define TICKER_USER_OPS (TICKER_USER_LLL_OPS + \
|
|
|
|
TICKER_USER_ULL_HIGH_OPS + \
|
|
|
|
TICKER_USER_ULL_LOW_OPS + \
|
2021-03-29 16:00:42 +05:30
|
|
|
TICKER_USER_THREAD_OPS)
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
/* Memory for ticker nodes/instances */
|
2020-05-27 11:26:57 -05:00
|
|
|
static uint8_t MALIGN(4) ticker_nodes[TICKER_NODES][TICKER_NODE_T_SIZE];
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
/* Memory for users/contexts operating on ticker module */
|
2020-05-27 11:26:57 -05:00
|
|
|
static uint8_t MALIGN(4) ticker_users[MAYFLY_CALLER_COUNT][TICKER_USER_T_SIZE];
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
/* Memory for user/context simultaneous API operations */
|
2020-05-27 11:26:57 -05:00
|
|
|
static uint8_t MALIGN(4) ticker_user_ops[TICKER_USER_OPS][TICKER_USER_OP_T_SIZE];
|
2018-12-18 05:48:20 +01:00
|
|
|
|
2022-03-16 21:07:43 +00:00
|
|
|
/* Semaphore to wakeup thread on ticker API callback */
|
2018-12-18 05:48:20 +01:00
|
|
|
static struct k_sem sem_ticker_api_cb;
|
|
|
|
|
|
|
|
/* Semaphore to wakeup thread on Rx-ed objects */
|
|
|
|
static struct k_sem *sem_recv;
|
|
|
|
|
2019-06-19 20:22:50 +02:00
|
|
|
/* Declare prepare-event FIFO: mfifo_prep.
|
|
|
|
* Queue of struct node_rx_event_done
|
|
|
|
*/
|
2018-12-18 05:48:20 +01:00
|
|
|
static MFIFO_DEFINE(prep, sizeof(struct lll_event), EVENT_PIPELINE_MAX);
|
|
|
|
|
2021-05-19 13:59:21 +02:00
|
|
|
/* Declare done-event RXFIFO. This is a composite pool-backed MFIFO for rx_nodes.
|
|
|
|
* The declaration constructs the following data structures:
|
|
|
|
* - mfifo_done: FIFO with pointers to struct node_rx_event_done
|
|
|
|
* - mem_done: Backing data pool for struct node_rx_event_done elements
|
|
|
|
* - mem_link_done: Pool of memq_link_t elements
|
|
|
|
*
|
|
|
|
* An extra link may be reserved for use by the ull_done memq (EVENT_DONE_LINK_CNT).
|
|
|
|
*
|
2019-02-14 10:04:17 +01:00
|
|
|
* Queue of pointers to struct node_rx_event_done.
|
2021-10-04 09:41:40 +05:30
|
|
|
* The actual backing behind these pointers is mem_done.
|
|
|
|
*
|
|
|
|
* When there are radio events with time reservations lower than the preemption
|
|
|
|
* timeout of 1.5 ms, the pipeline has to account for the maximum radio events
|
|
|
|
* that can be enqueued during the preempt timeout duration. All these enqueued
|
|
|
|
* events could be aborted in case of late scheduling, needing as many done
|
|
|
|
* event buffers.
|
|
|
|
*
|
|
|
|
* During continuous scanning, there can be 1 active radio event, 1 scan resume
|
|
|
|
* and 1 new scan prepare. If there are peripheral prepares in addition, and due
|
|
|
|
* to late scheduling all these will abort needing 4 done buffers.
|
|
|
|
*
|
|
|
|
* If there are additional peripheral prepares enqueued, which are apart by
|
|
|
|
* their time reservations, these are not yet late and hence no more additional
|
|
|
|
* done buffers are needed.
|
|
|
|
*
|
|
|
|
* If Extended Scanning is supported, then an additional auxiliary scan event's
|
|
|
|
* prepare could be enqueued in the pipeline during the preemption duration.
|
2022-03-18 07:33:11 +05:30
|
|
|
*
|
|
|
|
* If Extended Scanning with Coded PHY is supported, then an additional 1 resume
|
|
|
|
* prepare could be enqueued in the pipeline during the preemption duration.
|
2019-02-14 10:04:17 +01:00
|
|
|
*/
|
2021-05-07 08:42:17 +02:00
|
|
|
#if !defined(VENDOR_EVENT_DONE_MAX)
|
2021-10-04 09:41:40 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_OBSERVER)
|
2022-03-18 07:33:11 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_PHY_CODED)
|
|
|
|
#define EVENT_DONE_MAX 6
|
|
|
|
#else /* !CONFIG_BT_CTLR_PHY_CODED */
|
2021-10-04 09:41:40 +05:30
|
|
|
#define EVENT_DONE_MAX 5
|
2022-03-18 07:33:11 +05:30
|
|
|
#endif /* !CONFIG_BT_CTLR_PHY_CODED */
|
2021-10-04 09:41:40 +05:30
|
|
|
#else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
|
|
|
|
#define EVENT_DONE_MAX 4
|
|
|
|
#endif /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_OBSERVER */
|
2021-05-07 08:42:17 +02:00
|
|
|
#else
|
|
|
|
#define EVENT_DONE_MAX VENDOR_EVENT_DONE_MAX
|
|
|
|
#endif
|
|
|
|
|
2021-05-19 13:59:21 +02:00
|
|
|
static RXFIFO_DEFINE(done, sizeof(struct node_rx_event_done),
|
|
|
|
EVENT_DONE_MAX, EVENT_DONE_LINK_CNT);
|
2018-12-18 05:48:20 +01:00
|
|
|
|
2020-12-17 12:49:04 +05:30
|
|
|
/* Minimum number of node rx for ULL to LL/HCI thread per connection.
|
|
|
|
* Increasing this by times the max. simultaneous connection count will permit
|
|
|
|
* simultaneous parallel PHY update or Connection Update procedures amongst
|
|
|
|
* active connections.
|
2022-01-14 12:39:13 +01:00
|
|
|
* Minimum node rx of 2 that can be reserved happens when:
|
|
|
|
* - for legacy LLCPs:
|
|
|
|
* Local central initiated PHY Update reserves 2 node rx,
|
|
|
|
* one for PHY update complete and another for Data Length Update complete
|
|
|
|
* notification. Otherwise, a peripheral only needs 1 additional node rx to
|
|
|
|
* generate Data Length Update complete when PHY Update completes; node rx for
|
|
|
|
* PHY update complete is reserved as the received PHY Update Ind PDU.
|
|
|
|
* - for new LLCPs:
|
|
|
|
* Central and peripheral always use two new nodes for handling completion
|
|
|
|
* notification one for PHY update complete and another for Data Length Update
|
|
|
|
* complete.
|
2020-12-17 12:49:04 +05:30
|
|
|
*/
|
2022-01-14 12:39:13 +01:00
|
|
|
#if defined(CONFIG_BT_CTLR_DATA_LENGTH) && defined(CONFIG_BT_CTLR_PHY) && \
|
|
|
|
(defined(CONFIG_BT_LL_SW_LLCP_LEGACY) && defined(CONFIG_BT_CENTRAL) || \
|
|
|
|
!defined(CONFIG_BT_LL_SW_LLCP_LEGACY))
|
2021-06-24 15:17:50 +05:30
|
|
|
#define LL_PDU_RX_CNT (2 * (CONFIG_BT_CTLR_LLCP_CONN))
|
|
|
|
#elif defined(CONFIG_BT_CONN)
|
|
|
|
#define LL_PDU_RX_CNT (CONFIG_BT_CTLR_LLCP_CONN)
|
2019-07-18 13:24:32 +05:30
|
|
|
#else
|
2021-06-24 15:17:50 +05:30
|
|
|
#define LL_PDU_RX_CNT 0
|
2019-07-18 13:24:32 +05:30
|
|
|
#endif
|
|
|
|
|
2020-12-17 12:49:04 +05:30
|
|
|
/* No. of node rx for LLL to ULL.
|
|
|
|
* Reserve 3, 1 for adv data, 1 for scan response and 1 for empty PDU reception.
|
|
|
|
*/
|
2021-04-27 12:31:23 +05:30
|
|
|
#define PDU_RX_CNT (3 + BT_CTLR_ADV_EXT_RX_CNT + CONFIG_BT_CTLR_RX_BUFFERS)
|
2020-12-17 12:49:04 +05:30
|
|
|
|
|
|
|
/* Part sum of LLL to ULL and ULL to LL/HCI thread node rx count.
|
|
|
|
* Will be used below in allocating node rx pool.
|
|
|
|
*/
|
2018-12-18 05:48:20 +01:00
|
|
|
#define RX_CNT (PDU_RX_CNT + LL_PDU_RX_CNT)
|
|
|
|
|
|
|
|
static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT);
|
|
|
|
|
2019-05-24 11:13:21 +02:00
|
|
|
#if defined(CONFIG_BT_RX_USER_PDU_LEN)
|
|
|
|
#define PDU_RX_USER_PDU_OCTETS_MAX (CONFIG_BT_RX_USER_PDU_LEN)
|
|
|
|
#else
|
|
|
|
#define PDU_RX_USER_PDU_OCTETS_MAX 0
|
|
|
|
#endif
|
2021-01-15 16:19:21 +05:30
|
|
|
|
2021-08-26 15:52:56 +05:30
|
|
|
#define PDU_ADV_SIZE MAX(PDU_AC_LL_SIZE_MAX, \
|
|
|
|
(PDU_AC_LL_HEADER_SIZE + LL_EXT_OCTETS_RX_MAX))
|
2021-01-15 16:19:21 +05:30
|
|
|
|
|
|
|
#define PDU_DATA_SIZE MAX((PDU_DC_LL_HEADER_SIZE + LL_LENGTH_OCTETS_RX_MAX), \
|
|
|
|
(PDU_BIS_LL_HEADER_SIZE + LL_BIS_OCTETS_RX_MAX))
|
2019-02-14 13:35:51 +01:00
|
|
|
|
2021-08-26 15:52:56 +05:30
|
|
|
#define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
|
|
|
|
|
|
|
|
#define PDU_RX_NODE_POOL_ELEMENT_SIZE MROUND(NODE_RX_HEADER_SIZE + \
|
|
|
|
MAX(MAX(PDU_ADV_SIZE, \
|
|
|
|
PDU_DATA_SIZE), \
|
|
|
|
PDU_RX_USER_PDU_OCTETS_MAX))
|
2019-02-14 13:35:51 +01:00
|
|
|
|
2021-01-19 16:04:51 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_ISO_SET)
|
|
|
|
#define BT_CTLR_ADV_ISO_SET CONFIG_BT_CTLR_ADV_ISO_SET
|
|
|
|
#else
|
|
|
|
#define BT_CTLR_ADV_ISO_SET 0
|
|
|
|
#endif
|
|
|
|
|
2021-02-05 10:08:16 +05:30
|
|
|
#if defined(CONFIG_BT_PER_ADV_SYNC_MAX)
|
|
|
|
#define BT_CTLR_SCAN_SYNC_SET CONFIG_BT_PER_ADV_SYNC_MAX
|
2020-08-21 16:17:51 +05:30
|
|
|
#else
|
|
|
|
#define BT_CTLR_SCAN_SYNC_SET 0
|
|
|
|
#endif
|
|
|
|
|
2020-12-25 10:45:07 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET)
|
|
|
|
#define BT_CTLR_SCAN_SYNC_ISO_SET CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET
|
|
|
|
#else
|
|
|
|
#define BT_CTLR_SCAN_SYNC_ISO_SET 0
|
|
|
|
#endif
|
|
|
|
|
2019-09-13 17:06:11 +05:30
|
|
|
#define PDU_RX_POOL_SIZE (PDU_RX_NODE_POOL_ELEMENT_SIZE * \
|
2020-06-18 14:13:41 +02:00
|
|
|
(RX_CNT + BT_CTLR_MAX_CONNECTABLE + \
|
2020-08-21 16:17:51 +05:30
|
|
|
BT_CTLR_ADV_SET + BT_CTLR_SCAN_SYNC_SET))
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
static struct {
|
|
|
|
void *free;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t pool[PDU_RX_POOL_SIZE];
|
2018-12-18 05:48:20 +01:00
|
|
|
} mem_pdu_rx;
|
|
|
|
|
2020-10-05 11:01:00 +05:30
|
|
|
/* NOTE: Two memq_link structures are reserved in the case of periodic sync,
|
|
|
|
* one each for sync established and sync lost respectively. Where as in
|
|
|
|
* comparison to a connection, the connection established uses incoming Rx-ed
|
|
|
|
* CONNECT_IND PDU to piggy back generation of connection complete, and hence
|
|
|
|
* only one is reserved for the generation of disconnection event (which can
|
|
|
|
* happen due to supervision timeout and other reasons that dont have an
|
|
|
|
* incoming Rx-ed PDU).
|
|
|
|
*/
|
2021-05-05 21:30:40 +02:00
|
|
|
#define LINK_RX_POOL_SIZE \
|
|
|
|
(sizeof(memq_link_t) * \
|
|
|
|
(RX_CNT + 2 + BT_CTLR_MAX_CONN + BT_CTLR_ADV_SET + \
|
2021-11-03 07:06:27 +05:30
|
|
|
(BT_CTLR_ADV_ISO_SET * 2) + (BT_CTLR_SCAN_SYNC_SET * 2) + \
|
|
|
|
(BT_CTLR_SCAN_SYNC_ISO_SET * 2) + \
|
2021-05-05 21:30:40 +02:00
|
|
|
(IQ_REPORT_CNT)))
|
2018-12-18 05:48:20 +01:00
|
|
|
static struct {
|
2022-04-01 18:11:33 +05:30
|
|
|
uint16_t quota_pdu; /* Number of un-utilized buffers */
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
void *free;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t pool[LINK_RX_POOL_SIZE];
|
2018-12-18 05:48:20 +01:00
|
|
|
} mem_link_rx;
|
|
|
|
|
|
|
|
static MEMQ_DECLARE(ull_rx);
|
|
|
|
static MEMQ_DECLARE(ll_rx);
|
2021-01-21 16:33:32 +01:00
|
|
|
#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
|
|
|
|
static MEMQ_DECLARE(ull_done);
|
|
|
|
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CONN)
|
2019-07-18 15:10:05 +05:30
|
|
|
static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
|
2019-06-05 13:40:36 +02:00
|
|
|
|
|
|
|
static void *mark_update;
|
2018-12-18 05:48:20 +01:00
|
|
|
#endif /* CONFIG_BT_CONN */
|
|
|
|
|
2022-04-28 09:59:42 +05:30
|
|
|
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
|
2022-04-20 14:11:00 +02:00
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
#define BT_BUF_ACL_TX_COUNT CONFIG_BT_BUF_ACL_TX_COUNT
|
|
|
|
#else
|
|
|
|
#define BT_BUF_ACL_TX_COUNT 0
|
|
|
|
#endif /* CONFIG_BT_CONN */
|
2022-04-28 09:59:42 +05:30
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_ISO) || defined(CONFIG_BT_CTLR_CONN_ISO)
|
2022-04-20 14:11:00 +02:00
|
|
|
#define BT_CTLR_ISO_TX_BUFFERS CONFIG_BT_CTLR_ISO_TX_BUFFERS
|
|
|
|
#else
|
|
|
|
#define BT_CTLR_ISO_TX_BUFFERS 0
|
2022-04-28 09:59:42 +05:30
|
|
|
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */
|
|
|
|
|
2022-04-20 14:11:00 +02:00
|
|
|
static MFIFO_DEFINE(tx_ack, sizeof(struct lll_tx),
|
|
|
|
BT_BUF_ACL_TX_COUNT + BT_CTLR_ISO_TX_BUFFERS);
|
2022-04-28 09:59:42 +05:30
|
|
|
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
|
2022-04-20 14:11:00 +02:00
|
|
|
|
2019-06-05 13:40:36 +02:00
|
|
|
static void *mark_disable;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
static inline int init_reset(void);
|
2019-10-10 11:18:30 +02:00
|
|
|
static void perform_lll_reset(void *param);
|
2019-06-05 13:40:36 +02:00
|
|
|
static inline void *mark_set(void **m, void *param);
|
|
|
|
static inline void *mark_unset(void **m, void *param);
|
|
|
|
static inline void *mark_get(void *m);
|
2022-01-22 07:31:03 +05:30
|
|
|
static void rx_replenish_all(void);
|
|
|
|
#if defined(CONFIG_BT_CONN) || \
|
|
|
|
(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
|
|
|
|
defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
|
|
|
|
defined(CONFIG_BT_CTLR_ADV_ISO)
|
|
|
|
static void rx_release_replenish(struct node_rx_hdr *rx);
|
|
|
|
static void rx_link_dequeue_release_quota_inc(memq_link_t *link);
|
|
|
|
#endif /* CONFIG_BT_CONN ||
|
|
|
|
* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
|
|
|
|
* CONFIG_BT_CTLR_ADV_PERIODIC ||
|
|
|
|
* CONFIG_BT_CTLR_ADV_ISO
|
|
|
|
*/
|
2019-02-04 22:21:55 +05:30
|
|
|
static void rx_demux(void *param);
|
2021-01-29 18:08:59 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
|
|
|
|
static void rx_demux_yield(void);
|
|
|
|
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
|
2022-04-28 09:59:42 +05:30
|
|
|
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
|
2020-08-19 07:27:02 +05:30
|
|
|
static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last);
|
2020-08-11 10:51:43 +05:30
|
|
|
static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
|
|
|
|
memq_link_t *link,
|
|
|
|
struct node_tx *node_tx);
|
2022-04-28 09:59:42 +05:30
|
|
|
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
|
2019-02-11 22:55:30 +05:30
|
|
|
static inline int rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx);
|
2019-02-04 22:21:55 +05:30
|
|
|
static inline void rx_demux_event_done(memq_link_t *link,
|
|
|
|
struct node_rx_hdr *rx);
|
2022-01-22 07:31:03 +05:30
|
|
|
static void ll_rx_link_quota_inc(void);
|
|
|
|
static void ll_rx_link_quota_dec(void);
|
2019-02-04 22:21:55 +05:30
|
|
|
static void disabled_cb(void *param);
|
2021-01-21 16:33:32 +01:00
|
|
|
#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
|
|
|
|
static void ull_done(void *param);
|
|
|
|
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
int ll_init(struct k_sem *sem_rx)
|
|
|
|
{
|
Bluetooth: Controller: fix mayfly unwanted re-init every bt_enable call
Mayfly by design uses a memq for storage of its jobs. The memq
requires head and tail to track the content. It is considered
empty if head equals tail.
When memq instance is initialized then there is a new link
instance stored in head and tail, nevertheless the memq is
still empty.
When new job is enqueued to a memq, the inilial link is used
to store the job. New link, provided by enqueue call, is stored
in the tail for future enqueue.
When enqueued job was served and is dequeued, the link it was
assigned to is returned and stored in the job object.
That link will be used in future for call to enqueue.
Now let's consider a situation when we are just after initialization.
Some default initial link is in empty memq. We enqueue and dequeue
a job. After dequeue, the job object stores the initial link object.
The one that was put into the memq during initialization.
Next Bluetooth stack is disabled and enabled again.
The job is enqueued again, but it still stores the initial link
address. After enqueue the memq head points to initial link object,
that stores the new job. Tail points to the link delivered by the enqueue call,
that is also the initial link object. The memq is considered to be
empty, nevertheless there was a successful enqueue operation.
The issue is caused by the lack of re-initialization of a job object
on init. In most cases these objects are static members of some
functions, hence there is no re-initialization after bt_disable
and bt_enable calls.
The problem is fixed by re-initialization of mayfly only once
on bt_enable() call. Then it doesn't matter what links are stored
in dequeued objects and there is no need to re-initialize mayfly
job objects.
Signed-off-by: Piotr Pryga <piotr.pryga@nordicsemi.no>
2022-07-11 15:05:52 +02:00
|
|
|
static bool mayfly_initialized;
|
2018-12-18 05:48:20 +01:00
|
|
|
int err;
|
|
|
|
|
|
|
|
/* Store the semaphore to be used to wakeup Thread context */
|
|
|
|
sem_recv = sem_rx;
|
|
|
|
|
|
|
|
/* Initialize counter */
|
|
|
|
/* TODO: Bind and use counter driver? */
|
|
|
|
cntr_init();
|
|
|
|
|
Bluetooth: Controller: fix mayfly unwanted re-init every bt_enable call
Mayfly by design uses a memq for storage of its jobs. The memq
requires head and tail to track the content. It is considered
empty is head equals tail.
When memq instance is initialized then there is a new link
instance stored in head and tail, nevertheless the memq is
still empty.
When new job is enqueued to a memq, the inilial link is used
to store the job. New link, provided by enqueue call, is stored
in the tail for future enqueue.
When enqueued job was served and is dequeued, the link it was
assigned to is returned and stored in the job object.
That link will be used in future for call to enqueue.
Now lets consider a situation when we are just after initalization.
Some default initial link is in empty memq. We enqueue and dequeue
a job. After dequeue, the job object stores the initial link object.
The one that was put into the memq during initialization.
Next Bluetooth stack is disabled and enabled again.
The job is enqueued again, but it still stores the initial link
address. After enqueue the memq head points to initial link object,
that stores new job. Tail points to link deliveded by enqueue call,
that is also the initial link object. The memq is considered to be
empty, nevertheless there was a successful enqueue operation.
The issue is casued by lack of re-initialization of a job object
on init. In most cases these objects are static members of some
functions, hence there is no re-initialization after bt_disable
and bt_enable calls.
The problem is fixed by re-initialization of mayfly only once
on bt_enable() call. Then it doesn't matter what links are stored
in dequeued objects and there is no need to re-initialized mayfly
job objects.
Signed-off-by: Piotr Pryga <piotr.pryga@nordicsemi.no>
2022-07-11 15:05:52 +02:00
|
|
|
/* Initialize mayfly. It may be done only once due to mayfly design.
|
|
|
|
*
|
|
|
|
* On init mayfly memq head and tail is assigned with a link instance
|
|
|
|
* that is used during enqueue operation. New link provided by enqueue
|
|
|
|
* is added as a tail and will be used in future enqueue. While dequeue,
|
|
|
|
* the link that was used for storage of the job is relesed and stored
|
|
|
|
* in a job it was related to. The job may store initial link. If mayfly
|
|
|
|
* is re-initialized but job objects were not re-initialized there is a
|
|
|
|
* risk that enqueued job will point to the same link as it is in a memq
|
|
|
|
* just after re-initialization. After enqueue operation with that link,
|
|
|
|
* head and tail still points to the same link object, so memq is
|
|
|
|
* considered as empty.
|
|
|
|
*/
|
|
|
|
if (!mayfly_initialized) {
|
|
|
|
mayfly_init();
|
|
|
|
mayfly_initialized = true;
|
|
|
|
}
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
/* Initialize Ticker */
|
2019-02-04 22:21:55 +05:30
|
|
|
ticker_users[MAYFLY_CALL_ID_0][0] = TICKER_USER_LLL_OPS;
|
|
|
|
ticker_users[MAYFLY_CALL_ID_1][0] = TICKER_USER_ULL_HIGH_OPS;
|
|
|
|
ticker_users[MAYFLY_CALL_ID_2][0] = TICKER_USER_ULL_LOW_OPS;
|
2021-03-29 16:00:42 +05:30
|
|
|
ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_THREAD_OPS;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
err = ticker_init(TICKER_INSTANCE_ID_CTLR,
|
2019-02-04 22:21:55 +05:30
|
|
|
TICKER_NODES, &ticker_nodes[0],
|
|
|
|
MAYFLY_CALLER_COUNT, &ticker_users[0],
|
|
|
|
TICKER_USER_OPS, &ticker_user_ops[0],
|
2018-12-18 05:48:20 +01:00
|
|
|
hal_ticker_instance0_caller_id_get,
|
|
|
|
hal_ticker_instance0_sched,
|
|
|
|
hal_ticker_instance0_trigger_set);
|
|
|
|
LL_ASSERT(!err);
|
|
|
|
|
|
|
|
/* Initialize semaphore for ticker API blocking wait */
|
|
|
|
k_sem_init(&sem_ticker_api_cb, 0, 1);
|
|
|
|
|
|
|
|
/* Initialize LLL */
|
|
|
|
err = lll_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize ULL internals */
|
|
|
|
/* TODO: globals? */
|
|
|
|
|
|
|
|
/* Common to init and reset */
|
2019-02-04 22:21:55 +05:30
|
|
|
err = init_reset();
|
2018-12-18 05:48:20 +01:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_BROADCASTER)
|
|
|
|
err = lll_adv_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = ull_adv_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_BROADCASTER */
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_OBSERVER)
|
|
|
|
err = lll_scan_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = ull_scan_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_OBSERVER */
|
|
|
|
|
2020-10-05 10:42:39 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
|
2020-08-11 09:58:34 +05:30
|
|
|
err = lll_sync_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = ull_sync_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2020-11-09 16:31:01 +01:00
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
|
|
|
|
err = ull_sync_iso_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
|
2020-10-05 10:42:39 +05:30
|
|
|
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
|
2020-08-11 09:58:34 +05:30
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
err = lll_conn_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = ull_conn_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CONN */
|
|
|
|
|
2021-02-24 13:57:41 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_DF)
|
2020-11-20 05:16:05 -08:00
|
|
|
err = ull_df_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2021-04-14 10:17:20 +02:00
|
|
|
#if defined(CONFIG_BT_CTLR_ISO)
|
2020-11-26 15:47:39 +01:00
|
|
|
err = ull_iso_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2021-04-14 10:17:20 +02:00
|
|
|
#endif /* CONFIG_BT_CTLR_ISO */
|
2020-11-26 15:47:39 +01:00
|
|
|
|
2021-04-14 10:17:20 +02:00
|
|
|
#if defined(CONFIG_BT_CTLR_CONN_ISO)
|
2021-01-25 09:00:56 +01:00
|
|
|
err = ull_conn_iso_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2021-04-14 10:17:20 +02:00
|
|
|
#endif /* CONFIG_BT_CTLR_CONN_ISO */
|
2021-01-25 09:00:56 +01:00
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
|
|
|
|
err = ull_peripheral_iso_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
|
|
|
|
err = ull_central_iso_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
|
|
|
|
|
2020-10-30 11:26:26 +01:00
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_ISO)
|
|
|
|
err = ull_adv_iso_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2021-01-25 09:00:56 +01:00
|
|
|
#endif /* CONFIG_BT_CTLR_ADV_ISO */
|
2020-10-30 11:26:26 +01:00
|
|
|
|
2021-02-24 13:57:41 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_DF)
|
2020-11-19 02:12:54 -08:00
|
|
|
err = lll_df_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-05-24 11:13:21 +02:00
|
|
|
#if defined(CONFIG_BT_CTLR_USER_EXT)
|
|
|
|
err = ull_user_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_USER_EXT */
|
|
|
|
|
2021-09-20 15:27:58 +02:00
|
|
|
/* reset filter accept list, resolving list and initialise RPA timeout*/
|
|
|
|
if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
|
2019-05-09 18:39:26 +05:30
|
|
|
ull_filter_reset(true);
|
|
|
|
}
|
|
|
|
|
2021-03-09 10:50:44 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_TEST)
|
|
|
|
lll_chan_sel_2_ut();
|
|
|
|
#endif /* CONFIG_BT_CTLR_TEST */
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-06-02 15:40:40 +02:00
|
|
|
/**
 * @brief De-initialize the Link Layer.
 *
 * Restores the controller to its just-initialized state first, then lets
 * the Lower Link Layer release its resources.
 *
 * @return The result of lll_deinit().
 */
int ll_deinit(void)
{
	int err;

	ll_reset();

	err = lll_deinit();

	return err;
}
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
/**
 * @brief Reset the Link Layer, returning it to its post-ll_init() state.
 *
 * Resets every enabled role/feature ULL context, resets the LLL context by
 * enqueueing perform_lll_reset() as a mayfly into LLL context, then
 * re-initializes the common ULL internals and clears the static random
 * address. The ordering documented in the note below is load-bearing.
 */
void ll_reset(void)
{
	int err;

	/* Note: The sequence of reset control flow is as follows:
	 * - Reset ULL context, i.e. stop ULL scheduling, abort LLL events etc.
	 * - Reset LLL context, i.e. post LLL event abort, let LLL cleanup its
	 *   variables, if any.
	 * - Reset ULL static variables (which otherwise was mem-zeroed in cases
	 *   if power-on reset wherein architecture startup mem-zeroes .bss
	 *   sections.
	 * - Initialize ULL context variable, similar to on-power-up.
	 */

#if defined(CONFIG_BT_BROADCASTER)
	/* Reset adv state */
	err = ull_adv_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	/* Reset periodic sync sets */
	err = ull_sync_iso_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	/* Reset periodic sync sets */
	err = ull_sync_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	/* Reset scan state */
	err = ull_scan_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_ISO)
	err = ull_iso_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CTLR_ISO */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
	err = ull_conn_iso_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	err = ull_peripheral_iso_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	err = ull_central_iso_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */

#if defined(CONFIG_BT_CTLR_ADV_ISO)
	/* Reset periodic sync sets */
	err = ull_adv_iso_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CTLR_ADV_ISO */

#if defined(CONFIG_BT_CONN)
	/* Reset conn role */
	err = ull_conn_reset();
	LL_ASSERT(!err);

	MFIFO_INIT(tx_ack);
#endif /* CONFIG_BT_CONN */

	/* reset filter accept list and resolving list */
	if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)) {
		ull_filter_reset(false);
	}

	/* Re-initialize ULL internals */

	/* Re-initialize the prep mfifo */
	MFIFO_INIT(prep);

	/* Re-initialize the free rx mfifo */
	MFIFO_INIT(pdu_rx_free);

#if defined(CONFIG_BT_CONN)
	/* Re-initialize the free ll rx mfifo */
	MFIFO_INIT(ll_pdu_rx_free);
#endif /* CONFIG_BT_CONN */

	/* Reset LLL via mayfly */
	{
		/* static storage: the mayfly job and its link must outlive
		 * this function when the job runs asynchronously in LLL
		 * context.
		 */
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL,
					    perform_lll_reset};
		uint32_t retval;

		/* NOTE: If Zero Latency Interrupt is used, then LLL context
		 * will be the highest priority IRQ in the system, hence
		 * mayfly_enqueue will be done running the callee inline
		 * (vector to the callee function) in this function. Else
		 * we use semaphore to wait for perform_lll_reset to
		 * complete.
		 */

#if !defined(CONFIG_BT_CTLR_ZLI)
		struct k_sem sem;

		k_sem_init(&sem, 0, 1);
		mfy.param = &sem;
#endif /* !CONFIG_BT_CTLR_ZLI */

		retval = mayfly_enqueue(TICKER_USER_ID_THREAD,
					TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!retval);

#if !defined(CONFIG_BT_CTLR_ZLI)
		/* LLL reset must complete before returning - wait for
		 * reset completion in LLL mayfly thread
		 */
		k_sem_take(&sem, K_FOREVER);
#endif /* !CONFIG_BT_CTLR_ZLI */
	}

#if defined(CONFIG_BT_BROADCASTER)
	/* Finalize after adv state LLL context reset */
	err = ull_adv_reset_finalize();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_BROADCASTER */

	/* Reset/End DTM Tx or Rx commands */
	if (IS_ENABLED(CONFIG_BT_CTLR_DTM)) {
		uint16_t num_rx;

		(void)ll_test_end(&num_rx);
		ARG_UNUSED(num_rx);
	}

	/* Common to init and reset */
	err = init_reset();
	LL_ASSERT(!err);

#if defined(CONFIG_BT_CTLR_DF)
	/* Direction Finding has to be reset after ull init_reset call because
	 * it uses mem_link_rx for node_rx_iq_report. The mem_linx_rx is reset
	 * in common ull init_reset.
	 */
	err = ull_df_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CTLR_DF */

	/* clear static random address */
	(void)ll_addr_set(1U, NULL);
}
|
|
|
|
|
2019-02-14 10:04:17 +01:00
|
|
|
/**
 * @brief Peek the next node_rx to send up to Host
 * @details Tightly coupled with prio_recv_thread()
 *   Execution context: Controller thread
 *
 *   Nodes marked for internal release (NODE_RX_TYPE_RELEASE and the
 *   channel-map completion types) are consumed here and the peek is
 *   retried, so the caller only ever sees Host-bound nodes.
 *
 * @param node_rx[out] Pointer to rx node at head of queue
 * @param handle[out]  Connection handle
 * @return TX completed
 */
uint8_t ll_rx_get(void **node_rx, uint16_t *handle)
{
	struct node_rx_hdr *rx;
	memq_link_t *link;
	uint8_t cmplt = 0U;

#if defined(CONFIG_BT_CONN) || \
	(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
	defined(CONFIG_BT_CTLR_ADV_ISO)
ll_rx_get_again:
#endif /* CONFIG_BT_CONN ||
	* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
	* CONFIG_BT_CTLR_ADV_PERIODIC ||
	* CONFIG_BT_CTLR_ADV_ISO
	*/

	*node_rx = NULL;

	/* Peek only; the dequeue is committed later by ll_rx_dequeue() */
	link = memq_peek(memq_ll_rx.head, memq_ll_rx.tail, (void **)&rx);
	if (link) {
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
		cmplt = tx_cmplt_get(handle, &mfifo_tx_ack.f, rx->ack_last);
		if (!cmplt) {
			uint8_t f, cmplt_prev, cmplt_curr;
			uint16_t h;

			/* NOTE(review): polls tx_cmplt_get() on a local
			 * cursor until two successive calls report no
			 * completions - confirm loop contract against
			 * tx_cmplt_get() definition.
			 */
			cmplt_curr = 0U;
			f = mfifo_tx_ack.f;
			do {
				cmplt_prev = cmplt_curr;
				cmplt_curr = tx_cmplt_get(&h, &f,
							  mfifo_tx_ack.l);
			} while ((cmplt_prev != 0U) ||
				 (cmplt_prev != cmplt_curr));
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */

			if (0) {
#if defined(CONFIG_BT_CONN) || \
	(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT))
			/* Do not send up buffers to Host thread that are
			 * marked for release
			 */
			} else if (rx->type == NODE_RX_TYPE_RELEASE) {
				rx_link_dequeue_release_quota_inc(link);
				rx_release_replenish(rx);

				goto ll_rx_get_again;
#endif /* CONFIG_BT_CONN ||
	* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT)
	*/

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
			} else if (rx->type == NODE_RX_TYPE_SYNC_CHM_COMPLETE) {
				rx_link_dequeue_release_quota_inc(link);

				/* Remove Channel Map Update Indication from
				 * ACAD.
				 */
				ull_adv_sync_chm_complete(rx);

				rx_release_replenish(rx);

				goto ll_rx_get_again;
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

#if defined(CONFIG_BT_CTLR_ADV_ISO)
			} else if (rx->type == NODE_RX_TYPE_BIG_CHM_COMPLETE) {
				rx_link_dequeue_release_quota_inc(link);

				/* Update Channel Map in BIGInfo present in
				 * Periodic Advertising PDU.
				 */
				ull_adv_iso_chm_complete(rx);

				rx_release_replenish(rx);

				goto ll_rx_get_again;
#endif /* CONFIG_BT_CTLR_ADV_ISO */
			}

			*node_rx = rx;

#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
		}
	} else {
		/* No rx pending; still report any completed Tx acks */
		cmplt = tx_cmplt_get(handle, &mfifo_tx_ack.f, mfifo_tx_ack.l);
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
	}

	return cmplt;
}
|
|
|
|
|
2019-02-14 10:04:17 +01:00
|
|
|
/**
 * @brief Commit the dequeue from memq_ll_rx, where ll_rx_get() did the peek
 * @details Execution context: Controller thread
 *
 *   Performs per-type cleanup of controller-side state tied to the node;
 *   the node memory itself is not freed here (only its memq link is
 *   released).
 */
void ll_rx_dequeue(void)
{
	struct node_rx_hdr *rx = NULL;
	memq_link_t *link;

	link = memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head,
			    (void **)&rx);
	LL_ASSERT(link);

	ll_rx_link_release(link);

	/* handle object specific clean up */
	switch (rx->type) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_OBSERVER)
	case NODE_RX_TYPE_EXT_1M_REPORT:
	case NODE_RX_TYPE_EXT_2M_REPORT:
	case NODE_RX_TYPE_EXT_CODED_REPORT:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case NODE_RX_TYPE_SYNC_REPORT:
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	{
		struct node_rx_hdr *rx_curr;
		struct pdu_adv *adv;

		adv = (void *)((struct node_rx_pdu *)rx)->pdu;
		if (adv->type != PDU_ADV_TYPE_EXT_IND) {
			break;
		}

		/* Walk the chain of auxiliary report nodes (rx_ftr.extra)
		 * and release only their memq links; the nodes themselves
		 * are not freed here.
		 */
		rx_curr = rx->rx_ftr.extra;
		while (rx_curr) {
			memq_link_t *link_free;

			link_free = rx_curr->link;
			rx_curr = rx_curr->rx_ftr.extra;

			ll_rx_link_release(link_free);
		}
	}
	break;

	case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
	{
		ull_scan_term_dequeue(rx->handle);
	}
	break;
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_BROADCASTER)
	case NODE_RX_TYPE_EXT_ADV_TERMINATE:
	{
		struct ll_adv_set *adv;
		struct lll_adv_aux *lll_aux;

		adv = ull_adv_set_get(rx->handle);
		LL_ASSERT(adv);

		lll_aux = adv->lll.aux;
		if (lll_aux) {
			struct ll_adv_aux_set *aux;

			aux = HDR_LLL2ULL(lll_aux);

			aux->is_started = 0U;
		}

#if defined(CONFIG_BT_PERIPHERAL)
		struct lll_conn *lll_conn = adv->lll.conn;

		if (!lll_conn) {
			adv->is_enabled = 0U;

			break;
		}

		/* Connectable advertising terminated without a connection:
		 * tear down the pre-allocated connection context.
		 */
		LL_ASSERT(!lll_conn->link_tx_free);

		memq_link_t *link = memq_deinit(&lll_conn->memq_tx.head,
						&lll_conn->memq_tx.tail);
		LL_ASSERT(link);

		lll_conn->link_tx_free = link;

		struct ll_conn *conn = HDR_LLL2ULL(lll_conn);

		ll_conn_release(conn);
		adv->lll.conn = NULL;

		ll_rx_release(adv->node_rx_cc_free);
		adv->node_rx_cc_free = NULL;

		ll_rx_link_release(adv->link_cc_free);
		adv->link_cc_free = NULL;
#endif /* CONFIG_BT_PERIPHERAL */

		adv->is_enabled = 0U;
	}
	break;
#endif /* CONFIG_BT_BROADCASTER */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CONN)
	case NODE_RX_TYPE_CONNECTION:
	{
		struct node_rx_cc *cc = (void *)((struct node_rx_pdu *)rx)->pdu;
		struct node_rx_ftr *ftr = &(rx->rx_ftr);

		if (0) {

#if defined(CONFIG_BT_PERIPHERAL)
		/* Peripheral role: either the advertising timed out or a
		 * connection was established as peripheral.
		 */
		} else if ((cc->status == BT_HCI_ERR_ADV_TIMEOUT) || cc->role) {
			struct ll_adv_set *adv;
			struct lll_adv *lll;

			/* Get reference to ULL context */
			lll = ftr->param;
			adv = HDR_LLL2ULL(lll);

			if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
				struct lll_conn *conn_lll;
				struct ll_conn *conn;
				memq_link_t *link;

				/* No connection was made: release the
				 * pre-allocated connection context.
				 */
				conn_lll = lll->conn;
				LL_ASSERT(conn_lll);
				lll->conn = NULL;

				LL_ASSERT(!conn_lll->link_tx_free);
				link = memq_deinit(&conn_lll->memq_tx.head,
						   &conn_lll->memq_tx.tail);
				LL_ASSERT(link);
				conn_lll->link_tx_free = link;

				conn = HDR_LLL2ULL(conn_lll);
				ll_conn_release(conn);
			} else {
				/* Release un-utilized node rx */
				if (adv->node_rx_cc_free) {
					void *rx_free;

					rx_free = adv->node_rx_cc_free;
					adv->node_rx_cc_free = NULL;

					ll_rx_release(rx_free);
				}
			}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
			if (lll->aux) {
				struct ll_adv_aux_set *aux;

				aux = HDR_LLL2ULL(lll->aux);
				aux->is_started = 0U;
			}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

			adv->is_enabled = 0U;
#else /* !CONFIG_BT_PERIPHERAL */
			ARG_UNUSED(cc);
#endif /* !CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CENTRAL)
		/* Central role: connection established while scanning */
		} else {
			struct ll_scan_set *scan = HDR_LLL2ULL(ftr->param);

#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
			/* Also disable the other PHY's scan set that shared
			 * this connection context.
			 */
			struct ll_scan_set *scan_other =
				ull_scan_is_enabled_get(SCAN_HANDLE_PHY_CODED);

			if (scan_other) {
				if (scan_other == scan) {
					scan_other = ull_scan_is_enabled_get(SCAN_HANDLE_1M);
				}

				if (scan_other) {
					scan_other->lll.conn = NULL;
					scan_other->is_enabled = 0U;
				}
			}
#endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */

			scan->lll.conn = NULL;
			scan->is_enabled = 0U;
#else /* !CONFIG_BT_CENTRAL */
		} else {
			LL_ASSERT(0);
#endif /* !CONFIG_BT_CENTRAL */
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
			uint8_t bm;

			/* FIXME: use the correct adv and scan set to get
			 * enabled status bitmask
			 */
			bm = (IS_ENABLED(CONFIG_BT_OBSERVER) &&
			      (ull_scan_is_enabled(0) << 1)) |
			     (IS_ENABLED(CONFIG_BT_BROADCASTER) &&
			      ull_adv_is_enabled(0));

			if (!bm) {
				ull_filter_adv_scan_state_cb(0);
			}
		}
	}
	break;

	case NODE_RX_TYPE_TERMINATE:
	case NODE_RX_TYPE_DC_PDU:
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_ADV_ISO)
	case NODE_RX_TYPE_BIG_COMPLETE:
	case NODE_RX_TYPE_BIG_TERMINATE:
#endif /* CONFIG_BT_CTLR_ADV_ISO */

#if defined(CONFIG_BT_OBSERVER)
	case NODE_RX_TYPE_REPORT:

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	/* fall through */
	case NODE_RX_TYPE_SYNC:
	case NODE_RX_TYPE_SYNC_LOST:
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	/* fall through */
	case NODE_RX_TYPE_SYNC_ISO:
	case NODE_RX_TYPE_SYNC_ISO_LOST:
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
	case NODE_RX_TYPE_SCAN_REQ:
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CONN)
	case NODE_RX_TYPE_CONN_UPDATE:
	case NODE_RX_TYPE_ENC_REFRESH:

#if defined(CONFIG_BT_CTLR_LE_PING)
	case NODE_RX_TYPE_APTO:
#endif /* CONFIG_BT_CTLR_LE_PING */

	case NODE_RX_TYPE_CHAN_SEL_ALGO:

#if defined(CONFIG_BT_CTLR_PHY)
	case NODE_RX_TYPE_PHY_UPDATE:
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
	case NODE_RX_TYPE_RSSI:
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
	case NODE_RX_TYPE_PROFILE:
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
	case NODE_RX_TYPE_ADV_INDICATION:
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
	case NODE_RX_TYPE_SCAN_INDICATION:
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

#if defined(CONFIG_BT_HCI_MESH_EXT)
	case NODE_RX_TYPE_MESH_ADV_CPLT:
	case NODE_RX_TYPE_MESH_REPORT:
#endif /* CONFIG_BT_HCI_MESH_EXT */

#if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
	case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
		__fallthrough;
#endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case NODE_RX_TYPE_CIS_REQUEST:
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
	case NODE_RX_TYPE_CIS_ESTABLISHED:
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX)
	case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RX */

#if defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
	case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
#endif /* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT */

	/* Ensure that at least one 'case' statement is present for this
	 * code block.
	 */
	case NODE_RX_TYPE_NONE:
		LL_ASSERT(rx->type != NODE_RX_TYPE_NONE);
		break;

	default:
		LL_ASSERT(0);
		break;
	}

	/* FIXME: clean up when porting Mesh Ext. */
	if (0) {
#if defined(CONFIG_BT_HCI_MESH_EXT)
	} else if (rx->type == NODE_RX_TYPE_MESH_ADV_CPLT) {
		struct ll_adv_set *adv;
		struct ll_scan_set *scan;

		adv = ull_adv_is_enabled_get(0);
		LL_ASSERT(adv);
		adv->is_enabled = 0U;

		scan = ull_scan_is_enabled_get(0);
		LL_ASSERT(scan);

		scan->is_enabled = 0U;

		ll_adv_scan_state_cb(0);
#endif /* CONFIG_BT_HCI_MESH_EXT */
	}
}
|
|
|
|
|
|
|
|
/**
 * @brief Release a chain of processed HCI Rx nodes back to the controller.
 *
 * Walks the singly-linked list headed at *node_rx, performing per-type
 * cleanup (connection contexts, sync contexts, IQ report pools, etc.),
 * then replenishes the Rx free pools.
 *
 * @param node_rx In/out: head of the node chain to release; on return it
 *                is set to the list terminator (NULL) observed at the end
 *                of traversal.
 */
void ll_rx_mem_release(void **node_rx)
{
	struct node_rx_hdr *rx;

	rx = *node_rx;
	while (rx) {
		struct node_rx_hdr *rx_free;

		/* Detach the current node before releasing it, so the walk
		 * can continue on the saved next pointer.
		 */
		rx_free = rx;
		rx = rx->next;

		switch (rx_free->type) {
#if defined(CONFIG_BT_BROADCASTER)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		case NODE_RX_TYPE_EXT_ADV_TERMINATE:
			ll_rx_release(rx_free);
			break;

#if defined(CONFIG_BT_CTLR_ADV_ISO)
		case NODE_RX_TYPE_BIG_COMPLETE:
			/* Nothing to release */
			break;

		case NODE_RX_TYPE_BIG_TERMINATE:
		{
			struct ll_adv_iso_set *adv_iso = rx_free->rx_ftr.param;

			ull_adv_iso_stream_release(adv_iso);
		}
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
		{
			ll_rx_release(rx_free);
		}
		break;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CONN)
		case NODE_RX_TYPE_CONNECTION:
		{
			struct node_rx_cc *cc =
				(void *)((struct node_rx_pdu *)rx_free)->pdu;

			if (0) {

#if defined(CONFIG_BT_PERIPHERAL)
			/* Directed advertising timed out: no connection was
			 * created, only the node needs releasing.
			 */
			} else if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
				ll_rx_release(rx_free);

				break;
#endif /* !CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CENTRAL)
			/* Connection creation cancelled: tear down the
			 * central's pending connection context.
			 */
			} else if (cc->status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
				ull_central_cleanup(rx_free);

#if defined(CONFIG_BT_CTLR_PRIVACY)
#if defined(CONFIG_BT_BROADCASTER)
				if (!ull_adv_is_enabled_get(0))
#endif /* CONFIG_BT_BROADCASTER */
				{
					ull_filter_adv_scan_state_cb(0);
				}
#endif /* CONFIG_BT_CTLR_PRIVACY */
				break;
#endif /* CONFIG_BT_CENTRAL */

			} else {
				LL_ASSERT(!cc->status);
			}
		}

		/* Successful connection completes fall into the common
		 * quota-restoring release below.
		 */
		__fallthrough;
		case NODE_RX_TYPE_DC_PDU:
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_OBSERVER)
		case NODE_RX_TYPE_REPORT:

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		__fallthrough;
		case NODE_RX_TYPE_EXT_1M_REPORT:
		case NODE_RX_TYPE_EXT_2M_REPORT:
		case NODE_RX_TYPE_EXT_CODED_REPORT:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		case NODE_RX_TYPE_SYNC_REPORT:
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
		case NODE_RX_TYPE_SCAN_REQ:
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CONN)
		case NODE_RX_TYPE_CONN_UPDATE:
		case NODE_RX_TYPE_ENC_REFRESH:

#if defined(CONFIG_BT_CTLR_LE_PING)
		case NODE_RX_TYPE_APTO:
#endif /* CONFIG_BT_CTLR_LE_PING */

		case NODE_RX_TYPE_CHAN_SEL_ALGO:

#if defined(CONFIG_BT_CTLR_PHY)
		case NODE_RX_TYPE_PHY_UPDATE:
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
		case NODE_RX_TYPE_RSSI:
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
		case NODE_RX_TYPE_PROFILE:
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
		case NODE_RX_TYPE_ADV_INDICATION:
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
		case NODE_RX_TYPE_SCAN_INDICATION:
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

#if defined(CONFIG_BT_HCI_MESH_EXT)
		case NODE_RX_TYPE_MESH_ADV_CPLT:
		case NODE_RX_TYPE_MESH_REPORT:
#endif /* CONFIG_BT_HCI_MESH_EXT */

#if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
		case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
#endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
		case NODE_RX_TYPE_CIS_REQUEST:
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
		case NODE_RX_TYPE_CIS_ESTABLISHED:
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_ISO)
		case NODE_RX_TYPE_ISO_PDU:
#endif

		/* Ensure that at least one 'case' statement is present for this
		 * code block.
		 */
		case NODE_RX_TYPE_NONE:
			LL_ASSERT(rx_free->type != NODE_RX_TYPE_NONE);
			/* These node types consumed one link quota unit when
			 * allocated; restore it before releasing the node.
			 */
			ll_rx_link_quota_inc();
			ll_rx_release(rx_free);
			break;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		case NODE_RX_TYPE_SYNC:
		{
			struct node_rx_sync *se =
				(void *)((struct node_rx_pdu *)rx_free)->pdu;
			uint8_t status = se->status;

			/* Below status codes use node_rx_sync_estab, hence
			 * release the node_rx memory and release sync context
			 * if sync establishment failed.
			 */
			if ((status == BT_HCI_ERR_SUCCESS) ||
			    (status == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE) ||
			    (status == BT_HCI_ERR_CONN_FAIL_TO_ESTAB)) {
				struct ll_sync_set *sync;
				struct ll_scan_set *scan;

				/* pick the scan context before node_rx
				 * release.
				 */
				scan = (void *)rx_free->rx_ftr.param;

				ll_rx_release(rx_free);

				/* pick the sync context before scan context
				 * is cleanup of sync context association.
				 */
				sync = scan->periodic.sync;

				ull_sync_setup_reset(scan);

				if (status != BT_HCI_ERR_SUCCESS) {
					memq_link_t *link_sync_lost;

					link_sync_lost =
						sync->node_rx_lost.hdr.link;
					ll_rx_link_release(link_sync_lost);

					ull_sync_release(sync);
				}

				break;
			} else {
				LL_ASSERT(status == BT_HCI_ERR_OP_CANCELLED_BY_HOST);

				/* Fall through and release sync context */
			}
		}
		/* Pass through */

		case NODE_RX_TYPE_SYNC_LOST:
		{
			struct ll_sync_set *sync =
				(void *)rx_free->rx_ftr.param;

			ull_sync_release(sync);
		}
		break;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
		case NODE_RX_TYPE_SYNC_ISO:
		{
			struct node_rx_sync_iso *se =
				(void *)((struct node_rx_pdu *)rx_free)->pdu;

			if (!se->status) {
				ll_rx_release(rx_free);

				break;
			}
		}
		/* Pass through */

		case NODE_RX_TYPE_SYNC_ISO_LOST:
		{
			struct ll_sync_iso_set *sync_iso =
				(void *)rx_free->rx_ftr.param;

			ull_sync_iso_stream_release(sync_iso);
		}
		break;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX) || \
	defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
		case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
		case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
		case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
		{
			const uint8_t report_cnt = 1U;

			/* IQ reports come from a dedicated pool: return the
			 * link quota, release the node, and refill the pool.
			 */
			ull_iq_report_link_inc_quota(report_cnt);
			ull_df_iq_report_mem_release(rx_free);
			ull_df_rx_iq_report_alloc(report_cnt);
		}
		break;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DF_CONN_CTE_RX ||
	* CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT
	*/

#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_CONN_ISO)
		case NODE_RX_TYPE_TERMINATE:
		{
			if (IS_ACL_HANDLE(rx_free->handle)) {
				struct ll_conn *conn;
				memq_link_t *link;

				conn = ll_conn_get(rx_free->handle);

				/* Reclaim the Tx memq of the terminated
				 * connection before releasing its context.
				 */
				LL_ASSERT(!conn->lll.link_tx_free);
				link = memq_deinit(&conn->lll.memq_tx.head,
						   &conn->lll.memq_tx.tail);
				LL_ASSERT(link);
				conn->lll.link_tx_free = link;

				ll_conn_release(conn);
			}
		}
		break;
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_CONN_ISO */

		case NODE_RX_TYPE_EVENT_DONE:
		default:
			/* Done events and unknown types must never reach the
			 * host-release path.
			 */
			LL_ASSERT(0);
			break;
		}
	}

	*node_rx = rx;

	rx_replenish_all();
}
|
|
|
|
|
2022-01-22 07:31:03 +05:30
|
|
|
static void ll_rx_link_quota_update(int8_t delta)
|
2019-08-16 15:40:31 +02:00
|
|
|
{
|
|
|
|
LL_ASSERT(delta <= 0 || mem_link_rx.quota_pdu < RX_CNT);
|
|
|
|
mem_link_rx.quota_pdu += delta;
|
|
|
|
}
|
|
|
|
|
2022-01-22 07:31:03 +05:30
|
|
|
/* Return one unit of Rx link quota to the accounting. */
static void ll_rx_link_quota_inc(void)
{
	ll_rx_link_quota_update(+1);
}
|
|
|
|
|
|
|
|
/* Consume one unit of Rx link quota from the accounting. */
static void ll_rx_link_quota_dec(void)
{
	ll_rx_link_quota_update(-1);
}
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
void *ll_rx_link_alloc(void)
|
|
|
|
{
|
|
|
|
return mem_acquire(&mem_link_rx.free);
|
|
|
|
}
|
|
|
|
|
2022-01-22 07:31:03 +05:30
|
|
|
/**
 * @brief Return a memq link to the Rx link pool.
 *
 * @param link Link element previously obtained via ll_rx_link_alloc().
 */
void ll_rx_link_release(memq_link_t *link)
{
	mem_release(link, &mem_link_rx.free);
}
|
|
|
|
|
|
|
|
void *ll_rx_alloc(void)
|
|
|
|
{
|
|
|
|
return mem_acquire(&mem_pdu_rx.free);
|
|
|
|
}
|
|
|
|
|
|
|
|
void ll_rx_release(void *node_rx)
|
|
|
|
{
|
|
|
|
mem_release(node_rx, &mem_pdu_rx.free);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * @brief Enqueue an Rx node onto the LL-to-host (thread) Rx queue.
 *
 * @param link Free memq link used to chain the node into the queue.
 * @param rx   Node to enqueue; must start with a struct node_rx_hdr.
 */
void ll_rx_put(memq_link_t *link, void *rx)
{
#if defined(CONFIG_BT_CONN)
	struct node_rx_hdr *rx_hdr = rx;

	/* Serialize Tx ack with Rx enqueue by storing reference to
	 * last element index in Tx ack FIFO.
	 */
	rx_hdr->ack_last = mfifo_tx_ack.l;
#endif /* CONFIG_BT_CONN */

	/* Enqueue the Rx object */
	memq_enqueue(link, rx, &memq_ll_rx.tail);
}
|
|
|
|
|
2019-02-14 10:04:17 +01:00
|
|
|
/**
|
|
|
|
* @brief Permit another loop in the controller thread (prio_recv_thread)
|
|
|
|
* @details Execution context: ULL mayfly
|
|
|
|
*/
|
2018-12-18 05:48:20 +01:00
|
|
|
void ll_rx_sched(void)
|
|
|
|
{
|
2019-02-14 10:04:17 +01:00
|
|
|
/* sem_recv references the same semaphore (sem_prio_recv)
|
|
|
|
* in prio_recv_thread
|
|
|
|
*/
|
2018-12-18 05:48:20 +01:00
|
|
|
k_sem_give(sem_recv);
|
|
|
|
}
|
|
|
|
|
2019-02-04 22:33:51 +05:30
|
|
|
#if defined(CONFIG_BT_CONN)
|
2020-05-27 11:26:57 -05:00
|
|
|
void *ll_pdu_rx_alloc_peek(uint8_t count)
|
2019-07-18 15:10:05 +05:30
|
|
|
{
|
|
|
|
if (count > MFIFO_AVAIL_COUNT_GET(ll_pdu_rx_free)) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return MFIFO_DEQUEUE_PEEK(ll_pdu_rx_free);
|
|
|
|
}
|
|
|
|
|
|
|
|
void *ll_pdu_rx_alloc(void)
|
|
|
|
{
|
|
|
|
return MFIFO_DEQUEUE(ll_pdu_rx_free);
|
|
|
|
}
|
2022-04-28 09:59:42 +05:30
|
|
|
#endif /* CONFIG_BT_CONN */
|
2019-07-18 15:10:05 +05:30
|
|
|
|
2022-04-28 09:59:42 +05:30
|
|
|
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
|
2020-05-27 11:26:57 -05:00
|
|
|
/**
 * @brief Record a Tx acknowledgment for a completed Tx node.
 *
 * Reserves a slot in the Tx ack FIFO, fills it, then commits it; the
 * reserve/commit split keeps the entry invisible to consumers until it is
 * fully populated.
 *
 * @param handle  Connection (or stream) handle the ack belongs to.
 * @param node_tx The Tx node being acknowledged.
 */
void ll_tx_ack_put(uint16_t handle, struct node_tx *node_tx)
{
	struct lll_tx *tx;
	uint8_t idx;

	idx = MFIFO_ENQUEUE_GET(tx_ack, (void **)&tx);
	/* FIFO is sized for the worst case; a full FIFO is a logic error */
	LL_ASSERT(tx);

	tx->handle = handle;
	tx->node = node_tx;

	MFIFO_ENQUEUE(tx_ack, idx);
}
|
2022-04-28 09:59:42 +05:30
|
|
|
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
|
2019-02-04 22:33:51 +05:30
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
void ll_timeslice_ticker_id_get(uint8_t * const instance_index,
|
2021-03-29 16:00:42 +05:30
|
|
|
uint8_t * const ticker_id)
|
2018-12-18 05:48:20 +01:00
|
|
|
{
|
|
|
|
*instance_index = TICKER_INSTANCE_ID_CTLR;
|
2021-03-29 16:00:42 +05:30
|
|
|
*ticker_id = (TICKER_NODES - FLASH_TICKER_NODES);
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * @brief Abort any ongoing radio event by scheduling lll_disable in LLL.
 *
 * Uses a function-local static mayfly so the enqueue is allocation-free;
 * presumably callers do not re-enter before the mayfly has run — TODO
 * confirm against ULL_HIGH usage.
 */
void ll_radio_state_abort(void)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
	uint32_t ret;

	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
			     &mfy);
	LL_ASSERT(!ret);
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Query the LLL layer for whether the radio peripheral is idle. */
uint32_t ll_radio_state_is_idle(void)
{
	uint32_t is_idle = lll_radio_is_idle();

	return is_idle;
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
void ull_ticker_status_give(uint32_t status, void *param)
|
2018-12-18 05:48:20 +01:00
|
|
|
{
|
2020-05-27 11:26:57 -05:00
|
|
|
*((uint32_t volatile *)param) = status;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
k_sem_give(&sem_ticker_api_cb);
|
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/**
 * @brief Block until a ticker operation's callback has reported status.
 *
 * @param ret    Immediate return value of the ticker operation.
 * @param ret_cb Volatile slot written by ull_ticker_status_give().
 * @return Final operation status as written by the callback.
 */
uint32_t ull_ticker_status_take(uint32_t ret, uint32_t volatile *ret_cb)
{
	if (ret == TICKER_STATUS_BUSY) {
		/* TODO: Enable ticker job in case of CONFIG_BT_CTLR_LOW_LAT */
	} else {
		/* Check for ticker operation enqueue failed, in which case
		 * function return value (ret) will be TICKER_STATUS_FAILURE
		 * and callback return value (ret_cb) will remain as
		 * TICKER_STATUS_BUSY.
		 * This assert check will avoid waiting forever to take the
		 * semaphore that will never be given when the ticker operation
		 * callback does not get called due to enqueue failure.
		 */
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (*ret_cb != TICKER_STATUS_BUSY));
	}

	k_sem_take(&sem_ticker_api_cb, K_FOREVER);

	return *ret_cb;
}
|
|
|
|
|
|
|
|
void *ull_disable_mark(void *param)
|
|
|
|
{
|
2019-06-05 13:40:36 +02:00
|
|
|
return mark_set(&mark_disable, param);
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_disable_unmark(void *param)
|
|
|
|
{
|
2019-06-05 13:40:36 +02:00
|
|
|
return mark_unset(&mark_disable, param);
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_disable_mark_get(void)
|
|
|
|
{
|
2019-06-05 13:40:36 +02:00
|
|
|
return mark_get(mark_disable);
|
|
|
|
}
|
|
|
|
|
2020-11-12 15:11:43 +01:00
|
|
|
/**
 * @brief Stops a specified ticker using the ull_disable_(un)mark functions.
 *
 * @param ticker_handle The handle of the ticker.
 * @param param The object to mark.
 * @param lll_disable Optional object when calling @ref ull_disable
 *
 * @return 0 if success, else ERRNO.
 */
int ull_ticker_stop_with_mark(uint8_t ticker_handle, void *param,
			      void *lll_disable)
{
	uint32_t volatile ret_cb;
	uint32_t ret;
	void *mark;
	int err;

	/* Mark the object so concurrent operations see it is being stopped;
	 * -ENOLCK means another object already holds the mark.
	 */
	mark = ull_disable_mark(param);
	if (mark != param) {
		return -ENOLCK;
	}

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			  ticker_handle, ull_ticker_status_give,
			  (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret) {
		/* Ticker stop failed: undo the mark before reporting */
		mark = ull_disable_unmark(param);
		if (mark != param) {
			return -ENOLCK;
		}

		return -EALREADY;
	}

	err = ull_disable(lll_disable);

	mark = ull_disable_unmark(param);
	if (mark != param) {
		return -ENOLCK;
	}

	/* -EALREADY from ull_disable means the event was already inactive,
	 * which is success for a stop operation.
	 */
	if (err && (err != -EALREADY)) {
		return err;
	}

	return 0;
}
|
|
|
|
|
2019-06-05 13:40:36 +02:00
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
void *ull_update_mark(void *param)
|
|
|
|
{
|
|
|
|
return mark_set(&mark_update, param);
|
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_update_unmark(void *param)
|
|
|
|
{
|
|
|
|
return mark_unset(&mark_update, param);
|
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_update_mark_get(void)
|
|
|
|
{
|
|
|
|
return mark_get(mark_update);
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
2019-06-05 13:40:36 +02:00
|
|
|
#endif /* CONFIG_BT_CONN */
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
/**
 * @brief Synchronously disable an active LLL event.
 *
 * Registers a disabled callback on the event's ULL header, asks LLL to
 * disable the event via mayfly, and blocks until the callback gives the
 * semaphore.
 *
 * @param lll LLL context of the event to disable.
 * @return 0 on success, -EALREADY if the event was not active.
 */
int ull_disable(void *lll)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
	struct ull_hdr *hdr;
	struct k_sem sem;
	uint32_t ret;

	hdr = HDR_LLL2ULL(lll);
	if (!ull_ref_get(hdr)) {
		return -EALREADY;
	}

	k_sem_init(&sem, 0, 1);

	hdr->disabled_param = &sem;
	hdr->disabled_cb = disabled_cb;

	/* ULL_HIGH can run after we have call `ull_ref_get` and it can
	 * decrement the ref count. Hence, handle this race condition by
	 * ensuring that `disabled_cb` has been set while the ref count is still
	 * set.
	 * No need to call `lll_disable` and take the semaphore thereafter if
	 * reference count is zero.
	 * If the `sem` is given when reference count was decremented, we do not
	 * care.
	 */
	if (!ull_ref_get(hdr)) {
		return -EALREADY;
	}

	mfy.param = lll;
	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0,
			     &mfy);
	LL_ASSERT(!ret);

	/* Blocks until disabled_cb gives the semaphore */
	return k_sem_take(&sem, K_FOREVER);
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
void *ull_pdu_rx_alloc_peek(uint8_t count)
|
2018-12-18 05:48:20 +01:00
|
|
|
{
|
|
|
|
if (count > MFIFO_AVAIL_COUNT_GET(pdu_rx_free)) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return MFIFO_DEQUEUE_PEEK(pdu_rx_free);
|
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
void *ull_pdu_rx_alloc_peek_iter(uint8_t *idx)
|
2018-12-18 05:48:20 +01:00
|
|
|
{
|
|
|
|
return *(void **)MFIFO_DEQUEUE_ITER_GET(pdu_rx_free, idx);
|
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_pdu_rx_alloc(void)
|
|
|
|
{
|
|
|
|
return MFIFO_DEQUEUE(pdu_rx_free);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * @brief Enqueue an Rx node onto the LLL-to-ULL Rx queue.
 *
 * @param link Free memq link used to chain the node into the queue.
 * @param rx   Node to enqueue; must start with a struct node_rx_hdr.
 */
void ull_rx_put(memq_link_t *link, void *rx)
{
#if defined(CONFIG_BT_CONN)
	struct node_rx_hdr *rx_hdr = rx;

	/* Serialize Tx ack with Rx enqueue by storing reference to
	 * last element index in Tx ack FIFO.
	 */
	rx_hdr->ack_last = ull_conn_ack_last_idx_get();
#endif /* CONFIG_BT_CONN */

	/* Enqueue the Rx object */
	memq_enqueue(link, rx, &memq_ull_rx.tail);
}
|
|
|
|
|
|
|
|
/**
 * @brief Schedule ULL_HIGH to demux the Rx queue.
 *
 * Uses a function-local static mayfly; re-enqueue while pending is
 * handled by the mayfly framework — NOTE(review): confirm mayfly
 * re-enqueue semantics against the mayfly implementation.
 */
void ull_rx_sched(void)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};

	/* Kick the ULL (using the mayfly, tailchain it) */
	mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
}
|
|
|
|
|
2021-01-21 16:33:32 +01:00
|
|
|
#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
|
|
|
|
/**
 * @brief Enqueue an event-done object onto the ULL done queue.
 *
 * @param link Free memq link used to chain the object.
 * @param done The done object to enqueue.
 */
void ull_rx_put_done(memq_link_t *link, void *done)
{
	/* Append the done object at the tail of the done queue */
	memq_enqueue(link, done, &memq_ull_done.tail);
}
|
|
|
|
|
|
|
|
/**
 * @brief Schedule ULL_HIGH to process the done queue.
 *
 * Uses a function-local static mayfly, tailchained behind the caller.
 */
void ull_rx_sched_done(void)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, ull_done};

	/* Kick the ULL (using the mayfly, tailchain it) */
	mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
|
|
|
|
|
2021-05-04 09:56:35 +05:30
|
|
|
struct lll_event *ull_prepare_enqueue(lll_is_abort_cb_t is_abort_cb,
|
|
|
|
lll_abort_cb_t abort_cb,
|
|
|
|
struct lll_prepare_param *prepare_param,
|
|
|
|
lll_prepare_cb_t prepare_cb,
|
|
|
|
uint8_t is_resume)
|
2018-12-18 05:48:20 +01:00
|
|
|
{
|
|
|
|
struct lll_event *e;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t idx;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
idx = MFIFO_ENQUEUE_GET(prep, (void **)&e);
|
|
|
|
if (!e) {
|
2021-05-04 09:56:35 +05:30
|
|
|
return NULL;
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(&e->prepare_param, prepare_param, sizeof(e->prepare_param));
|
|
|
|
e->prepare_cb = prepare_cb;
|
|
|
|
e->is_abort_cb = is_abort_cb;
|
|
|
|
e->abort_cb = abort_cb;
|
|
|
|
e->is_resume = is_resume;
|
2019-03-26 19:57:45 -06:00
|
|
|
e->is_aborted = 0U;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
MFIFO_ENQUEUE(prep, idx);
|
|
|
|
|
2021-05-04 09:56:35 +05:30
|
|
|
return e;
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_prepare_dequeue_get(void)
|
|
|
|
{
|
|
|
|
return MFIFO_DEQUEUE_GET(prep);
|
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
void *ull_prepare_dequeue_iter(uint8_t *idx)
|
2018-12-18 05:48:20 +01:00
|
|
|
{
|
|
|
|
return MFIFO_DEQUEUE_ITER_GET(prep, idx);
|
|
|
|
}
|
|
|
|
|
2021-03-28 07:34:26 +05:30
|
|
|
/**
 * @brief Drain the prepare pipeline, handing each valid element to LLL.
 *
 * Aborted elements are simply discarded; valid ones are passed to
 * lll_resume via mayfly. Traversal stops when a non-resume element is
 * dispatched, or when a previously-seen resume parameter is encountered
 * again (infinite-loop guard).
 *
 * @param caller_id Mayfly caller identity (TICKER_USER_ID_*) of the
 *                  executing context.
 */
void ull_prepare_dequeue(uint8_t caller_id)
{
	void *param_resume_head = NULL;
	void *param_resume_next = NULL;
	struct lll_event *next;

	next = ull_prepare_dequeue_get();
	while (next) {
		void *param = next->prepare_param.param;
		uint8_t is_aborted = next->is_aborted;
		uint8_t is_resume = next->is_resume;

		/* Let LLL invoke the `prepare` interface if radio not in active
		 * use. Otherwise, enqueue at end of the prepare pipeline queue.
		 */
		if (!is_aborted) {
			static memq_link_t link;
			static struct mayfly mfy = {0, 0, &link, NULL,
						    lll_resume};
			uint32_t ret;

			mfy.param = next;
			ret = mayfly_enqueue(caller_id, TICKER_USER_ID_LLL, 0,
					     &mfy);
			LL_ASSERT(!ret);
		}

		MFIFO_DEQUEUE(prep);

		/* Check for anymore more prepare elements in queue */
		next = ull_prepare_dequeue_get();
		if (!next) {
			break;
		}

		/* A valid prepare element has its `prepare` invoked or was
		 * enqueued back into prepare pipeline.
		 */
		if (!is_aborted) {
			/* The prepare element was not a resume event, it would
			 * use the radio or was enqueued back into prepare
			 * pipeline with a preempt timeout being set.
			 */
			if (!is_resume) {
				break;
			}

			/* Remember the first encountered resume and the next
			 * resume element in the prepare pipeline so that we do
			 * not infinitely loop through the resume events in
			 * prepare pipeline.
			 */
			if (!param_resume_head) {
				param_resume_head = param;
			} else if (!param_resume_next) {
				param_resume_next = param;
			}

			/* Stop traversing the prepare pipeline when we reach
			 * back to the first or next resume event where we
			 * initially started processing the prepare pipeline.
			 */
			if (next->is_resume &&
			    ((next->prepare_param.param ==
			      param_resume_head) ||
			     (next->prepare_param.param ==
			      param_resume_next))) {
				break;
			}
		}
	}
}
|
|
|
|
|
2021-08-18 16:28:04 +05:30
|
|
|
struct event_done_extra *ull_event_done_extra_get(void)
|
2018-12-18 05:48:20 +01:00
|
|
|
{
|
2019-02-27 10:40:03 +01:00
|
|
|
struct node_rx_event_done *evdone;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
2019-02-27 10:40:03 +01:00
|
|
|
evdone = MFIFO_DEQUEUE_PEEK(done);
|
|
|
|
if (!evdone) {
|
2018-12-18 05:48:20 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-02-27 10:40:03 +01:00
|
|
|
return &evdone->extra;
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
|
|
|
|
2021-08-18 16:28:04 +05:30
|
|
|
struct event_done_extra *ull_done_extra_type_set(uint8_t type)
|
|
|
|
{
|
|
|
|
struct event_done_extra *extra;
|
|
|
|
|
|
|
|
extra = ull_event_done_extra_get();
|
|
|
|
if (!extra) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
extra->type = type;
|
|
|
|
|
|
|
|
return extra;
|
|
|
|
}
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
/**
 * @brief Emit an "event done" node towards ULL for a finished LLL event.
 *
 * @param param Event context recorded in the done node.
 * @return The enqueued done node, or NULL when the done pool was empty.
 */
void *ull_event_done(void *param)
{
	struct node_rx_event_done *evdone;
	memq_link_t *link;

	/* Obtain new node that signals "Done of an RX-event".
	 * Obtain this by dequeuing from the global 'mfifo_done' queue.
	 * Note that 'mfifo_done' is a queue of pointers, not of
	 * struct node_rx_event_done
	 */
	evdone = MFIFO_DEQUEUE(done);
	if (!evdone) {
		/* Not fatal if we can not obtain node, though
		 * we will loose the packets in software stack.
		 * If this happens during Conn Upd, this could cause LSTO
		 */
		return NULL;
	}

	/* Detach the embedded link so it can carry the node on the queue */
	link = evdone->hdr.link;
	evdone->hdr.link = NULL;

	evdone->hdr.type = NODE_RX_TYPE_EVENT_DONE;
	evdone->param = param;

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
	/* Dedicated done queue keeps done processing off the Rx path */
	ull_rx_put_done(link, evdone);
	ull_rx_sched_done();
#else
	ull_rx_put(link, evdone);
	ull_rx_sched();
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */

	return evdone;
}
|
|
|
|
|
2020-10-05 10:42:39 +05:30
|
|
|
#if defined(CONFIG_BT_PERIPHERAL) || defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
|
2020-08-11 10:38:23 +05:30
|
|
|
/**
|
|
|
|
* @brief Extract timing from completed event
|
|
|
|
*
|
|
|
|
* @param node_rx_event_done[in] Done event containing fresh timing information
|
|
|
|
* @param ticks_drift_plus[out] Positive part of drift uncertainty window
|
|
|
|
* @param ticks_drift_minus[out] Negative part of drift uncertainty window
|
|
|
|
*/
|
|
|
|
void ull_drift_ticks_get(struct node_rx_event_done *done,
|
|
|
|
uint32_t *ticks_drift_plus,
|
|
|
|
uint32_t *ticks_drift_minus)
|
|
|
|
{
|
|
|
|
uint32_t start_to_address_expected_us;
|
|
|
|
uint32_t start_to_address_actual_us;
|
|
|
|
uint32_t window_widening_event_us;
|
|
|
|
uint32_t preamble_to_addr_us;
|
|
|
|
|
|
|
|
start_to_address_actual_us =
|
|
|
|
done->extra.drift.start_to_address_actual_us;
|
|
|
|
window_widening_event_us =
|
|
|
|
done->extra.drift.window_widening_event_us;
|
|
|
|
preamble_to_addr_us =
|
|
|
|
done->extra.drift.preamble_to_addr_us;
|
|
|
|
|
|
|
|
start_to_address_expected_us = EVENT_JITTER_US +
|
|
|
|
EVENT_TICKER_RES_MARGIN_US +
|
|
|
|
window_widening_event_us +
|
|
|
|
preamble_to_addr_us;
|
|
|
|
|
|
|
|
if (start_to_address_actual_us <= start_to_address_expected_us) {
|
|
|
|
*ticks_drift_plus =
|
|
|
|
HAL_TICKER_US_TO_TICKS(window_widening_event_us);
|
|
|
|
*ticks_drift_minus =
|
|
|
|
HAL_TICKER_US_TO_TICKS((start_to_address_expected_us -
|
|
|
|
start_to_address_actual_us));
|
|
|
|
} else {
|
|
|
|
*ticks_drift_plus =
|
|
|
|
HAL_TICKER_US_TO_TICKS(start_to_address_actual_us);
|
|
|
|
*ticks_drift_minus =
|
|
|
|
HAL_TICKER_US_TO_TICKS(EVENT_JITTER_US +
|
|
|
|
EVENT_TICKER_RES_MARGIN_US +
|
|
|
|
preamble_to_addr_us);
|
|
|
|
}
|
|
|
|
}
|
2020-10-05 10:42:39 +05:30
|
|
|
#endif /* CONFIG_BT_PERIPHERAL || CONFIG_BT_CTLR_SYNC_PERIODIC */
|
2020-08-11 10:38:23 +05:30
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
/* One-time initialization of the ULL memory pools and memory queues:
 * done-event RXFIFO, rx PDU pool, rx link pool, and the ull_rx, ull_done
 * (when not CONFIG_BT_CTLR_LOW_LAT_ULL) and ll_rx memqs. Always returns 0.
 */
static inline int init_reset(void)
{
	memq_link_t *link;

	/* Initialize and allocate done pool */
	RXFIFO_INIT_ALLOC(done);

	/* Initialize rx pool. */
	mem_init(mem_pdu_rx.pool, (PDU_RX_NODE_POOL_ELEMENT_SIZE),
		 sizeof(mem_pdu_rx.pool) / (PDU_RX_NODE_POOL_ELEMENT_SIZE),
		 &mem_pdu_rx.free);

	/* Initialize rx link pool. */
	mem_init(mem_link_rx.pool, sizeof(memq_link_t),
		 sizeof(mem_link_rx.pool) / sizeof(memq_link_t),
		 &mem_link_rx.free);

	/* Acquire a link to initialize ull rx memq */
	link = mem_acquire(&mem_link_rx.free);
	LL_ASSERT(link);

	/* Initialize ull rx memq */
	MEMQ_INIT(ull_rx, link);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
	/* Acquire a link to initialize ull done memq.
	 * Note: this link comes from the dedicated done link pool, not the
	 * rx link pool used by the other memqs here.
	 */
	link = mem_acquire(&mem_link_done.free);
	LL_ASSERT(link);

	/* Initialize ull done memq */
	MEMQ_INIT(ull_done, link);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */

	/* Acquire a link to initialize ll rx memq */
	link = mem_acquire(&mem_link_rx.free);
	LL_ASSERT(link);

	/* Initialize ll rx memq */
	MEMQ_INIT(ll_rx, link);

	/* Allocate rx free buffers: reset the quota then fill the free
	 * FIFOs up to it.
	 */
	mem_link_rx.quota_pdu = RX_CNT;
	rx_replenish_all();

	return 0;
}
|
|
|
|
|
2019-10-10 11:18:30 +02:00
|
|
|
/* Reset the LLL core and every compiled-in role's LLL state (advertiser,
 * scanner, connection, direction finding). Any failure asserts.
 *
 * param is given to k_sem_give() on completion, so it is expected to be a
 * struct k_sem the caller is pending on; the give is skipped under
 * CONFIG_BT_CTLR_ZLI (presumably because signaling a semaphore is not
 * permitted from a zero-latency IRQ context -- confirm against caller).
 */
static void perform_lll_reset(void *param)
{
	int err;

	/* Reset LLL */
	err = lll_reset();
	LL_ASSERT(!err);

#if defined(CONFIG_BT_BROADCASTER)
	/* Reset adv state */
	err = lll_adv_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	/* Reset scan state */
	err = lll_scan_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CONN)
	/* Reset conn role */
	err = lll_conn_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_DF)
	/* Reset direction finding state */
	err = lll_df_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CTLR_DF */

#if !defined(CONFIG_BT_CTLR_ZLI)
	/* Signal completion to the waiter */
	k_sem_give(param);
#endif /* !CONFIG_BT_CTLR_ZLI */
}
|
|
|
|
|
2019-06-05 13:40:36 +02:00
|
|
|
/* Claim a mark slot for param, but only when the slot is currently empty.
 * Returns the resulting owner of the slot: param on a successful claim,
 * otherwise the pre-existing owner.
 */
static inline void *mark_set(void **m, void *param)
{
	void *owner = *m;

	if (owner == NULL) {
		*m = param;
		owner = param;
	}

	return owner;
}
|
|
|
|
|
|
|
|
/* Release a mark slot, but only when it is currently owned by param.
 * Returns param when the slot was cleared, NULL when the slot was empty
 * or owned by someone else (slot left untouched).
 */
static inline void *mark_unset(void **m, void *param)
{
	if ((*m == NULL) || (*m != param)) {
		return NULL;
	}

	*m = NULL;

	return param;
}
|
|
|
|
|
|
|
|
/* Read back a mark value: returns the current owner, NULL when unset.
 * Trivial accessor kept for symmetry with mark_set()/mark_unset().
 */
static inline void *mark_get(void *m)
{
	void *owner = m;

	return owner;
}
|
|
|
|
|
2022-01-22 07:31:03 +05:30
|
|
|
/* Allocate up to 'max' rx node+link pairs (clamped to the remaining PDU
 * quota) into the LLL free rx PDU FIFO. With CONFIG_BT_CONN, once that
 * FIFO is full before the request is exhausted, the remainder of the
 * quota is used to fill the LL/HCI free rx PDU FIFO instead. Returns
 * early (silently) when either pool runs dry.
 */
static void rx_replenish(uint8_t max)
{
	uint8_t idx;

	/* Never allocate beyond the remaining PDU quota */
	if (max > mem_link_rx.quota_pdu) {
		max = mem_link_rx.quota_pdu;
	}

	while (max && MFIFO_ENQUEUE_IDX_GET(pdu_rx_free, &idx)) {
		memq_link_t *link;
		struct node_rx_hdr *rx;

		link = mem_acquire(&mem_link_rx.free);
		if (!link) {
			return;
		}

		rx = mem_acquire(&mem_pdu_rx.free);
		if (!rx) {
			/* Give the link back; the reserved FIFO index is
			 * simply not committed.
			 */
			ll_rx_link_release(link);
			return;
		}

		rx->link = link;

		MFIFO_BY_IDX_ENQUEUE(pdu_rx_free, idx, rx);

		ll_rx_link_quota_dec();

		max--;
	}

#if defined(CONFIG_BT_CONN)
	/* Request fully satisfied by the LLL FIFO, nothing left over */
	if (!max) {
		return;
	}

	/* Replenish the ULL to LL/HCI free Rx PDU queue after LLL to ULL free
	 * Rx PDU queue has been filled.
	 */
	while (mem_link_rx.quota_pdu &&
	       MFIFO_ENQUEUE_IDX_GET(ll_pdu_rx_free, &idx)) {
		memq_link_t *link;
		struct node_rx_hdr *rx;

		link = mem_acquire(&mem_link_rx.free);
		if (!link) {
			return;
		}

		rx = mem_acquire(&mem_pdu_rx.free);
		if (!rx) {
			ll_rx_link_release(link);
			return;
		}

		/* NOTE(review): link->mem is cleared only in this LL/HCI
		 * path, not in the LLL path above -- confirm the LLL FIFO
		 * consumers initialize/ignore link->mem themselves.
		 */
		link->mem = NULL;
		rx->link = link;

		MFIFO_BY_IDX_ENQUEUE(ll_pdu_rx_free, idx, rx);

		ll_rx_link_quota_dec();
	}
#endif /* CONFIG_BT_CONN */
}
|
|
|
|
|
2022-01-22 07:31:03 +05:30
|
|
|
/* Fill the free rx FIFOs with as many buffers as the quota permits. */
static void rx_replenish_all(void)
{
	const uint8_t unbounded = UINT8_MAX;

	/* rx_replenish() clamps the request to the remaining quota */
	rx_replenish(unbounded);
}
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CONN) || \
|
|
|
|
(defined(CONFIG_BT_OBSERVER) && defined(CONFIG_BT_CTLR_ADV_EXT)) || \
|
|
|
|
defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
|
|
|
|
defined(CONFIG_BT_CTLR_ADV_ISO)
|
|
|
|
|
|
|
|
/* Top up the free rx FIFOs with exactly one buffer (quota permitting). */
static void rx_replenish_one(void)
{
	const uint8_t single = 1U;

	rx_replenish(single);
}
|
|
|
|
|
|
|
|
/* Return one rx node to its pool, then immediately put one fresh buffer
 * back into the free rx FIFOs to keep them topped up.
 */
static void rx_release_replenish(struct node_rx_hdr *rx)
{
	ll_rx_release(rx);

	/* One node freed, so at most one replacement is needed */
	rx_replenish(1U);
}
|
|
|
|
|
|
|
|
/* Pop the head of the LL rx memq, free the supplied link back to the rx
 * link pool and return its unit of PDU quota.
 */
static void rx_link_dequeue_release_quota_inc(memq_link_t *link)
{
	(void)memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head, NULL);

	ll_rx_link_release(link);
	ll_rx_link_quota_inc();
}
|
|
|
|
#endif /* CONFIG_BT_CONN ||
|
|
|
|
* (CONFIG_BT_OBSERVER && CONFIG_BT_CTLR_ADV_EXT) ||
|
|
|
|
* CONFIG_BT_CTLR_ADV_PERIODIC ||
|
|
|
|
* CONFIG_BT_CTLR_ADV_ISO
|
|
|
|
*/
|
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
/* Demultiplex LLL-to-ULL rx objects and connection tx acks.
 *
 * Mayfly-style callback (param unused); rx_demux_yield() re-enqueues it
 * at TICKER_USER_ID_ULL_HIGH. Without CONFIG_BT_CTLR_LOW_LAT_ULL the
 * whole body loops until the ull_rx memq is drained; with it, a single
 * item is handled and rx_demux_yield() tail-chains the next pass.
 */
static void rx_demux(void *param)
{
	memq_link_t *link;

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
	do {
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
		struct node_rx_hdr *rx;

		/* Peek only; the handlers dequeue when they accept the node */
		link = memq_peek(memq_ull_rx.head, memq_ull_rx.tail,
				 (void **)&rx);
		if (link) {
#if defined(CONFIG_BT_CONN)
			struct node_tx *node_tx;
			memq_link_t *link_tx;
			uint16_t handle; /* Handle to Ack TX */
#endif /* CONFIG_BT_CONN */
			int nack = 0;

			LL_ASSERT(rx);

#if defined(CONFIG_BT_CONN)
			/* Tx acks that precede this rx node (by ack_last
			 * serial) are processed first to preserve ordering.
			 */
			link_tx = ull_conn_ack_by_last_peek(rx->ack_last,
							    &handle, &node_tx);
			if (link_tx) {
				rx_demux_conn_tx_ack(rx->ack_last, handle,
						     link_tx, node_tx);
			} else
#endif /* CONFIG_BT_CONN */
			{
				nack = rx_demux_rx(link, rx);
			}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
			/* Single-shot mode: schedule the next pass unless
			 * the handler nacked (node stays queued).
			 */
			if (!nack) {
				rx_demux_yield();
			}
#else /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
			/* Loop mode: a nack stops draining; the node is
			 * retried on the next rx_demux invocation.
			 */
			if (nack) {
				break;
			}
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */

#if defined(CONFIG_BT_CONN)
		} else {
			struct node_tx *node_tx;
			uint8_t ack_last;
			uint16_t handle;

			/* No rx nodes pending; drain any outstanding
			 * connection tx acks.
			 */
			link = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
			if (link) {
				rx_demux_conn_tx_ack(ack_last, handle,
						      link, node_tx);

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
				rx_demux_yield();
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */

			}
#endif /* CONFIG_BT_CONN */
		}

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
	} while (link);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
}
|
|
|
|
|
2021-01-29 18:08:59 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
|
|
|
|
static void rx_demux_yield(void)
|
|
|
|
{
|
|
|
|
static memq_link_t link;
|
|
|
|
static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
|
2021-02-02 13:49:02 +05:30
|
|
|
struct node_rx_hdr *rx;
|
|
|
|
memq_link_t *link_peek;
|
|
|
|
|
|
|
|
link_peek = memq_peek(memq_ull_rx.head, memq_ull_rx.tail, (void **)&rx);
|
|
|
|
if (!link_peek) {
|
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
struct node_tx *node_tx;
|
|
|
|
uint8_t ack_last;
|
|
|
|
uint16_t handle;
|
|
|
|
|
|
|
|
link_peek = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
|
|
|
|
if (!link_peek) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
#else /* !CONFIG_BT_CONN */
|
|
|
|
return;
|
|
|
|
#endif /* !CONFIG_BT_CONN */
|
|
|
|
}
|
2021-01-29 18:08:59 +05:30
|
|
|
|
|
|
|
/* Kick the ULL (using the mayfly, tailchain it) */
|
|
|
|
mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_HIGH, 1,
|
|
|
|
&mfy);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
|
|
|
|
|
2022-04-28 09:59:42 +05:30
|
|
|
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_CTLR_ADV_ISO)
|
2020-08-19 07:27:02 +05:30
|
|
|
/* Count completed Tx for one handle by iterating the tx ack FIFO from
 * *first towards last. Writes the handle being processed to *handle and
 * returns the number of completed data packets / SDU fragments for it;
 * returns 0 when the FIFO range is empty. Iteration stops at the first
 * ack belonging to a different handle (presumably *first is advanced by
 * mfifo_dequeue_iter_get so a later call resumes there -- confirm).
 */
static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last)
{
	struct lll_tx *tx;
	uint8_t cmplt;

	tx = mfifo_dequeue_iter_get(mfifo_tx_ack.m, mfifo_tx_ack.s,
				    mfifo_tx_ack.n, mfifo_tx_ack.f, last,
				    first);
	if (!tx) {
		return 0;
	}

	*handle = tx->handle;
	cmplt = 0U;
	do {
		/* if (false) anchors the else-if chain so each branch can
		 * be compiled out independently.
		 */
		if (false) {
#if defined(CONFIG_BT_CTLR_ADV_ISO) || \
	defined(CONFIG_BT_CTLR_CONN_ISO)
		} else if (IS_CIS_HANDLE(tx->handle) ||
			   IS_ADV_ISO_HANDLE(tx->handle)) {
			struct node_tx_iso *tx_node_iso;
			struct pdu_data *p;

			tx_node_iso = tx->node;
			/* NOTE(review): p is assigned but not read in this
			 * branch -- confirm whether it is leftover.
			 */
			p = (void *)tx_node_iso->pdu;

			if (IS_ADV_ISO_HANDLE(tx->handle)) {
				/* FIXME: ADV_ISO shall be updated to use ISOAL for
				 * TX. Until then, assume 1 node equals 1 fragment.
				 */
				cmplt += 1;
			} else {
				/* We count each SDU fragment completed by this PDU */
				cmplt += tx_node_iso->sdu_fragments;
			}

			ll_iso_link_tx_release(tx_node_iso->link);
			ll_iso_tx_mem_release(tx_node_iso);
			goto next_ack;
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CONN)
		} else {
			struct node_tx *tx_node;
			struct pdu_data *p;

			/* tx->node doubles as a tag: values 1 and 2 are
			 * sentinels (counted / not counted), anything with
			 * bits above the low two set is a real node pointer.
			 * NOTE(review): p's address is computed before the
			 * sentinel check below; it is only dereferenced
			 * behind the (tx_node & ~3) guard -- confirm.
			 */
			tx_node = tx->node;
			p = (void *)tx_node->pdu;
			if (!tx_node || (tx_node == (void *)1) ||
			    (((uint32_t)tx_node & ~3) &&
			     (p->ll_id == PDU_DATA_LLID_DATA_START ||
			      p->ll_id == PDU_DATA_LLID_DATA_CONTINUE))) {
				/* data packet, hence count num cmplt */
				tx->node = (void *)1;
				cmplt++;
			} else {
				/* ctrl packet or flushed, hence dont count num cmplt */
				tx->node = (void *)2;
			}

			/* Only real node pointers are released */
			if (((uint32_t)tx_node & ~3)) {
				ll_tx_mem_release(tx_node);
			}
#endif /* CONFIG_BT_CONN */

		}

#if defined(CONFIG_BT_CTLR_ADV_ISO) || \
	defined(CONFIG_BT_CTLR_CONN_ISO)
next_ack:
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_CONN_ISO */

		/* Advance to the next ack in the FIFO range */
		tx = mfifo_dequeue_iter_get(mfifo_tx_ack.m, mfifo_tx_ack.s,
					    mfifo_tx_ack.n, mfifo_tx_ack.f,
					    last, first);
	} while (tx && tx->handle == *handle);

	return cmplt;
}
|
|
|
|
|
2020-08-11 10:51:43 +05:30
|
|
|
/* Process connection tx acks at and after serial ack_last: dequeue each
 * ack, run ull_conn_tx_ack() and release its link, then peek for more.
 * Without CONFIG_BT_CTLR_LOW_LAT_ULL this loops until none remain and
 * always schedules the rx thread; with it, a single ack is processed and
 * the rx thread is scheduled only when no further ack is pending.
 */
static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
					memq_link_t *link,
					struct node_tx *node_tx)
{
#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
	do {
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
		/* Dequeue node */
		ull_conn_ack_dequeue();

		/* Process Tx ack */
		ull_conn_tx_ack(handle, link, node_tx);

		/* Release link mem */
		ull_conn_link_tx_release(link);

		/* check for more rx ack */
		link = ull_conn_ack_by_last_peek(ack_last, &handle, &node_tx);

		/* The brace-splice below makes the following block the body
		 * of 'if (!link)' in LOW_LAT builds, and a plain statement
		 * block after the do-while otherwise.
		 */
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
		if (!link)
#else /* CONFIG_BT_CTLR_LOW_LAT_ULL */
	} while (link);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */

	{
		/* trigger thread to call ll_rx_get() */
		ll_rx_sched();
	}
}
|
2022-04-28 09:59:42 +05:30
|
|
|
#endif /* CONFIG_BT_CONN || CONFIG_BT_CTLR_ADV_ISO */
|
2020-08-11 10:51:43 +05:30
|
|
|
|
2021-01-21 16:33:32 +01:00
|
|
|
#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
|
|
|
|
static void ull_done(void *param)
|
|
|
|
{
|
|
|
|
memq_link_t *link;
|
|
|
|
struct node_rx_hdr *done;
|
|
|
|
|
|
|
|
do {
|
|
|
|
link = memq_peek(memq_ull_done.head, memq_ull_done.tail,
|
|
|
|
(void **)&done);
|
|
|
|
|
|
|
|
if (link) {
|
|
|
|
/* Process done event */
|
2021-08-16 12:53:35 +05:30
|
|
|
(void)memq_dequeue(memq_ull_done.tail,
|
|
|
|
&memq_ull_done.head, NULL);
|
2021-01-21 16:33:32 +01:00
|
|
|
rx_demux_event_done(link, done);
|
|
|
|
}
|
|
|
|
} while (link);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
|
|
|
|
|
2019-02-14 10:04:17 +01:00
|
|
|
/**
 * @brief Dispatch rx objects
 * @details Rx objects are only peeked, not dequeued yet.
 *   Execution context: ULL high priority Mayfly
 *
 * Each accepted node is dequeued from the ull_rx memq here before being
 * handed to its role-specific handler or forwarded to the LL/HCI rx path
 * (ll_rx_put + ll_rx_sched). Returns non-zero only when a connection data
 * PDU is nacked by ull_conn_rx(), in which case the node stays queued.
 */
static inline int rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx)
{
	/* Demux Rx objects */
	switch (rx->type) {
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
	case NODE_RX_TYPE_EVENT_DONE:
	{
		/* In low-latency builds, done events arrive via the rx memq
		 * instead of the dedicated done memq.
		 */
		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
		rx_demux_event_done(link, rx);
	}
	break;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */

#if defined(CONFIG_BT_OBSERVER)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	case NODE_RX_TYPE_EXT_1M_REPORT:
	case NODE_RX_TYPE_EXT_CODED_REPORT:
	case NODE_RX_TYPE_EXT_AUX_REPORT:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case NODE_RX_TYPE_SYNC_REPORT:
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	{
		struct pdu_adv *adv;

		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);

		/* Non-extended PDUs go straight to the host rx path;
		 * extended indications need aux chain handling first.
		 */
		adv = (void *)((struct node_rx_pdu *)rx)->pdu;
		if (adv->type != PDU_ADV_TYPE_EXT_IND) {
			ll_rx_put(link, rx);
			ll_rx_sched();
			break;
		}

		ull_scan_aux_setup(link, rx);
	}
	break;

	case NODE_RX_TYPE_EXT_AUX_RELEASE:
	{
		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
		ull_scan_aux_release(link, rx);
	}
	break;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case NODE_RX_TYPE_SYNC:
	{
		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
		ull_sync_established_report(link, rx);
	}
	break;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX) || defined(CONFIG_BT_CTLR_DF_CONN_CTE_RX) || \
	defined(CONFIG_BT_CTLR_DTM_HCI_DF_IQ_REPORT)
	/* IQ sample reports are forwarded to the host rx path as-is */
	case NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT:
	case NODE_RX_TYPE_CONN_IQ_SAMPLE_REPORT:
	case NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT:
	{
		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
		ll_rx_put(link, rx);
		ll_rx_sched();
	}
	break;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX || CONFIG_BT_CTLR_DF_CONN_CTE_RX */

#if defined(CONFIG_BT_CONN)
	case NODE_RX_TYPE_CONNECTION:
	{
		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
		ull_conn_setup(link, rx);
	}
	break;

	case NODE_RX_TYPE_DC_PDU:
	{
		int nack;

		/* ull_conn_rx() may nack (node not consumed) or may
		 * consume/replace rx (it takes &rx).
		 */
		nack = ull_conn_rx(link, (void *)&rx);
		if (nack) {
			return nack;
		}

		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);

		/* rx may have been set to NULL when fully consumed */
		if (rx) {
			ll_rx_put(link, rx);
			ll_rx_sched();
		}
	}
	break;

	/* Terminate falls through to the generic forward-to-host path */
	case NODE_RX_TYPE_TERMINATE:
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_OBSERVER) || \
	defined(CONFIG_BT_CTLR_ADV_PERIODIC) || \
	defined(CONFIG_BT_CTLR_BROADCAST_ISO) || \
	defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
	defined(CONFIG_BT_CTLR_PROFILE_ISR) || \
	defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
	defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
	defined(CONFIG_BT_CONN)

#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	case NODE_RX_TYPE_SYNC_CHM_COMPLETE:
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */

#if defined(CONFIG_BT_CTLR_ADV_ISO)
	case NODE_RX_TYPE_BIG_CHM_COMPLETE:
	case NODE_RX_TYPE_BIG_TERMINATE:
#endif /* CONFIG_BT_CTLR_ADV_ISO */

#if defined(CONFIG_BT_OBSERVER)
	case NODE_RX_TYPE_REPORT:
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
	case NODE_RX_TYPE_SCAN_REQ:
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
	case NODE_RX_TYPE_PROFILE:
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
	case NODE_RX_TYPE_ADV_INDICATION:
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
	case NODE_RX_TYPE_SCAN_INDICATION:
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

	case NODE_RX_TYPE_RELEASE:
	{
		/* Generic path: dequeue and hand the node to the host rx
		 * thread unchanged.
		 */
		(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
		ll_rx_put(link, rx);
		ll_rx_sched();
	}
	break;
#endif /* CONFIG_BT_OBSERVER ||
	* CONFIG_BT_CTLR_ADV_PERIODIC ||
	* CONFIG_BT_CTLR_BROADCAST_ISO ||
	* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY ||
	* CONFIG_BT_CTLR_PROFILE_ISR ||
	* CONFIG_BT_CTLR_ADV_INDICATION ||
	* CONFIG_BT_CTLR_SCAN_INDICATION ||
	* CONFIG_BT_CONN
	*/

	default:
	{
#if defined(CONFIG_BT_CTLR_USER_EXT)
		/* Try proprietary demuxing */
		rx_demux_rx_proprietary(link, rx, memq_ull_rx.tail,
					&memq_ull_rx.head);
#else
		LL_ASSERT(0);
#endif /* CONFIG_BT_CTLR_USER_EXT */
	}
	break;
	}

	return 0;
}
|
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
/* Process one completed-event ('done') node: drop the prepare reference
 * held on the originating ULL context, dispatch the role-specific done
 * handler selected by done->extra.type, return the node to the done
 * RXFIFO, and finally invoke the context's disabled_cb when this was the
 * last outstanding reference and a disable was requested.
 */
static inline void rx_demux_event_done(memq_link_t *link,
				       struct node_rx_hdr *rx)
{
	struct node_rx_event_done *done = (void *)rx;
	struct ull_hdr *ull_hdr;
	void *release;

	/* Decrement prepare reference if ULL will not resume */
	ull_hdr = done->param;
	if (ull_hdr) {
		LL_ASSERT(ull_ref_get(ull_hdr));
		ull_ref_dec(ull_hdr);
	}

	/* Process role dependent event done */
	switch (done->extra.type) {
#if defined(CONFIG_BT_CONN)
	case EVENT_DONE_EXTRA_TYPE_CONN:
		ull_conn_done(done);
		break;
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_BROADCASTER)
#if defined(CONFIG_BT_CTLR_ADV_EXT) || \
	defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	case EVENT_DONE_EXTRA_TYPE_ADV:
		ull_adv_done(done);
		break;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	case EVENT_DONE_EXTRA_TYPE_ADV_AUX:
		ull_adv_aux_done(done);
		break;

#if defined(CONFIG_BT_CTLR_ADV_ISO)
	case EVENT_DONE_EXTRA_TYPE_ADV_ISO_COMPLETE:
		ull_adv_iso_done_complete(done);
		break;

	case EVENT_DONE_EXTRA_TYPE_ADV_ISO_TERMINATE:
		ull_adv_iso_done_terminate(done);
		break;
#endif /* CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_OBSERVER)
	case EVENT_DONE_EXTRA_TYPE_SCAN:
		ull_scan_done(done);
		break;

	case EVENT_DONE_EXTRA_TYPE_SCAN_AUX:
		ull_scan_aux_done(done);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case EVENT_DONE_EXTRA_TYPE_SYNC:
		ull_sync_done(done);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	case EVENT_DONE_EXTRA_TYPE_SYNC_ISO_ESTAB:
		ull_sync_iso_estab_done(done);
		break;

	case EVENT_DONE_EXTRA_TYPE_SYNC_ISO:
		ull_sync_iso_done(done);
		break;

	case EVENT_DONE_EXTRA_TYPE_SYNC_ISO_TERMINATE:
		ull_sync_iso_done_terminate(done);
		break;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_OBSERVER */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_CONN_ISO)
	case EVENT_DONE_EXTRA_TYPE_CIS:
		ull_conn_iso_done(done);
		break;
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_USER_EXT)
	/* Vendor extensions own a contiguous range of extra types */
	case EVENT_DONE_EXTRA_TYPE_USER_START
		... EVENT_DONE_EXTRA_TYPE_USER_END:
		ull_proprietary_done(done);
		break;
#endif /* CONFIG_BT_CTLR_USER_EXT */

	case EVENT_DONE_EXTRA_TYPE_NONE:
		/* ignore */
		break;

	default:
		LL_ASSERT(0);
		break;
	}

	/* Release done: clear the type tag and return the node to the done
	 * RXFIFO for reuse.
	 */
	done->extra.type = 0U;
	release = RXFIFO_RELEASE(done, link, done);
	LL_ASSERT(release == done);

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	/* dequeue prepare pipeline */
	ull_prepare_dequeue(TICKER_USER_ID_ULL_HIGH);

	/* LLL done synchronize count */
	lll_done_ull_inc();
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

	/* If disable initiated, signal the semaphore */
	if (ull_hdr && !ull_ref_get(ull_hdr) && ull_hdr->disabled_cb) {
		ull_hdr->disabled_cb(ull_hdr->disabled_param);
	}
}
|
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
/* Generic disable-completion callback: param is the semaphore the
 * disabling thread pends on; give it to unblock that thread.
 */
static void disabled_cb(void *param)
{
	struct k_sem *sem = param;

	k_sem_give(sem);
}
|
2021-05-19 13:59:21 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Support function for RXFIFO_ALLOC macro
|
|
|
|
* @details This function allocates up to 'max' number of MFIFO elements by
|
|
|
|
* enqueuing pointers to memory elements with associated memq links.
|
|
|
|
*/
|
|
|
|
void ull_rxfifo_alloc(uint8_t s, uint8_t n, uint8_t f, uint8_t *l, uint8_t *m,
|
|
|
|
void *mem_free, void *link_free, uint8_t max)
|
|
|
|
{
|
|
|
|
uint8_t idx;
|
|
|
|
|
|
|
|
while ((max--) && mfifo_enqueue_idx_get(n, f, *l, &idx)) {
|
|
|
|
memq_link_t *link;
|
|
|
|
struct node_rx_hdr *rx;
|
|
|
|
|
|
|
|
link = mem_acquire(link_free);
|
|
|
|
if (!link) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
rx = mem_acquire(mem_free);
|
|
|
|
if (!rx) {
|
|
|
|
mem_release(link, link_free);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
link->mem = NULL;
|
|
|
|
rx->link = link;
|
|
|
|
|
|
|
|
mfifo_by_idx_enqueue(m, s, idx, rx, l);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Support function for RXFIFO_RELEASE macro
|
|
|
|
* @details This function releases a node by returning it to the FIFO.
|
|
|
|
*/
|
|
|
|
void *ull_rxfifo_release(uint8_t s, uint8_t n, uint8_t f, uint8_t *l, uint8_t *m,
|
|
|
|
memq_link_t *link, struct node_rx_hdr *rx)
|
|
|
|
{
|
|
|
|
uint8_t idx;
|
|
|
|
|
|
|
|
if (!mfifo_enqueue_idx_get(n, f, *l, &idx)) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
rx->link = link;
|
|
|
|
|
|
|
|
mfifo_by_idx_enqueue(m, s, idx, rx, l);
|
|
|
|
|
|
|
|
return rx;
|
|
|
|
}
|
2022-05-05 10:38:23 +02:00
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO)
/* Contains vendor specific argument, function to be implemented by vendors.
 * Weak default: no vendor data path support, so every configuration
 * request is rejected with Command Disallowed.
 */
__weak uint8_t ll_configure_data_path(uint8_t data_path_dir,
				      uint8_t data_path_id,
				      uint8_t vs_config_len,
				      uint8_t *vs_config)
{
	(void)data_path_dir;
	(void)data_path_id;
	(void)vs_config_len;
	(void)vs_config;

	return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_CTLR_HCI_CODEC_AND_DELAY_INFO */
|