2018-12-18 05:48:20 +01:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
|
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdbool.h>
|
|
|
|
#include <errno.h>
|
|
|
|
|
2020-08-19 10:09:58 +05:30
|
|
|
#include <zephyr.h>
|
|
|
|
#include <soc.h>
|
2018-12-18 05:48:20 +01:00
|
|
|
#include <device.h>
|
2019-06-25 15:53:49 -04:00
|
|
|
#include <drivers/entropy.h>
|
2018-12-18 05:48:20 +01:00
|
|
|
#include <bluetooth/hci.h>
|
|
|
|
|
2020-08-19 10:09:58 +05:30
|
|
|
#include "hal/cpu.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "hal/ccm.h"
|
2020-08-19 10:09:58 +05:30
|
|
|
#include "hal/cntr.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "hal/ticker.h"
|
|
|
|
|
|
|
|
#include "util/util.h"
|
|
|
|
#include "util/mem.h"
|
|
|
|
#include "util/mfifo.h"
|
|
|
|
#include "util/memq.h"
|
|
|
|
#include "util/mayfly.h"
|
|
|
|
|
|
|
|
#include "ticker/ticker.h"
|
|
|
|
|
|
|
|
#include "pdu.h"
|
|
|
|
#include "ll.h"
|
2019-01-26 06:07:45 +05:30
|
|
|
#include "ll_feat.h"
|
2020-01-27 12:52:04 +01:00
|
|
|
#include "ll_settings.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "lll.h"
|
2020-08-11 11:01:26 +05:30
|
|
|
#include "lll_vendor.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "lll_adv.h"
|
|
|
|
#include "lll_scan.h"
|
2020-08-03 16:28:58 +05:30
|
|
|
#include "lll_sync.h"
|
2020-11-09 16:31:01 +01:00
|
|
|
#include "lll_sync_iso.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "lll_conn.h"
|
2020-11-19 02:12:54 -08:00
|
|
|
#include "lll_df.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "ull_adv_types.h"
|
|
|
|
#include "ull_scan_types.h"
|
2020-08-11 10:08:13 +05:30
|
|
|
#include "ull_sync_types.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "ull_conn_types.h"
|
2019-05-09 18:39:26 +05:30
|
|
|
#include "ull_filter.h"
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "ull_internal.h"
|
2020-11-26 15:47:39 +01:00
|
|
|
#include "ull_iso_internal.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "ull_adv_internal.h"
|
|
|
|
#include "ull_scan_internal.h"
|
2020-08-03 16:28:58 +05:30
|
|
|
#include "ull_sync_internal.h"
|
2020-11-09 16:31:01 +01:00
|
|
|
#include "ull_sync_iso_internal.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "ull_conn_internal.h"
|
2020-11-20 05:16:05 -08:00
|
|
|
#include "ull_df.h"
|
2018-12-18 05:48:20 +01:00
|
|
|
|
2019-05-09 15:41:39 +02:00
|
|
|
#if defined(CONFIG_BT_CTLR_USER_EXT)
|
|
|
|
#include "ull_vendor.h"
|
|
|
|
#endif /* CONFIG_BT_CTLR_USER_EXT */
|
|
|
|
|
2019-11-07 11:39:45 +05:30
|
|
|
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
|
|
|
|
#define LOG_MODULE_NAME bt_ctlr_ull
|
2018-12-18 05:48:20 +01:00
|
|
|
#include "common/log.h"
|
|
|
|
#include "hal/debug.h"
|
|
|
|
|
2021-01-12 11:28:17 +01:00
|
|
|
#if !defined(TICKER_USER_LLL_VENDOR_OPS)
|
|
|
|
#define TICKER_USER_LLL_VENDOR_OPS 0
|
|
|
|
#endif /* TICKER_USER_LLL_VENDOR_OPS */
|
|
|
|
|
2020-11-09 15:23:06 +01:00
|
|
|
#if !defined(TICKER_USER_ULL_HIGH_VENDOR_OPS)
|
|
|
|
#define TICKER_USER_ULL_HIGH_VENDOR_OPS 0
|
|
|
|
#endif /* TICKER_USER_ULL_HIGH_VENDOR_OPS */
|
|
|
|
|
|
|
|
#if !defined(TICKER_USER_THREAD_VENDOR_OPS)
|
|
|
|
#define TICKER_USER_THREAD_VENDOR_OPS 0
|
|
|
|
#endif /* TICKER_USER_THREAD_VENDOR_OPS */
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
/* Define ticker nodes and user operations */
|
|
|
|
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
|
|
|
|
(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
|
2021-01-12 11:28:17 +01:00
|
|
|
#define TICKER_USER_LLL_OPS (3 + TICKER_USER_LLL_VENDOR_OPS + 1)
|
2018-12-18 05:48:20 +01:00
|
|
|
#else
|
2021-01-12 11:28:17 +01:00
|
|
|
#define TICKER_USER_LLL_OPS (2 + TICKER_USER_LLL_VENDOR_OPS + 1)
|
2018-12-18 05:48:20 +01:00
|
|
|
#endif /* CONFIG_BT_CTLR_LOW_LAT */
|
2019-09-27 11:58:07 +02:00
|
|
|
|
|
|
|
#define TICKER_USER_ULL_HIGH_OPS (3 + TICKER_USER_ULL_HIGH_VENDOR_OPS + 1)
|
2018-12-18 05:48:20 +01:00
|
|
|
#define TICKER_USER_ULL_LOW_OPS (1 + 1)
|
2020-11-09 15:23:06 +01:00
|
|
|
#define TICKER_USER_THREAD_OPS (1 + TICKER_USER_THREAD_VENDOR_OPS + 1)
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
#if defined(CONFIG_BT_BROADCASTER)
|
|
|
|
#define BT_ADV_TICKER_NODES ((TICKER_ID_ADV_LAST) - (TICKER_ID_ADV_STOP) + 1)
|
2020-02-12 08:55:57 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
|
|
|
|
#define BT_ADV_AUX_TICKER_NODES ((TICKER_ID_ADV_AUX_LAST) - \
|
|
|
|
(TICKER_ID_ADV_AUX_BASE) + 1)
|
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
|
|
|
|
#define BT_ADV_SYNC_TICKER_NODES ((TICKER_ID_ADV_SYNC_LAST) - \
|
|
|
|
(TICKER_ID_ADV_SYNC_BASE) + 1)
|
|
|
|
#else /* !CONFIG_BT_CTLR_ADV_PERIODIC */
|
|
|
|
#define BT_ADV_SYNC_TICKER_NODES 0
|
|
|
|
#endif /* !CONFIG_BT_CTLR_ADV_PERIODIC */
|
|
|
|
#else /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
|
|
|
|
#define BT_ADV_AUX_TICKER_NODES 0
|
|
|
|
#define BT_ADV_SYNC_TICKER_NODES 0
|
|
|
|
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
|
|
|
|
#else /* !CONFIG_BT_BROADCASTER */
|
2018-12-18 05:48:20 +01:00
|
|
|
#define BT_ADV_TICKER_NODES 0
|
2020-02-12 08:55:57 +05:30
|
|
|
#define BT_ADV_AUX_TICKER_NODES 0
|
|
|
|
#define BT_ADV_SYNC_TICKER_NODES 0
|
|
|
|
#endif /* !CONFIG_BT_BROADCASTER */
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
#if defined(CONFIG_BT_OBSERVER)
|
|
|
|
#define BT_SCAN_TICKER_NODES ((TICKER_ID_SCAN_LAST) - (TICKER_ID_SCAN_STOP) + 1)
|
2020-04-21 11:12:41 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_EXT)
|
|
|
|
#define BT_SCAN_AUX_TICKER_NODES ((TICKER_ID_SCAN_AUX_LAST) - \
|
|
|
|
(TICKER_ID_SCAN_AUX_BASE) + 1)
|
2020-10-05 10:42:39 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
|
2020-08-21 16:17:51 +05:30
|
|
|
#define BT_SCAN_SYNC_TICKER_NODES ((TICKER_ID_SCAN_SYNC_LAST) - \
|
|
|
|
(TICKER_ID_SCAN_SYNC_BASE) + 1)
|
2020-10-08 14:40:43 +05:30
|
|
|
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
|
|
|
|
#define BT_SCAN_SYNC_TICKER_NODES 0
|
|
|
|
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
|
2020-04-21 11:12:41 +05:30
|
|
|
#else /* !CONFIG_BT_CTLR_ADV_EXT */
|
|
|
|
#define BT_SCAN_AUX_TICKER_NODES 0
|
2020-08-21 16:17:51 +05:30
|
|
|
#define BT_SCAN_SYNC_TICKER_NODES 0
|
2020-04-21 11:12:41 +05:30
|
|
|
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
|
2018-12-18 05:48:20 +01:00
|
|
|
#else
|
|
|
|
#define BT_SCAN_TICKER_NODES 0
|
2020-04-21 11:12:41 +05:30
|
|
|
#define BT_SCAN_AUX_TICKER_NODES 0
|
2020-08-21 16:17:51 +05:30
|
|
|
#define BT_SCAN_SYNC_TICKER_NODES 0
|
2018-12-18 05:48:20 +01:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
#define BT_CONN_TICKER_NODES ((TICKER_ID_CONN_LAST) - (TICKER_ID_CONN_BASE) + 1)
|
|
|
|
#else
|
|
|
|
#define BT_CONN_TICKER_NODES 0
|
|
|
|
#endif
|
|
|
|
|
2020-09-17 15:44:37 +02:00
|
|
|
#if defined(CONFIG_SOC_FLASH_NRF_RADIO_SYNC_TICKER)
|
2020-07-09 13:03:27 +05:30
|
|
|
#define FLASH_TICKER_NODES 2 /* No. of tickers reserved for flashing */
|
2018-12-18 05:48:20 +01:00
|
|
|
#define FLASH_TICKER_USER_APP_OPS 1 /* No. of additional ticker operations */
|
|
|
|
#else
|
|
|
|
#define FLASH_TICKER_NODES 0
|
|
|
|
#define FLASH_TICKER_USER_APP_OPS 0
|
|
|
|
#endif
|
|
|
|
|
2020-05-07 11:45:33 +02:00
|
|
|
#if defined(CONFIG_BT_CTLR_USER_EXT)
|
|
|
|
#define USER_TICKER_NODES CONFIG_BT_CTLR_USER_TICKER_ID_RANGE
|
|
|
|
#else
|
|
|
|
#define USER_TICKER_NODES 0
|
|
|
|
#endif
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
#define TICKER_NODES (TICKER_ID_ULL_BASE + \
|
|
|
|
BT_ADV_TICKER_NODES + \
|
2020-02-12 08:55:57 +05:30
|
|
|
BT_ADV_AUX_TICKER_NODES + \
|
|
|
|
BT_ADV_SYNC_TICKER_NODES + \
|
2018-12-18 05:48:20 +01:00
|
|
|
BT_SCAN_TICKER_NODES + \
|
2020-04-21 11:12:41 +05:30
|
|
|
BT_SCAN_AUX_TICKER_NODES + \
|
2020-08-21 16:17:51 +05:30
|
|
|
BT_SCAN_SYNC_TICKER_NODES + \
|
2018-12-18 05:48:20 +01:00
|
|
|
BT_CONN_TICKER_NODES + \
|
2020-05-07 11:45:33 +02:00
|
|
|
FLASH_TICKER_NODES + \
|
|
|
|
USER_TICKER_NODES)
|
2018-12-18 05:48:20 +01:00
|
|
|
#define TICKER_USER_APP_OPS (TICKER_USER_THREAD_OPS + \
|
|
|
|
FLASH_TICKER_USER_APP_OPS)
|
|
|
|
#define TICKER_USER_OPS (TICKER_USER_LLL_OPS + \
|
|
|
|
TICKER_USER_ULL_HIGH_OPS + \
|
|
|
|
TICKER_USER_ULL_LOW_OPS + \
|
|
|
|
TICKER_USER_THREAD_OPS + \
|
|
|
|
FLASH_TICKER_USER_APP_OPS)
|
|
|
|
|
|
|
|
/* Memory for ticker nodes/instances */
|
2020-05-27 11:26:57 -05:00
|
|
|
static uint8_t MALIGN(4) ticker_nodes[TICKER_NODES][TICKER_NODE_T_SIZE];
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
/* Memory for users/contexts operating on ticker module */
|
2020-05-27 11:26:57 -05:00
|
|
|
static uint8_t MALIGN(4) ticker_users[MAYFLY_CALLER_COUNT][TICKER_USER_T_SIZE];
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
/* Memory for user/context simultaneous API operations */
|
2020-05-27 11:26:57 -05:00
|
|
|
static uint8_t MALIGN(4) ticker_user_ops[TICKER_USER_OPS][TICKER_USER_OP_T_SIZE];
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
/* Semaphore to wakeup thread on ticker API callback */
|
|
|
|
static struct k_sem sem_ticker_api_cb;
|
|
|
|
|
|
|
|
/* Semaphore to wakeup thread on Rx-ed objects */
|
|
|
|
static struct k_sem *sem_recv;
|
|
|
|
|
2019-06-19 20:22:50 +02:00
|
|
|
/* Declare prepare-event FIFO: mfifo_prep.
 * Queue of struct lll_event
 */
|
2018-12-18 05:48:20 +01:00
|
|
|
static MFIFO_DEFINE(prep, sizeof(struct lll_event), EVENT_PIPELINE_MAX);
|
|
|
|
|
2019-02-14 10:04:17 +01:00
|
|
|
/* Declare done-event FIFO: mfifo_done.
|
|
|
|
* Queue of pointers to struct node_rx_event_done.
|
|
|
|
* The actual backing behind these pointers is mem_done
|
|
|
|
*/
|
2019-08-09 02:44:56 +02:00
|
|
|
static MFIFO_DEFINE(done, sizeof(struct node_rx_event_done *), EVENT_DONE_MAX);
|
2019-02-14 10:04:17 +01:00
|
|
|
|
|
|
|
/* Backing storage for elements in mfifo_done */
|
2018-12-18 05:48:20 +01:00
|
|
|
static struct {
|
|
|
|
void *free;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t pool[sizeof(struct node_rx_event_done) * EVENT_DONE_MAX];
|
2018-12-18 05:48:20 +01:00
|
|
|
} mem_done;
|
|
|
|
|
|
|
|
static struct {
|
|
|
|
void *free;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t pool[sizeof(memq_link_t) * EVENT_DONE_MAX];
|
2018-12-18 05:48:20 +01:00
|
|
|
} mem_link_done;
|
|
|
|
|
2020-12-17 12:49:04 +05:30
|
|
|
/* Minimum number of node rx for ULL to LL/HCI thread per connection.
|
|
|
|
* Increasing this by times the max. simultaneous connection count will permit
|
|
|
|
* simultaneous parallel PHY update or Connection Update procedures amongst
|
|
|
|
* active connections.
|
|
|
|
*/
|
2019-07-18 13:24:32 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
2019-10-10 17:00:35 +05:30
|
|
|
#define LL_PDU_RX_CNT 3
|
2019-07-18 13:24:32 +05:30
|
|
|
#else
|
2019-10-10 17:00:35 +05:30
|
|
|
#define LL_PDU_RX_CNT 2
|
2019-07-18 13:24:32 +05:30
|
|
|
#endif
|
|
|
|
|
2020-12-17 12:49:04 +05:30
|
|
|
/* No. of node rx for LLL to ULL.
|
|
|
|
* Reserve 3, 1 for adv data, 1 for scan response and 1 for empty PDU reception.
|
|
|
|
*/
|
2019-07-18 13:24:32 +05:30
|
|
|
#define PDU_RX_CNT (CONFIG_BT_CTLR_RX_BUFFERS + 3)
|
2020-12-17 12:49:04 +05:30
|
|
|
|
|
|
|
/* Part sum of LLL to ULL and ULL to LL/HCI thread node rx count.
|
|
|
|
* Will be used below in allocating node rx pool.
|
|
|
|
*/
|
2018-12-18 05:48:20 +01:00
|
|
|
#define RX_CNT (PDU_RX_CNT + LL_PDU_RX_CNT)
|
|
|
|
|
|
|
|
static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT);
|
|
|
|
|
2019-05-24 11:13:21 +02:00
|
|
|
#if defined(CONFIG_BT_RX_USER_PDU_LEN)
|
|
|
|
#define PDU_RX_USER_PDU_OCTETS_MAX (CONFIG_BT_RX_USER_PDU_LEN)
|
|
|
|
#else
|
|
|
|
#define PDU_RX_USER_PDU_OCTETS_MAX 0
|
|
|
|
#endif
|
2019-02-14 13:35:51 +01:00
|
|
|
#define NODE_RX_HEADER_SIZE (offsetof(struct node_rx_pdu, pdu))
|
2019-05-01 12:27:13 +02:00
|
|
|
#define NODE_RX_STRUCT_OVERHEAD (NODE_RX_HEADER_SIZE)
|
2019-02-14 13:35:51 +01:00
|
|
|
|
2020-05-04 12:58:50 +05:30
|
|
|
#define PDU_ADVERTIZE_SIZE (PDU_AC_LL_SIZE_MAX + PDU_AC_LL_SIZE_EXTRA)
|
2019-05-01 17:12:27 +05:30
|
|
|
#define PDU_DATA_SIZE (PDU_DC_LL_HEADER_SIZE + LL_LENGTH_OCTETS_RX_MAX)
|
2019-02-14 13:35:51 +01:00
|
|
|
|
2019-05-24 11:13:21 +02:00
|
|
|
#define PDU_RX_NODE_POOL_ELEMENT_SIZE \
|
|
|
|
MROUND( \
|
|
|
|
NODE_RX_STRUCT_OVERHEAD \
|
|
|
|
+ MAX(MAX(PDU_ADVERTIZE_SIZE, \
|
|
|
|
PDU_DATA_SIZE), \
|
|
|
|
PDU_RX_USER_PDU_OCTETS_MAX) \
|
2019-02-14 13:35:51 +01:00
|
|
|
)
|
|
|
|
|
2019-09-13 17:06:11 +05:30
|
|
|
/* When both central and peripheral are supported, one each Rx node will be
|
|
|
|
* needed by connectable advertising and the initiator to generate connection
|
|
|
|
* complete event, hence conditionally set the count.
|
|
|
|
*/
|
|
|
|
#if defined(CONFIG_BT_MAX_CONN)
|
|
|
|
#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_PERIPHERAL)
|
|
|
|
#define BT_CTLR_MAX_CONNECTABLE 2
|
|
|
|
#else
|
|
|
|
#define BT_CTLR_MAX_CONNECTABLE 1
|
|
|
|
#endif
|
|
|
|
#define BT_CTLR_MAX_CONN CONFIG_BT_MAX_CONN
|
|
|
|
#else
|
|
|
|
#define BT_CTLR_MAX_CONNECTABLE 0
|
|
|
|
#define BT_CTLR_MAX_CONN 0
|
|
|
|
#endif
|
|
|
|
|
2020-08-21 16:17:51 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_SCAN_SYNC_SET)
|
|
|
|
#define BT_CTLR_SCAN_SYNC_SET CONFIG_BT_CTLR_SCAN_SYNC_SET
|
|
|
|
#else
|
|
|
|
#define BT_CTLR_SCAN_SYNC_SET 0
|
|
|
|
#endif
|
|
|
|
|
2020-12-25 10:45:07 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET)
|
|
|
|
#define BT_CTLR_SCAN_SYNC_ISO_SET CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET
|
|
|
|
#else
|
|
|
|
#define BT_CTLR_SCAN_SYNC_ISO_SET 0
|
|
|
|
#endif
|
|
|
|
|
2019-09-13 17:06:11 +05:30
|
|
|
#define PDU_RX_POOL_SIZE (PDU_RX_NODE_POOL_ELEMENT_SIZE * \
|
2020-06-18 14:13:41 +02:00
|
|
|
(RX_CNT + BT_CTLR_MAX_CONNECTABLE + \
|
2020-08-21 16:17:51 +05:30
|
|
|
BT_CTLR_ADV_SET + BT_CTLR_SCAN_SYNC_SET))
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
static struct {
|
|
|
|
void *free;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t pool[PDU_RX_POOL_SIZE];
|
2018-12-18 05:48:20 +01:00
|
|
|
} mem_pdu_rx;
|
|
|
|
|
2020-10-05 11:01:00 +05:30
|
|
|
/* NOTE: Two memq_link structures are reserved in the case of periodic sync,
|
|
|
|
* one each for sync established and sync lost respectively. Where as in
|
|
|
|
* comparison to a connection, the connection established uses incoming Rx-ed
|
|
|
|
* CONNECT_IND PDU to piggy back generation of connection complete, and hence
|
|
|
|
* only one is reserved for the generation of disconnection event (which can
|
|
|
|
 * happen due to supervision timeout and other reasons that don't have an
|
|
|
|
* incoming Rx-ed PDU).
|
|
|
|
*/
|
2020-12-25 10:45:07 +05:30
|
|
|
#define LINK_RX_POOL_SIZE (sizeof(memq_link_t) * \
|
|
|
|
(RX_CNT + 2 + BT_CTLR_MAX_CONN + BT_CTLR_ADV_SET + \
|
|
|
|
(BT_CTLR_SCAN_SYNC_SET * 2) + \
|
|
|
|
(BT_CTLR_SCAN_SYNC_ISO_SET * 2)))
|
2018-12-18 05:48:20 +01:00
|
|
|
static struct {
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t quota_pdu; /* Number of un-utilized buffers */
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
void *free;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t pool[LINK_RX_POOL_SIZE];
|
2018-12-18 05:48:20 +01:00
|
|
|
} mem_link_rx;
|
|
|
|
|
|
|
|
static MEMQ_DECLARE(ull_rx);
|
|
|
|
static MEMQ_DECLARE(ll_rx);
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CONN)
|
2019-07-18 15:10:05 +05:30
|
|
|
static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
|
2018-12-18 05:48:20 +01:00
|
|
|
static MFIFO_DEFINE(tx_ack, sizeof(struct lll_tx),
|
|
|
|
CONFIG_BT_CTLR_TX_BUFFERS);
|
2019-06-05 13:40:36 +02:00
|
|
|
|
|
|
|
static void *mark_update;
|
2018-12-18 05:48:20 +01:00
|
|
|
#endif /* CONFIG_BT_CONN */
|
|
|
|
|
2019-06-05 13:40:36 +02:00
|
|
|
static void *mark_disable;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
static inline int init_reset(void);
|
2019-10-10 11:18:30 +02:00
|
|
|
static void perform_lll_reset(void *param);
|
2019-06-05 13:40:36 +02:00
|
|
|
static inline void *mark_set(void **m, void *param);
|
|
|
|
static inline void *mark_unset(void **m, void *param);
|
|
|
|
static inline void *mark_get(void *m);
|
2019-02-04 22:21:55 +05:30
|
|
|
static inline void done_alloc(void);
|
2020-05-27 11:26:57 -05:00
|
|
|
static inline void rx_alloc(uint8_t max);
|
2019-02-04 22:21:55 +05:30
|
|
|
static void rx_demux(void *param);
|
2021-01-29 18:08:59 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
|
|
|
|
static void rx_demux_yield(void);
|
|
|
|
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
|
2020-08-19 07:27:02 +05:30
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last);
|
2020-08-11 10:51:43 +05:30
|
|
|
static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
|
|
|
|
memq_link_t *link,
|
|
|
|
struct node_tx *node_tx);
|
2020-08-19 07:27:02 +05:30
|
|
|
#endif /* CONFIG_BT_CONN */
|
2019-02-11 22:55:30 +05:30
|
|
|
static inline int rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx);
|
2019-02-04 22:21:55 +05:30
|
|
|
static inline void rx_demux_event_done(memq_link_t *link,
|
|
|
|
struct node_rx_hdr *rx);
|
2019-08-16 15:40:31 +02:00
|
|
|
static inline void ll_rx_link_inc_quota(int8_t delta);
|
2019-02-04 22:21:55 +05:30
|
|
|
static void disabled_cb(void *param);
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
int ll_init(struct k_sem *sem_rx)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/* Store the semaphore to be used to wakeup Thread context */
|
|
|
|
sem_recv = sem_rx;
|
|
|
|
|
|
|
|
/* Initialize counter */
|
|
|
|
/* TODO: Bind and use counter driver? */
|
|
|
|
cntr_init();
|
|
|
|
|
|
|
|
/* Initialize Mayfly */
|
|
|
|
mayfly_init();
|
|
|
|
|
|
|
|
/* Initialize Ticker */
|
2019-02-04 22:21:55 +05:30
|
|
|
ticker_users[MAYFLY_CALL_ID_0][0] = TICKER_USER_LLL_OPS;
|
|
|
|
ticker_users[MAYFLY_CALL_ID_1][0] = TICKER_USER_ULL_HIGH_OPS;
|
|
|
|
ticker_users[MAYFLY_CALL_ID_2][0] = TICKER_USER_ULL_LOW_OPS;
|
|
|
|
ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_APP_OPS;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
err = ticker_init(TICKER_INSTANCE_ID_CTLR,
|
2019-02-04 22:21:55 +05:30
|
|
|
TICKER_NODES, &ticker_nodes[0],
|
|
|
|
MAYFLY_CALLER_COUNT, &ticker_users[0],
|
|
|
|
TICKER_USER_OPS, &ticker_user_ops[0],
|
2018-12-18 05:48:20 +01:00
|
|
|
hal_ticker_instance0_caller_id_get,
|
|
|
|
hal_ticker_instance0_sched,
|
|
|
|
hal_ticker_instance0_trigger_set);
|
|
|
|
LL_ASSERT(!err);
|
|
|
|
|
|
|
|
/* Initialize semaphore for ticker API blocking wait */
|
|
|
|
k_sem_init(&sem_ticker_api_cb, 0, 1);
|
|
|
|
|
|
|
|
/* Initialize LLL */
|
|
|
|
err = lll_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize ULL internals */
|
|
|
|
/* TODO: globals? */
|
|
|
|
|
|
|
|
/* Common to init and reset */
|
2019-02-04 22:21:55 +05:30
|
|
|
err = init_reset();
|
2018-12-18 05:48:20 +01:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_BROADCASTER)
|
|
|
|
err = lll_adv_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = ull_adv_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_BROADCASTER */
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_OBSERVER)
|
|
|
|
err = lll_scan_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = ull_scan_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_OBSERVER */
|
|
|
|
|
2020-10-05 10:42:39 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
|
2020-08-11 09:58:34 +05:30
|
|
|
err = lll_sync_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = ull_sync_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
2020-11-09 16:31:01 +01:00
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
|
|
|
|
err = ull_sync_iso_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
|
2020-10-05 10:42:39 +05:30
|
|
|
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
|
2020-08-11 09:58:34 +05:30
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
err = lll_conn_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = ull_conn_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CONN */
|
|
|
|
|
2020-11-20 05:16:05 -08:00
|
|
|
#if IS_ENABLED(CONFIG_BT_CTLR_DF)
|
|
|
|
err = ull_df_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2020-11-26 15:47:39 +01:00
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_ISO) || \
|
|
|
|
defined(CONFIG_BT_CTLR_SYNC_ISO) || \
|
|
|
|
defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || \
|
|
|
|
defined(CONFIG_BT_CTLR_CENTRAL_ISO)
|
|
|
|
err = ull_iso_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_SYNC_ISO ||
|
|
|
|
* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO
|
|
|
|
*/
|
|
|
|
|
2020-10-30 11:26:26 +01:00
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_ISO)
|
|
|
|
err = ull_adv_iso_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CONN */
|
|
|
|
|
2020-11-19 02:12:54 -08:00
|
|
|
#if IS_ENABLED(CONFIG_BT_CTLR_DF)
|
|
|
|
err = lll_df_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-05-24 11:13:21 +02:00
|
|
|
#if defined(CONFIG_BT_CTLR_USER_EXT)
|
|
|
|
err = ull_user_init();
|
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_USER_EXT */
|
|
|
|
|
2019-05-09 18:39:26 +05:30
|
|
|
/* reset whitelist, resolving list and initialise RPA timeout*/
|
|
|
|
if (IS_ENABLED(CONFIG_BT_CTLR_FILTER)) {
|
|
|
|
ull_filter_reset(true);
|
|
|
|
}
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void ll_reset(void)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2020-11-03 16:25:13 +05:30
|
|
|
/* Note: The sequence of reset control flow is as follows:
|
|
|
|
* - Reset ULL context, i.e. stop ULL scheduling, abort LLL events etc.
|
|
|
|
* - Reset LLL context, i.e. post LLL event abort, let LLL cleanup its
|
|
|
|
* variables, if any.
|
|
|
|
* - Reset ULL static variables (which otherwise was mem-zeroed in cases
|
|
|
|
* if power-on reset wherein architecture startup mem-zeroes .bss
|
|
|
|
* sections.
|
|
|
|
* - Initialize ULL context variable, similar to on-power-up.
|
|
|
|
*/
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
#if defined(CONFIG_BT_BROADCASTER)
|
|
|
|
/* Reset adv state */
|
|
|
|
err = ull_adv_reset();
|
|
|
|
LL_ASSERT(!err);
|
|
|
|
#endif /* CONFIG_BT_BROADCASTER */
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_OBSERVER)
|
|
|
|
/* Reset scan state */
|
|
|
|
err = ull_scan_reset();
|
|
|
|
LL_ASSERT(!err);
|
|
|
|
#endif /* CONFIG_BT_OBSERVER */
|
|
|
|
|
2020-10-05 10:42:39 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
|
2020-08-11 09:58:34 +05:30
|
|
|
/* Reset periodic sync sets */
|
|
|
|
err = ull_sync_reset();
|
|
|
|
LL_ASSERT(!err);
|
2020-11-09 16:31:01 +01:00
|
|
|
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
|
|
|
|
/* Reset periodic sync sets */
|
|
|
|
err = ull_sync_iso_reset();
|
|
|
|
LL_ASSERT(!err);
|
|
|
|
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
|
2020-10-05 10:42:39 +05:30
|
|
|
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
|
2020-08-11 09:58:34 +05:30
|
|
|
|
2020-11-26 15:47:39 +01:00
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_ISO) || \
|
|
|
|
defined(CONFIG_BT_CTLR_SYNC_ISO) || \
|
|
|
|
defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || \
|
|
|
|
defined(CONFIG_BT_CTLR_CENTRAL_ISO)
|
|
|
|
err = ull_iso_reset();
|
|
|
|
LL_ASSERT(!err);
|
|
|
|
#endif /* CONFIG_BT_CTLR_ADV_ISO || CONFIG_BT_CTLR_SYNC_ISO ||
|
|
|
|
* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO
|
|
|
|
*/
|
|
|
|
|
|
|
|
#if defined(CONFIG_BT_CTLR_ADV_ISO)
|
2020-10-30 11:26:26 +01:00
|
|
|
/* Reset periodic sync sets */
|
|
|
|
err = ull_adv_iso_reset();
|
|
|
|
LL_ASSERT(!err);
|
|
|
|
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
|
|
|
|
|
2020-11-20 05:16:05 -08:00
|
|
|
#if IS_ENABLED(CONFIG_BT_CTLR_DF)
|
|
|
|
err = ull_df_reset();
|
|
|
|
LL_ASSERT(!err);
|
|
|
|
#endif
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
#if defined(CONFIG_BT_CENTRAL)
|
|
|
|
/* Reset initiator */
|
|
|
|
{
|
|
|
|
void *rx;
|
|
|
|
|
|
|
|
err = ll_connect_disable(&rx);
|
|
|
|
if (!err) {
|
|
|
|
struct ll_scan_set *scan;
|
|
|
|
|
|
|
|
scan = ull_scan_is_enabled_get(0);
|
|
|
|
LL_ASSERT(scan);
|
|
|
|
|
2019-03-26 19:57:45 -06:00
|
|
|
scan->is_enabled = 0U;
|
2018-12-18 05:48:20 +01:00
|
|
|
scan->lll.conn = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ARG_UNUSED(rx);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CENTRAL */
|
|
|
|
|
|
|
|
/* Reset conn role */
|
|
|
|
err = ull_conn_reset();
|
|
|
|
LL_ASSERT(!err);
|
|
|
|
|
|
|
|
MFIFO_INIT(tx_ack);
|
|
|
|
#endif /* CONFIG_BT_CONN */
|
|
|
|
|
2019-05-09 18:39:26 +05:30
|
|
|
/* reset whitelist and resolving list */
|
|
|
|
if (IS_ENABLED(CONFIG_BT_CTLR_FILTER)) {
|
|
|
|
ull_filter_reset(false);
|
|
|
|
}
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
/* Re-initialize ULL internals */
|
|
|
|
|
|
|
|
/* Re-initialize the prep mfifo */
|
|
|
|
MFIFO_INIT(prep);
|
|
|
|
|
|
|
|
/* Re-initialize the free done mfifo */
|
|
|
|
MFIFO_INIT(done);
|
|
|
|
|
|
|
|
/* Re-initialize the free rx mfifo */
|
|
|
|
MFIFO_INIT(pdu_rx_free);
|
|
|
|
|
2019-07-18 15:10:05 +05:30
|
|
|
#if defined(CONFIG_BT_CONN)
|
2018-12-18 05:48:20 +01:00
|
|
|
/* Re-initialize the free ll rx mfifo */
|
|
|
|
MFIFO_INIT(ll_pdu_rx_free);
|
2019-07-18 15:10:05 +05:30
|
|
|
#endif /* CONFIG_BT_CONN */
|
2018-12-18 05:48:20 +01:00
|
|
|
|
2019-10-10 11:18:30 +02:00
|
|
|
/* Reset LLL via mayfly */
|
|
|
|
{
|
|
|
|
static memq_link_t link;
|
|
|
|
static struct mayfly mfy = {0, 0, &link, NULL,
|
|
|
|
perform_lll_reset};
|
2020-05-27 11:26:57 -05:00
|
|
|
uint32_t retval;
|
2019-10-08 12:00:21 +05:30
|
|
|
|
|
|
|
/* NOTE: If Zero Latency Interrupt is used, then LLL context
|
|
|
|
* will be the highest priority IRQ in the system, hence
|
|
|
|
* mayfly_enqueue will be done running the callee inline
|
|
|
|
* (vector to the callee function) in this function. Else
|
|
|
|
* we use semaphore to wait for perform_lll_reset to
|
|
|
|
* complete.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#if !defined(CONFIG_BT_CTLR_ZLI)
|
|
|
|
struct k_sem sem;
|
2019-10-10 11:18:30 +02:00
|
|
|
|
2019-11-11 10:11:32 +01:00
|
|
|
k_sem_init(&sem, 0, 1);
|
|
|
|
mfy.param = &sem;
|
2019-10-08 12:00:21 +05:30
|
|
|
#endif /* !CONFIG_BT_CTLR_ZLI */
|
|
|
|
|
2019-10-10 11:18:30 +02:00
|
|
|
retval = mayfly_enqueue(TICKER_USER_ID_THREAD,
|
|
|
|
TICKER_USER_ID_LLL, 0, &mfy);
|
|
|
|
LL_ASSERT(!retval);
|
2019-10-08 12:00:21 +05:30
|
|
|
|
|
|
|
#if !defined(CONFIG_BT_CTLR_ZLI)
|
2019-11-11 10:11:32 +01:00
|
|
|
/* LLL reset must complete before returning - wait for
|
|
|
|
* reset completion in LLL mayfly thread
|
|
|
|
*/
|
|
|
|
k_sem_take(&sem, K_FOREVER);
|
2019-10-08 12:00:21 +05:30
|
|
|
#endif /* !CONFIG_BT_CTLR_ZLI */
|
2019-10-10 11:18:30 +02:00
|
|
|
}
|
|
|
|
|
2020-10-22 18:40:38 +05:30
|
|
|
#if defined(CONFIG_BT_BROADCASTER)
|
2020-11-03 16:25:13 +05:30
|
|
|
/* Finalize after adv state LLL context reset */
|
2020-10-22 18:40:38 +05:30
|
|
|
err = ull_adv_reset_finalize();
|
|
|
|
LL_ASSERT(!err);
|
|
|
|
#endif /* CONFIG_BT_BROADCASTER */
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
/* Common to init and reset */
|
2019-02-04 22:21:55 +05:30
|
|
|
err = init_reset();
|
2018-12-18 05:48:20 +01:00
|
|
|
LL_ASSERT(!err);
|
|
|
|
}
|
|
|
|
|
2019-02-14 10:04:17 +01:00
|
|
|
/**
|
|
|
|
* @brief Peek the next node_rx to send up to Host
|
|
|
|
* @details Tightly coupled with prio_recv_thread()
|
|
|
|
* Execution context: Controller thread
|
2019-02-27 10:40:03 +01:00
|
|
|
*
|
|
|
|
* @param node_rx[out] Pointer to rx node at head of queue
|
|
|
|
* @param handle[out] Connection handle
|
2019-02-14 10:04:17 +01:00
|
|
|
* @return TX completed
|
|
|
|
*/
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t ll_rx_get(void **node_rx, uint16_t *handle)
{
	struct node_rx_hdr *rx;
	memq_link_t *link;
	uint8_t cmplt = 0U;

	/* NOTE: the braces of the if/else below intentionally straddle the
	 * CONFIG_BT_CONN conditionals; with CONFIG_BT_CONN disabled only the
	 * memq peek and the *node_rx assignment remain compiled in.
	 */
#if defined(CONFIG_BT_CONN)
ll_rx_get_again:
#endif /* CONFIG_BT_CONN */

	*node_rx = NULL;

	/* Peek (do not yet dequeue) the node at the head of the LL Rx queue */
	link = memq_peek(memq_ll_rx.head, memq_ll_rx.tail, (void **)&rx);
	if (link) {
#if defined(CONFIG_BT_CONN)
		/* Collect Tx complete count up to this Rx node's ack point */
		cmplt = tx_cmplt_get(handle, &mfifo_tx_ack.f, rx->ack_last);
		if (!cmplt) {
			uint8_t f, cmplt_prev, cmplt_curr;
			uint16_t h;

			/* Walk the remainder of the tx_ack FIFO until the
			 * completed-count reading stabilizes at zero.
			 */
			cmplt_curr = 0U;
			f = mfifo_tx_ack.f;
			do {
				cmplt_prev = cmplt_curr;
				cmplt_curr = tx_cmplt_get(&h, &f,
							  mfifo_tx_ack.l);
			} while ((cmplt_prev != 0U) ||
				 (cmplt_prev != cmplt_curr));

			/* Do not send up buffers to Host thread that are
			 * marked for release
			 */
			if (rx->type == NODE_RX_TYPE_RELEASE) {
				/* Consume and free the node and its link,
				 * replenish the Rx allocations, then retry
				 * with the next queued node.
				 */
				(void)memq_dequeue(memq_ll_rx.tail,
						   &memq_ll_rx.head, NULL);
				mem_release(link, &mem_link_rx.free);

				ll_rx_link_inc_quota(1);

				mem_release(rx, &mem_pdu_rx.free);

				rx_alloc(1);

				goto ll_rx_get_again;
			}
#endif /* CONFIG_BT_CONN */

			/* Hand the peeked node to the caller; dequeue is
			 * committed later by ll_rx_dequeue().
			 */
			*node_rx = rx;

#if defined(CONFIG_BT_CONN)
		}
	} else {
		/* No Rx node queued; report any pending Tx completions */
		cmplt = tx_cmplt_get(handle, &mfifo_tx_ack.f, mfifo_tx_ack.l);
#endif /* CONFIG_BT_CONN */
	}

	return cmplt;
}
|
|
|
|
|
2019-02-14 10:04:17 +01:00
|
|
|
/**
|
|
|
|
* @brief Commit the dequeue from memq_ll_rx, where ll_rx_get() did the peek
|
|
|
|
* @details Execution context: Controller thread
|
|
|
|
*/
|
2018-12-18 05:48:20 +01:00
|
|
|
void ll_rx_dequeue(void)
{
	struct node_rx_hdr *rx = NULL;
	memq_link_t *link;

	/* Commit the dequeue that a prior ll_rx_get() peeked at */
	link = memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head,
			    (void **)&rx);
	LL_ASSERT(link);

	/* Only the link is recycled here; the rx node itself is still owned
	 * by the caller (HCI) and is returned later via ll_rx_mem_release().
	 */
	mem_release(link, &mem_link_rx.free);

	/* handle object specific clean up */
	switch (rx->type) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_OBSERVER)
	case NODE_RX_TYPE_EXT_1M_REPORT:
	case NODE_RX_TYPE_EXT_2M_REPORT:
	case NODE_RX_TYPE_EXT_CODED_REPORT:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case NODE_RX_TYPE_SYNC_REPORT:
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	{
		struct node_rx_hdr *rx_curr;
		struct pdu_adv *adv;

		adv = (void *)((struct node_rx_pdu *)rx)->pdu;
		/* Only extended advertising PDUs carry a chain of auxiliary
		 * rx nodes in rx_ftr.extra; legacy PDUs have nothing to free.
		 */
		if (adv->type != PDU_ADV_TYPE_EXT_IND) {
			break;
		}

		/* Release the links of the chained aux rx nodes; the nodes
		 * themselves stay with the caller until ll_rx_mem_release().
		 */
		rx_curr = rx->rx_ftr.extra;
		while (rx_curr) {
			memq_link_t *link_free;

			link_free = rx_curr->link;
			rx_curr = rx_curr->rx_ftr.extra;

			mem_release(link_free, &mem_link_rx.free);
		}
	}
	break;

	case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
	{
		ull_scan_term_dequeue(rx->handle);
	}
	break;
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_BROADCASTER)
	case NODE_RX_TYPE_EXT_ADV_TERMINATE:
	{
		struct ll_adv_set *adv;

		adv = ull_adv_set_get(rx->handle);

#if defined(CONFIG_BT_PERIPHERAL)
		struct lll_conn *lll_conn = adv->lll.conn;

		/* No pre-allocated connection context: just mark the
		 * advertising set disabled.
		 */
		if (!lll_conn) {
			adv->is_enabled = 0U;

			break;
		}

		/* Tear down the unused connection's Tx memq and return the
		 * connection context and its pre-allocated connection
		 * complete rx node/link to their pools.
		 */
		LL_ASSERT(!lll_conn->link_tx_free);

		memq_link_t *link = memq_deinit(&lll_conn->memq_tx.head,
						&lll_conn->memq_tx.tail);
		LL_ASSERT(link);

		lll_conn->link_tx_free = link;

		struct ll_conn *conn = (void *)HDR_LLL2EVT(lll_conn);

		ll_conn_release(conn);
		adv->lll.conn = NULL;

		ll_rx_release(adv->node_rx_cc_free);
		adv->node_rx_cc_free = NULL;

		ll_rx_link_release(adv->link_cc_free);
		adv->link_cc_free = NULL;
#endif /* CONFIG_BT_PERIPHERAL */

		adv->is_enabled = 0U;
	}
	break;
#endif /* CONFIG_BT_BROADCASTER */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CONN)
	case NODE_RX_TYPE_CONNECTION:
	{
		struct node_rx_cc *cc = (void *)((struct node_rx_pdu *)rx)->pdu;
		struct node_rx_ftr *ftr = &(rx->rx_ftr);

		if (0) {

#if defined(CONFIG_BT_PERIPHERAL)
		/* Peripheral role, or advertising that timed out without a
		 * connection being established.
		 */
		} else if ((cc->status == BT_HCI_ERR_ADV_TIMEOUT) || cc->role) {
			struct lll_adv *lll = ftr->param;
			struct ll_adv_set *adv = (void *)HDR_LLL2EVT(lll);

			if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
				struct lll_conn *conn_lll;
				struct ll_conn *conn;
				memq_link_t *link;

				/* Advertising timed out: free the connection
				 * context that had been reserved for it.
				 */
				conn_lll = lll->conn;
				LL_ASSERT(conn_lll);
				lll->conn = NULL;

				LL_ASSERT(!conn_lll->link_tx_free);
				link = memq_deinit(&conn_lll->memq_tx.head,
						   &conn_lll->memq_tx.tail);
				LL_ASSERT(link);
				conn_lll->link_tx_free = link;

				conn = (void *)HDR_LLL2EVT(conn_lll);
				ll_conn_release(conn);
			} else {
				/* Release un-utilized node rx */
				if (adv->node_rx_cc_free) {
					void *rx_free;

					rx_free = adv->node_rx_cc_free;
					adv->node_rx_cc_free = NULL;

					mem_release(rx_free, &mem_pdu_rx.free);
				}
			}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
			/* Auxiliary advertising stops together with the
			 * primary advertising set.
			 */
			if (lll->aux) {
				struct ll_adv_aux_set *aux;

				aux = (void *)HDR_LLL2EVT(lll->aux);
				aux->is_started = 0U;
			}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

			adv->is_enabled = 0U;
#else /* !CONFIG_BT_PERIPHERAL */
			ARG_UNUSED(cc);
#endif /* !CONFIG_BT_PERIPHERAL */

		} else if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
			/* Central role: initiating scan is done now */
			struct lll_scan *lll = ftr->param;
			struct ll_scan_set *scan = (void *)HDR_LLL2EVT(lll);

			scan->is_enabled = 0U;
		} else {
			LL_ASSERT(0);
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
			uint8_t bm;

			/* FIXME: use the correct adv and scan set to get
			 * enabled status bitmask
			 */
			bm = (IS_ENABLED(CONFIG_BT_OBSERVER) &&
			      (ull_scan_is_enabled(0) << 1)) |
			     (IS_ENABLED(CONFIG_BT_BROADCASTER) &&
			      ull_adv_is_enabled(0));

			/* Neither advertising nor scanning enabled: notify
			 * the filter (resolving list) state machine.
			 */
			if (!bm) {
				ull_filter_adv_scan_state_cb(0);
			}
		}
	}
	break;

	case NODE_RX_TYPE_TERMINATE:
	case NODE_RX_TYPE_DC_PDU:
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_OBSERVER)
	case NODE_RX_TYPE_REPORT:

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		/* fall through */
	case NODE_RX_TYPE_SYNC:
	case NODE_RX_TYPE_SYNC_LOST:
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
	case NODE_RX_TYPE_SCAN_REQ:
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CONN)
	case NODE_RX_TYPE_CONN_UPDATE:
	case NODE_RX_TYPE_ENC_REFRESH:

#if defined(CONFIG_BT_CTLR_LE_PING)
	case NODE_RX_TYPE_APTO:
#endif /* CONFIG_BT_CTLR_LE_PING */

	case NODE_RX_TYPE_CHAN_SEL_ALGO:

#if defined(CONFIG_BT_CTLR_PHY)
	case NODE_RX_TYPE_PHY_UPDATE:
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
	case NODE_RX_TYPE_RSSI:
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
	case NODE_RX_TYPE_PROFILE:
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
	case NODE_RX_TYPE_ADV_INDICATION:
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
	case NODE_RX_TYPE_SCAN_INDICATION:
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

#if defined(CONFIG_BT_HCI_MESH_EXT)
	case NODE_RX_TYPE_MESH_ADV_CPLT:
	case NODE_RX_TYPE_MESH_REPORT:
#endif /* CONFIG_BT_HCI_MESH_EXT */

#if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
	case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
		__fallthrough;
#endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */

	/* Ensure that at least one 'case' statement is present for this
	 * code block.
	 */
	case NODE_RX_TYPE_NONE:
		/* All the no-cleanup types above fall through to here; the
		 * assert only fires for an actual NODE_RX_TYPE_NONE node.
		 */
		LL_ASSERT(rx->type != NODE_RX_TYPE_NONE);
		break;

	default:
		LL_ASSERT(0);
		break;
	}

	/* FIXME: clean up when porting Mesh Ext. */
	if (0) {
#if defined(CONFIG_BT_HCI_MESH_EXT)
	} else if (rx->type == NODE_RX_TYPE_MESH_ADV_CPLT) {
		struct ll_adv_set *adv;
		struct ll_scan_set *scan;

		adv = ull_adv_is_enabled_get(0);
		LL_ASSERT(adv);
		adv->is_enabled = 0U;

		scan = ull_scan_is_enabled_get(0);
		LL_ASSERT(scan);

		scan->is_enabled = 0U;

		ll_adv_scan_state_cb(0);
#endif /* CONFIG_BT_HCI_MESH_EXT */
	}
}
|
|
|
|
|
|
|
|
/* Release a chain of rx nodes (linked via node_rx_hdr.next) back to the
 * controller, performing per-type clean up, then replenish the rx free FIFO.
 *
 * Execution context: Controller thread (called by HCI after it has consumed
 * the nodes). On return, *node_rx is advanced past the released nodes.
 */
void ll_rx_mem_release(void **node_rx)
{
	struct node_rx_hdr *rx;

	rx = *node_rx;
	while (rx) {
		struct node_rx_hdr *rx_free;

		rx_free = rx;
		rx = rx->next;

		switch (rx_free->type) {
#if defined(CONFIG_BT_BROADCASTER)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		case NODE_RX_TYPE_EXT_ADV_TERMINATE:
			mem_release(rx_free, &mem_pdu_rx.free);
			break;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		case NODE_RX_TYPE_EXT_SCAN_TERMINATE:
		{
			mem_release(rx_free, &mem_pdu_rx.free);
		}
		break;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CONN)
		case NODE_RX_TYPE_CONNECTION:
		{
			struct node_rx_cc *cc =
				(void *)((struct node_rx_pdu *)rx_free)->pdu;

			if (0) {

#if defined(CONFIG_BT_PERIPHERAL)
			/* Advertising timed out: node was dequeue-cleaned in
			 * ll_rx_dequeue(); only the pdu buffer remains.
			 */
			} else if (cc->status == BT_HCI_ERR_ADV_TIMEOUT) {
				mem_release(rx_free, &mem_pdu_rx.free);

				break;
#endif /* CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CENTRAL)
			/* Initiation cancelled: free the reserved connection
			 * context and tear down its Tx memq.
			 */
			} else if (cc->status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
				struct node_rx_ftr *ftr = &rx_free->rx_ftr;
				struct ll_scan_set *scan =
					(void *)HDR_LLL2EVT(ftr->param);
				struct lll_conn *conn_lll;
				struct ll_conn *conn;
				memq_link_t *link;

				conn_lll = scan->lll.conn;
				LL_ASSERT(conn_lll);
				scan->lll.conn = NULL;

				LL_ASSERT(!conn_lll->link_tx_free);
				link = memq_deinit(&conn_lll->memq_tx.head,
						   &conn_lll->memq_tx.tail);
				LL_ASSERT(link);
				conn_lll->link_tx_free = link;

				conn = (void *)HDR_LLL2EVT(conn_lll);
				ll_conn_release(conn);

				scan->is_enabled = 0U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
#if defined(CONFIG_BT_BROADCASTER)
				if (!ull_adv_is_enabled_get(0))
#endif /* CONFIG_BT_BROADCASTER */
				{
					ull_filter_adv_scan_state_cb(0);
				}
#endif /* CONFIG_BT_CTLR_PRIVACY */
				break;
#endif /* CONFIG_BT_CENTRAL */

			} else {
				/* Successful connection: falls through to the
				 * common release below.
				 */
				LL_ASSERT(!cc->status);
			}
		}

		__fallthrough;
		case NODE_RX_TYPE_DC_PDU:
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_OBSERVER)
		case NODE_RX_TYPE_REPORT:

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		__fallthrough;
		case NODE_RX_TYPE_EXT_1M_REPORT:
		case NODE_RX_TYPE_EXT_2M_REPORT:
		case NODE_RX_TYPE_EXT_CODED_REPORT:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		case NODE_RX_TYPE_SYNC_REPORT:
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
		case NODE_RX_TYPE_SCAN_REQ:
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CONN)
		case NODE_RX_TYPE_CONN_UPDATE:
		case NODE_RX_TYPE_ENC_REFRESH:

#if defined(CONFIG_BT_CTLR_LE_PING)
		case NODE_RX_TYPE_APTO:
#endif /* CONFIG_BT_CTLR_LE_PING */

		case NODE_RX_TYPE_CHAN_SEL_ALGO:

#if defined(CONFIG_BT_CTLR_PHY)
		case NODE_RX_TYPE_PHY_UPDATE:
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
		case NODE_RX_TYPE_RSSI:
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
		case NODE_RX_TYPE_PROFILE:
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
		case NODE_RX_TYPE_ADV_INDICATION:
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
		case NODE_RX_TYPE_SCAN_INDICATION:
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

#if defined(CONFIG_BT_HCI_MESH_EXT)
		case NODE_RX_TYPE_MESH_ADV_CPLT:
		case NODE_RX_TYPE_MESH_REPORT:
#endif /* CONFIG_BT_HCI_MESH_EXT */

#if CONFIG_BT_CTLR_USER_EVT_RANGE > 0
		case NODE_RX_TYPE_USER_START ... NODE_RX_TYPE_USER_END - 1:
#endif /* CONFIG_BT_CTLR_USER_EVT_RANGE > 0 */

		/* Ensure that at least one 'case' statement is present for this
		 * code block.
		 */
		case NODE_RX_TYPE_NONE:
			LL_ASSERT(rx_free->type != NODE_RX_TYPE_NONE);
			/* Node returns to the pdu pool, so a new rx link may
			 * be allocated again.
			 */
			ll_rx_link_inc_quota(1);
			mem_release(rx_free, &mem_pdu_rx.free);
			break;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		case NODE_RX_TYPE_SYNC:
		{
			struct node_rx_sync *se =
				(void *)((struct node_rx_pdu *)rx_free)->pdu;

			/* Sync established: keep the sync set, just free the
			 * node. A failed sync falls through to SYNC_LOST
			 * handling to also release the sync set.
			 */
			if (!se->status) {
				mem_release(rx_free, &mem_pdu_rx.free);

				break;
			}
		}
		/* Pass through */

		case NODE_RX_TYPE_SYNC_LOST:
		{
			struct ll_sync_set *sync =
				(void *)rx_free->rx_ftr.param;

			sync->timeout_reload = 0U;

			ull_sync_release(sync);
		}
		break;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

#if defined(CONFIG_BT_CONN)
		case NODE_RX_TYPE_TERMINATE:
		{
			struct ll_conn *conn;
			memq_link_t *link;

			conn = ll_conn_get(rx_free->handle);

			/* Tear down the connection's Tx memq and return the
			 * connection context to the pool.
			 */
			LL_ASSERT(!conn->lll.link_tx_free);
			link = memq_deinit(&conn->lll.memq_tx.head,
					   &conn->lll.memq_tx.tail);
			LL_ASSERT(link);
			conn->lll.link_tx_free = link;

			ll_conn_release(conn);
		}
		break;
#endif /* CONFIG_BT_CONN */

		case NODE_RX_TYPE_EVENT_DONE:
		default:
			LL_ASSERT(0);
			break;
		}
	}

	*node_rx = rx;

	/* Replenish the rx free FIFO from the now-larger pdu pool */
	rx_alloc(UINT8_MAX);
}
|
|
|
|
|
2019-08-16 15:40:31 +02:00
|
|
|
/* Adjust the rx link allocation quota by delta.
 * When incrementing, assert the quota never exceeds the RX_CNT pool size;
 * decrements (delta <= 0) are always allowed.
 */
static inline void ll_rx_link_inc_quota(int8_t delta)
{
	LL_ASSERT(delta <= 0 || mem_link_rx.quota_pdu < RX_CNT);
	mem_link_rx.quota_pdu += delta;
}
|
|
|
|
|
2018-12-18 05:48:20 +01:00
|
|
|
void *ll_rx_link_alloc(void)
|
|
|
|
{
|
|
|
|
return mem_acquire(&mem_link_rx.free);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return an rx link element to the rx link pool. */
void ll_rx_link_release(void *link)
{
	mem_release(link, &mem_link_rx.free);
}
|
|
|
|
|
|
|
|
void *ll_rx_alloc(void)
|
|
|
|
{
|
|
|
|
return mem_acquire(&mem_pdu_rx.free);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return an rx node to the pdu rx pool. */
void ll_rx_release(void *node_rx)
{
	mem_release(node_rx, &mem_pdu_rx.free);
}
|
|
|
|
|
|
|
|
/* Enqueue an rx node towards the Host (LL rx queue). */
void ll_rx_put(memq_link_t *link, void *rx)
{
#if defined(CONFIG_BT_CONN)
	struct node_rx_hdr *rx_hdr = rx;

	/* Serialize Tx ack with Rx enqueue by storing reference to
	 * last element index in Tx ack FIFO.
	 */
	rx_hdr->ack_last = mfifo_tx_ack.l;
#endif /* CONFIG_BT_CONN */

	/* Enqueue the Rx object */
	memq_enqueue(link, rx, &memq_ll_rx.tail);
}
|
|
|
|
|
2019-02-14 10:04:17 +01:00
|
|
|
/**
|
|
|
|
* @brief Permit another loop in the controller thread (prio_recv_thread)
|
|
|
|
* @details Execution context: ULL mayfly
|
|
|
|
*/
|
2018-12-18 05:48:20 +01:00
|
|
|
void ll_rx_sched(void)
{
	/* sem_recv references the same semaphore (sem_prio_recv)
	 * in prio_recv_thread; giving it wakes the controller thread
	 * to process the newly enqueued rx node(s).
	 */
	k_sem_give(sem_recv);
}
|
|
|
|
|
2019-02-04 22:33:51 +05:30
|
|
|
#if defined(CONFIG_BT_CONN)
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Peek at the rx free FIFO without dequeuing.
 * Returns the head element only if at least 'count' elements are available,
 * otherwise NULL.
 */
void *ll_pdu_rx_alloc_peek(uint8_t count)
{
	if (count <= MFIFO_AVAIL_COUNT_GET(ll_pdu_rx_free)) {
		return MFIFO_DEQUEUE_PEEK(ll_pdu_rx_free);
	}

	return NULL;
}
|
|
|
|
|
|
|
|
void *ll_pdu_rx_alloc(void)
|
|
|
|
{
|
|
|
|
return MFIFO_DEQUEUE(ll_pdu_rx_free);
|
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Queue a Tx acknowledgment (completed node_tx for a connection handle)
 * into the Tx ack FIFO for the Host to consume.
 */
void ll_tx_ack_put(uint16_t handle, struct node_tx *node_tx)
{
	struct lll_tx *tx;
	uint8_t idx;

	/* Reserve a slot; asserts if the FIFO is full */
	idx = MFIFO_ENQUEUE_GET(tx_ack, (void **)&tx);
	LL_ASSERT(tx);

	tx->handle = handle;
	tx->node = node_tx;

	/* Commit the slot, making the ack visible to the consumer */
	MFIFO_ENQUEUE(tx_ack, idx);
}
|
|
|
|
#endif /* CONFIG_BT_CONN */
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Report the ticker instance and user id reserved for flash timeslices
 * (the flash user occupies the id just past the controller's ticker nodes).
 */
void ll_timeslice_ticker_id_get(uint8_t * const instance_index,
				uint8_t * const user_id)
{
	*instance_index = TICKER_INSTANCE_ID_CTLR;
	*user_id = (TICKER_NODES - FLASH_TICKER_NODES);
}
|
|
|
|
|
|
|
|
/* Request LLL (in its own execution context) to abort the current radio
 * event. Uses a static mayfly, so at most one such request can be pending.
 */
void ll_radio_state_abort(void)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
	uint32_t ret;

	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
			     &mfy);
	LL_ASSERT(!ret);
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Query whether the radio peripheral is currently idle (delegates to LLL). */
uint32_t ll_radio_state_is_idle(void)
{
	uint32_t is_idle;

	is_idle = lll_radio_is_idle();

	return is_idle;
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Ticker operation callback: publish the operation status into the caller's
 * variable (param points at a volatile uint32_t) and wake the thread blocked
 * in ull_ticker_status_take().
 */
void ull_ticker_status_give(uint32_t status, void *param)
{
	*((uint32_t volatile *)param) = status;

	k_sem_give(&sem_ticker_api_cb);
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Block until a ticker operation completes and return the status reported
 * by ull_ticker_status_give() via *ret_cb.
 *
 * @param ret    Immediate return value of the ticker API call.
 * @param ret_cb Location the operation callback writes its status to;
 *               must have been initialized to TICKER_STATUS_BUSY.
 */
uint32_t ull_ticker_status_take(uint32_t ret, uint32_t volatile *ret_cb)
{
	if (ret == TICKER_STATUS_BUSY) {
		/* TODO: Enable ticker job in case of CONFIG_BT_CTLR_LOW_LAT */
	} else {
		/* Check for ticker operation enqueue failed, in which case
		 * function return value (ret) will be TICKER_STATUS_FAILURE
		 * and callback return value (ret_cb) will remain as
		 * TICKER_STATUS_BUSY.
		 * This assert check will avoid waiting forever to take the
		 * semaphore that will never be given when the ticker operation
		 * callback does not get called due to enqueue failure.
		 */
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (*ret_cb != TICKER_STATUS_BUSY));
	}

	k_sem_take(&sem_ticker_api_cb, K_FOREVER);

	return *ret_cb;
}
|
|
|
|
|
|
|
|
void *ull_disable_mark(void *param)
|
|
|
|
{
|
2019-06-05 13:40:36 +02:00
|
|
|
return mark_set(&mark_disable, param);
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_disable_unmark(void *param)
|
|
|
|
{
|
2019-06-05 13:40:36 +02:00
|
|
|
return mark_unset(&mark_disable, param);
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_disable_mark_get(void)
|
|
|
|
{
|
2019-06-05 13:40:36 +02:00
|
|
|
return mark_get(mark_disable);
|
|
|
|
}
|
|
|
|
|
2020-11-12 15:11:43 +01:00
|
|
|
/**
|
|
|
|
* @brief Stops a specified ticker using the ull_disable_(un)mark functions.
|
|
|
|
*
|
|
|
|
* @param ticker_handle The handle of the ticker.
|
|
|
|
* @param param The object to mark.
|
|
|
|
* @param lll_disable Optional object when calling @ref ull_disable
|
|
|
|
*
|
|
|
|
* @return 0 if success, else ERRNO.
|
|
|
|
*/
|
|
|
|
int ull_ticker_stop_with_mark(uint8_t ticker_handle, void *param,
			      void *lll_disable)
{
	uint32_t volatile ret_cb;
	uint32_t ret;
	void *mark;

	/* Mark the object so concurrent operations see it is being stopped;
	 * mark_set returns a different pointer when another object already
	 * holds the mark.
	 */
	mark = ull_disable_mark(param);
	if (mark != param) {
		return -ENOLCK;
	}

	/* Stop the ticker and synchronously wait for the operation result */
	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			  ticker_handle, ull_ticker_status_give,
			  (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret) {
		/* Ticker already stopped: undo the mark before reporting */
		mark = ull_disable_unmark(param);
		if (mark != param) {
			return -ENOLCK;
		}

		return -EALREADY;
	}

	/* Disable any in-flight LLL event associated with the object */
	ret = ull_disable(lll_disable);
	if (ret) {
		return -EBUSY;
	}

	mark = ull_disable_unmark(param);
	if (mark != param) {
		return -ENOLCK;
	}

	return 0;
}
|
|
|
|
|
2019-06-05 13:40:36 +02:00
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
void *ull_update_mark(void *param)
|
|
|
|
{
|
|
|
|
return mark_set(&mark_update, param);
|
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_update_unmark(void *param)
|
|
|
|
{
|
|
|
|
return mark_unset(&mark_update, param);
|
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_update_mark_get(void)
|
|
|
|
{
|
|
|
|
return mark_get(mark_update);
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
2019-06-05 13:40:36 +02:00
|
|
|
#endif /* CONFIG_BT_CONN */
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
/* Synchronously disable an LLL object's ongoing event: install a disabled
 * callback on its ULL header, ask LLL to disable it via mayfly, and block
 * until the event reports done.
 *
 * Execution context: Controller thread.
 * Returns ULL_STATUS_SUCCESS immediately when the object has no ULL parent
 * or no event is currently active.
 */
int ull_disable(void *lll)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
	struct ull_hdr *hdr;
	struct k_sem sem;
	uint32_t ret;

	hdr = HDR_ULL(((struct lll_hdr *)lll)->parent);
	if (!hdr) {
		return ULL_STATUS_SUCCESS;
	}

	k_sem_init(&sem, 0, 1);

	hdr->disabled_param = &sem;
	hdr->disabled_cb = disabled_cb;

	/* No active event: nothing to disable.
	 * NOTE(review): disabled_cb/disabled_param stay installed on this
	 * early-return path, pointing at the (now stale) stack semaphore —
	 * presumably harmless since no event will fire the callback; confirm.
	 */
	if (!ull_ref_get(hdr)) {
		return ULL_STATUS_SUCCESS;
	}

	mfy.param = lll;
	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0,
			     &mfy);
	LL_ASSERT(!ret);

	/* Wait for disabled_cb to give the semaphore from the done path */
	return k_sem_take(&sem, K_FOREVER);
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Peek at the ULL rx free FIFO without dequeuing.
 * Returns the head element only if at least 'count' elements are available,
 * otherwise NULL.
 */
void *ull_pdu_rx_alloc_peek(uint8_t count)
{
	if (count <= MFIFO_AVAIL_COUNT_GET(pdu_rx_free)) {
		return MFIFO_DEQUEUE_PEEK(pdu_rx_free);
	}

	return NULL;
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Iterate over the ULL rx free FIFO, peeking the element at *idx and
 * advancing the iterator.
 */
void *ull_pdu_rx_alloc_peek_iter(uint8_t *idx)
{
	void **entry;

	entry = (void **)MFIFO_DEQUEUE_ITER_GET(pdu_rx_free, idx);

	return *entry;
}
|
|
|
|
|
|
|
|
void *ull_pdu_rx_alloc(void)
|
|
|
|
{
|
|
|
|
return MFIFO_DEQUEUE(pdu_rx_free);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Enqueue an rx node from LLL towards ULL (ULL rx queue). */
void ull_rx_put(memq_link_t *link, void *rx)
{
#if defined(CONFIG_BT_CONN)
	struct node_rx_hdr *rx_hdr = rx;

	/* Serialize Tx ack with Rx enqueue by storing reference to
	 * last element index in Tx ack FIFO.
	 */
	rx_hdr->ack_last = ull_conn_ack_last_idx_get();
#endif /* CONFIG_BT_CONN */

	/* Enqueue the Rx object */
	memq_enqueue(link, rx, &memq_ull_rx.tail);
}
|
|
|
|
|
|
|
|
/* Schedule ULL rx demultiplexing in the ULL_HIGH context.
 * Uses a static mayfly: repeat requests while one is pending coalesce.
 */
void ull_rx_sched(void)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};

	/* Kick the ULL (using the mayfly, tailchain it) */
	mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &mfy);
}
|
|
|
|
|
|
|
|
int ull_prepare_enqueue(lll_is_abort_cb_t is_abort_cb,
|
|
|
|
lll_abort_cb_t abort_cb,
|
|
|
|
struct lll_prepare_param *prepare_param,
|
|
|
|
lll_prepare_cb_t prepare_cb, int prio,
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t is_resume)
|
2018-12-18 05:48:20 +01:00
|
|
|
{
|
|
|
|
struct lll_event *e;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t idx;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
idx = MFIFO_ENQUEUE_GET(prep, (void **)&e);
|
|
|
|
if (!e) {
|
|
|
|
return -ENOBUFS;
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(&e->prepare_param, prepare_param, sizeof(e->prepare_param));
|
|
|
|
e->prepare_cb = prepare_cb;
|
|
|
|
e->is_abort_cb = is_abort_cb;
|
|
|
|
e->abort_cb = abort_cb;
|
|
|
|
e->prio = prio;
|
|
|
|
e->is_resume = is_resume;
|
2019-03-26 19:57:45 -06:00
|
|
|
e->is_aborted = 0U;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
|
|
|
MFIFO_ENQUEUE(prep, idx);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void *ull_prepare_dequeue_get(void)
|
|
|
|
{
|
|
|
|
return MFIFO_DEQUEUE_GET(prep);
|
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Iterate over the prepare pipeline, returning the element at *idx and
 * advancing the iterator.
 */
void *ull_prepare_dequeue_iter(uint8_t *idx)
{
	void *evt;

	evt = MFIFO_DEQUEUE_ITER_GET(prep, idx);

	return evt;
}
|
|
|
|
|
|
|
|
void *ull_event_done_extra_get(void)
|
|
|
|
{
|
2019-02-27 10:40:03 +01:00
|
|
|
struct node_rx_event_done *evdone;
|
2018-12-18 05:48:20 +01:00
|
|
|
|
2019-02-27 10:40:03 +01:00
|
|
|
evdone = MFIFO_DEQUEUE_PEEK(done);
|
|
|
|
if (!evdone) {
|
2018-12-18 05:48:20 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-02-27 10:40:03 +01:00
|
|
|
return &evdone->extra;
|
2018-12-18 05:48:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Signal completion of an LLL event: dequeue a done-event node, tag it with
 * the event's parameter and enqueue it towards ULL for demultiplexing.
 *
 * @param param Identifies the completed event (opaque, carried to rx_demux).
 * @return The enqueued done-event node, or NULL when none was available.
 */
void *ull_event_done(void *param)
{
	struct node_rx_event_done *evdone;
	memq_link_t *link;

	/* Obtain new node that signals "Done of an RX-event".
	 * Obtain this by dequeuing from the global 'mfifo_done' queue.
	 * Note that 'mfifo_done' is a queue of pointers, not of
	 * struct node_rx_event_done
	 */
	evdone = MFIFO_DEQUEUE(done);
	if (!evdone) {
		/* Not fatal if we can not obtain node, though
		 * we will loose the packets in software stack.
		 * If this happens during Conn Upd, this could cause LSTO
		 */
		return NULL;
	}

	/* Detach the node's link for use by the rx enqueue below */
	link = evdone->hdr.link;
	evdone->hdr.link = NULL;

	evdone->hdr.type = NODE_RX_TYPE_EVENT_DONE;
	evdone->param = param;

	/* Hand over to ULL and kick the demux */
	ull_rx_put(link, evdone);
	ull_rx_sched();

	return evdone;
}
|
|
|
|
|
2020-10-05 10:42:39 +05:30
|
|
|
#if defined(CONFIG_BT_PERIPHERAL) || defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
|
2020-08-11 10:38:23 +05:30
|
|
|
/**
|
|
|
|
* @brief Extract timing from completed event
|
|
|
|
*
|
|
|
|
* @param node_rx_event_done[in] Done event containing fresh timing information
|
|
|
|
* @param ticks_drift_plus[out] Positive part of drift uncertainty window
|
|
|
|
* @param ticks_drift_minus[out] Negative part of drift uncertainty window
|
|
|
|
*/
|
|
|
|
void ull_drift_ticks_get(struct node_rx_event_done *done,
			 uint32_t *ticks_drift_plus,
			 uint32_t *ticks_drift_minus)
{
	uint32_t start_to_address_expected_us;
	uint32_t start_to_address_actual_us;
	uint32_t window_widening_event_us;
	uint32_t preamble_to_addr_us;

	start_to_address_actual_us =
		done->extra.drift.start_to_address_actual_us;
	window_widening_event_us =
		done->extra.drift.window_widening_event_us;
	preamble_to_addr_us =
		done->extra.drift.preamble_to_addr_us;

	/* Expected time from event start to address capture: fixed jitter
	 * and ticker-resolution margins plus this event's window widening
	 * and the PHY's preamble-to-address air time.
	 */
	start_to_address_expected_us = EVENT_JITTER_US +
				       EVENT_TICKER_RES_MARGIN_US +
				       window_widening_event_us +
				       preamble_to_addr_us;

	if (start_to_address_actual_us <= start_to_address_expected_us) {
		/* Packet arrived earlier than expected: tighten the window
		 * by the difference, keep widening as the plus drift.
		 */
		*ticks_drift_plus =
			HAL_TICKER_US_TO_TICKS(window_widening_event_us);
		*ticks_drift_minus =
			HAL_TICKER_US_TO_TICKS((start_to_address_expected_us -
						start_to_address_actual_us));
	} else {
		/* Packet arrived later than expected: drift forward by the
		 * actual capture time, back by the fixed margins.
		 */
		*ticks_drift_plus =
			HAL_TICKER_US_TO_TICKS(start_to_address_actual_us);
		*ticks_drift_minus =
			HAL_TICKER_US_TO_TICKS(EVENT_JITTER_US +
					       EVENT_TICKER_RES_MARGIN_US +
					       preamble_to_addr_us);
	}
}
|
2020-10-05 10:42:39 +05:30
|
|
|
#endif /* CONFIG_BT_PERIPHERAL || CONFIG_BT_CTLR_SYNC_PERIODIC */
|
2020-08-11 10:38:23 +05:30
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
/* Common initialization/reset of the ULL memory pools and queues: done-event
 * pool and links, rx PDU pool and links, and the ull_rx/ll_rx memqs. Called
 * for both first-time init and reset (names of callers not visible here).
 *
 * Returns 0 always; the int return keeps the signature uniform with other
 * init/reset helpers.
 */
static inline int init_reset(void)
{
	memq_link_t *link;

	/* Initialize done pool. */
	mem_init(mem_done.pool, sizeof(struct node_rx_event_done),
		 EVENT_DONE_MAX, &mem_done.free);

	/* Initialize done link pool. */
	mem_init(mem_link_done.pool, sizeof(memq_link_t), EVENT_DONE_MAX,
		 &mem_link_done.free);

	/* Allocate done buffers */
	done_alloc();

	/* Initialize rx pool. Element count is derived from the pool size so
	 * it stays consistent with the pool's declaration.
	 */
	mem_init(mem_pdu_rx.pool, (PDU_RX_NODE_POOL_ELEMENT_SIZE),
		 sizeof(mem_pdu_rx.pool) / (PDU_RX_NODE_POOL_ELEMENT_SIZE),
		 &mem_pdu_rx.free);

	/* Initialize rx link pool. */
	mem_init(mem_link_rx.pool, sizeof(memq_link_t),
		 sizeof(mem_link_rx.pool) / sizeof(memq_link_t),
		 &mem_link_rx.free);

	/* Acquire a link to initialize ull rx memq; memq needs one link to
	 * seed its head/tail.
	 */
	link = mem_acquire(&mem_link_rx.free);
	LL_ASSERT(link);

	/* Initialize ull rx memq */
	MEMQ_INIT(ull_rx, link);

	/* Acquire a link to initialize ll rx memq */
	link = mem_acquire(&mem_link_rx.free);
	LL_ASSERT(link);

	/* Initialize ll rx memq */
	MEMQ_INIT(ll_rx, link);

	/* Allocate rx free buffers; UINT8_MAX requests "as many as possible",
	 * rx_alloc() itself clamps to the quota just set.
	 */
	mem_link_rx.quota_pdu = RX_CNT;
	rx_alloc(UINT8_MAX);

	return 0;
}
|
|
|
|
|
2019-10-10 11:18:30 +02:00
|
|
|
/**
 * @brief Reset the LLL core and every enabled LLL role context
 *
 * Resets the Lower Link Layer and, per build configuration, the advertiser,
 * scanner, connection and direction finding (DF) contexts. Every reset call
 * is asserted to succeed.
 *
 * @param param Semaphore given on completion, but only when not built with
 *              zero-latency IRQs (!CONFIG_BT_CTLR_ZLI).
 */
static void perform_lll_reset(void *param)
{
	int err;

	/* Reset LLL */
	err = lll_reset();
	LL_ASSERT(!err);

#if defined(CONFIG_BT_BROADCASTER)
	/* Reset adv state */
	err = lll_adv_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	/* Reset scan state */
	err = lll_scan_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CONN)
	/* Reset conn role */
	err = lll_conn_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CONN */

	/* Use defined() like the rest of this file; for Kconfig booleans
	 * (defined to 1 when enabled, undefined otherwise) this is
	 * equivalent to the previous IS_ENABLED() test.
	 */
#if defined(CONFIG_BT_CTLR_DF)
	err = lll_df_reset();
	LL_ASSERT(!err);
#endif /* CONFIG_BT_CTLR_DF */

#if !defined(CONFIG_BT_CTLR_ZLI)
	k_sem_give(param);
#endif /* !CONFIG_BT_CTLR_ZLI */
}
|
|
|
|
|
2019-06-05 13:40:36 +02:00
|
|
|
/* Claim a mark slot: store @param into *m only if the slot is currently
 * empty (NULL). Returns the resulting owner of the slot — @param when the
 * claim succeeded, or the pre-existing owner when the slot was taken.
 */
static inline void *mark_set(void **m, void *param)
{
	void *owner = *m;

	if (owner == NULL) {
		*m = param;
		owner = param;
	}

	return owner;
}
|
|
|
|
|
|
|
|
/* Release a mark slot: clear *m only when it currently holds exactly
 * @param (and is non-NULL). Returns @param on successful release, NULL
 * when the slot was empty or owned by someone else.
 */
static inline void *mark_unset(void **m, void *param)
{
	void *owner = *m;

	if ((owner == NULL) || (owner != param)) {
		return NULL;
	}

	*m = NULL;

	return param;
}
|
|
|
|
|
|
|
|
/* Read a mark slot: simply report the current owner (NULL when unclaimed).
 * Takes the slot's value rather than its address, so this is an identity
 * accessor kept for symmetry with mark_set()/mark_unset().
 */
static inline void *mark_get(void *m)
{
	void *owner = m;

	return owner;
}
|
|
|
|
|
2019-02-14 10:04:17 +01:00
|
|
|
/**
 * @brief Allocate buffers for done events
 *
 * Fills the 'done' mfifo with pre-allocated node_rx_event_done buffers,
 * each carrying its own memq link, until either the mfifo is full or one
 * of the backing pools runs out.
 */
static inline void done_alloc(void)
{
	uint8_t idx;

	/* mfifo_done is a queue of pointers */
	while (MFIFO_ENQUEUE_IDX_GET(done, &idx)) {
		memq_link_t *link;
		struct node_rx_hdr *rx;

		link = mem_acquire(&mem_link_done.free);
		if (!link) {
			/* No more links: stop filling, idx slot stays unused */
			break;
		}

		rx = mem_acquire(&mem_done.free);
		if (!rx) {
			/* Buffer pool exhausted: give the link back too */
			mem_release(link, &mem_link_done.free);
			break;
		}

		/* Pair the buffer with its link before publishing */
		rx->link = link;

		MFIFO_BY_IDX_ENQUEUE(done, idx, rx);
	}
}
|
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
/* Return a consumed done event to the 'done' mfifo for reuse.
 *
 * Note: the first 'done' in the MFIFO macros is the mfifo instance name;
 * it shadows (but is distinct from) the 'done' parameter enqueued below.
 *
 * Returns the released event on success, NULL if the mfifo had no free
 * slot (caller asserts against that).
 */
static inline void *done_release(memq_link_t *link,
				 struct node_rx_event_done *done)
{
	uint8_t idx;

	if (!MFIFO_ENQUEUE_IDX_GET(done, &idx)) {
		return NULL;
	}

	/* Re-attach the link so the buffer is self-contained when reused */
	done->hdr.link = link;

	MFIFO_BY_IDX_ENQUEUE(done, idx, done);

	return done;
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Replenish the free Rx PDU fifos from the rx buffer/link pools.
 *
 * First fills the LLL-to-ULL free queue (pdu_rx_free) with up to @max
 * buffers, clamped by the remaining quota_pdu. With CONFIG_BT_CONN it then
 * also tops up the ULL-to-LL/HCI free queue (ll_pdu_rx_free) while quota
 * remains. Each enqueued buffer consumes one unit of quota
 * (ll_rx_link_inc_quota(-1)).
 *
 * @param max Maximum number of pdu_rx_free buffers to allocate; pass
 *            UINT8_MAX for "as many as the quota allows".
 */
static inline void rx_alloc(uint8_t max)
{
	uint8_t idx;

	if (max > mem_link_rx.quota_pdu) {
		max = mem_link_rx.quota_pdu;
	}

	while ((max--) && MFIFO_ENQUEUE_IDX_GET(pdu_rx_free, &idx)) {
		memq_link_t *link;
		struct node_rx_hdr *rx;

		link = mem_acquire(&mem_link_rx.free);
		if (!link) {
			return;
		}

		rx = mem_acquire(&mem_pdu_rx.free);
		if (!rx) {
			/* Keep link/buffer acquisition balanced on failure */
			mem_release(link, &mem_link_rx.free);
			return;
		}

		rx->link = link;

		MFIFO_BY_IDX_ENQUEUE(pdu_rx_free, idx, rx);

		ll_rx_link_inc_quota(-1);
	}

#if defined(CONFIG_BT_CONN)
	/* NOTE(review): due to the post-decrement in the loop condition, max
	 * is 0 here only when the mfifo slot lookup failed while exactly one
	 * request remained; on plain count exhaustion max has wrapped to
	 * 0xFF. Confirm this early-out is the intended condition.
	 */
	if (!max) {
		return;
	}

	/* Replenish the ULL to LL/HCI free Rx PDU queue after LLL to ULL free
	 * Rx PDU queue has been filled.
	 */
	while (mem_link_rx.quota_pdu &&
	       MFIFO_ENQUEUE_IDX_GET(ll_pdu_rx_free, &idx)) {
		memq_link_t *link;
		struct node_rx_hdr *rx;

		link = mem_acquire(&mem_link_rx.free);
		if (!link) {
			return;
		}

		rx = mem_acquire(&mem_pdu_rx.free);
		if (!rx) {
			mem_release(link, &mem_link_rx.free);
			return;
		}

		/* This queue's links are not threaded into a memq yet */
		link->mem = NULL;
		rx->link = link;

		MFIFO_BY_IDX_ENQUEUE(ll_pdu_rx_free, idx, rx);

		ll_rx_link_inc_quota(-1);
	}
#endif /* CONFIG_BT_CONN */
}
|
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
/* Demultiplex objects arriving on the ull_rx memq, and (with CONFIG_BT_CONN)
 * pending connection Tx acknowledgements.
 *
 * Without CONFIG_BT_CTLR_LOW_LAT_ULL the whole queue is drained in one call
 * (do/while), stopping early if a consumer NACKs. With LOW_LAT_ULL only one
 * item is handled and rx_demux_yield() re-queues this function so other
 * mayflies can run in between.
 *
 * @param param Unused here; present for the mayfly callback signature.
 */
static void rx_demux(void *param)
{
	memq_link_t *link;

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
	do {
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
		struct node_rx_hdr *rx;

		/* Peek only — the handlers below dequeue once they accept */
		link = memq_peek(memq_ull_rx.head, memq_ull_rx.tail,
				 (void **)&rx);
		if (link) {
#if defined(CONFIG_BT_CONN)
			struct node_tx *node_tx;
			memq_link_t *link_tx;
			uint16_t handle; /* Handle to Ack TX */
#endif /* CONFIG_BT_CONN */
			int nack = 0;

			LL_ASSERT(rx);

#if defined(CONFIG_BT_CONN)
			/* Tx acks recorded up to this rx node's ack_last are
			 * processed before the rx node itself.
			 */
			link_tx = ull_conn_ack_by_last_peek(rx->ack_last,
							    &handle, &node_tx);
			if (link_tx) {
				rx_demux_conn_tx_ack(rx->ack_last, handle,
						     link_tx, node_tx);
			} else
#endif /* CONFIG_BT_CONN */
			{
				nack = rx_demux_rx(link, rx);
			}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
			/* One item per invocation; tail-chain a new mayfly
			 * unless the consumer asked us to hold off (nack).
			 */
			if (!nack) {
				rx_demux_yield();
			}
#else /* !CONFIG_BT_CTLR_LOW_LAT_ULL */
			/* Consumer not ready: leave the item peeked and stop */
			if (nack) {
				break;
			}
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL */

#if defined(CONFIG_BT_CONN)
		} else {
			/* No rx pending: still drain any outstanding acks */
			struct node_tx *node_tx;
			uint8_t ack_last;
			uint16_t handle;

			link = ull_conn_ack_peek(&ack_last, &handle, &node_tx);
			if (link) {
				rx_demux_conn_tx_ack(ack_last, handle,
						      link, node_tx);

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
				rx_demux_yield();
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */

			}
#endif /* CONFIG_BT_CONN */
		}

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
	} while (link);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
}
|
|
|
|
|
2021-01-29 18:08:59 +05:30
|
|
|
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
|
|
|
|
static void rx_demux_yield(void)
|
|
|
|
{
|
|
|
|
static memq_link_t link;
|
|
|
|
static struct mayfly mfy = {0, 0, &link, NULL, rx_demux};
|
|
|
|
|
|
|
|
/* Kick the ULL (using the mayfly, tailchain it) */
|
|
|
|
mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_HIGH, 1,
|
|
|
|
&mfy);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
|
|
|
|
|
2020-08-19 07:27:02 +05:30
|
|
|
#if defined(CONFIG_BT_CONN)
|
|
|
|
/* Count completed data PDUs for one connection handle from the tx-ack mfifo.
 *
 * Iterates acked entries from *first up to last; all consecutive entries for
 * the same handle are folded into one count. Entries use tagged node
 * pointers: real node_tx pointers are 4-byte aligned, while the low values
 * (void *)1 / (void *)2 are in-band markers this function writes back —
 * 1 for "counted data PDU", 2 for "ctrl/flushed, not counted".
 * NOTE(review): the (uint32_t) pointer casts assume a 32-bit target.
 *
 * @param handle[out] Connection handle the returned count applies to
 * @param first[in,out] Iterator cursor, advanced by mfifo_dequeue_iter_get
 * @param last[in]  End of the acked region to scan
 * @return Number of completed data PDUs for *handle (0 when fifo empty)
 */
static uint8_t tx_cmplt_get(uint16_t *handle, uint8_t *first, uint8_t last)
{
	struct lll_tx *tx;
	uint8_t cmplt;

	tx = mfifo_dequeue_iter_get(mfifo_tx_ack.m, mfifo_tx_ack.s,
				    mfifo_tx_ack.n, mfifo_tx_ack.f, last,
				    first);
	if (!tx) {
		return 0;
	}

	*handle = tx->handle;
	cmplt = 0U;
	do {
		struct node_tx *node_tx;
		struct pdu_data *p;

		node_tx = tx->node;
		/* Address computation only; p is dereferenced below solely
		 * when node_tx is a real (aligned, non-marker) pointer.
		 */
		p = (void *)node_tx->pdu;
		if (!node_tx || (node_tx == (void *)1) ||
		    (((uint32_t)node_tx & ~3) &&
		     (p->ll_id == PDU_DATA_LLID_DATA_START ||
		      p->ll_id == PDU_DATA_LLID_DATA_CONTINUE))) {
			/* data packet, hence count num cmplt */
			tx->node = (void *)1;
			cmplt++;
		} else {
			/* ctrl packet or flushed, hence dont count num cmplt */
			tx->node = (void *)2;
		}

		/* Release only real node pointers, never the markers */
		if (((uint32_t)node_tx & ~3)) {
			ll_tx_mem_release(node_tx);
		}

		tx = mfifo_dequeue_iter_get(mfifo_tx_ack.m, mfifo_tx_ack.s,
					    mfifo_tx_ack.n, mfifo_tx_ack.f,
					    last, first);
	} while (tx && tx->handle == *handle);

	return cmplt;
}
|
|
|
|
|
2020-08-11 10:51:43 +05:30
|
|
|
/* Process one (or, without LOW_LAT_ULL, all pending) connection Tx acks up
 * to ack_last, then wake the thread context to collect via ll_rx_get().
 *
 * Careful with the preprocessor shape: without CONFIG_BT_CTLR_LOW_LAT_ULL
 * the body is a do/while draining every ack; with it, a single ack is
 * handled and the dangling "if (!link)" guards the trailing ll_rx_sched()
 * block so the wake-up only fires once the ack chain is exhausted.
 */
static inline void rx_demux_conn_tx_ack(uint8_t ack_last, uint16_t handle,
					memq_link_t *link,
					struct node_tx *node_tx)
{
#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
	do {
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */
		/* Dequeue node */
		ull_conn_ack_dequeue();

		/* Process Tx ack */
		ull_conn_tx_ack(handle, link, node_tx);

		/* Release link mem */
		ull_conn_link_tx_release(link);

		/* check for more rx ack */
		link = ull_conn_ack_by_last_peek(ack_last, &handle, &node_tx);

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL)
	if (!link)
#else /* CONFIG_BT_CTLR_LOW_LAT_ULL */
	} while (link);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL */

	{
		/* trigger thread to call ll_rx_get() */
		ll_rx_sched();
	}
}
|
|
|
|
#endif /* CONFIG_BT_CONN */
|
|
|
|
|
2019-02-14 10:04:17 +01:00
|
|
|
/**
 * @brief Dispatch rx objects
 * @details Rx objects are only peeked, not dequeued yet.
 *          Each accepted case dequeues from the ull_rx memq itself; a
 *          non-zero return (NACK from ull_conn_rx) leaves the object
 *          peeked so the caller can retry later.
 *          Execution context: ULL high priority Mayfly
 *
 * @return 0 on acceptance, non-zero NACK code when the connection rx path
 *         cannot take the PDU yet.
 */
static inline int rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx)
{
	/* Demux Rx objects */
	switch (rx->type) {
	case NODE_RX_TYPE_EVENT_DONE:
	{
		memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
		rx_demux_event_done(link, rx);
	}
	break;

#if defined(CONFIG_BT_OBSERVER)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	case NODE_RX_TYPE_EXT_1M_REPORT:
	case NODE_RX_TYPE_EXT_CODED_REPORT:
	case NODE_RX_TYPE_EXT_AUX_REPORT:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case NODE_RX_TYPE_SYNC_REPORT:
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	{
		struct pdu_adv *adv;

		memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);

		/* Non-extended PDUs go straight to the host; extended
		 * indications may need auxiliary channel follow-up.
		 */
		adv = (void *)((struct node_rx_pdu *)rx)->pdu;
		if (adv->type != PDU_ADV_TYPE_EXT_IND) {
			ll_rx_put(link, rx);
			ll_rx_sched();
			break;
		}

		ull_scan_aux_setup(link, rx);
	}
	break;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CONN)
	case NODE_RX_TYPE_CONNECTION:
	{
		memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
		ull_conn_setup(link, rx);
	}
	break;

	case NODE_RX_TYPE_DC_PDU:
	{
		int nack;

		/* May NACK (consumer busy): then the node stays peeked and
		 * we bubble the code up without dequeuing.
		 */
		nack = ull_conn_rx(link, (void *)&rx);
		if (nack) {
			return nack;
		}

		memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);

		/* ull_conn_rx may have consumed the node (rx set to NULL) */
		if (rx) {
			ll_rx_put(link, rx);
			ll_rx_sched();
		}
	}
	break;

	/* fallthrough into the pass-to-host group below */
	case NODE_RX_TYPE_TERMINATE:
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_OBSERVER) || \
	defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \
	defined(CONFIG_BT_CTLR_PROFILE_ISR) || \
	defined(CONFIG_BT_CTLR_ADV_INDICATION) || \
	defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \
	defined(CONFIG_BT_CONN)

#if defined(CONFIG_BT_OBSERVER)
	case NODE_RX_TYPE_REPORT:
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
	case NODE_RX_TYPE_SCAN_REQ:
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
	case NODE_RX_TYPE_PROFILE:
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
	case NODE_RX_TYPE_ADV_INDICATION:
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_SCAN_INDICATION)
	case NODE_RX_TYPE_SCAN_INDICATION:
#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */

	case NODE_RX_TYPE_RELEASE:
	{
		/* Generic pass-through: hand the node to the LL/HCI thread */
		memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);
		ll_rx_put(link, rx);
		ll_rx_sched();
	}
	break;
#endif /* CONFIG_BT_OBSERVER ||
	* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY ||
	* CONFIG_BT_CTLR_PROFILE_ISR ||
	* CONFIG_BT_CTLR_ADV_INDICATION ||
	* CONFIG_BT_CTLR_SCAN_INDICATION ||
	* CONFIG_BT_CONN
	*/

	default:
	{
#if defined(CONFIG_BT_CTLR_USER_EXT)
		/* Try proprietary demuxing */
		rx_demux_rx_proprietary(link, rx, memq_ull_rx.tail,
					&memq_ull_rx.head);
#else
		LL_ASSERT(0);
#endif /* CONFIG_BT_CTLR_USER_EXT */
	}
	break;
	}

	return 0;
}
|
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
/* Handle an EVENT_DONE node: run the role-specific done hook, recycle the
 * done buffer, flush/resume the prepare pipeline, and drop the prepare
 * reference on the owning ULL instance (possibly signalling its disable
 * callback).
 */
static inline void rx_demux_event_done(memq_link_t *link,
				       struct node_rx_hdr *rx)
{
	struct node_rx_event_done *done = (void *)rx;
	struct ull_hdr *ull_hdr;
	struct lll_event *next;
	void *release;

	/* Get the ull instance */
	ull_hdr = done->param;

	/* Process role dependent event done */
	switch (done->extra.type) {
#if defined(CONFIG_BT_CONN)
	case EVENT_DONE_EXTRA_TYPE_CONN:
		ull_conn_done(done);
		break;
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_BROADCASTER)
	case EVENT_DONE_EXTRA_TYPE_ADV:
		ull_adv_done(done);
		break;
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	case EVENT_DONE_EXTRA_TYPE_SCAN:
		ull_scan_done(done);
		break;

	case EVENT_DONE_EXTRA_TYPE_SCAN_AUX:
		ull_scan_aux_done(done);
		break;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	case EVENT_DONE_EXTRA_TYPE_SYNC:
		ull_sync_done(done);
		break;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
#endif /* CONFIG_BT_OBSERVER */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_USER_EXT)
	/* Range of proprietary done types handled out-of-tree */
	case EVENT_DONE_EXTRA_TYPE_USER_START
		... EVENT_DONE_EXTRA_TYPE_USER_END:
		ull_proprietary_done(done);
		break;
#endif /* CONFIG_BT_CTLR_USER_EXT */

	case EVENT_DONE_EXTRA_TYPE_NONE:
		/* ignore */
		break;

	default:
		LL_ASSERT(0);
		break;
	}

	/* release done */
	done->extra.type = 0U;
	release = done_release(link, done);
	LL_ASSERT(release == done);

	/* dequeue prepare pipeline: resume each non-aborted queued prepare
	 * by handing it to LLL via mayfly, stopping per the resume rules in
	 * the loop-exit condition below.
	 */
	next = ull_prepare_dequeue_get();
	while (next) {
		uint8_t is_aborted = next->is_aborted;
		uint8_t is_resume = next->is_resume;

		if (!is_aborted) {
			/* Static: a single lll_resume mayfly in flight at a
			 * time from this path.
			 */
			static memq_link_t link;
			static struct mayfly mfy = {0, 0, &link, NULL,
						    lll_resume};
			uint32_t ret;

			mfy.param = next;
			ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
					     TICKER_USER_ID_LLL, 0, &mfy);
			LL_ASSERT(!ret);
		}

		MFIFO_DEQUEUE(prep);

		next = ull_prepare_dequeue_get();

		/* Stop at pipeline end, or after a non-aborted event unless
		 * it was a resume followed by a non-resume prepare.
		 */
		if (!next || (!is_aborted && (!is_resume || next->is_resume))) {
			break;
		}
	}

	/* ull instance will resume, dont decrement ref */
	if (!ull_hdr) {
		return;
	}

	/* Decrement prepare reference */
	LL_ASSERT(ull_ref_get(ull_hdr));
	ull_ref_dec(ull_hdr);

	/* If disable initiated, signal the semaphore */
	if (!ull_ref_get(ull_hdr) && ull_hdr->disabled_cb) {
		ull_hdr->disabled_cb(ull_hdr->disabled_param);
	}
}
|
|
|
|
|
2019-02-04 22:21:55 +05:30
|
|
|
/* Generic disabled_cb: gives the semaphore passed as the callback parameter.
 * NOTE(review): presumably a thread blocks on this semaphore waiting for a
 * role disable to complete — confirm against the ull_disable callers.
 */
static void disabled_cb(void *param)
{
	k_sem_give(param);
}
|