From 1475402d416800c4be265e2e8fde1786f7238ee3 Mon Sep 17 00:00:00 2001 From: Vinayak Kariappa Chettimada Date: Tue, 18 Dec 2018 05:48:20 +0100 Subject: [PATCH] Bluetooth: controller: Introduce ULL LLL architecture This is a squash merge of commits introducing the new split Upper Link Layer and Lower Link Layer architecture of the Bluetooth Low Energy controller. This introduces a new, improved Link Layer based on the concept of split responsibilities; The Upper Link Layer (ULL) is in charge of control procedures, inter-event scheduling and overall role management. The code for the ULL is shared among all hardware implementations. The Lower Link Layer (LLL) is responsible for the intra-event scheduling and vendor specific radio hardware access. The communication between ULL and LLL is achieved through a set of FIFOs that contain both control and data packets. Signed-off-by: Vinayak Kariappa Chettimada Signed-off-by: Alberto Escolar Piedras Signed-off-by: Wolfgang Puffitsch Signed-off-by: Morten Priess --- subsys/bluetooth/CMakeLists.txt | 2 +- subsys/bluetooth/controller/CMakeLists.txt | 95 + subsys/bluetooth/controller/Kconfig | 108 + subsys/bluetooth/controller/ll_sw/ll_addr.c | 11 +- subsys/bluetooth/controller/ll_sw/lll.h | 2 +- subsys/bluetooth/controller/ll_sw/lll_chan.c | 199 + subsys/bluetooth/controller/ll_sw/lll_chan.h | 11 + subsys/bluetooth/controller/ll_sw/lll_conn.h | 152 +- .../bluetooth/controller/ll_sw/lll_filter.h | 17 + .../controller/ll_sw/nordic/hal/nrf5/mayfly.c | 35 + .../controller/ll_sw/nordic/hal/nrf5/ticker.c | 39 + .../controller/ll_sw/nordic/lll/lll.c | 552 ++ .../controller/ll_sw/nordic/lll/lll_adv.c | 846 +++ .../controller/ll_sw/nordic/lll/lll_adv.h | 99 + .../ll_sw/nordic/lll/lll_adv_internal.h | 45 + .../controller/ll_sw/nordic/lll/lll_clock.c | 38 + .../controller/ll_sw/nordic/lll/lll_clock.h | 7 + .../controller/ll_sw/nordic/lll/lll_conn.c | 913 ++++ .../controller/ll_sw/nordic/lll/lll_filter.c | 981 ++++ 
.../ll_sw/nordic/lll/lll_internal.h | 15 + .../controller/ll_sw/nordic/lll/lll_master.c | 208 + .../controller/ll_sw/nordic/lll/lll_master.h | 9 + .../controller/ll_sw/nordic/lll/lll_prof.c | 138 + .../ll_sw/nordic/lll/lll_prof_internal.h | 10 + .../controller/ll_sw/nordic/lll/lll_scan.c | 1054 ++++ .../controller/ll_sw/nordic/lll/lll_scan.h | 45 + .../controller/ll_sw/nordic/lll/lll_slave.c | 253 + .../controller/ll_sw/nordic/lll/lll_slave.h | 9 + .../controller/ll_sw/nordic/lll/lll_test.c | 343 ++ .../ll_sw/nordic/lll/lll_tim_internal.h | 32 + .../controller/ll_sw/nordic/lll/lll_tmp.c | 246 + .../controller/ll_sw/nordic/lll/lll_tmp.h | 24 + .../ll_sw/nordic/lll/lll_tmp_internal.h | 5 + .../controller/ll_sw/nordic/lll/lll_vendor.h | 12 + subsys/bluetooth/controller/ll_sw/ull.c | 1535 ++++++ subsys/bluetooth/controller/ll_sw/ull_adv.c | 1141 ++++ .../bluetooth/controller/ll_sw/ull_adv_aux.c | 106 + .../bluetooth/controller/ll_sw/ull_adv_aux.h | 16 + .../controller/ll_sw/ull_adv_internal.h | 23 + .../controller/ll_sw/ull_adv_types.h | 31 + subsys/bluetooth/controller/ll_sw/ull_conn.c | 4764 +++++++++++++++++ .../controller/ll_sw/ull_conn_internal.h | 29 + .../controller/ll_sw/ull_conn_types.h | 195 +- .../bluetooth/controller/ll_sw/ull_internal.h | 32 + .../bluetooth/controller/ll_sw/ull_master.c | 894 ++++ .../controller/ll_sw/ull_master_internal.h | 10 + subsys/bluetooth/controller/ll_sw/ull_scan.c | 437 ++ .../controller/ll_sw/ull_scan_internal.h | 39 + .../controller/ll_sw/ull_scan_types.h | 14 + subsys/bluetooth/controller/ll_sw/ull_sched.c | 46 + .../controller/ll_sw/ull_sched_internal.h | 12 + subsys/bluetooth/controller/ll_sw/ull_slave.c | 488 ++ .../controller/ll_sw/ull_slave_internal.h | 12 + subsys/bluetooth/controller/ll_sw/ull_tmp.c | 325 ++ subsys/bluetooth/controller/ll_sw/ull_tmp.h | 10 + .../controller/ll_sw/ull_tmp_internal.h | 9 + subsys/bluetooth/controller/util/mfifo.h | 182 + 57 files changed, 16899 insertions(+), 6 deletions(-) create mode 
100644 subsys/bluetooth/controller/ll_sw/lll_chan.c create mode 100644 subsys/bluetooth/controller/ll_sw/lll_chan.h create mode 100644 subsys/bluetooth/controller/ll_sw/lll_filter.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_clock.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_clock.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_conn.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_filter.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_master.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_master.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_prof.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_prof_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_scan.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_scan.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_slave.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_slave.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_test.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tim_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp.c create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/nordic/lll/lll_vendor.h create mode 100644 
subsys/bluetooth/controller/ll_sw/ull.c create mode 100644 subsys/bluetooth/controller/ll_sw/ull_adv.c create mode 100644 subsys/bluetooth/controller/ll_sw/ull_adv_aux.c create mode 100644 subsys/bluetooth/controller/ll_sw/ull_adv_aux.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_adv_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_adv_types.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_conn.c create mode 100644 subsys/bluetooth/controller/ll_sw/ull_conn_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_master.c create mode 100644 subsys/bluetooth/controller/ll_sw/ull_master_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_scan.c create mode 100644 subsys/bluetooth/controller/ll_sw/ull_scan_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_scan_types.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_sched.c create mode 100644 subsys/bluetooth/controller/ll_sw/ull_sched_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_slave.c create mode 100644 subsys/bluetooth/controller/ll_sw/ull_slave_internal.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_tmp.c create mode 100644 subsys/bluetooth/controller/ll_sw/ull_tmp.h create mode 100644 subsys/bluetooth/controller/ll_sw/ull_tmp_internal.h create mode 100644 subsys/bluetooth/controller/util/mfifo.h diff --git a/subsys/bluetooth/CMakeLists.txt b/subsys/bluetooth/CMakeLists.txt index b4c24574dd1..1d45bca5c84 100644 --- a/subsys/bluetooth/CMakeLists.txt +++ b/subsys/bluetooth/CMakeLists.txt @@ -8,7 +8,7 @@ add_subdirectory_ifdef(CONFIG_BT_SHELL shell) add_subdirectory_ifdef(CONFIG_BT_CONN services) if(CONFIG_BT_CTLR) - if(CONFIG_BT_LL_SW) + if(CONFIG_BT_LL_SW OR CONFIG_BT_LL_SW_SPLIT) add_subdirectory(controller) endif() endif() diff --git a/subsys/bluetooth/controller/CMakeLists.txt 
b/subsys/bluetooth/controller/CMakeLists.txt index abe42018a1e..9e39dc65242 100644 --- a/subsys/bluetooth/controller/CMakeLists.txt +++ b/subsys/bluetooth/controller/CMakeLists.txt @@ -51,6 +51,101 @@ if(CONFIG_BT_LL_SW) ) endif() +if(CONFIG_BT_LL_SW_SPLIT) + zephyr_library_sources( + ll_sw/ull.c + ll_sw/lll_chan.c + ) + zephyr_library_sources_ifdef( + CONFIG_BT_LLL_VENDOR_NORDIC + ll_sw/nordic/lll/lll.c + ) + if(CONFIG_BT_BROADCASTER) + zephyr_library_sources( + ll_sw/ull_adv.c + ) + zephyr_library_sources_ifdef( + CONFIG_BT_CTLR_ADV_EXT + ll_sw/ull_adv_aux.c + ) + zephyr_library_sources_ifdef( + CONFIG_BT_LLL_VENDOR_NORDIC + ll_sw/nordic/lll/lll_adv.c + ) + endif() + if(CONFIG_BT_OBSERVER) + zephyr_library_sources( + ll_sw/ull_scan.c + ) + zephyr_library_sources_ifdef( + CONFIG_BT_LLL_VENDOR_NORDIC + ll_sw/nordic/lll/lll_scan.c + ) + endif() + if(CONFIG_BT_CONN) + zephyr_library_sources( + ll_sw/ull_conn.c + ) + zephyr_library_sources_ifdef( + CONFIG_BT_LLL_VENDOR_NORDIC + ll_sw/nordic/lll/lll_clock.c + ll_sw/nordic/lll/lll_conn.c + ) + if(CONFIG_BT_PERIPHERAL) + zephyr_library_sources( + ll_sw/ull_slave.c + ) + zephyr_library_sources_ifdef( + CONFIG_BT_LLL_VENDOR_NORDIC + ll_sw/nordic/lll/lll_slave.c + ) + endif() + if(CONFIG_BT_CENTRAL) + zephyr_library_sources( + ll_sw/ull_master.c + ) + zephyr_library_sources_ifdef( + CONFIG_BT_LLL_VENDOR_NORDIC + ll_sw/nordic/lll/lll_master.c + ) + endif() + if(CONFIG_BT_CTLR_SCHED_ADVANCED) + zephyr_library_sources( + ll_sw/ull_sched.c + ) + endif() + endif() + zephyr_library_sources_ifdef( + CONFIG_BT_CTLR_FILTER + ll_sw/nordic/lll/lll_filter.c + ) + zephyr_library_sources_ifdef( + CONFIG_BT_HCI_MESH_EXT + ll_sw/ll_mesh.c + ) + zephyr_library_sources_ifdef( + CONFIG_BT_CTLR_DTM + ll_sw/nordic/lll/lll_test.c + ) + if(CONFIG_BT_TMP) + zephyr_library_sources( + ll_sw/ull_tmp.c + ) + zephyr_library_sources_ifdef( + CONFIG_BT_LLL_VENDOR_NORDIC + ll_sw/nordic/lll/lll_tmp.c + ) + endif() + if(CONFIG_BT_LLL_VENDOR_NORDIC) + 
zephyr_library_include_directories( + ll_sw/nordic/lll + ) + zephyr_library_sources_ifdef( + CONFIG_BT_CTLR_PROFILE_ISR + ll_sw/nordic/lll/lll_prof.c + ) + endif() +endif() zephyr_library_sources_ifdef( CONFIG_SOC_COMPATIBLE_NRF diff --git a/subsys/bluetooth/controller/Kconfig b/subsys/bluetooth/controller/Kconfig index 4c346b0bafc..30658c52601 100644 --- a/subsys/bluetooth/controller/Kconfig +++ b/subsys/bluetooth/controller/Kconfig @@ -29,8 +29,24 @@ config BT_LL_SW help Use Zephyr software BLE Link Layer implementation. +config BT_LL_SW_SPLIT + bool "Software-based BLE Link Layer (ULL/LLL split)" + select BT_RECV_IS_RX_THREAD + select ENTROPY_GENERATOR + help + Use Zephyr software BLE Link Layer ULL LLL split implementation. + endchoice +config BT_LLL_VENDOR_NORDIC + bool "Use Nordic LLL" + depends on BT_LL_SW_SPLIT && SOC_COMPATIBLE_NRF + select ENTROPY_NRF5_RNG + select ENTROPY_NRF5_BIAS_CORRECTION + default y + help + Use Nordic Lower Link Layer implementation. + comment "BLE Controller configuration" config BT_CTLR_CRYPTO @@ -321,6 +337,14 @@ config BT_CTLR_ADV_EXT Enable support for Bluetooth 5.0 LE Advertising Extensions in the Controller. +config BT_ADV_SET + prompt "LE Advertising Extensions Sets" + depends on BT_CTLR_ADV_EXT + int + default 1 + help + Maximum supported advertising sets. + config BT_CTLR_DTM bool help @@ -451,6 +475,57 @@ config BT_CTLR_SCHED_ADVANCED Disabling this feature will lead to overlapping role in timespace leading to skipped events amongst active roles. +if BT_LL_SW_SPLIT +config BT_CTLR_LLL_PRIO + prompt "Lower Link Layer (Radio) IRQ priority" + int + range 0 3 if SOC_SERIES_NRF51X + range 0 6 if SOC_SERIES_NRF52X + default 0 + help + The interrupt priority for event preparation and radio IRQ. 
+ +config BT_CTLR_ULL_HIGH_PRIO + prompt "Upper Link Layer High IRQ priority" + int + range BT_CTLR_LLL_PRIO 3 if SOC_SERIES_NRF51X + range BT_CTLR_LLL_PRIO 6 if SOC_SERIES_NRF52X + default 0 + help + The interrupt priority for Ticker's Worker IRQ and Upper Link Layer + higher priority functions. + +config BT_CTLR_ULL_LOW_PRIO + prompt "Upper Link Layer Low IRQ priority" + int + range BT_CTLR_ULL_HIGH_PRIO 3 if SOC_SERIES_NRF51X + range BT_CTLR_ULL_HIGH_PRIO 6 if SOC_SERIES_NRF52X + default 0 + help + The interrupt priority for Ticker's Job IRQ and Upper Link Layer + lower priority functions. + +config BT_CTLR_LOWEST_PRIO + prompt "Link Layer Lowest IRQ priority" + int + range BT_CTLR_ULL_LOW_PRIO 3 if SOC_SERIES_NRF51X + range BT_CTLR_ULL_LOW_PRIO 6 if SOC_SERIES_NRF52X + default 0 + help + The interrupt priority for RNG and other non-critical functions. + +config BT_CTLR_LOW_LAT + prompt "Low latency non-negotiating event pre-emption" + bool + default y if SOC_SERIES_NRF51X + help + Use low latency non-negotiating event pre-emption. This reduces + Radio ISR latencies by the controller event scheduling framework. + Consequently, this reduces on-air radio utilization due to redundant + radio state switches. + +endif # BT_LL_SW_SPLIT + config BT_CTLR_RADIO_ENABLE_FAST bool "Use tTXEN/RXEN,FAST ramp-up" depends on SOC_COMPATIBLE_NRF52X @@ -612,6 +687,39 @@ config BT_CTLR_PA_LNA_GPIOTE_CHAN help Select the nRF5 GPIOTE channel to use for PA/LNA GPIO feature. +if BT_LL_SW_SPLIT +config BT_TMP + prompt "Temporary Role" + depends on BT_SHELL + bool + default y + help + Temporary role to manual test ULL/LLL split architecture. + +if BT_TMP +config BT_TMP_MAX + prompt "Temporary Role Max. Instances" + int + default 3 + help + Maximum supported Temporary role instances. + +config BT_TMP_TX_SIZE_MAX + prompt "Temporary Role Max. Tx buffer size" + int + default 10 + help + Temporary role's maximum transmit buffer size in bytes. 
+ +config BT_TMP_TX_COUNT_MAX + prompt "Temporary Role Max. Tx buffers" + int + default 1 + help + Temporary role's maximum transmit buffer count. +endif # BT_TMP +endif # BT_LL_SW_SPLIT + comment "BLE Controller debug configuration" config BT_CTLR_ASSERT_HANDLER diff --git a/subsys/bluetooth/controller/ll_sw/ll_addr.c b/subsys/bluetooth/controller/ll_sw/ll_addr.c index 6d33561364a..248016f8999 100644 --- a/subsys/bluetooth/controller/ll_sw/ll_addr.c +++ b/subsys/bluetooth/controller/ll_sw/ll_addr.c @@ -20,6 +20,13 @@ #if defined(CONFIG_BT_LL_SW) #include #include "ctrl.h" +#define ull_adv_is_enabled ll_adv_is_enabled +#define ull_scan_is_enabled ll_scan_is_enabled +#elif defined(CONFIG_BT_LL_SW_SPLIT) +#include "lll_scan.h" +#include "ull_scan_types.h" +#include "ull_adv_internal.h" +#include "ull_scan_internal.h" #endif static u8_t pub_addr[BDADDR_SIZE]; @@ -49,12 +56,12 @@ u8_t *ll_addr_get(u8_t addr_type, u8_t *bdaddr) u32_t ll_addr_set(u8_t addr_type, u8_t const *const bdaddr) { if (IS_ENABLED(CONFIG_BT_BROADCASTER) && - ll_adv_is_enabled(0)) { + ull_adv_is_enabled(0)) { return BT_HCI_ERR_CMD_DISALLOWED; } if (IS_ENABLED(CONFIG_BT_OBSERVER) && - (ll_scan_is_enabled(0) & (BIT(1) | BIT(2)))) { + (ull_scan_is_enabled(0) & (BIT(1) | BIT(2)))) { return BT_HCI_ERR_CMD_DISALLOWED; } diff --git a/subsys/bluetooth/controller/ll_sw/lll.h b/subsys/bluetooth/controller/ll_sw/lll.h index 0bdfb098d87..76868bf1c95 100644 --- a/subsys/bluetooth/controller/ll_sw/lll.h +++ b/subsys/bluetooth/controller/ll_sw/lll.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Nordic Semiconductor ASA + * Copyright (c) 2018-2019 Nordic Semiconductor ASA * * SPDX-License-Identifier: Apache-2.0 */ diff --git a/subsys/bluetooth/controller/ll_sw/lll_chan.c b/subsys/bluetooth/controller/ll_sw/lll_chan.c new file mode 100644 index 00000000000..fcb9617c74c --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/lll_chan.c @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * 
SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include "hal/ccm.h" +#include "hal/radio.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_chan +#include "common/log.h" +#include +#include "hal/debug.h" + +#if defined(CONFIG_BT_CONN) +static u8_t chan_sel_remap(u8_t *chan_map, u8_t chan_index); +#if defined(CONFIG_BT_CTLR_CHAN_SEL_2) +static u16_t chan_prn(u16_t counter, u16_t chan_id); +#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */ +#endif /* CONFIG_BT_CONN */ + +void lll_chan_set(u32_t chan) +{ + switch (chan) { + case 37: + radio_freq_chan_set(2); + break; + + case 38: + radio_freq_chan_set(26); + break; + + case 39: + radio_freq_chan_set(80); + break; + + default: + if (chan < 11) { + radio_freq_chan_set(4 + (2 * chan)); + } else if (chan < 40) { + radio_freq_chan_set(28 + (2 * (chan - 11))); + } else { + LL_ASSERT(0); + } + break; + } + + radio_whiten_iv_set(chan); +} + +#if defined(CONFIG_BT_CONN) +u8_t lll_chan_sel_1(u8_t *chan_use, u8_t hop, u16_t latency, u8_t *chan_map, + u8_t chan_count) +{ + u8_t chan_next; + + chan_next = ((*chan_use) + (hop * (1 + latency))) % 37; + *chan_use = chan_next; + + if ((chan_map[chan_next >> 3] & (1 << (chan_next % 8))) == 0) { + u8_t chan_index; + + chan_index = chan_next % chan_count; + chan_next = chan_sel_remap(chan_map, chan_index); + + } else { + /* channel can be used, return it */ + } + + return chan_next; +} + +#if defined(CONFIG_BT_CTLR_CHAN_SEL_2) +u8_t lll_chan_sel_2(u16_t counter, u16_t chan_id, u8_t *chan_map, + u8_t chan_count) +{ + u8_t chan_next; + u16_t prn_e; + + prn_e = chan_prn(counter, chan_id); + chan_next = prn_e % 37; + + if ((chan_map[chan_next >> 3] & (1 << (chan_next % 8))) == 0) { + u8_t chan_index; + + chan_index = ((u32_t)chan_count * prn_e) >> 16; + chan_next = chan_sel_remap(chan_map, chan_index); + + } else { + /* channel can be used, return it */ + } + + return chan_next; +} +#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */ + +static u8_t chan_sel_remap(u8_t *chan_map, u8_t chan_index) +{ + 
u8_t chan_next; + u8_t byte_count; + + chan_next = 0; + byte_count = 5; + while (byte_count--) { + u8_t bite; + u8_t bit_count; + + bite = *chan_map; + bit_count = 8; + while (bit_count--) { + if (bite & 0x01) { + if (chan_index == 0) { + break; + } + chan_index--; + } + chan_next++; + bite >>= 1; + } + + if (bit_count < 8) { + break; + } + + chan_map++; + } + + return chan_next; +} + +#if defined(CONFIG_BT_CTLR_CHAN_SEL_2) +#if defined(RADIO_UNIT_TEST) +void lll_chan_sel_2_ut(void) +{ + u8_t chan_map_1[] = {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}; + u8_t chan_map_2[] = {0x00, 0x06, 0xE0, 0x00, 0x1E}; + u8_t m; + + m = chan_sel_2(1, 0x305F, chan_map_1, 37); + LL_ASSERT(m == 20); + + m = chan_sel_2(2, 0x305F, chan_map_1, 37); + LL_ASSERT(m == 6); + + m = chan_sel_2(3, 0x305F, chan_map_1, 37); + LL_ASSERT(m == 21); + + m = chan_sel_2(6, 0x305F, chan_map_2, 9); + LL_ASSERT(m == 23); + + m = chan_sel_2(7, 0x305F, chan_map_2, 9); + LL_ASSERT(m == 9); + + m = chan_sel_2(8, 0x305F, chan_map_2, 9); + LL_ASSERT(m == 34); +} +#endif /* RADIO_UNIT_TEST */ + +/* Attribution: + * http://graphics.stanford.edu/%7Eseander/bithacks.html#ReverseByteWith32Bits + */ +static u8_t chan_rev_8(u8_t b) +{ + b = (((u32_t)b * 0x0802LU & 0x22110LU) | + ((u32_t)b * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; + + return b; +} + +static u16_t chan_perm(u16_t i) +{ + return (chan_rev_8((i >> 8) & 0xFF) << 8) | chan_rev_8(i & 0xFF); +} + +static u16_t chan_mam(u16_t a, u16_t b) +{ + return ((u32_t)a * 17 + b) & 0xFFFF; +} + +static u16_t chan_prn(u16_t counter, u16_t chan_id) +{ + u8_t iterate; + u16_t prn_e; + + prn_e = counter ^ chan_id; + + for (iterate = 0; iterate < 3; iterate++) { + prn_e = chan_perm(prn_e); + prn_e = chan_mam(prn_e, chan_id); + } + + prn_e ^= chan_id; + + return prn_e; +} +#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */ +#endif /* CONFIG_BT_CONN */ diff --git a/subsys/bluetooth/controller/ll_sw/lll_chan.h b/subsys/bluetooth/controller/ll_sw/lll_chan.h new file mode 100644 index 
00000000000..d60a657c57e --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/lll_chan.h @@ -0,0 +1,11 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +void lll_chan_set(u32_t chan); +u8_t lll_chan_sel_1(u8_t *chan_use, u8_t hop, u16_t latency, u8_t *chan_map, + u8_t chan_count); +u8_t lll_chan_sel_2(u16_t counter, u16_t chan_id, u8_t *chan_map, + u8_t chan_count); diff --git a/subsys/bluetooth/controller/ll_sw/lll_conn.h b/subsys/bluetooth/controller/ll_sw/lll_conn.h index 4bc6a88c43c..afa21b1b54c 100644 --- a/subsys/bluetooth/controller/ll_sw/lll_conn.h +++ b/subsys/bluetooth/controller/ll_sw/lll_conn.h @@ -1,9 +1,21 @@ /* - * Copyright (c) 2018 Nordic Semiconductor ASA + * Copyright (c) 2018-2019 Nordic Semiconductor ASA * * SPDX-License-Identifier: Apache-2.0 */ +#define LLL_CONN_RSSI_SAMPLE_COUNT 10 +#define LLL_CONN_RSSI_THRESHOLD 4 + +#define LLL_CONN_MIC_NONE 0 +#define LLL_CONN_MIC_PASS 1 +#define LLL_CONN_MIC_FAIL 2 + +struct lll_tx { + u16_t handle; + void *node; +}; + struct node_tx { union { void *next; @@ -12,3 +24,141 @@ struct node_tx { u8_t pdu[]; }; + +enum llcp { + LLCP_NONE, + LLCP_CONN_UPD, + LLCP_CHAN_MAP, + +#if defined(CONFIG_BT_CTLR_LE_ENC) + LLCP_ENCRYPTION, +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + LLCP_FEATURE_EXCHANGE, + LLCP_VERSION_EXCHANGE, + /* LLCP_TERMINATE, */ + LLCP_CONNECTION_PARAM_REQ, + +#if defined(CONFIG_BT_CTLR_LE_PING) + LLCP_PING, +#endif /* CONFIG_BT_CTLR_LE_PING */ + +#if defined(CONFIG_BT_CTLR_PHY) + LLCP_PHY_UPD, +#endif /* CONFIG_BT_CTLR_PHY */ +}; + +struct lll_conn { + struct lll_hdr hdr; + + u8_t access_addr[4]; + u8_t crc_init[3]; + + u16_t handle; + u16_t interval; + u16_t latency; + + /* FIXME: BEGIN: Move to ULL? 
*/ + u16_t latency_prepare; + u16_t latency_event; + + u16_t event_counter; + u8_t data_chan_map[5]; + u8_t data_chan_count:6; + u8_t data_chan_sel:1; + u8_t role:1; + + union { + struct { + u8_t data_chan_hop; + u8_t data_chan_use; + }; + + u16_t data_chan_id; + }; + + union { + struct { + u8_t terminate_ack:1; + } master; + + struct { + u8_t latency_enabled:1; + u8_t latency_cancel:1; + u8_t sca:3; + u32_t window_widening_periodic_us; + u32_t window_widening_max_us; + u32_t window_widening_prepare_us; + u32_t window_widening_event_us; + u32_t window_size_prepare_us; + u32_t window_size_event_us; + } slave; + }; + /* FIXME: END: Move to ULL? */ + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) + u16_t default_tx_octets; + u16_t max_tx_octets; + u16_t max_rx_octets; + +#if defined(CONFIG_BT_CTLR_PHY) + u16_t default_tx_time; + u16_t max_tx_time; + u16_t max_rx_time; +#endif /* CONFIG_BT_CTLR_PHY */ +#endif /* CONFIG_BT_CTLR_DATA_LENGTH */ + +#if defined(CONFIG_BT_CTLR_PHY) + u8_t phy_tx:3; + u8_t phy_flags:1; + u8_t phy_tx_time:3; + u8_t phy_rx:3; +#endif /* CONFIG_BT_CTLR_PHY */ + + MEMQ_DECLARE(tx); + memq_link_t link_tx; + memq_link_t *link_tx_free; + u8_t packet_tx_head_len; + u8_t packet_tx_head_offset; + + u8_t sn:1; + u8_t nesn:1; + u8_t empty:1; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + u8_t enc_rx:1; + u8_t enc_tx:1; + + struct ccm ccm_rx; + struct ccm ccm_tx; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +#if defined(CONFIG_BT_CTLR_CONN_RSSI) + u8_t rssi_latest; + u8_t rssi_reported; + u8_t rssi_sample_count; +#endif /* CONFIG_BT_CTLR_CONN_RSSI */ +}; + +int lll_conn_init(void); +int lll_conn_reset(void); +u8_t lll_conn_sca_local_get(void); +u32_t lll_conn_ppm_local_get(void); +u32_t lll_conn_ppm_get(u8_t sca); +void lll_conn_prepare_reset(void); +int lll_conn_is_abort_cb(void *next, int prio, void *curr, + lll_prepare_cb_t *resume_cb, int *resume_prio); +void lll_conn_abort_cb(struct lll_prepare_param *prepare_param, void *param); +void lll_conn_isr_rx(void *param); 
+void lll_conn_isr_tx(void *param); +void lll_conn_isr_abort(void *param); +void lll_conn_rx_pkt_set(struct lll_conn *lll); +void lll_conn_tx_pkt_set(struct lll_conn *lll, struct pdu_data *pdu_data_tx); +void lll_conn_pdu_tx_prep(struct lll_conn *lll, struct pdu_data **pdu_data_tx); +u8_t lll_conn_ack_last_idx_get(void); +memq_link_t *lll_conn_ack_peek(u8_t *ack_last, u16_t *handle, + struct node_tx **node_tx); +memq_link_t *lll_conn_ack_by_last_peek(u8_t last, u16_t *handle, + struct node_tx **node_tx); +void *lll_conn_ack_dequeue(void); +void lll_conn_tx_flush(void *param); diff --git a/subsys/bluetooth/controller/ll_sw/lll_filter.h b/subsys/bluetooth/controller/ll_sw/lll_filter.h new file mode 100644 index 00000000000..a3a1ec359d3 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/lll_filter.h @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#define WL_SIZE 8 +#define FILTER_IDX_NONE 0xFF + +struct ll_filter { + u8_t enable_bitmask; + u8_t addr_type_bitmask; + u8_t bdaddr[WL_SIZE][BDADDR_SIZE]; +}; + +struct ll_filter *ctrl_filter_get(bool whitelist); +void ll_adv_scan_state_cb(u8_t bm); diff --git a/subsys/bluetooth/controller/ll_sw/nordic/hal/nrf5/mayfly.c b/subsys/bluetooth/controller/ll_sw/nordic/hal/nrf5/mayfly.c index bb53f81cb59..aea9dbc17a2 100644 --- a/subsys/bluetooth/controller/ll_sw/nordic/hal/nrf5/mayfly.c +++ b/subsys/bluetooth/controller/ll_sw/nordic/hal/nrf5/mayfly.c @@ -18,6 +18,11 @@ #if defined(CONFIG_BT_LL_SW) #define MAYFLY_CALL_ID_WORKER MAYFLY_CALL_ID_0 #define MAYFLY_CALL_ID_JOB MAYFLY_CALL_ID_1 +#elif defined(CONFIG_BT_LL_SW_SPLIT) +#include "ll_sw/lll.h" +#define MAYFLY_CALL_ID_LLL TICKER_USER_ID_LLL +#define MAYFLY_CALL_ID_WORKER TICKER_USER_ID_ULL_HIGH +#define MAYFLY_CALL_ID_JOB TICKER_USER_ID_ULL_LOW #else #error Unknown LL variant. 
#endif @@ -40,6 +45,11 @@ u32_t mayfly_is_enabled(u8_t caller_id, u8_t callee_id) (void)caller_id; switch (callee_id) { +#if defined(CONFIG_BT_LL_SW_SPLIT) + case MAYFLY_CALL_ID_LLL: + return irq_is_enabled(SWI4_IRQn); +#endif /* CONFIG_BT_LL_SW_SPLIT */ + case MAYFLY_CALL_ID_WORKER: return irq_is_enabled(RTC0_IRQn); @@ -64,6 +74,25 @@ u32_t mayfly_prio_is_equal(u8_t caller_id, u8_t callee_id) ((caller_id == MAYFLY_CALL_ID_JOB) && (callee_id == MAYFLY_CALL_ID_WORKER)) || #endif +#elif defined(CONFIG_BT_LL_SW_SPLIT) +#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_HIGH_PRIO) + ((caller_id == MAYFLY_CALL_ID_LLL) && + (callee_id == MAYFLY_CALL_ID_WORKER)) || + ((caller_id == MAYFLY_CALL_ID_WORKER) && + (callee_id == MAYFLY_CALL_ID_LLL)) || +#endif +#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) + ((caller_id == MAYFLY_CALL_ID_LLL) && + (callee_id == MAYFLY_CALL_ID_JOB)) || + ((caller_id == MAYFLY_CALL_ID_JOB) && + (callee_id == MAYFLY_CALL_ID_LLL)) || +#endif +#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) + ((caller_id == MAYFLY_CALL_ID_WORKER) && + (callee_id == MAYFLY_CALL_ID_JOB)) || + ((caller_id == MAYFLY_CALL_ID_JOB) && + (callee_id == MAYFLY_CALL_ID_WORKER)) || +#endif #endif 0; } @@ -73,6 +102,12 @@ void mayfly_pend(u8_t caller_id, u8_t callee_id) (void)caller_id; switch (callee_id) { +#if defined(CONFIG_BT_LL_SW_SPLIT) + case MAYFLY_CALL_ID_LLL: + NVIC_SetPendingIRQ(SWI4_IRQn); + break; +#endif /* CONFIG_BT_LL_SW_SPLIT */ + case MAYFLY_CALL_ID_WORKER: NVIC_SetPendingIRQ(RTC0_IRQn); break; diff --git a/subsys/bluetooth/controller/ll_sw/nordic/hal/nrf5/ticker.c b/subsys/bluetooth/controller/ll_sw/nordic/hal/nrf5/ticker.c index eaafbe4388b..09350620fc8 100644 --- a/subsys/bluetooth/controller/ll_sw/nordic/hal/nrf5/ticker.c +++ b/subsys/bluetooth/controller/ll_sw/nordic/hal/nrf5/ticker.c @@ -31,6 +31,19 @@ static u8_t const caller_id_lut[] = { TICKER_CALL_ID_NONE, TICKER_CALL_ID_PROGRAM }; +#elif defined(CONFIG_BT_LL_SW_SPLIT) 
+#include "ll_sw/lll.h" +#define TICKER_MAYFLY_CALL_ID_ISR TICKER_USER_ID_LLL +#define TICKER_MAYFLY_CALL_ID_TRIGGER TICKER_USER_ID_ULL_HIGH +#define TICKER_MAYFLY_CALL_ID_WORKER TICKER_USER_ID_ULL_HIGH +#define TICKER_MAYFLY_CALL_ID_JOB TICKER_USER_ID_ULL_LOW +#define TICKER_MAYFLY_CALL_ID_PROGRAM TICKER_USER_ID_THREAD +static u8_t const caller_id_lut[] = { + TICKER_CALL_ID_ISR, + TICKER_CALL_ID_WORKER, + TICKER_CALL_ID_JOB, + TICKER_CALL_ID_PROGRAM +}; #else #error Unknown LL variant. #endif @@ -55,6 +68,32 @@ void hal_ticker_instance0_sched(u8_t caller_id, u8_t callee_id, u8_t chain, * schedule. */ switch (caller_id) { +#if defined(CONFIG_BT_LL_SW_SPLIT) + case TICKER_CALL_ID_ISR: + switch (callee_id) { + case TICKER_CALL_ID_JOB: + { + static memq_link_t link; + static struct mayfly m = {0, 0, &link, NULL, + ticker_job}; + + m.param = instance; + + /* TODO: scheduler lock, if preemptive threads used */ + mayfly_enqueue(TICKER_MAYFLY_CALL_ID_ISR, + TICKER_MAYFLY_CALL_ID_JOB, + chain, + &m); + } + break; + + default: + LL_ASSERT(0); + break; + } + break; +#endif /* CONFIG_BT_LL_SW_SPLIT */ + case TICKER_CALL_ID_TRIGGER: switch (callee_id) { case TICKER_CALL_ID_WORKER: diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll.c new file mode 100644 index 00000000000..12f0e69e07c --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll.c @@ -0,0 +1,552 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include + +#include "hal/ccm.h" +#include "hal/radio.h" +#include "hal/ticker.h" + +#include "util/mem.h" +#include "util/memq.h" + +#include "util/mayfly.h" +#include "ticker/ticker.h" + +#include "lll.h" +#include "lll_internal.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll +#include "common/log.h" +#include +#include "hal/debug.h" + +static struct { + struct { + void *param; + 
lll_is_abort_cb_t is_abort_cb; + lll_abort_cb_t abort_cb; + } curr; +} event; + +static struct { + struct device *clk_hf; +} lll; + +static int _init_reset(void); +static int prepare(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb, + lll_prepare_cb_t prepare_cb, int prio, + struct lll_prepare_param *prepare_param, u8_t is_resume); +static int resume_enqueue(lll_prepare_cb_t resume_cb, int resume_prio); + +#if !defined(CONFIG_BT_CTLR_LOW_LAT) +static void _preempt_ticker_cb(u32_t ticks_at_expire, u32_t remainder, + u16_t lazy, void *param); +static void _preempt(void *param); +#else /* CONFIG_BT_CTLR_LOW_LAT */ +#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) +static void ticker_op_job_disable(u32_t status, void *op_context); +#endif +#endif /* CONFIG_BT_CTLR_LOW_LAT */ + +ISR_DIRECT_DECLARE(radio_nrf5_isr) +{ + DEBUG_RADIO_ISR(1); + + isr_radio(); + + ISR_DIRECT_PM(); + + DEBUG_RADIO_ISR(0); + return 1; +} + +static void rtc0_nrf5_isr(void *arg) +{ + DEBUG_TICKER_ISR(1); + + /* On compare0 run ticker worker instance0 */ + if (NRF_RTC0->EVENTS_COMPARE[0]) { + NRF_RTC0->EVENTS_COMPARE[0] = 0; + + ticker_trigger(0); + } + + mayfly_run(TICKER_USER_ID_ULL_HIGH); + + DEBUG_TICKER_ISR(0); +} + +static void swi4_nrf5_isr(void *arg) +{ + DEBUG_RADIO_ISR(1); + + mayfly_run(TICKER_USER_ID_LLL); + + DEBUG_RADIO_ISR(0); +} + +static void swi5_nrf5_isr(void *arg) +{ + DEBUG_TICKER_JOB(1); + + mayfly_run(TICKER_USER_ID_ULL_LOW); + + DEBUG_TICKER_JOB(0); +} + +int lll_init(void) +{ + struct device *clk_k32; + int err; + + /* Initialise LLL internals */ + event.curr.abort_cb = NULL; + + /* Initialize LF CLK */ + clk_k32 = device_get_binding(CONFIG_CLOCK_CONTROL_NRF_K32SRC_DRV_NAME); + if (!clk_k32) { + return -ENODEV; + } + + clock_control_on(clk_k32, (void *)CLOCK_CONTROL_NRF_K32SRC); + + /* Initialize HF CLK */ + lll.clk_hf = + device_get_binding(CONFIG_CLOCK_CONTROL_NRF_M16SRC_DRV_NAME); + if (!lll.clk_hf) { + return -ENODEV; + } + + err = _init_reset(); + if 
(err) { + return err; + } + + /* Connect ISRs */ + IRQ_DIRECT_CONNECT(NRF5_IRQ_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO, + radio_nrf5_isr, 0); + IRQ_CONNECT(NRF5_IRQ_SWI4_IRQn, CONFIG_BT_CTLR_LLL_PRIO, + swi4_nrf5_isr, NULL, 0); + IRQ_CONNECT(NRF5_IRQ_RTC0_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO, + rtc0_nrf5_isr, NULL, 0); + IRQ_CONNECT(NRF5_IRQ_SWI5_IRQn, CONFIG_BT_CTLR_ULL_LOW_PRIO, + swi5_nrf5_isr, NULL, 0); + + /* Enable IRQs */ + irq_enable(NRF5_IRQ_RADIO_IRQn); + irq_enable(NRF5_IRQ_SWI4_IRQn); + irq_enable(NRF5_IRQ_RTC0_IRQn); + irq_enable(NRF5_IRQ_SWI5_IRQn); + + return 0; +} + +int lll_reset(void) +{ + int err; + + err = _init_reset(); + if (err) { + return err; + } + + return 0; +} + +int lll_prepare(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb, + lll_prepare_cb_t prepare_cb, int prio, + struct lll_prepare_param *prepare_param) +{ + return prepare(is_abort_cb, abort_cb, prepare_cb, prio, prepare_param, + 0); +} + +void lll_resume(void *param) +{ + struct lll_event *next = param; + int ret; + + if (event.curr.abort_cb) { + ret = prepare(next->is_abort_cb, next->abort_cb, + next->prepare_cb, next->prio, + &next->prepare_param, next->is_resume); + LL_ASSERT(!ret || ret == -EINPROGRESS); + + return; + } + + event.curr.is_abort_cb = next->is_abort_cb; + event.curr.abort_cb = next->abort_cb; + event.curr.param = next->prepare_param.param; + + ret = next->prepare_cb(&next->prepare_param); + LL_ASSERT(!ret); +} + +void lll_disable(void *param) +{ + if (!param || param == event.curr.param) { + if (event.curr.abort_cb && event.curr.param) { + event.curr.abort_cb(NULL, event.curr.param); + } else { + LL_ASSERT(!param); + } + } + { + struct lll_event *next; + u8_t idx = UINT8_MAX; + + next = ull_prepare_dequeue_iter(&idx); + while (next) { + if (!next->is_aborted && + param == next->prepare_param.param) { + next->is_aborted = 1; + next->abort_cb(&next->prepare_param, + next->prepare_param.param); + } + + next = ull_prepare_dequeue_iter(&idx); + } + } +} + +int 
lll_prepare_done(void *param) +{ +#if defined(CONFIG_BT_CTLR_LOW_LAT) && \ + (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) + u32_t ret; + + /* Ticker Job Silence */ + ret = ticker_job_idle_get(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_LLL, + ticker_op_job_disable, NULL); + + return ((ret == TICKER_STATUS_SUCCESS) || (ret == TICKER_STATUS_BUSY)) ? + 0 : -EFAULT; +#else + return 0; +#endif /* CONFIG_BT_CTLR_LOW_LAT */ +} + +int lll_done(void *param) +{ + struct lll_event *next = ull_prepare_dequeue_get(); + struct ull_hdr *ull = NULL; + int ret = 0; + + /* Assert if param supplied without a pending prepare to cancel. */ + LL_ASSERT(!param || next); + + /* check if current LLL event is done */ + if (!param) { + /* Reset current event instance */ + LL_ASSERT(event.curr.abort_cb); + event.curr.abort_cb = NULL; + + param = event.curr.param; + event.curr.param = NULL; + + if (param) { + ull = HDR_ULL(((struct lll_hdr *)param)->parent); + } + +#if defined(CONFIG_BT_CTLR_LOW_LAT) && \ + (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) + mayfly_enable(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_LOW, 1); +#endif /* CONFIG_BT_CTLR_LOW_LAT */ + + DEBUG_RADIO_CLOSE(0); + } else { + ull = HDR_ULL(((struct lll_hdr *)param)->parent); + } + + /* Let ULL know about LLL event done */ + ull_event_done(ull); + + return ret; +} + +bool lll_is_done(void *param) +{ + /* FIXME: use param to check */ + return !event.curr.abort_cb; +} + +int lll_clk_on(void) +{ + int err; + + /* turn on radio clock in non-blocking mode. */ + err = clock_control_on(lll.clk_hf, NULL); + if (!err || err == -EINPROGRESS) { + DEBUG_RADIO_XTAL(1); + } + + return err; +} + +int lll_clk_on_wait(void) +{ + int err; + + /* turn on radio clock in blocking mode. */ + err = clock_control_on(lll.clk_hf, (void *)1); + if (!err || err == -EINPROGRESS) { + DEBUG_RADIO_XTAL(1); + } + + return err; +} + +int lll_clk_off(void) +{ + int err; + + /* turn off radio clock in non-blocking mode. 
*/ + err = clock_control_off(lll.clk_hf, NULL); + if (!err) { + DEBUG_RADIO_XTAL(0); + } else if (err == -EBUSY) { + DEBUG_RADIO_XTAL(1); + } + + return err; +} + +u32_t lll_evt_offset_get(struct evt_hdr *evt) +{ + if (0) { +#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) + } else if (evt->ticks_xtal_to_start & XON_BITMASK) { + return max(evt->ticks_active_to_start, + evt->ticks_preempt_to_start); +#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */ + } else { + return max(evt->ticks_active_to_start, + evt->ticks_xtal_to_start); + } +} + +u32_t lll_preempt_calc(struct evt_hdr *evt, u8_t ticker_id, + u32_t ticks_at_event) +{ + /* TODO: */ + return 0; +} + +static int _init_reset(void) +{ + return 0; +} + +static int prepare(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb, + lll_prepare_cb_t prepare_cb, int prio, + struct lll_prepare_param *prepare_param, u8_t is_resume) +{ + struct lll_event *p; + u8_t idx = UINT8_MAX; + + p = ull_prepare_dequeue_iter(&idx); + while (p && p->is_aborted) { + p = ull_prepare_dequeue_iter(&idx); + } + + if (event.curr.abort_cb || p) { +#if !defined(CONFIG_BT_CTLR_LOW_LAT) + u32_t preempt_anchor; + struct evt_hdr *evt; + u32_t preempt_to; +#else /* CONFIG_BT_CTLR_LOW_LAT */ + lll_prepare_cb_t resume_cb; + struct lll_event *next; + int resume_prio; +#endif /* CONFIG_BT_CTLR_LOW_LAT */ + int ret; + +#if defined(CONFIG_BT_CTLR_LOW_LAT) + /* early abort */ + if (event.curr.param) { + event.curr.abort_cb(NULL, event.curr.param); + } +#endif /* CONFIG_BT_CTLR_LOW_LAT */ + + /* Store the next prepare for deferred call */ + ret = ull_prepare_enqueue(is_abort_cb, abort_cb, prepare_param, + prepare_cb, prio, is_resume); + LL_ASSERT(!ret); + + if (is_resume) { + return -EINPROGRESS; + } + +#if !defined(CONFIG_BT_CTLR_LOW_LAT) + /* Calc the preempt timeout */ + evt = HDR_LLL2EVT(prepare_param->param); + preempt_anchor = prepare_param->ticks_at_expire; + preempt_to = max(evt->ticks_active_to_start, + evt->ticks_xtal_to_start) - + evt->ticks_preempt_to_start; 
+ + /* Setup pre empt timeout */ + ret = ticker_start(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_LLL, + TICKER_ID_LLL_PREEMPT, + preempt_anchor, + preempt_to, + TICKER_NULL_PERIOD, + TICKER_NULL_REMAINDER, + TICKER_NULL_LAZY, + TICKER_NULL_SLOT, + _preempt_ticker_cb, NULL, + NULL, NULL); + LL_ASSERT((ret == TICKER_STATUS_SUCCESS) || + (ret == TICKER_STATUS_FAILURE) || + (ret == TICKER_STATUS_BUSY)); + +#else /* CONFIG_BT_CTLR_LOW_LAT */ + next = NULL; + while (p) { + if (!p->is_aborted) { + if (event.curr.param == + p->prepare_param.param) { + p->is_aborted = 1; + p->abort_cb(&p->prepare_param, + p->prepare_param.param); + } else { + next = p; + } + } + + p = ull_prepare_dequeue_iter(&idx); + } + + if (next) { + /* check if resume requested by curr */ + ret = event.curr.is_abort_cb(NULL, 0, event.curr.param, + &resume_cb, &resume_prio); + LL_ASSERT(ret); + + if (ret == -EAGAIN) { + ret = resume_enqueue(resume_cb, resume_prio); + LL_ASSERT(!ret); + } else { + LL_ASSERT(ret == -ECANCELED); + } + } +#endif /* CONFIG_BT_CTLR_LOW_LAT */ + + return -EINPROGRESS; + } + + event.curr.param = prepare_param->param; + event.curr.is_abort_cb = is_abort_cb; + event.curr.abort_cb = abort_cb; + + return prepare_cb(prepare_param); +} + +static int resume_enqueue(lll_prepare_cb_t resume_cb, int resume_prio) +{ + struct lll_prepare_param prepare_param; + + prepare_param.param = event.curr.param; + event.curr.param = NULL; + + return ull_prepare_enqueue(event.curr.is_abort_cb, event.curr.abort_cb, + &prepare_param, resume_cb, resume_prio, 1); +} + +#if !defined(CONFIG_BT_CTLR_LOW_LAT) +static void _preempt_ticker_cb(u32_t ticks_at_expire, u32_t remainder, + u16_t lazy, void *param) +{ + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, _preempt}; + u32_t ret; + + ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, + 0, &_mfy); + LL_ASSERT(!ret); +} + +static void _preempt(void *param) +{ + struct lll_event *next = ull_prepare_dequeue_get(); + 
lll_prepare_cb_t resume_cb; + u8_t idx = UINT8_MAX; + int resume_prio; + int ret; + + next = ull_prepare_dequeue_iter(&idx); + if (!next || !event.curr.abort_cb || !event.curr.param) { + return; + } + + while (next && next->is_resume) { + next = ull_prepare_dequeue_iter(&idx); + } + + if (!next) { + return; + } + + ret = event.curr.is_abort_cb(next->prepare_param.param, next->prio, + event.curr.param, + &resume_cb, &resume_prio); + if (!ret) { + /* Let LLL know about the cancelled prepare */ + next->is_aborted = 1; + next->abort_cb(&next->prepare_param, next->prepare_param.param); + + return; + } + + event.curr.abort_cb(NULL, event.curr.param); + + if (ret == -EAGAIN) { + struct lll_event *iter; + u8_t idx = UINT8_MAX; + + iter = ull_prepare_dequeue_iter(&idx); + while (iter) { + if (!iter->is_aborted && + event.curr.param == iter->prepare_param.param) { + iter->is_aborted = 1; + iter->abort_cb(&iter->prepare_param, + iter->prepare_param.param); + } + + iter = ull_prepare_dequeue_iter(&idx); + } + + ret = resume_enqueue(resume_cb, resume_prio); + LL_ASSERT(!ret); + } else { + LL_ASSERT(ret == -ECANCELED); + } +} +#else /* CONFIG_BT_CTLR_LOW_LAT */ + +#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) +static void ticker_op_job_disable(u32_t status, void *op_context) +{ + ARG_UNUSED(status); + ARG_UNUSED(op_context); + + /* FIXME: */ + if (1 /* _radio.state != STATE_NONE */) { + mayfly_enable(TICKER_USER_ID_ULL_LOW, + TICKER_USER_ID_ULL_LOW, 0); + } +} +#endif + +#endif /* CONFIG_BT_CTLR_LOW_LAT */ diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv.c new file mode 100644 index 00000000000..6fddfe3ac94 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv.c @@ -0,0 +1,846 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include + +#include +#include + +#include "hal/ccm.h" +#include 
"hal/radio.h" +#include "hal/ticker.h" + +#include "util/util.h" +#include "util/memq.h" + +#include "ticker/ticker.h" + +#include "pdu.h" + +#include "lll.h" +#include "lll_vendor.h" +#include "lll_adv.h" +#include "lll_filter.h" +#include "lll_chan.h" + +#include "lll_internal.h" +#include "lll_tim_internal.h" +#include "lll_adv_internal.h" +#include "lll_prof_internal.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_adv +#include "common/log.h" +#include +#include "hal/debug.h" + +static int init_reset(void); +static int prepare_cb(struct lll_prepare_param *prepare_param); +static int is_abort_cb(void *next, int prio, void *curr, + lll_prepare_cb_t *resume_cb, int *resume_prio); +static void abort_cb(struct lll_prepare_param *prepare_param, void *param); +static void isr_tx(void *param); +static void isr_rx(void *param); +static void isr_done(void *param); +static void isr_abort(void *param); +static void isr_cleanup(void *param); +static void isr_race(void *param); +static void chan_prepare(struct lll_adv *lll); +static inline int isr_rx_pdu(struct lll_adv *lll, + u8_t devmatch_ok, u8_t devmatch_id, + u8_t irkmatch_ok, u8_t irkmatch_id, + u8_t rssi_ready); +static inline bool isr_rx_sr_check(struct lll_adv *lll, struct pdu_adv *adv, + struct pdu_adv *sr, u8_t devmatch_ok, + u8_t *rl_idx); +static inline bool isr_rx_sr_adva_check(struct pdu_adv *adv, + struct pdu_adv *sr); +#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) +static inline int isr_rx_sr_report(struct pdu_adv *pdu_adv_rx, + u8_t rssi_ready); +#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */ +static inline bool isr_rx_ci_check(struct lll_adv *lll, struct pdu_adv *adv, + struct pdu_adv *ci, u8_t devmatch_ok, + u8_t *rl_idx); +static inline bool isr_rx_ci_tgta_check(struct pdu_adv *adv, struct pdu_adv *ci, + u8_t rl_idx); +static inline bool isr_rx_ci_adva_check(struct pdu_adv *adv, + struct pdu_adv *ci); + +int lll_adv_init(void) +{ + int err; + + err = init_reset(); + if (err) { + return err; + } + + 
return 0; +} + +int lll_adv_reset(void) +{ + int err; + + err = init_reset(); + if (err) { + return err; + } + + return 0; +} + +void lll_adv_prepare(void *param) +{ + struct lll_prepare_param *p = param; + int err; + + err = lll_clk_on(); + LL_ASSERT(!err || err == -EINPROGRESS); + + err = lll_prepare(is_abort_cb, abort_cb, prepare_cb, 0, p); + LL_ASSERT(!err || err == -EINPROGRESS); +} + +static int init_reset(void) +{ + return 0; +} + +static int prepare_cb(struct lll_prepare_param *prepare_param) +{ + struct lll_adv *lll = prepare_param->param; + u32_t aa = 0x8e89bed6; + u32_t ticks_at_event; + struct evt_hdr *evt; + u32_t remainder_us; + u32_t remainder; + + DEBUG_RADIO_START_A(1); + + /* Check if stopped (on connection establishment race between LLL and + * ULL. + */ + if (lll_is_stop(lll)) { + int err; + + err = lll_clk_off(); + LL_ASSERT(!err || err == -EBUSY); + + lll_done(NULL); + + DEBUG_RADIO_START_A(0); + return 0; + } + + radio_reset(); + /* TODO: other Tx Power settings */ + radio_tx_power_set(0); + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + /* TODO: if coded we use S8? 
*/ + radio_phy_set(lll->phy_p, 1); + radio_pkt_configure(8, PDU_AC_PAYLOAD_SIZE_MAX, (lll->phy_p << 1)); +#else /* !CONFIG_BT_CTLR_ADV_EXT */ + radio_phy_set(0, 0); + radio_pkt_configure(8, PDU_AC_PAYLOAD_SIZE_MAX, 0); +#endif /* !CONFIG_BT_CTLR_ADV_EXT */ + + radio_aa_set((u8_t *)&aa); + radio_crc_configure(((0x5bUL) | ((0x06UL) << 8) | ((0x00UL) << 16)), + 0x555555); + + lll->chan_map_curr = lll->chan_map; + + chan_prepare(lll); + +#if defined(CONFIG_BT_HCI_MESH_EXT) + _radio.mesh_adv_end_us = 0; +#endif /* CONFIG_BT_HCI_MESH_EXT */ + + +#if defined(CONFIG_BT_CTLR_PRIVACY) + if (ctrl_rl_enabled()) { + struct ll_filter *filter = + ctrl_filter_get(!!(_radio.advertiser.filter_policy)); + + radio_filter_configure(filter->enable_bitmask, + filter->addr_type_bitmask, + (u8_t *)filter->bdaddr); + } else +#endif /* CONFIG_BT_CTLR_PRIVACY */ + +#if defined(CONFIG_BT_CTLR_FILTER) + /* Setup Radio Filter */ + if (lll->filter_policy) { + + struct ll_filter *wl = ctrl_filter_get(true); + + radio_filter_configure(wl->enable_bitmask, + wl->addr_type_bitmask, + (u8_t *)wl->bdaddr); + } +#endif /* CONFIG_BT_CTLR_FILTER */ + + ticks_at_event = prepare_param->ticks_at_expire; + evt = HDR_LLL2EVT(lll); + ticks_at_event += lll_evt_offset_get(evt); + ticks_at_event += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US); + + remainder = prepare_param->remainder; + remainder_us = radio_tmr_start(1, ticks_at_event, remainder); + + /* capture end of Tx-ed PDU, used to calculate HCTO. 
*/ + radio_tmr_end_capture(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) + radio_gpio_pa_setup(); + radio_gpio_pa_lna_enable(remainder_us + + radio_tx_ready_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + ARG_UNUSED(remainder_us); +#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + +#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \ + (EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US) + /* check if preempt to start has changed */ + if (lll_preempt_calc(evt, TICKER_ID_ADV_BASE, ticks_at_event)) { + radio_isr_set(isr_abort, lll); + radio_disable(); + } else +#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */ + { + u32_t ret; + + ret = lll_prepare_done(lll); + LL_ASSERT(!ret); + } + + DEBUG_RADIO_START_A(1); + + return 0; +} + +#if defined(CONFIG_BT_PERIPHERAL) +static int resume_prepare_cb(struct lll_prepare_param *p) +{ + struct evt_hdr *evt = HDR_LLL2EVT(p->param); + + p->ticks_at_expire = ticker_ticks_now_get() - lll_evt_offset_get(evt); + p->remainder = 0; + p->lazy = 0; + + return prepare_cb(p); +} +#endif /* CONFIG_BT_PERIPHERAL */ + +static int is_abort_cb(void *next, int prio, void *curr, + lll_prepare_cb_t *resume_cb, int *resume_prio) +{ +#if defined(CONFIG_BT_PERIPHERAL) + struct lll_adv *lll = curr; + struct pdu_adv *pdu; +#endif /* CONFIG_BT_PERIPHERAL */ + + /* TODO: prio check */ + if (next != curr) { + if (0) { +#if defined(CONFIG_BT_PERIPHERAL) + } else if (lll->is_hdcd) { + int err; + + /* wrap back after the pre-empter */ + *resume_cb = resume_prepare_cb; + *resume_prio = 0; /* TODO: */ + + /* Retain HF clk */ + err = lll_clk_on(); + LL_ASSERT(!err || err == -EINPROGRESS); + + return -EAGAIN; +#endif /* CONFIG_BT_PERIPHERAL */ + } else { + return -ECANCELED; + } + } + +#if defined(CONFIG_BT_PERIPHERAL) + pdu = lll_adv_data_curr_get(lll); + if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) { + return 0; + } +#endif /* CONFIG_BT_PERIPHERAL */ + + return -ECANCELED; +} + +static void abort_cb(struct lll_prepare_param 
*prepare_param, void *param) +{ + int err; + + /* NOTE: This is not a prepare being cancelled */ + if (!prepare_param) { + /* Perform event abort here. + * After event has been cleanly aborted, clean up resources + * and dispatch event done. + */ + radio_isr_set(isr_abort, param); + radio_disable(); + return; + } + + /* NOTE: Else clean the top half preparations of the aborted event + * currently in preparation pipeline. + */ + err = lll_clk_off(); + LL_ASSERT(!err || err == -EBUSY); + + lll_done(param); +} + +static void isr_tx(void *param) +{ + u32_t hcto; + + /* TODO: MOVE to a common interface, isr_lll_radio_status? */ + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_latency_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \ + defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_pa_lna_disable(); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */ + /* TODO: MOVE ^^ */ + + radio_isr_set(isr_rx, param); + radio_tmr_tifs_set(TIFS_US); + radio_switch_complete_and_tx(0, 0, 0, 0); + radio_pkt_rx_set(radio_pkt_scratch_get()); + + /* assert if radio packet ptr is not set and radio started rx */ + LL_ASSERT(!radio_is_ready()); + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_cputime_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + +#if defined(CONFIG_BT_CTLR_PRIVACY) + if (ctrl_rl_enabled()) { + u8_t count, *irks = ctrl_irks_get(&count); + + radio_ar_configure(count, irks); + } +#endif /* CONFIG_BT_CTLR_PRIVACY */ + + /* +/- 2us active clock jitter, +1 us hcto compensation */ + hcto = radio_tmr_tifs_base_get() + TIFS_US + 4 + 1; + hcto += radio_rx_chain_delay_get(0, 0); + hcto += addr_us_get(0); + hcto -= radio_tx_chain_delay_get(0, 0); + radio_tmr_hcto_configure(hcto); + + /* capture end of CONNECT_IND PDU, used for calculating first + * slave event. 
+ */ + radio_tmr_end_capture(); + +#if defined(CONFIG_BT_CTLR_SCAN_REQ_RSSI) + radio_rssi_measure(); +#endif /* CONFIG_BT_CTLR_SCAN_REQ_RSSI */ + +#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + /* PA/LNA enable is overwriting packet end used in ISR profiling, + * hence back it up for later use. + */ + lll_prof_radio_end_backup(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + radio_gpio_lna_setup(); + radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - 4 - + radio_tx_chain_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_LNA_OFFSET); +#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */ + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + /* NOTE: as scratch packet is used to receive, it is safe to + * generate profile event using rx nodes. + */ + lll_prof_send(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ +} + +static void isr_rx(void *param) +{ + u8_t trx_done; + u8_t crc_ok; + u8_t devmatch_ok; + u8_t devmatch_id; + u8_t irkmatch_ok; + u8_t irkmatch_id; + u8_t rssi_ready; + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_latency_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + /* Read radio status and events */ + trx_done = radio_is_done(); + if (trx_done) { + crc_ok = radio_crc_is_valid(); + devmatch_ok = radio_filter_has_match(); + devmatch_id = radio_filter_match_get(); + irkmatch_ok = radio_ar_has_match(); + irkmatch_id = radio_ar_match_get(); + rssi_ready = radio_rssi_is_ready(); + } else { + crc_ok = devmatch_ok = irkmatch_ok = rssi_ready = 0; + devmatch_id = irkmatch_id = 0xFF; + } + + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + radio_filter_status_reset(); + radio_ar_status_reset(); + radio_rssi_status_reset(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \ + defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_pa_lna_disable(); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */ + + if (!trx_done) { + goto isr_rx_do_close; + } + + if (crc_ok) { + int err; + + err = 
isr_rx_pdu(param, devmatch_ok, devmatch_id, irkmatch_ok, + irkmatch_id, rssi_ready); + if (!err) { +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_send(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + return; + } + } + +isr_rx_do_close: + radio_isr_set(isr_done, param); + radio_disable(); +} + +static void isr_done(void *param) +{ + struct node_rx_hdr *node_rx; + struct lll_adv *lll = param; + + /* TODO: MOVE to a common interface, isr_lll_radio_status? */ + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + radio_filter_status_reset(); + radio_ar_status_reset(); + radio_rssi_status_reset(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \ + defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_pa_lna_disable(); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */ + /* TODO: MOVE ^^ */ + +#if defined(CONFIG_BT_HCI_MESH_EXT) + if (_radio.advertiser.is_mesh && + !_radio.mesh_adv_end_us) { + _radio.mesh_adv_end_us = radio_tmr_end_get(); + } +#endif /* CONFIG_BT_HCI_MESH_EXT */ + +#if defined(CONFIG_BT_PERIPHERAL) + if (!lll->chan_map_curr && lll->is_hdcd) { + lll->chan_map_curr = lll->chan_map; + } +#endif /* CONFIG_BT_PERIPHERAL */ + + if (lll->chan_map_curr) { + u32_t start_us; + + chan_prepare(lll); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) + start_us = radio_tmr_start_now(1); + + radio_gpio_pa_setup(); + radio_gpio_pa_lna_enable(start_us + + radio_tx_ready_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + ARG_UNUSED(start_us); + + radio_tx_enable(); +#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + + /* capture end of Tx-ed PDU, used to calculate HCTO. 
*/ + radio_tmr_end_capture(); + + return; + } + + radio_filter_disable(); + +#if defined(CONFIG_BT_PERIPHERAL) + if (!lll->is_hdcd) +#endif /* CONFIG_BT_PERIPHERAL */ + { +#if defined(CONFIG_BT_HCI_MESH_EXT) + if (_radio.advertiser.is_mesh) { + u32_t err; + + err = isr_close_adv_mesh(); + if (err) { + return 0; + } + } +#endif /* CONFIG_BT_HCI_MESH_EXT */ + } + +#if defined(CONFIG_BT_CTLR_ADV_INDICATION) + node_rx = ull_pdu_rx_alloc_peek(3); + if (node_rx) { + ull_pdu_rx_alloc(); + + /* TODO: add other info by defining a payload struct */ + node_rx->type = NODE_RX_TYPE_ADV_INDICATION; + + ull_rx_put(node_rx->link, node_rx); + ull_rx_sched(); + } +#else /* !CONFIG_BT_CTLR_ADV_INDICATION */ + ARG_UNUSED(node_rx); +#endif /* !CONFIG_BT_CTLR_ADV_INDICATION */ + + isr_cleanup(param); +} + +static void isr_abort(void *param) +{ + radio_filter_disable(); + + isr_cleanup(param); +} + +static void isr_cleanup(void *param) +{ + int err; + + radio_isr_set(isr_race, param); + radio_tmr_stop(); + + err = lll_clk_off(); + LL_ASSERT(!err || err == -EBUSY); + + lll_done(NULL); +} + +static void isr_race(void *param) +{ + /* NOTE: lll_disable could have a race with ... */ + radio_status_reset(); +} + +static void chan_prepare(struct lll_adv *lll) +{ + struct pdu_adv *pdu; + struct pdu_adv *scan_pdu; + u8_t chan; + u8_t upd = 0; + + pdu = lll_adv_data_latest_get(lll, &upd); + scan_pdu = lll_adv_scan_rsp_latest_get(lll, &upd); +#if defined(CONFIG_BT_CTLR_PRIVACY) + if (upd) { + /* Copy the address from the adv packet we will send into the + * scan response. 
+ */ + memcpy(&scan_pdu->scan_rsp.addr[0], + &pdu->adv_ind.addr[0], BDADDR_SIZE); + } +#else + ARG_UNUSED(scan_pdu); + ARG_UNUSED(upd); +#endif /* !CONFIG_BT_CTLR_PRIVACY */ + + radio_pkt_tx_set(pdu); + + if ((pdu->type != PDU_ADV_TYPE_NONCONN_IND) && + (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) || + (pdu->type != PDU_ADV_TYPE_EXT_IND))) { + radio_isr_set(isr_tx, lll); + radio_tmr_tifs_set(TIFS_US); + radio_switch_complete_and_rx(0); + } else { + radio_isr_set(isr_done, lll); + radio_switch_complete_and_disable(); + } + + chan = find_lsb_set(lll->chan_map_curr); + LL_ASSERT(chan); + + lll->chan_map_curr &= (lll->chan_map_curr - 1); + + lll_chan_set(36 + chan); +} + +static inline int isr_rx_pdu(struct lll_adv *lll, + u8_t devmatch_ok, u8_t devmatch_id, + u8_t irkmatch_ok, u8_t irkmatch_id, + u8_t rssi_ready) +{ + struct pdu_adv *pdu_rx, *pdu_adv; +#if defined(CONFIG_BT_CTLR_PRIVACY) + /* An IRK match implies address resolution enabled */ + u8_t rl_idx = irkmatch_ok ? ctrl_rl_irk_idx(irkmatch_id) : + FILTER_IDX_NONE; +#else + u8_t rl_idx = FILTER_IDX_NONE; +#endif /* CONFIG_BT_CTLR_PRIVACY */ + + pdu_rx = (void *)radio_pkt_scratch_get(); + pdu_adv = lll_adv_data_curr_get(lll); + if ((pdu_rx->type == PDU_ADV_TYPE_SCAN_REQ) && + (pdu_rx->len == sizeof(struct pdu_adv_scan_req)) && + isr_rx_sr_check(lll, pdu_adv, pdu_rx, devmatch_ok, &rl_idx)) { + radio_isr_set(isr_done, lll); + radio_switch_complete_and_disable(); + radio_pkt_tx_set(lll_adv_scan_rsp_curr_get(lll)); + + /* assert if radio packet ptr is not set and radio started tx */ + LL_ASSERT(!radio_is_ready()); + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_cputime_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + +#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) + if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) || + 0 /* TODO: extended adv. 
scan req notification enabled */) { + u32_t err; + + /* Generate the scan request event */ + err = isr_rx_sr_report(pdu_rx, rssi_ready); + if (err) { + /* Scan Response will not be transmitted */ + return err; + } + } +#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */ + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + /* PA/LNA enable is overwriting packet end used in ISR + * profiling, hence back it up for later use. + */ + lll_prof_radio_end_backup(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + radio_gpio_pa_setup(); + radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - + radio_rx_chain_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */ + return 0; + +#if defined(CONFIG_BT_PERIPHERAL) + } else if ((pdu_rx->type == PDU_ADV_TYPE_CONNECT_IND) && + (pdu_rx->len == sizeof(struct pdu_adv_connect_ind)) && + isr_rx_ci_check(lll, pdu_adv, pdu_rx, devmatch_ok, + &rl_idx) && + lll->conn) { + struct node_rx_ftr *ftr; + struct node_rx_pdu *rx; + int ret; + + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + rx = ull_pdu_rx_alloc_peek(4); + } else { + rx = ull_pdu_rx_alloc_peek(3); + } + + if (!rx) { + return -ENOBUFS; + } + + radio_isr_set(isr_abort, lll); + radio_disable(); + + /* assert if radio started tx */ + LL_ASSERT(!radio_is_ready()); + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_cputime_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + /* Stop further LLL radio events */ + ret = lll_stop(lll); + LL_ASSERT(!ret); + + rx = ull_pdu_rx_alloc(); + + rx->hdr.type = NODE_RX_TYPE_CONNECTION; + rx->hdr.handle = 0xffff; + + memcpy(rx->pdu, pdu_rx, (offsetof(struct pdu_adv, connect_ind) + + sizeof(struct pdu_adv_connect_ind))); + + ftr = (void *)((u8_t *)rx->pdu + + (offsetof(struct pdu_adv, connect_ind) + + sizeof(struct pdu_adv_connect_ind))); + + ftr->param = lll; + ftr->ticks_anchor = radio_tmr_start_get(); + ftr->us_radio_end = radio_tmr_end_get() - + radio_tx_chain_delay_get(0, 
0); + ftr->us_radio_rdy = radio_rx_ready_delay_get(0, 0); + + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + ftr->extra = ull_pdu_rx_alloc(); + } + + ull_rx_put(rx->hdr.link, rx); + ull_rx_sched(); + + return 0; +#endif /* CONFIG_BT_PERIPHERAL */ + } + + return -EINVAL; +} + +static inline bool isr_rx_sr_check(struct lll_adv *lll, struct pdu_adv *adv, + struct pdu_adv *sr, u8_t devmatch_ok, + u8_t *rl_idx) +{ +#if defined(CONFIG_BT_CTLR_PRIVACY) + return ((((_radio.advertiser.filter_policy & 0x01) == 0) && + ctrl_rl_addr_allowed(sr->tx_addr, sr->scan_req.scan_addr, + rl_idx)) || + (((_radio.advertiser.filter_policy & 0x01) != 0) && + (devmatch_ok || ctrl_irk_whitelisted(*rl_idx)))) && + isr_rx_sr_adva_check(adv, sr); +#else + return (((lll->filter_policy & 0x01) == 0) || devmatch_ok) && + isr_rx_sr_adva_check(adv, sr); +#endif /* CONFIG_BT_CTLR_PRIVACY */ +} + +static inline bool isr_rx_sr_adva_check(struct pdu_adv *adv, + struct pdu_adv *sr) +{ + return (adv->tx_addr == sr->rx_addr) && + !memcmp(adv->adv_ind.addr, sr->scan_req.adv_addr, BDADDR_SIZE); +} + +#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) +static inline int isr_rx_sr_report(struct pdu_adv *pdu_adv_rx, + u8_t rssi_ready) +{ + struct node_rx_pdu *node_rx; + struct pdu_adv *pdu_adv; + u8_t pdu_len; + + node_rx = ull_pdu_rx_alloc_peek(3); + if (!node_rx) { + return -ENOBUFS; + } + ull_pdu_rx_alloc(); + + /* Prepare the report (scan req) */ + node_rx->hdr.type = NODE_RX_TYPE_SCAN_REQ; + node_rx->hdr.handle = 0xffff; + + /* Make a copy of PDU into Rx node (as the received PDU is in the + * scratch buffer), and save the RSSI value. + */ + pdu_adv = (void *)node_rx->pdu; + pdu_len = offsetof(struct pdu_adv, payload) + pdu_adv_rx->len; + memcpy(pdu_adv, pdu_adv_rx, pdu_len); + ((u8_t *)pdu_adv)[pdu_len] = (rssi_ready) ? 
(radio_rssi_get() & 0x7f) : + 0x7f; + + ull_rx_put(node_rx->hdr.link, node_rx); + ull_rx_sched(); + + return 0; +} +#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */ + +static inline bool isr_rx_ci_check(struct lll_adv *lll, struct pdu_adv *adv, + struct pdu_adv *ci, u8_t devmatch_ok, + u8_t *rl_idx) +{ + /* LL 4.3.2: filter policy shall be ignored for directed adv */ + if (adv->type == PDU_ADV_TYPE_DIRECT_IND) { +#if defined(CONFIG_BT_CTLR_PRIVACY) + return ctrl_rl_addr_allowed(ci->tx_addr, + ci->connect_ind.init_addr, + rl_idx) && +#else + return (1) && +#endif + isr_rx_ci_adva_check(adv, ci) && + isr_rx_ci_tgta_check(adv, ci, *rl_idx); + } + +#if defined(CONFIG_BT_CTLR_PRIVACY) + return ((((_radio.advertiser.filter_policy & 0x02) == 0) && + ctrl_rl_addr_allowed(ci->tx_addr, ci->connect_ind.init_addr, + rl_idx)) || + (((_radio.advertiser.filter_policy & 0x02) != 0) && + (devmatch_ok || ctrl_irk_whitelisted(*rl_idx)))) && + isr_rx_ci_adva_check(adv, ci); +#else + return (((lll->filter_policy & 0x02) == 0) || + (devmatch_ok)) && + isr_rx_ci_adva_check(adv, ci); +#endif /* CONFIG_BT_CTLR_PRIVACY */ +} + +static inline bool isr_rx_ci_tgta_check(struct pdu_adv *adv, struct pdu_adv *ci, + u8_t rl_idx) +{ +#if defined(CONFIG_BT_CTLR_PRIVACY) + if (rl_idx != FILTER_IDX_NONE) { + return rl_idx == _radio.advertiser.rl_idx; + } +#endif /* CONFIG_BT_CTLR_PRIVACY */ + return (adv->rx_addr == ci->tx_addr) && + !memcmp(adv->direct_ind.tgt_addr, ci->connect_ind.init_addr, + BDADDR_SIZE); +} + +static inline bool isr_rx_ci_adva_check(struct pdu_adv *adv, + struct pdu_adv *ci) +{ + return (adv->tx_addr == ci->rx_addr) && + (((adv->type == PDU_ADV_TYPE_DIRECT_IND) && + !memcmp(adv->direct_ind.adv_addr, ci->connect_ind.adv_addr, + BDADDR_SIZE)) || + (!memcmp(adv->adv_ind.addr, ci->connect_ind.adv_addr, + BDADDR_SIZE))); +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv.h new file mode 100644 index 
00000000000..500a2980b32 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +struct lll_adv_pdu { + u8_t first; + u8_t last; + /* TODO: use, + * struct pdu_adv *pdu[DOUBLE_BUFFER_SIZE]; + */ + u8_t pdu[DOUBLE_BUFFER_SIZE][PDU_AC_SIZE_MAX]; +}; + +struct lll_adv { + struct lll_hdr hdr; + +#if defined(CONFIG_BT_PERIPHERAL) + /* NOTE: conn context has to be after lll_hdr */ + struct lll_conn *conn; + u8_t is_hdcd:1; +#endif /* CONFIG_BT_PERIPHERAL */ + + u8_t chan_map:3; + u8_t chan_map_curr:3; + u8_t filter_policy:2; + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + u8_t phy_p:3; +#endif /* !CONFIG_BT_CTLR_ADV_EXT */ + +#if defined(CONFIG_BT_HCI_MESH_EXT) + u8_t is_mesh:1; +#endif /* CONFIG_BT_HCI_MESH_EXT */ + + struct lll_adv_pdu adv_data; + struct lll_adv_pdu scan_rsp; +}; + +int lll_adv_init(void); +int lll_adv_reset(void); + +void lll_adv_prepare(void *param); + +static inline struct pdu_adv *lll_adv_pdu_alloc(struct lll_adv_pdu *pdu, + u8_t *idx) +{ + u8_t last; + + if (pdu->first == pdu->last) { + last = pdu->last + 1; + if (last == DOUBLE_BUFFER_SIZE) { + last = 0; + } + } else { + last = pdu->last; + } + + *idx = last; + + return (void *)pdu->pdu[last]; +} + +static inline void lll_adv_pdu_enqueue(struct lll_adv_pdu *pdu, u8_t idx) +{ + pdu->last = idx; +} + +static inline struct pdu_adv *lll_adv_data_alloc(struct lll_adv *lll, u8_t *idx) +{ + return lll_adv_pdu_alloc(&lll->adv_data, idx); +} + +static inline void lll_adv_data_enqueue(struct lll_adv *lll, u8_t idx) +{ + lll_adv_pdu_enqueue(&lll->adv_data, idx); +} + +static inline struct pdu_adv *lll_adv_data_peek(struct lll_adv *lll) +{ + return (void *)lll->adv_data.pdu[lll->adv_data.last]; +} + +static inline struct pdu_adv *lll_adv_scan_rsp_alloc(struct lll_adv *lll, + u8_t *idx) +{ + return lll_adv_pdu_alloc(&lll->scan_rsp, idx); +} + +static inline void 
lll_adv_scan_rsp_enqueue(struct lll_adv *lll, u8_t idx) +{ + lll_adv_pdu_enqueue(&lll->scan_rsp, idx); +} + +static inline struct pdu_adv *lll_adv_scan_rsp_peek(struct lll_adv *lll) +{ + return (void *)lll->scan_rsp.pdu[lll->scan_rsp.last]; +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv_internal.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv_internal.h new file mode 100644 index 00000000000..386797e215f --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_adv_internal.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +static inline struct pdu_adv *lll_adv_pdu_latest_get(struct lll_adv_pdu *pdu, + u8_t *is_modified) +{ + u8_t first; + + first = pdu->first; + if (first != pdu->last) { + first += 1; + if (first == DOUBLE_BUFFER_SIZE) { + first = 0; + } + pdu->first = first; + *is_modified = 1; + } + + return (void *)pdu->pdu[first]; +} + +static inline struct pdu_adv *lll_adv_data_latest_get(struct lll_adv *lll, + u8_t *is_modified) +{ + return lll_adv_pdu_latest_get(&lll->adv_data, is_modified); +} + +static inline struct pdu_adv *lll_adv_scan_rsp_latest_get(struct lll_adv *lll, + u8_t *is_modified) +{ + return lll_adv_pdu_latest_get(&lll->scan_rsp, is_modified); +} + +static inline struct pdu_adv *lll_adv_data_curr_get(struct lll_adv *lll) +{ + return (void *)lll->adv_data.pdu[lll->adv_data.first]; +} + +static inline struct pdu_adv *lll_adv_scan_rsp_curr_get(struct lll_adv *lll) +{ + return (void *)lll->scan_rsp.pdu[lll->scan_rsp.first]; +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_clock.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_clock.c new file mode 100644 index 00000000000..794223cc16a --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_clock.c @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include 
+#include +#include +#include + +#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_clock +#include "common/log.h" +#include "hal/debug.h" + +#define DRV_NAME CONFIG_CLOCK_CONTROL_NRF_K32SRC_DRV_NAME +#define K32SRC CLOCK_CONTROL_NRF_K32SRC + +static u8_t is_k32src_stable; + +void lll_clock_wait(void) +{ + if (!is_k32src_stable) { + struct device *clk_k32; + + is_k32src_stable = 1; + + clk_k32 = device_get_binding(DRV_NAME); + LL_ASSERT(clk_k32); + + while (clock_control_on(clk_k32, (void *)K32SRC)) { + DEBUG_CPU_SLEEP(1); + k_cpu_idle(); + DEBUG_CPU_SLEEP(0); + } + } +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_clock.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_clock.h new file mode 100644 index 00000000000..079ca9c1a9e --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_clock.h @@ -0,0 +1,7 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +void lll_clock_wait(void); diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_conn.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_conn.c new file mode 100644 index 00000000000..49ac60c2e00 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_conn.c @@ -0,0 +1,913 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include +#include +#include +#include + +#include "util/memq.h" +#include "util/mfifo.h" + +#include "hal/ccm.h" +#include "hal/radio.h" + +#include "pdu.h" + +#include "lll.h" +#include "lll_conn.h" + +#include "lll_internal.h" +#include "lll_tim_internal.h" +#include "lll_prof_internal.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_conn +#include "common/log.h" +#include +#include "hal/debug.h" + +static int init_reset(void); +static void isr_done(void *param); +static void isr_cleanup(void *param); +static void isr_race(void *param); +static int isr_rx_pdu(struct lll_conn *lll, struct 
pdu_data *pdu_data_rx, + struct node_tx **tx_release, u8_t *is_rx_enqueue); +static struct pdu_data *empty_tx_enqueue(struct lll_conn *lll); + +static u16_t const sca_ppm_lut[] = {500, 250, 150, 100, 75, 50, 30, 20}; +static u8_t crc_expire; +static u8_t crc_valid; +static u16_t trx_cnt; + +#if defined(CONFIG_BT_CTLR_LE_ENC) +static u8_t mic_state; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +static MFIFO_DEFINE(conn_ack, sizeof(struct lll_tx), + CONFIG_BT_CTLR_TX_BUFFERS); + +int lll_conn_init(void) +{ + int err; + + err = init_reset(); + if (err) { + return err; + } + + return 0; +} + +int lll_conn_reset(void) +{ + int err; + + MFIFO_INIT(conn_ack); + + err = init_reset(); + if (err) { + return err; + } + + return 0; +} + +u8_t lll_conn_sca_local_get(void) +{ + return CLOCK_CONTROL_NRF_K32SRC_ACCURACY; +} + +u32_t lll_conn_ppm_local_get(void) +{ + return sca_ppm_lut[CLOCK_CONTROL_NRF_K32SRC_ACCURACY]; +} + +u32_t lll_conn_ppm_get(u8_t sca) +{ + return sca_ppm_lut[sca]; +} + +void lll_conn_prepare_reset(void) +{ + trx_cnt = 0; + crc_expire = 0; + crc_valid = 0; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + mic_state = LLL_CONN_MIC_NONE; +#endif /* CONFIG_BT_CTLR_LE_ENC */ +} + +int lll_conn_is_abort_cb(void *next, int prio, void *curr, + lll_prepare_cb_t *resume_cb, int *resume_prio) +{ + return -ECANCELED; +} + +void lll_conn_abort_cb(struct lll_prepare_param *prepare_param, void *param) +{ + int err; + + /* NOTE: This is not a prepare being cancelled */ + if (!prepare_param) { + /* Perform event abort here. + * After event has been cleanly aborted, clean up resources + * and dispatch event done. + */ + radio_isr_set(isr_done, param); + radio_disable(); + return; + } + + /* NOTE: Else clean the top half preparations of the aborted event + * currently in preparation pipeline. 
+ */ + err = lll_clk_off(); + LL_ASSERT(!err || err == -EBUSY); + + lll_done(param); +} + +void lll_conn_isr_rx(void *param) +{ + struct node_tx *tx_release = NULL; + struct lll_conn *lll = param; + struct pdu_data *pdu_data_rx; + struct pdu_data *pdu_data_tx; + struct node_rx_pdu *node_rx; + u8_t is_empty_pdu_tx_retry; + u8_t is_crc_backoff = 0; + u8_t is_rx_enqueue = 0; + u8_t is_ull_rx = 0; + u8_t rssi_ready; + u8_t trx_done; + u8_t is_done; + u8_t crc_ok; + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_latency_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + /* Read radio status and events */ + trx_done = radio_is_done(); + if (trx_done) { + crc_ok = radio_crc_is_valid(); + rssi_ready = radio_rssi_is_ready(); + } else { + crc_ok = rssi_ready = 0; + } + + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + radio_rssi_status_reset(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \ + defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_pa_lna_disable(); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */ + + if (!trx_done) { + radio_isr_set(isr_done, param); + radio_disable(); + + return; + } + + trx_cnt++; + + node_rx = ull_pdu_rx_alloc_peek(1); + LL_ASSERT(node_rx); + + pdu_data_rx = (void *)node_rx->pdu; + + if (crc_ok) { + u32_t err; + + err = isr_rx_pdu(lll, pdu_data_rx, &tx_release, &is_rx_enqueue); + if (err) { + goto lll_conn_isr_rx_exit; + } + + /* Reset CRC expiry counter */ + crc_expire = 0; + + /* CRC valid flag used to detect supervision timeout */ + crc_valid = 1; + } else { + /* Start CRC error countdown, if not already started */ + if (crc_expire == 0) { + crc_expire = 2; + } + + /* CRC error countdown */ + crc_expire--; + is_crc_backoff = (crc_expire == 0); + } + + /* prepare tx packet */ + is_empty_pdu_tx_retry = lll->empty; + lll_conn_pdu_tx_prep(lll, &pdu_data_tx); + + /* Decide on event continuation and hence Radio Shorts to use */ + is_done = is_crc_backoff || ((crc_ok) && 
(pdu_data_rx->md == 0) && + (pdu_data_tx->len == 0)); + + if (is_done) { + radio_isr_set(isr_done, param); + + if (0) { +#if defined(CONFIG_BT_CENTRAL) + /* Event done for master */ + } else if (!lll->role) { + radio_disable(); + + /* assert if radio packet ptr is not set and radio + * started tx. + */ + LL_ASSERT(!radio_is_ready()); + + /* Restore state if last transmitted was empty PDU */ + lll->empty = is_empty_pdu_tx_retry; + + goto lll_conn_isr_rx_exit; +#endif /* CONFIG_BT_CENTRAL */ +#if defined(CONFIG_BT_PERIPHERAL) + /* Event done for slave */ + } else { + radio_switch_complete_and_disable(); +#endif /* CONFIG_BT_PERIPHERAL */ + } + } else { + radio_isr_set(lll_conn_isr_tx, param); + radio_tmr_tifs_set(TIFS_US); + +#if defined(CONFIG_BT_CTLR_PHY) + radio_switch_complete_and_rx(lll->phy_rx); +#else /* !CONFIG_BT_CTLR_PHY */ + radio_switch_complete_and_rx(0); +#endif /* !CONFIG_BT_CTLR_PHY */ + + /* capture end of Tx-ed PDU, used to calculate HCTO. */ + radio_tmr_end_capture(); + } + + /* Fill sn and nesn */ + pdu_data_tx->sn = lll->sn; + pdu_data_tx->nesn = lll->nesn; + + /* setup the radio tx packet buffer */ + lll_conn_tx_pkt_set(lll, pdu_data_tx); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + /* PA enable is overwriting packet end used in ISR profiling, hence + * back it up for later use. 
+ */ + lll_prof_radio_end_backup(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + radio_gpio_pa_setup(); + +#if defined(CONFIG_BT_CTLR_PHY) + radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - + radio_rx_chain_delay_get(lll->phy_rx, 1) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#else /* !CONFIG_BT_CTLR_PHY */ + radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - + radio_rx_chain_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#endif /* !CONFIG_BT_CTLR_PHY */ +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */ + + /* assert if radio packet ptr is not set and radio started tx */ + LL_ASSERT(!radio_is_ready()); + +lll_conn_isr_rx_exit: + /* Save the AA captured for the first Rx in connection event */ + if (!radio_tmr_aa_restore()) { + radio_tmr_aa_save(radio_tmr_aa_get()); + } + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_cputime_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + if (tx_release) { + struct lll_tx *tx; + u8_t idx; + + LL_ASSERT(lll->handle != 0xFFFF); + + idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&tx); + LL_ASSERT(tx); + + tx->handle = lll->handle; + tx->node = tx_release; + + MFIFO_ENQUEUE(conn_ack, idx); + + is_ull_rx = 1; + } + + if (is_rx_enqueue) { + LL_ASSERT(lll->handle != 0xFFFF); + + ull_pdu_rx_alloc(); + + node_rx->hdr.type = NODE_RX_TYPE_DC_PDU; + node_rx->hdr.handle = lll->handle; + + ull_rx_put(node_rx->hdr.link, node_rx); + is_ull_rx = 1; + } + + if (is_ull_rx) { + ull_rx_sched(); + } + +#if defined(CONFIG_BT_CTLR_CONN_RSSI) + /* Collect RSSI for connection */ + if (rssi_ready) { + u8_t rssi = radio_rssi_get(); + + lll->rssi_latest = rssi; + + if (((lll->rssi_reported - rssi) & 0xFF) > + LLL_CONN_RSSI_THRESHOLD) { + if (lll->rssi_sample_count) { + lll->rssi_sample_count--; + } + } else { + lll->rssi_sample_count = LLL_CONN_RSSI_SAMPLE_COUNT; + } + } +#else /* !CONFIG_BT_CTLR_CONN_RSSI */ + ARG_UNUSED(rssi_ready); +#endif /* !CONFIG_BT_CTLR_CONN_RSSI */ + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_send(); 
+#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ +} + +void lll_conn_isr_tx(void *param) +{ + struct lll_conn *lll = (void *)param; + u32_t hcto; + + /* TODO: MOVE to a common interface, isr_lll_radio_status? */ + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \ + defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_pa_lna_disable(); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */ + /* TODO: MOVE ^^ */ + + radio_isr_set(lll_conn_isr_rx, param); + radio_tmr_tifs_set(TIFS_US); +#if defined(CONFIG_BT_CTLR_PHY) + radio_switch_complete_and_tx(lll->phy_rx, 0, + lll->phy_tx, + lll->phy_flags); +#else /* !CONFIG_BT_CTLR_PHY */ + radio_switch_complete_and_tx(0, 0, 0, 0); +#endif /* !CONFIG_BT_CTLR_PHY */ + + lll_conn_rx_pkt_set(lll); + + /* assert if radio packet ptr is not set and radio started rx */ + LL_ASSERT(!radio_is_ready()); + + /* +/- 2us active clock jitter, +1 us hcto compensation */ + hcto = radio_tmr_tifs_base_get() + TIFS_US + 4 + 1; +#if defined(CONFIG_BT_CTLR_PHY) + hcto += radio_rx_chain_delay_get(lll->phy_rx, 1); + hcto += addr_us_get(lll->phy_rx); + hcto -= radio_tx_chain_delay_get(lll->phy_tx, lll->phy_flags); +#else /* !CONFIG_BT_CTLR_PHY */ + hcto += radio_rx_chain_delay_get(0, 0); + hcto += addr_us_get(0); + hcto -= radio_tx_chain_delay_get(0, 0); +#endif /* !CONFIG_BT_CTLR_PHY */ + + radio_tmr_hcto_configure(hcto); + +#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_CONN_RSSI) + if (!lll->role) { + radio_rssi_measure(); + } +#endif /* iCONFIG_BT_CENTRAL && CONFIG_BT_CTLR_CONN_RSSI */ + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) || \ + defined(CONFIG_BT_CTLR_GPIO_PA_PIN) + radio_tmr_end_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + +#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_lna_setup(); +#if defined(CONFIG_BT_CTLR_PHY) + radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - 4 - + radio_tx_chain_delay_get(lll->phy_tx, + 
lll->phy_flags) - + CONFIG_BT_CTLR_GPIO_LNA_OFFSET); +#else /* !CONFIG_BT_CTLR_PHY */ + radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - 4 - + radio_tx_chain_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_LNA_OFFSET); +#endif /* !CONFIG_BT_CTLR_PHY */ +#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */ +} + +void lll_conn_isr_abort(void *param) +{ + isr_cleanup(param); +} + +void lll_conn_rx_pkt_set(struct lll_conn *lll) +{ + struct node_rx_pdu *node_rx; + u16_t max_rx_octets; + u8_t phy; + + node_rx = ull_pdu_rx_alloc_peek(1); + LL_ASSERT(node_rx); + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) + max_rx_octets = lll->max_rx_octets; +#else /* !CONFIG_BT_CTLR_DATA_LENGTH */ + max_rx_octets = PDU_DC_PAYLOAD_SIZE_MIN; +#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */ + +#if defined(CONFIG_BT_CTLR_PHY) + phy = lll->phy_rx; +#else /* !CONFIG_BT_CTLR_PHY */ + phy = 0; +#endif /* !CONFIG_BT_CTLR_PHY */ + + radio_phy_set(phy, 0); + + if (0) { +#if defined(CONFIG_BT_CTLR_LE_ENC) + } else if (lll->enc_rx) { + radio_pkt_configure(8, (max_rx_octets + 4), (phy << 1) | 0x01); + + radio_pkt_rx_set(radio_ccm_rx_pkt_set(&lll->ccm_rx, phy, + node_rx->pdu)); +#endif /* CONFIG_BT_CTLR_LE_ENC */ + } else { + radio_pkt_configure(8, max_rx_octets, (phy << 1) | 0x01); + + radio_pkt_rx_set(node_rx->pdu); + } +} + +void lll_conn_tx_pkt_set(struct lll_conn *lll, struct pdu_data *pdu_data_tx) +{ + u16_t max_tx_octets; + u8_t phy, flags; + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) + max_tx_octets = lll->max_tx_octets; +#else /* !CONFIG_BT_CTLR_DATA_LENGTH */ + max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN; +#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */ + +#if defined(CONFIG_BT_CTLR_PHY) + phy = lll->phy_tx; + flags = lll->phy_flags; +#else /* !CONFIG_BT_CTLR_PHY */ + phy = 0; + flags = 0; +#endif /* !CONFIG_BT_CTLR_PHY */ + + radio_phy_set(phy, flags); + + if (0) { +#if defined(CONFIG_BT_CTLR_LE_ENC) + } else if (lll->enc_tx) { + radio_pkt_configure(8, (max_tx_octets + 4), (phy << 1) | 0x01); + + 
radio_pkt_tx_set(radio_ccm_tx_pkt_set(&lll->ccm_tx, + pdu_data_tx)); +#endif /* CONFIG_BT_CTLR_LE_ENC */ + } else { + radio_pkt_configure(8, max_tx_octets, (phy << 1) | 0x01); + + radio_pkt_tx_set(pdu_data_tx); + } +} + +void lll_conn_pdu_tx_prep(struct lll_conn *lll, struct pdu_data **pdu_data_tx) +{ + struct node_tx *tx; + struct pdu_data *p; + memq_link_t *link; + + if (lll->empty) { + *pdu_data_tx = empty_tx_enqueue(lll); + return; + } + + link = memq_peek(lll->memq_tx.head, lll->memq_tx.tail, (void **)&tx); + if (!link) { + p = empty_tx_enqueue(lll); + } else { + u16_t max_tx_octets; + + p = (void *)(tx->pdu + lll->packet_tx_head_offset); + + if (!lll->packet_tx_head_len) { + lll->packet_tx_head_len = p->len; + } + + if (lll->packet_tx_head_offset) { + p->ll_id = PDU_DATA_LLID_DATA_CONTINUE; + } + + p->len = lll->packet_tx_head_len - lll->packet_tx_head_offset; + p->md = 0; + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) +#if defined(CONFIG_BT_CTLR_PHY) + switch (lll->phy_tx_time) { + default: + case BIT(0): + /* 1M PHY, 1us = 1 bit, hence divide by 8. + * Deduct 10 bytes for preamble (1), access address (4), + * header (2), and CRC (3). + */ + max_tx_octets = (lll->max_tx_time >> 3) - 10; + break; + + case BIT(1): + /* 2M PHY, 1us = 2 bits, hence divide by 4. + * Deduct 11 bytes for preamble (2), access address (4), + * header (2), and CRC (3). + */ + max_tx_octets = (lll->max_tx_time >> 2) - 11; + break; + +#if defined(CONFIG_BT_CTLR_PHY_CODED) + case BIT(2): + if (lll->phy_flags & 0x01) { + /* S8 Coded PHY, 8us = 1 bit, hence divide by + * 64. + * Subtract time for preamble (80), AA (256), + * CI (16), TERM1 (24), CRC (192) and + * TERM2 (24), total 592 us. + * Subtract 2 bytes for header. + */ + max_tx_octets = ((lll->max_tx_time - 592) >> + 6) - 2; + } else { + /* S2 Coded PHY, 2us = 1 bit, hence divide by + * 16. + * Subtract time for preamble (80), AA (256), + * CI (16), TERM1 (24), CRC (48) and + * TERM2 (6), total 430 us. + * Subtract 2 bytes for header. 
+ */ + max_tx_octets = ((lll->max_tx_time - 430) >> + 4) - 2; + } + break; +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + } + +#if defined(CONFIG_BT_CTLR_LE_ENC) + if (lll->enc_tx) { + /* deduct the MIC */ + max_tx_octets -= 4; + } +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + if (max_tx_octets > lll->max_tx_octets) { + max_tx_octets = lll->max_tx_octets; + } +#else /* !CONFIG_BT_CTLR_PHY */ + max_tx_octets = lll->max_tx_octets; +#endif /* !CONFIG_BT_CTLR_PHY */ +#else /* !CONFIG_BT_CTLR_DATA_LENGTH */ + max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN; +#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */ + + if (p->len > max_tx_octets) { + p->len = max_tx_octets; + p->md = 1; + } + + if (link->next) { + p->md = 1; + } + } + + *pdu_data_tx = p; +} + +u8_t lll_conn_ack_last_idx_get(void) +{ + return mfifo_conn_ack.l; +} + +memq_link_t *lll_conn_ack_peek(u8_t *ack_last, u16_t *handle, + struct node_tx **node_tx) +{ + struct lll_tx *tx; + + tx = MFIFO_DEQUEUE_GET(conn_ack); + if (!tx) { + return NULL; + } + + *ack_last = mfifo_conn_ack.l; + + *handle = tx->handle; + *node_tx = tx->node; + + return (*node_tx)->link; +} + +memq_link_t *lll_conn_ack_by_last_peek(u8_t last, u16_t *handle, + struct node_tx **node_tx) +{ + struct lll_tx *tx; + + tx = mfifo_dequeue_get(mfifo_conn_ack.m, mfifo_conn_ack.s, + mfifo_conn_ack.f, last); + if (!tx) { + return NULL; + } + + *handle = tx->handle; + *node_tx = tx->node; + + return (*node_tx)->link; +} + +void *lll_conn_ack_dequeue(void) +{ + return MFIFO_DEQUEUE(conn_ack); +} + +void lll_conn_tx_flush(void *param) +{ + struct lll_conn *lll = param; + struct node_tx *node_tx; + memq_link_t *link; + + link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head, + (void **)&node_tx); + while (link) { + struct pdu_data *p; + struct lll_tx *tx; + u8_t idx; + + idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&tx); + LL_ASSERT(tx); + + tx->handle = 0xFFFF; + tx->node = node_tx; + link->next = node_tx->next; + node_tx->link = link; + p = (void *)node_tx->pdu; + p->ll_id = 
PDU_DATA_LLID_RESV; + + MFIFO_ENQUEUE(conn_ack, idx); + + link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head, + (void **)&node_tx); + } +} + +static int init_reset(void) +{ + return 0; +} + +static void isr_done(void *param) +{ + struct event_done_extra *e; + + /* TODO: MOVE to a common interface, isr_lll_radio_status? */ + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + radio_filter_status_reset(); + radio_ar_status_reset(); + radio_rssi_status_reset(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \ + defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_pa_lna_disable(); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */ + /* TODO: MOVE ^^ */ + + e = ull_event_done_extra_get(); + e->type = EVENT_DONE_EXTRA_TYPE_CONN; + e->trx_cnt = trx_cnt; + e->crc_valid = crc_valid; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + e->mic_state = mic_state; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + if (trx_cnt) { + struct lll_conn *lll = param; + + if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && lll->role) { + u32_t preamble_to_addr_us; + +#if defined(CONFIG_BT_CTLR_PHY) + preamble_to_addr_us = + addr_us_get(lll->phy_rx); +#else /* !CONFIG_BT_CTLR_PHY */ + preamble_to_addr_us = + addr_us_get(0); +#endif /* !CONFIG_BT_CTLR_PHY */ + + e->slave.start_to_address_actual_us = + radio_tmr_aa_restore() - radio_tmr_ready_get(); + e->slave.window_widening_event_us = + lll->slave.window_widening_event_us; + e->slave.preamble_to_addr_us = preamble_to_addr_us; + + /* Reset window widening, as anchor point sync-ed */ + lll->slave.window_widening_event_us = 0; + lll->slave.window_size_event_us = 0; + } + } + + isr_cleanup(param); +} + +static void isr_cleanup(void *param) +{ + int err; + + radio_isr_set(isr_race, param); + radio_tmr_stop(); + + err = lll_clk_off(); + LL_ASSERT(!err || err == -EBUSY); + + lll_done(NULL); +} + +static void isr_race(void *param) +{ + /* NOTE: lll_disable could have a race with ... 
*/ + radio_status_reset(); +} + +static int isr_rx_pdu(struct lll_conn *lll, struct pdu_data *pdu_data_rx, + struct node_tx **tx_release, u8_t *is_rx_enqueue) +{ + /* Ack for tx-ed data */ + if (pdu_data_rx->nesn != lll->sn) { + /* Increment serial number */ + lll->sn++; + + /* First ack (and redundantly any other ack) enable use of + * slave latency. + */ + if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && lll->role) { + lll->slave.latency_enabled = 1; + } + + if (!lll->empty) { + struct pdu_data *pdu_data_tx; + u8_t pdu_data_tx_len; + struct node_tx *tx; + memq_link_t *link; + + link = memq_peek(lll->memq_tx.head, lll->memq_tx.tail, + (void **)&tx); + LL_ASSERT(link); + + pdu_data_tx = (void *)(tx->pdu + + lll->packet_tx_head_offset); + + pdu_data_tx_len = pdu_data_tx->len; +#if defined(CONFIG_BT_CTLR_LE_ENC) + if (pdu_data_tx_len != 0) { + /* if encrypted increment tx counter */ + if (lll->enc_tx) { + lll->ccm_tx.counter++; + } + } +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + lll->packet_tx_head_offset += pdu_data_tx_len; + if (lll->packet_tx_head_offset == + lll->packet_tx_head_len) { + lll->packet_tx_head_len = 0; + lll->packet_tx_head_offset = 0; + + memq_dequeue(lll->memq_tx.tail, + &lll->memq_tx.head, NULL); + + link->next = tx->next; + tx->next = link; + + *tx_release = tx; + } + } else { + lll->empty = 0; + } + } + + /* process received data */ + if ((pdu_data_rx->sn == lll->nesn) && + /* check so that we will NEVER use the rx buffer reserved for empty + * packet and internal control enqueue + */ + (ull_pdu_rx_alloc_peek(3) != 0)) { + /* Increment next expected serial number */ + lll->nesn++; + + if (pdu_data_rx->len != 0) { +#if defined(CONFIG_BT_CTLR_LE_ENC) + /* If required, wait for CCM to finish + */ + if (lll->enc_rx) { + u32_t done; + + done = radio_ccm_is_done(); + LL_ASSERT(done); + + if (!radio_ccm_mic_is_valid()) { + /* Record MIC invalid */ + mic_state = LLL_CONN_MIC_FAIL; + + return -EINVAL; + } + + /* Increment counter */ + lll->ccm_rx.counter++; + + /* 
Record MIC valid */ + mic_state = LLL_CONN_MIC_PASS; + } +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + /* Enqueue non-empty PDU */ + *is_rx_enqueue = 1; +#if 0 + /* MIC Failure Check or data rx during pause */ + if ((_radio.conn_curr->enc_rx && + !radio_ccm_mic_is_valid()) || + (_radio.conn_curr->pause_rx && + isr_rx_conn_enc_unexpected(_radio.conn_curr, + pdu_data_rx))) { + _radio.state = STATE_CLOSE; + radio_disable(); + + /* assert if radio packet ptr is not set and + * radio started tx + */ + LL_ASSERT(!radio_is_ready()); + + terminate_ind_rx_enqueue(_radio.conn_curr, + 0x3d); + + connection_release(_radio.conn_curr); + _radio.conn_curr = NULL; + + return 1; /* terminated */ + } + +#endif + } + } + + return 0; +} + +static struct pdu_data *empty_tx_enqueue(struct lll_conn *lll) +{ + struct pdu_data *p; + + lll->empty = 1; + + p = (void *)radio_pkt_empty_get(); + p->ll_id = PDU_DATA_LLID_DATA_CONTINUE; + p->len = 0; + if (memq_peek(lll->memq_tx.head, lll->memq_tx.tail, NULL)) { + p->md = 1; + } else { + p->md = 0; + } + + return p; +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_filter.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_filter.c new file mode 100644 index 00000000000..5753a565de6 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_filter.c @@ -0,0 +1,981 @@ +/* + * Copyright (c) 2017-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include +#include +#include + +#include "hal/ccm.h" + +#include "util/util.h" +#include "util/memq.h" + +#include "pdu.h" +#include "ll.h" + +#include "lll.h" +#include "lll_adv.h" +#include "lll_scan.h" +#include "lll_conn.h" +#include "lll_filter.h" + +#include "ull_adv_types.h" +#include "ull_scan_types.h" + +#include "ull_internal.h" +#include "ull_adv_internal.h" +#include "ull_scan_internal.h" + +#define ADDR_TYPE_ANON 0xFF + +#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_filter +#include "common/log.h" + +#include "hal/debug.h" + +/* 
Hardware whitelist */ +static struct ll_filter wl_filter; +u8_t wl_anon; + +#if defined(CONFIG_BT_CTLR_PRIVACY) +#include "common/rpa.h" + +/* Whitelist peer list */ +static struct { + u8_t taken:1; + u8_t id_addr_type:1; + u8_t rl_idx; + bt_addr_t id_addr; +} wl[WL_SIZE]; + +static u8_t rl_enable; +static struct rl_dev { + u8_t taken:1; + u8_t rpas_ready:1; + u8_t pirk:1; + u8_t lirk:1; + u8_t dev:1; + u8_t wl:1; + + u8_t id_addr_type:1; + bt_addr_t id_addr; + + u8_t local_irk[16]; + u8_t pirk_idx; + bt_addr_t curr_rpa; + bt_addr_t peer_rpa; + bt_addr_t *local_rpa; + +} rl[CONFIG_BT_CTLR_RL_SIZE]; + +static u8_t peer_irks[CONFIG_BT_CTLR_RL_SIZE][16]; +static u8_t peer_irk_rl_ids[CONFIG_BT_CTLR_RL_SIZE]; +static u8_t peer_irk_count; + +static bt_addr_t local_rpas[CONFIG_BT_CTLR_RL_SIZE]; + +BUILD_ASSERT(ARRAY_SIZE(wl) < FILTER_IDX_NONE); +BUILD_ASSERT(ARRAY_SIZE(rl) < FILTER_IDX_NONE); + +/* Hardware filter for the resolving list */ +static struct ll_filter rl_filter; + +#define DEFAULT_RPA_TIMEOUT_MS (900 * 1000) +u32_t rpa_timeout_ms; +s64_t rpa_last_ms; + +struct k_delayed_work rpa_work; + +#define LIST_MATCH(list, i, type, addr) (list[i].taken && \ + (list[i].id_addr_type == (type & 0x1)) && \ + !memcmp(list[i].id_addr.val, addr, BDADDR_SIZE)) + +#else /* CONFIG_BT_CTLR_PRIVACY */ + +static void filter_clear(struct ll_filter *filter) +{ + filter->enable_bitmask = 0; + filter->addr_type_bitmask = 0; +} + +static void filter_insert(struct ll_filter *filter, int index, u8_t addr_type, + u8_t *bdaddr) +{ + filter->enable_bitmask |= BIT(index); + filter->addr_type_bitmask |= ((addr_type & 0x01) << index); + memcpy(&filter->bdaddr[index][0], bdaddr, BDADDR_SIZE); +} + +static u32_t filter_add(struct ll_filter *filter, u8_t addr_type, u8_t *bdaddr) +{ + int index; + + if (filter->enable_bitmask == 0xFF) { + return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED; + } + + for (index = 0; + (filter->enable_bitmask & BIT(index)); + index++) { + } + + filter_insert(filter, index, 
addr_type, bdaddr); + return 0; +} + +static u32_t filter_remove(struct ll_filter *filter, u8_t addr_type, + u8_t *bdaddr) +{ + int index; + + if (!filter->enable_bitmask) { + return BT_HCI_ERR_INVALID_PARAM; + } + + index = 8; + while (index--) { + if ((filter->enable_bitmask & BIT(index)) && + (((filter->addr_type_bitmask >> index) & 0x01) == + (addr_type & 0x01)) && + !memcmp(filter->bdaddr[index], bdaddr, BDADDR_SIZE)) { + filter->enable_bitmask &= ~BIT(index); + filter->addr_type_bitmask &= ~BIT(index); + return 0; + } + } + + return BT_HCI_ERR_INVALID_PARAM; +} +#endif /* !CONFIG_BT_CTLR_PRIVACY */ + +#if defined(CONFIG_BT_CTLR_PRIVACY) +static void wl_clear(void) +{ + for (int i = 0; i < WL_SIZE; i++) { + wl[i].taken = 0; + } +} + +static u8_t wl_find(u8_t addr_type, u8_t *addr, u8_t *free) +{ + int i; + + if (free) { + *free = FILTER_IDX_NONE; + } + + for (i = 0; i < WL_SIZE; i++) { + if (LIST_MATCH(wl, i, addr_type, addr)) { + return i; + } else if (free && !wl[i].taken && (*free == FILTER_IDX_NONE)) { + *free = i; + } + } + + return FILTER_IDX_NONE; +} + +static u32_t wl_add(bt_addr_le_t *id_addr) +{ + u8_t i, j; + + i = wl_find(id_addr->type, id_addr->a.val, &j); + + /* Duplicate check */ + if (i < ARRAY_SIZE(wl)) { + return BT_HCI_ERR_INVALID_PARAM; + } else if (j >= ARRAY_SIZE(wl)) { + return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED; + } + + i = j; + + wl[i].id_addr_type = id_addr->type & 0x1; + bt_addr_copy(&wl[i].id_addr, &id_addr->a); + /* Get index to Resolving List if applicable */ + j = ll_rl_find(id_addr->type, id_addr->a.val, NULL); + if (j < ARRAY_SIZE(rl)) { + wl[i].rl_idx = j; + rl[j].wl = 1; + } else { + wl[i].rl_idx = FILTER_IDX_NONE; + } + wl[i].taken = 1; + + return 0; +} + +static u32_t wl_remove(bt_addr_le_t *id_addr) +{ + /* find the device and mark it as empty */ + u8_t i = wl_find(id_addr->type, id_addr->a.val, NULL); + + if (i < ARRAY_SIZE(wl)) { + u8_t j = wl[i].rl_idx; + + if (j < ARRAY_SIZE(rl)) { + rl[j].wl = 0; + } + wl[i].taken = 0; 
+ return 0; + } + + return BT_HCI_ERR_UNKNOWN_CONN_ID; +} + +bt_addr_t *ctrl_lrpa_get(u8_t rl_idx) +{ + if ((rl_idx >= ARRAY_SIZE(rl)) || !rl[rl_idx].lirk || + !rl[rl_idx].rpas_ready) { + return NULL; + } + + return rl[rl_idx].local_rpa; +} + +u8_t *ctrl_irks_get(u8_t *count) +{ + *count = peer_irk_count; + return (u8_t *)peer_irks; +} + +u8_t ctrl_rl_idx(bool whitelist, u8_t devmatch_id) +{ + u8_t i; + + if (whitelist) { + LL_ASSERT(devmatch_id < ARRAY_SIZE(wl)); + LL_ASSERT(wl[devmatch_id].taken); + i = wl[devmatch_id].rl_idx; + } else { + LL_ASSERT(devmatch_id < ARRAY_SIZE(rl)); + i = devmatch_id; + LL_ASSERT(rl[i].taken); + } + + return i; +} + +u8_t ctrl_rl_irk_idx(u8_t irkmatch_id) +{ + u8_t i; + + LL_ASSERT(irkmatch_id < peer_irk_count); + i = peer_irk_rl_ids[irkmatch_id]; + LL_ASSERT(i < CONFIG_BT_CTLR_RL_SIZE); + LL_ASSERT(rl[i].taken); + + return i; +} + +bool ctrl_irk_whitelisted(u8_t rl_idx) +{ + if (rl_idx >= ARRAY_SIZE(rl)) { + return false; + } + + LL_ASSERT(rl[rl_idx].taken); + + return rl[rl_idx].wl; +} +#endif /* CONFIG_BT_CTLR_PRIVACY */ + +struct ll_filter *ctrl_filter_get(bool whitelist) +{ +#if defined(CONFIG_BT_CTLR_PRIVACY) + if (whitelist) { + return &wl_filter; + } + return &rl_filter; +#else + LL_ASSERT(whitelist); + return &wl_filter; +#endif +} + +u8_t ll_wl_size_get(void) +{ + return WL_SIZE; +} + +u8_t ll_wl_clear(void) +{ +#if defined(CONFIG_BT_BROADCASTER) + if (ull_adv_filter_pol_get(0)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } +#endif /* CONFIG_BT_BROADCASTER */ + +#if defined(CONFIG_BT_OBSERVER) + if (ull_scan_filter_pol_get(0) & 0x1) { + return BT_HCI_ERR_CMD_DISALLOWED; + } +#endif /* CONFIG_BT_OBSERVER */ + +#if defined(CONFIG_BT_CTLR_PRIVACY) + wl_clear(); +#else + filter_clear(&wl_filter); +#endif /* CONFIG_BT_CTLR_PRIVACY */ + + wl_anon = 0; + + return 0; +} + +u8_t ll_wl_add(bt_addr_le_t *addr) +{ +#if defined(CONFIG_BT_BROADCASTER) + if (ull_adv_filter_pol_get(0)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } +#endif /* 
CONFIG_BT_BROADCASTER */ + +#if defined(CONFIG_BT_OBSERVER) + if (ull_scan_filter_pol_get(0) & 0x1) { + return BT_HCI_ERR_CMD_DISALLOWED; + } +#endif /* CONFIG_BT_OBSERVER */ + + if (addr->type == ADDR_TYPE_ANON) { + wl_anon = 1; + return 0; + } + +#if defined(CONFIG_BT_CTLR_PRIVACY) + return wl_add(addr); +#else + return filter_add(&wl_filter, addr->type, addr->a.val); +#endif /* CONFIG_BT_CTLR_PRIVACY */ +} + +u8_t ll_wl_remove(bt_addr_le_t *addr) +{ +#if defined(CONFIG_BT_BROADCASTER) + if (ull_adv_filter_pol_get(0)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } +#endif /* CONFIG_BT_BROADCASTER */ + +#if defined(CONFIG_BT_OBSERVER) + if (ull_scan_filter_pol_get(0) & 0x1) { + return BT_HCI_ERR_CMD_DISALLOWED; + } +#endif /* CONFIG_BT_OBSERVER */ + + if (addr->type == ADDR_TYPE_ANON) { + wl_anon = 0; + return 0; + } + +#if defined(CONFIG_BT_CTLR_PRIVACY) + return wl_remove(addr); +#else + return filter_remove(&wl_filter, addr->type, addr->a.val); +#endif /* CONFIG_BT_CTLR_PRIVACY */ +} + +#if defined(CONFIG_BT_CTLR_PRIVACY) +static void filter_wl_update(void) +{ + u8_t i; + + /* Populate filter from wl peers */ + for (i = 0; i < WL_SIZE; i++) { + u8_t j; + + if (!wl[i].taken) { + continue; + } + + j = wl[i].rl_idx; + + if (!rl_enable || j >= ARRAY_SIZE(rl) || !rl[j].pirk || + rl[j].dev) { + filter_insert(&wl_filter, i, wl[i].id_addr_type, + wl[i].id_addr.val); + } + } +} + +static void filter_rl_update(void) +{ + u8_t i; + + /* Populate filter from rl peers */ + for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) { + if (rl[i].taken) { + filter_insert(&rl_filter, i, rl[i].id_addr_type, + rl[i].id_addr.val); + } + } +} + +void ll_filters_adv_update(u8_t adv_fp) +{ + /* Clear before populating filter */ + filter_clear(&wl_filter); + + /* enabling advertising */ + if (adv_fp && !(ull_scan_filter_pol_get(0) & 0x1)) { + /* whitelist not in use, update whitelist */ + filter_wl_update(); + } + + /* Clear before populating rl filter */ + filter_clear(&rl_filter); + + if (rl_enable 
&& !ll_scan_is_enabled()) { + /* rl not in use, update resolving list LUT */ + filter_rl_update(); + } +} + +void ll_filters_scan_update(u8_t scan_fp) +{ + /* Clear before populating filter */ + filter_clear(&wl_filter); + + /* enabling advertising */ + if ((scan_fp & 0x1) && !ull_adv_filter_pol_get(0)) { + /* whitelist not in use, update whitelist */ + filter_wl_update(); + } + + /* Clear before populating rl filter */ + filter_clear(&rl_filter); + + if (rl_enable && !ll_adv_is_enabled(LL_ADV_SET_MAX)) { + /* rl not in use, update resolving list LUT */ + filter_rl_update(); + } +} + +u8_t ll_rl_find(u8_t id_addr_type, u8_t *id_addr, u8_t *free) +{ + u8_t i; + + if (free) { + *free = FILTER_IDX_NONE; + } + + for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) { + if (LIST_MATCH(rl, i, id_addr_type, id_addr)) { + return i; + } else if (free && !rl[i].taken && (*free == FILTER_IDX_NONE)) { + *free = i; + } + } + + return FILTER_IDX_NONE; +} + +bool ctrl_rl_idx_allowed(u8_t irkmatch_ok, u8_t rl_idx) +{ + /* If AR is disabled or we don't know the device or we matched an IRK + * then we're all set. + */ + if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || irkmatch_ok) { + return true; + } + + LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE); + LL_ASSERT(rl[rl_idx].taken); + + return !rl[rl_idx].pirk || rl[rl_idx].dev; +} + +void ll_rl_id_addr_get(u8_t rl_idx, u8_t *id_addr_type, u8_t *id_addr) +{ + LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE); + LL_ASSERT(rl[rl_idx].taken); + + *id_addr_type = rl[rl_idx].id_addr_type; + memcpy(id_addr, rl[rl_idx].id_addr.val, BDADDR_SIZE); +} + +bool ctrl_rl_addr_allowed(u8_t id_addr_type, u8_t *id_addr, u8_t *rl_idx) +{ + u8_t i, j; + + /* If AR is disabled or we matched an IRK then we're all set. No hw + * filters are used in this case. 
+ */ + if (!rl_enable || *rl_idx != FILTER_IDX_NONE) { + return true; + } + + for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) { + if (rl[i].taken && (rl[i].id_addr_type == id_addr_type)) { + u8_t *addr = rl[i].id_addr.val; + + for (j = 0; j < BDADDR_SIZE; j++) { + if (addr[j] != id_addr[j]) { + break; + } + } + + if (j == BDADDR_SIZE) { + *rl_idx = i; + return !rl[i].pirk || rl[i].dev; + } + } + } + + return true; +} + +bool ctrl_rl_addr_resolve(u8_t id_addr_type, u8_t *id_addr, u8_t rl_idx) +{ + /* Unable to resolve if AR is disabled, no RL entry or no local IRK */ + if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || !rl[rl_idx].lirk) { + return false; + } + + if ((id_addr_type != 0) && ((id_addr[5] & 0xc0) == 0x40)) { + return bt_rpa_irk_matches(rl[rl_idx].local_irk, + (bt_addr_t *)id_addr); + } + + return false; +} + +bool ctrl_rl_enabled(void) +{ + return rl_enable; +} + +#if defined(CONFIG_BT_BROADCASTER) +void ll_rl_pdu_adv_update(struct ll_adv_set *adv, u8_t idx, + struct pdu_adv *pdu) +{ + u8_t *adva = pdu->type == PDU_ADV_TYPE_SCAN_RSP ? 
+ &pdu->scan_rsp.addr[0] : + &pdu->adv_ind.addr[0]; + + /* AdvA */ + if (idx < ARRAY_SIZE(rl) && rl[idx].lirk) { + LL_ASSERT(rl[idx].rpas_ready); + pdu->tx_addr = 1; + memcpy(adva, rl[idx].local_rpa->val, BDADDR_SIZE); + } else { + pdu->tx_addr = adv->own_addr_type & 0x1; + ll_addr_get(adv->own_addr_type & 0x1, adva); + } + + /* TargetA */ + if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) { + if (idx < ARRAY_SIZE(rl) && rl[idx].pirk) { + pdu->rx_addr = 1; + memcpy(&pdu->direct_ind.tgt_addr[0], + rl[idx].peer_rpa.val, BDADDR_SIZE); + } else { + pdu->rx_addr = adv->id_addr_type; + memcpy(&pdu->direct_ind.tgt_addr[0], + adv->id_addr, BDADDR_SIZE); + } + } +} + +static void rpa_adv_refresh(struct ll_adv_set *adv) +{ + struct radio_adv_data *radio_adv_data; + struct pdu_adv *prev; + struct pdu_adv *pdu; + u8_t last; + u8_t idx; + + if (adv->own_addr_type != BT_ADDR_LE_PUBLIC_ID && + adv->own_addr_type != BT_ADDR_LE_RANDOM_ID) { + return; + } + + radio_adv_data = radio_adv_data_get(); + prev = (struct pdu_adv *)&radio_adv_data->data[radio_adv_data->last][0]; + /* use the last index in double buffer, */ + if (radio_adv_data->first == radio_adv_data->last) { + last = radio_adv_data->last + 1; + if (last == DOUBLE_BUFFER_SIZE) { + last = 0; + } + } else { + last = radio_adv_data->last; + } + + /* update adv pdu fields. */ + pdu = (struct pdu_adv *)&radio_adv_data->data[last][0]; + pdu->type = prev->type; + pdu->rfu = 0; + + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + pdu->chan_sel = prev->chan_sel; + } else { + pdu->chan_sel = 0; + } + + idx = ll_rl_find(adv->id_addr_type, adv->id_addr, NULL); + LL_ASSERT(idx < ARRAY_SIZE(rl)); + ll_rl_pdu_adv_update(adv, idx, pdu); + + memcpy(&pdu->adv_ind.data[0], &prev->adv_ind.data[0], + prev->len - BDADDR_SIZE); + pdu->len = prev->len; + + /* commit the update so controller picks it. 
*/ + radio_adv_data->last = last; +} +#endif + +static void rl_clear(void) +{ + for (u8_t i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) { + rl[i].taken = 0; + } + + peer_irk_count = 0; +} + +static int rl_access_check(bool check_ar) +{ + if (check_ar) { + /* If address resolution is disabled, allow immediately */ + if (!rl_enable) { + return -1; + } + } + + return (ll_adv_is_enabled(LL_ADV_SET_MAX) || + ll_scan_is_enabled()) ? 0 : 1; +} + +void ll_rl_rpa_update(bool timeout) +{ + u8_t i; + int err; + s64_t now = k_uptime_get(); + bool all = timeout || (rpa_last_ms == -1) || + (now - rpa_last_ms >= rpa_timeout_ms); + BT_DBG(""); + + for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) { + if ((rl[i].taken) && (all || !rl[i].rpas_ready)) { + + if (rl[i].pirk) { + u8_t irk[16]; + + /* TODO: move this swap to the driver level */ + sys_memcpy_swap(irk, peer_irks[rl[i].pirk_idx], + 16); + err = bt_rpa_create(irk, &rl[i].peer_rpa); + LL_ASSERT(!err); + } + + if (rl[i].lirk) { + bt_addr_t rpa; + + err = bt_rpa_create(rl[i].local_irk, &rpa); + LL_ASSERT(!err); + /* pointer read/write assumed to be atomic + * so that if ISR fires the local_rpa pointer + * will always point to a valid full RPA + */ + rl[i].local_rpa = &rpa; + bt_addr_copy(&local_rpas[i], &rpa); + rl[i].local_rpa = &local_rpas[i]; + } + + rl[i].rpas_ready = 1; + } + } + + if (all) { + rpa_last_ms = now; + } + + if (timeout) { +#if defined(CONFIG_BT_BROADCASTER) + struct ll_adv_set *adv; + + /* TODO: foreach adv set */ + adv = ll_adv_is_enabled_get(0); + if (adv) { + rpa_adv_refresh(adv); + } +#endif + } +} + +static void rpa_timeout(struct k_work *work) +{ + ll_rl_rpa_update(true); + k_delayed_work_submit(&rpa_work, rpa_timeout_ms); +} + +static void rpa_refresh_start(void) +{ + if (!rl_enable) { + return; + } + + BT_DBG(""); + k_delayed_work_submit(&rpa_work, rpa_timeout_ms); +} + +static void rpa_refresh_stop(void) +{ + if (!rl_enable) { + return; + } + + k_delayed_work_cancel(&rpa_work); +} + +void 
ll_adv_scan_state_cb(u8_t bm) +{ + if (bm) { + rpa_refresh_start(); + } else { + rpa_refresh_stop(); + } +} + +u32_t ll_rl_size_get(void) +{ + return CONFIG_BT_CTLR_RL_SIZE; +} + +u32_t ll_rl_clear(void) +{ + if (!rl_access_check(false)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + rl_clear(); + + return 0; +} + +u32_t ll_rl_add(bt_addr_le_t *id_addr, const u8_t pirk[16], + const u8_t lirk[16]) +{ + u8_t i, j; + + if (!rl_access_check(false)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + i = ll_rl_find(id_addr->type, id_addr->a.val, &j); + + /* Duplicate check */ + if (i < ARRAY_SIZE(rl)) { + return BT_HCI_ERR_INVALID_PARAM; + } else if (j >= ARRAY_SIZE(rl)) { + return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED; + } + + /* Device not found but empty slot found */ + i = j; + + bt_addr_copy(&rl[i].id_addr, &id_addr->a); + rl[i].id_addr_type = id_addr->type & 0x1; + rl[i].pirk = mem_nz((u8_t *)pirk, 16); + rl[i].lirk = mem_nz((u8_t *)lirk, 16); + if (rl[i].pirk) { + /* cross-reference */ + rl[i].pirk_idx = peer_irk_count; + peer_irk_rl_ids[peer_irk_count] = i; + /* AAR requires big-endian IRKs */ + sys_memcpy_swap(peer_irks[peer_irk_count++], pirk, 16); + } + if (rl[i].lirk) { + memcpy(rl[i].local_irk, lirk, 16); + rl[i].local_rpa = NULL; + } + memset(rl[i].curr_rpa.val, 0x00, sizeof(rl[i].curr_rpa)); + rl[i].rpas_ready = 0; + /* Default to Network Privacy */ + rl[i].dev = 0; + /* Add reference to a whitelist entry */ + j = wl_find(id_addr->type, id_addr->a.val, NULL); + if (j < ARRAY_SIZE(wl)) { + wl[j].rl_idx = i; + rl[i].wl = 1; + } else { + rl[i].wl = 0; + } + rl[i].taken = 1; + + return 0; +} + +u32_t ll_rl_remove(bt_addr_le_t *id_addr) +{ + u8_t i; + + if (!rl_access_check(false)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + /* find the device and mark it as empty */ + i = ll_rl_find(id_addr->type, id_addr->a.val, NULL); + if (i < ARRAY_SIZE(rl)) { + u8_t j, k; + + if (rl[i].pirk) { + /* Swap with last item */ + u8_t pi = rl[i].pirk_idx, pj = peer_irk_count - 1; + + if 
(pj && pi != pj) { + memcpy(peer_irks[pi], peer_irks[pj], 16); + for (k = 0; + k < CONFIG_BT_CTLR_RL_SIZE; + k++) { + + if (rl[k].taken && rl[k].pirk && + rl[k].pirk_idx == pj) { + rl[k].pirk_idx = pi; + peer_irk_rl_ids[pi] = k; + break; + } + } + } + peer_irk_count--; + } + + /* Check if referenced by a whitelist entry */ + j = wl_find(id_addr->type, id_addr->a.val, NULL); + if (j < ARRAY_SIZE(wl)) { + wl[j].rl_idx = FILTER_IDX_NONE; + } + rl[i].taken = 0; + return 0; + } + + return BT_HCI_ERR_UNKNOWN_CONN_ID; +} + +void ll_rl_crpa_set(u8_t id_addr_type, u8_t *id_addr, u8_t rl_idx, u8_t *crpa) +{ + if ((crpa[5] & 0xc0) == 0x40) { + + if (id_addr) { + /* find the device and return its RPA */ + rl_idx = ll_rl_find(id_addr_type, id_addr, NULL); + } + + if (rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].taken) { + memcpy(rl[rl_idx].curr_rpa.val, crpa, + sizeof(bt_addr_t)); + } + } +} + +u32_t ll_rl_crpa_get(bt_addr_le_t *id_addr, bt_addr_t *crpa) +{ + u8_t i; + + /* find the device and return its RPA */ + i = ll_rl_find(id_addr->type, id_addr->a.val, NULL); + if (i < ARRAY_SIZE(rl) && + mem_nz(rl[i].curr_rpa.val, sizeof(rl[i].curr_rpa.val))) { + bt_addr_copy(crpa, &rl[i].curr_rpa); + return 0; + } + + return BT_HCI_ERR_UNKNOWN_CONN_ID; +} + +u32_t ll_rl_lrpa_get(bt_addr_le_t *id_addr, bt_addr_t *lrpa) +{ + u8_t i; + + /* find the device and return the local RPA */ + i = ll_rl_find(id_addr->type, id_addr->a.val, NULL); + if (i < ARRAY_SIZE(rl)) { + bt_addr_copy(lrpa, rl[i].local_rpa); + return 0; + } + + return BT_HCI_ERR_UNKNOWN_CONN_ID; +} + +u32_t ll_rl_enable(u8_t enable) +{ + if (!rl_access_check(false)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + switch (enable) { + case BT_HCI_ADDR_RES_DISABLE: + rl_enable = 0; + break; + case BT_HCI_ADDR_RES_ENABLE: + rl_enable = 1; + break; + default: + return BT_HCI_ERR_INVALID_PARAM; + } + + return 0; +} + +void ll_rl_timeout_set(u16_t timeout) +{ + rpa_timeout_ms = timeout * 1000; +} + +u32_t ll_priv_mode_set(bt_addr_le_t 
*id_addr, u8_t mode) +{ + u8_t i; + + if (!rl_access_check(false)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + /* find the device and mark it as empty */ + i = ll_rl_find(id_addr->type, id_addr->a.val, NULL); + if (i < ARRAY_SIZE(rl)) { + switch (mode) { + case BT_HCI_LE_PRIVACY_MODE_NETWORK: + rl[i].dev = 0; + break; + case BT_HCI_LE_PRIVACY_MODE_DEVICE: + rl[i].dev = 1; + break; + default: + return BT_HCI_ERR_INVALID_PARAM; + } + } else { + return BT_HCI_ERR_UNKNOWN_CONN_ID; + } + + return 0; +} + +#endif /* CONFIG_BT_CTLR_PRIVACY */ + +void ll_filter_reset(bool init) +{ + wl_anon = 0; + +#if defined(CONFIG_BT_CTLR_PRIVACY) + wl_clear(); + + rl_enable = 0; + rpa_timeout_ms = DEFAULT_RPA_TIMEOUT_MS; + rpa_last_ms = -1; + rl_clear(); + if (init) { + k_delayed_work_init(&rpa_work, rpa_timeout); + } else { + k_delayed_work_cancel(&rpa_work); + } +#else + filter_clear(&wl_filter); +#endif /* CONFIG_BT_CTLR_PRIVACY */ +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_internal.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_internal.h new file mode 100644 index 00000000000..df783156325 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_internal.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +int lll_prepare_done(void *param); +int lll_done(void *param); +bool lll_is_done(void *param); +int lll_clk_on(void); +int lll_clk_on_wait(void); +int lll_clk_off(void); +u32_t lll_evt_offset_get(struct evt_hdr *evt); +u32_t lll_preempt_calc(struct evt_hdr *evt, u8_t ticker_id, + u32_t ticks_at_event); diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_master.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_master.c new file mode 100644 index 00000000000..ee06f528265 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_master.c @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: 
Apache-2.0 + */ + +#include + +#include +#include +#include + +#include "hal/ccm.h" +#include "hal/radio.h" +#include "hal/ticker.h" + +#include "util/memq.h" + +#include "pdu.h" + +#include "lll.h" +#include "lll_vendor.h" +#include "lll_conn.h" +#include "lll_master.h" +#include "lll_chan.h" + +#include "lll_internal.h" +#include "lll_tim_internal.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_master +#include "common/log.h" +#include +#include "hal/debug.h" + +static int init_reset(void); +static int prepare_cb(struct lll_prepare_param *prepare_param); + +int lll_master_init(void) +{ + int err; + + err = init_reset(); + if (err) { + return err; + } + + return 0; +} + +int lll_master_reset(void) +{ + int err; + + err = init_reset(); + if (err) { + return err; + } + + return 0; +} + +void lll_master_prepare(void *param) +{ + struct lll_prepare_param *p = param; + int err; + + err = lll_clk_on(); + LL_ASSERT(!err || err == -EINPROGRESS); + + err = lll_prepare(lll_conn_is_abort_cb, lll_conn_abort_cb, prepare_cb, + 0, p); + LL_ASSERT(!err || err == -EINPROGRESS); +} + +static int init_reset(void) +{ + return 0; +} + +static int prepare_cb(struct lll_prepare_param *prepare_param) +{ + struct lll_conn *lll = prepare_param->param; + struct pdu_data *pdu_data_tx; + u32_t ticks_at_event; + struct evt_hdr *evt; + u16_t event_counter; + u32_t remainder_us; + u8_t data_chan_use; + u32_t remainder; + u16_t lazy; + + DEBUG_RADIO_START_M(1); + + /* TODO: Do the below in ULL ? */ + + lazy = prepare_param->lazy; + + /* save the latency for use in event */ + lll->latency_prepare += lazy; + + /* calc current event counter value */ + event_counter = lll->event_counter + lll->latency_prepare; + + /* store the next event counter value */ + lll->event_counter = event_counter + 1; + + /* TODO: Do the above in ULL ? */ + + /* Reset connection event global variables */ + lll_conn_prepare_reset(); + + /* TODO: can we do something in ULL? 
*/ + lll->latency_event = lll->latency_prepare; + lll->latency_prepare = 0; + + if (lll->data_chan_sel) { +#if defined(CONFIG_BT_CTLR_CHAN_SEL_2) + data_chan_use = lll_chan_sel_2(lll->event_counter - 1, + lll->data_chan_id, + &lll->data_chan_map[0], + lll->data_chan_count); +#else /* !CONFIG_BT_CTLR_CHAN_SEL_2 */ + LL_ASSERT(0); +#endif /* !CONFIG_BT_CTLR_CHAN_SEL_2 */ + } else { + data_chan_use = lll_chan_sel_1(&lll->data_chan_use, + lll->data_chan_hop, + lll->latency_event, + &lll->data_chan_map[0], + lll->data_chan_count); + } + + /* Prepare the Tx PDU */ + lll_conn_pdu_tx_prep(lll, &pdu_data_tx); + pdu_data_tx->sn = lll->sn; + pdu_data_tx->nesn = lll->nesn; + + /* Start setting up of Radio h/w */ + radio_reset(); + /* TODO: other Tx Power settings */ + radio_tx_power_set(RADIO_TXP_DEFAULT); + radio_aa_set(lll->access_addr); + radio_crc_configure(((0x5bUL) | ((0x06UL) << 8) | ((0x00UL) << 16)), + (((u32_t)lll->crc_init[2] << 16) | + ((u32_t)lll->crc_init[1] << 8) | + ((u32_t)lll->crc_init[0]))); + lll_chan_set(data_chan_use); + + /* setup the radio tx packet buffer */ + lll_conn_tx_pkt_set(lll, pdu_data_tx); + + radio_isr_set(lll_conn_isr_tx, lll); + + radio_tmr_tifs_set(TIFS_US); + +#if defined(CONFIG_BT_CTLR_PHY) + radio_switch_complete_and_rx(lll->phy_rx); +#else /* !CONFIG_BT_CTLR_PHY */ + radio_switch_complete_and_rx(0); +#endif /* !CONFIG_BT_CTLR_PHY */ + + ticks_at_event = prepare_param->ticks_at_expire; + evt = HDR_LLL2EVT(lll); + ticks_at_event += lll_evt_offset_get(evt); + ticks_at_event += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US); + + remainder = prepare_param->remainder; + remainder_us = radio_tmr_start(1, ticks_at_event, remainder); + + /* capture end of Tx-ed PDU, used to calculate HCTO. 
*/ + radio_tmr_end_capture(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) + radio_gpio_pa_setup(); + +#if defined(CONFIG_BT_CTLR_PHY) + radio_gpio_pa_lna_enable(remainder_us + + radio_tx_ready_delay_get(lll->phy_tx, + lll->phy_flags) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#else /* !CONFIG_BT_CTLR_PHY */ + radio_gpio_pa_lna_enable(remainder_us + + radio_tx_ready_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#endif /* !CONFIG_BT_CTLR_PHY */ +#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + ARG_UNUSED(remainder_us); +#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + +#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \ + (EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US) + /* check if preempt to start has changed */ + if (lll_preempt_calc(evt, TICKER_ID_CONN_BASE, ticks_at_event)) { + radio_isr_set(lll_conn_isr_abort, lll); + radio_disable(); + } else +#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */ + { + u32_t ret; + + ret = lll_prepare_done(lll); + LL_ASSERT(!ret); + } + + DEBUG_RADIO_START_M(1); + + return 0; +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_master.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_master.h new file mode 100644 index 00000000000..64d73c1a171 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_master.h @@ -0,0 +1,9 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +int lll_master_init(void); +int lll_master_reset(void); +void lll_master_prepare(void *param); diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_prof.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_prof.c new file mode 100644 index 00000000000..0dad4f236e8 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_prof.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include "hal/ccm.h" +#include "hal/radio.h" + +#include "util/memq.h" + +#include "pdu.h" + 
+#include "lll.h" + +static u8_t latency_min = (u8_t) -1; +static u8_t latency_max; +static u8_t latency_prev; +static u8_t cputime_min = (u8_t) -1; +static u8_t cputime_max; +static u8_t cputime_prev; +static u32_t timestamp_latency; + +void lll_prof_latency_capture(void) +{ + /* sample the packet timer, use it to calculate ISR latency + * and generate the profiling event at the end of the ISR. + */ + radio_tmr_sample(); +} + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) +static u32_t timestamp_radio_end; + +u32_t lll_prof_radio_end_backup(void) +{ + /* PA enable is overwriting packet end used in ISR profiling, hence + * back it up for later use. + */ + timestamp_radio_end = radio_tmr_end_get(); + + return timestamp_radio_end; +} +#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + +void lll_prof_cputime_capture(void) +{ + /* get the ISR latency sample */ + timestamp_latency = radio_tmr_sample_get(); + + /* sample the packet timer again, use it to calculate ISR execution time + * and use it in profiling event + */ + radio_tmr_sample(); +} + +void lll_prof_send(void) +{ + u8_t latency, cputime, prev; + u8_t chg = 0; + + /* calculate the elapsed time in us since on-air radio packet end + * to ISR entry + */ +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) + latency = timestamp_latency - timestamp_radio_end; +#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + latency = timestamp_latency - radio_tmr_end_get(); +#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + + /* check changes in min, avg and max of latency */ + if (latency > latency_max) { + latency_max = latency; + chg = 1; + } + if (latency < latency_min) { + latency_min = latency; + chg = 1; + } + + /* check for +/- 1us change */ + prev = ((u16_t)latency_prev + latency) >> 1; + if (prev != latency_prev) { + latency_prev = latency; + chg = 1; + } + + /* calculate the elapsed time in us since ISR entry */ + cputime = radio_tmr_sample_get() - timestamp_latency; + + /* check changes in min, avg and max */ + if (cputime > cputime_max) { + cputime_max = 
cputime; + chg = 1; + } + + if (cputime < cputime_min) { + cputime_min = cputime; + chg = 1; + } + + /* check for +/- 1us change */ + prev = ((u16_t)cputime_prev + cputime) >> 1; + if (prev != cputime_prev) { + cputime_prev = cputime; + chg = 1; + } + + /* generate event if any change */ + if (chg) { + struct node_rx_pdu *rx; + + /* NOTE: enqueue only if rx buffer available, else ignore */ + rx = ull_pdu_rx_alloc_peek(3); + if (rx) { + struct profile *p; + + ull_pdu_rx_alloc(); + + rx->hdr.type = NODE_RX_TYPE_PROFILE; + rx->hdr.handle = 0xFFFF; + + p = &((struct pdu_data *)rx->pdu)->profile; + p->lcur = latency; + p->lmin = latency_min; + p->lmax = latency_max; + p->cur = cputime; + p->min = cputime_min; + p->max = cputime_max; + + ull_rx_put(rx->hdr.link, rx); + ull_rx_sched(); + } + } +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_prof_internal.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_prof_internal.h new file mode 100644 index 00000000000..df4243894ea --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_prof_internal.h @@ -0,0 +1,10 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +void lll_prof_latency_capture(void); +void lll_prof_radio_end_backup(void); +void lll_prof_cputime_capture(void); +void lll_prof_send(void); diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_scan.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_scan.c new file mode 100644 index 00000000000..b7b5645530b --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_scan.c @@ -0,0 +1,1054 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +#include "hal/ccm.h" +#include "hal/radio.h" +#include "hal/ticker.h" + +#include "util/util.h" +#include "util/memq.h" +#include "util/mayfly.h" + +#include "ticker/ticker.h" + +#include "pdu.h" + +#include "lll.h" +#include 
"lll_vendor.h" +#include "lll_scan.h" +#include "lll_conn.h" +#include "lll_chan.h" + +#include "lll_internal.h" +#include "lll_tim_internal.h" +#include "lll_prof_internal.h" + +#include "lll_filter.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_scan +#include "common/log.h" +#include +#include "hal/debug.h" + +static int init_reset(void); +static int prepare_cb(struct lll_prepare_param *prepare_param); +static int is_abort_cb(void *next, int prio, void *curr, + lll_prepare_cb_t *resume_cb, int *resume_prio); +static void abort_cb(struct lll_prepare_param *prepare_param, void *param); +static void ticker_stop_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param); +static void ticker_op_start_cb(u32_t status, void *param); +static void isr_rx(void *param); +static void isr_tx(void *param); +static void isr_done(void *param); +static void isr_abort(void *param); +static void isr_cleanup(void *param); +static void isr_race(void *param); + +static inline bool isr_rx_scan_check(struct lll_scan *lll, u8_t irkmatch_ok, + u8_t devmatch_ok, u8_t rl_idx); +static inline u32_t isr_rx_pdu(struct lll_scan *lll, u8_t devmatch_ok, + u8_t devmatch_id, u8_t irkmatch_ok, + u8_t irkmatch_id, u8_t rl_idx, u8_t rssi_ready); +static inline bool isr_scan_init_check(struct lll_scan *lll, + struct pdu_adv *pdu, u8_t rl_idx); +static inline bool isr_scan_init_adva_check(struct lll_scan *lll, + struct pdu_adv *pdu, u8_t rl_idx); +static inline bool isr_scan_tgta_check(struct lll_scan *lll, bool init, + struct pdu_adv *pdu, u8_t rl_idx, + bool *dir_report); +static inline bool isr_scan_tgta_rpa_check(struct lll_scan *lll, + struct pdu_adv *pdu, + bool *dir_report); +static inline bool isr_scan_rsp_adva_matches(struct pdu_adv *srsp); +static u32_t isr_rx_scan_report(struct lll_scan *lll, u8_t rssi_ready, + u8_t rl_idx, bool dir_report); + + +int lll_scan_init(void) +{ + int err; + + err = init_reset(); + if (err) { + return err; + } + + return 0; +} + +int 
lll_scan_reset(void) +{ + int err; + + err = init_reset(); + if (err) { + return err; + } + + return 0; +} + +void lll_scan_prepare(void *param) +{ + struct lll_prepare_param *p = param; + int err; + + err = lll_clk_on(); + LL_ASSERT(!err || err == -EINPROGRESS); + + err = lll_prepare(is_abort_cb, abort_cb, prepare_cb, 0, p); + LL_ASSERT(!err || err == -EINPROGRESS); +} + +static int init_reset(void) +{ + return 0; +} + +static int prepare_cb(struct lll_prepare_param *prepare_param) +{ + struct lll_scan *lll = prepare_param->param; + struct node_rx_pdu *node_rx; + u32_t aa = 0x8e89bed6; + u32_t ticks_at_event; + struct evt_hdr *evt; + u32_t remainder_us; + u32_t remainder; + + DEBUG_RADIO_START_O(1); + + /* Check if stopped (on connection establishment race between LLL and + * ULL. + */ + if (lll_is_stop(lll)) { + int err; + + err = lll_clk_off(); + LL_ASSERT(!err || err == -EBUSY); + + lll_done(NULL); + + DEBUG_RADIO_START_O(0); + return 0; + } + + node_rx = ull_pdu_rx_alloc_peek(1); + LL_ASSERT(node_rx); + + radio_reset(); + /* TODO: other Tx Power settings */ + radio_tx_power_set(0); + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + /* TODO: if coded we use S8? 
*/ + radio_phy_set(lll->phy, 1); + radio_pkt_configure(8, PDU_AC_PAYLOAD_SIZE_MAX, (lll->phy << 1)); +#else /* !CONFIG_BT_CTLR_ADV_EXT */ + radio_phy_set(0, 0); + radio_pkt_configure(8, PDU_AC_PAYLOAD_SIZE_MAX, 0); +#endif /* !CONFIG_BT_CTLR_ADV_EXT */ + + radio_pkt_rx_set(node_rx->pdu); + + radio_aa_set((u8_t *)&aa); + radio_crc_configure(((0x5bUL) | ((0x06UL) << 8) | ((0x00UL) << 16)), + 0x555555); + + lll_chan_set(37 + lll->chan); + + radio_isr_set(isr_rx, lll); + + radio_tmr_tifs_set(TIFS_US); + radio_switch_complete_and_tx(0, 0, 0, 0); + +#if defined(CONFIG_BT_CTLR_PRIVACY) + if (ctrl_rl_enabled()) { + struct ll_filter *filter = + ctrl_filter_get(!!(lll->filter_policy & 0x1)); + u8_t count, *irks = ctrl_irks_get(&count); + + radio_filter_configure(filter->enable_bitmask, + filter->addr_type_bitmask, + (u8_t *)filter->bdaddr); + + radio_ar_configure(count, irks); + } else +#endif /* CONFIG_BT_CTLR_PRIVACY */ + +#if defined(CONFIG_BT_CTLR_FILTER) + /* Setup Radio Filter */ + if (lll->filter_policy) { + + struct ll_filter *wl = ctrl_filter_get(true); + + radio_filter_configure(wl->enable_bitmask, + wl->addr_type_bitmask, + (u8_t *)wl->bdaddr); + } +#endif /* CONFIG_BT_CTLR_FILTER */ + + ticks_at_event = prepare_param->ticks_at_expire; + evt = HDR_LLL2EVT(lll); + ticks_at_event += lll_evt_offset_get(evt); + ticks_at_event += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US); + + remainder = prepare_param->remainder; + remainder_us = radio_tmr_start(0, ticks_at_event, remainder); + + /* capture end of Rx-ed PDU, for initiator to calculate first + * master event. 
+ */ + radio_tmr_end_capture(); + + /* scanner always measures RSSI */ + radio_rssi_measure(); + +#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_lna_setup(); + radio_gpio_pa_lna_enable(remainder_us + + radio_rx_ready_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_LNA_OFFSET); +#else /* !CONFIG_BT_CTLR_GPIO_LNA_PIN */ + ARG_UNUSED(remainder_us); +#endif /* !CONFIG_BT_CTLR_GPIO_LNA_PIN */ + +#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \ + (EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US) + /* check if preempt to start has changed */ + if (lll_preempt_calc(evt, TICKER_ID_SCAN_BASE, ticks_at_event)) { + radio_isr_set(isr_abort, lll); + radio_disable(); + } else +#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */ + { + u32_t ret; + + if (lll->ticks_window) { + /* start window close timeout */ + ret = ticker_start(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_LLL, + TICKER_ID_SCAN_STOP, + ticks_at_event, lll->ticks_window, + TICKER_NULL_PERIOD, + TICKER_NULL_REMAINDER, + TICKER_NULL_LAZY, TICKER_NULL_SLOT, + ticker_stop_cb, lll, + ticker_op_start_cb, + (void *)__LINE__); + LL_ASSERT((ret == TICKER_STATUS_SUCCESS) || + (ret == TICKER_STATUS_BUSY)); + } + + ret = lll_prepare_done(lll); + LL_ASSERT(!ret); + } + + DEBUG_RADIO_START_O(1); + + return 0; +} + +static int resume_prepare_cb(struct lll_prepare_param *p) +{ + struct evt_hdr *evt = HDR_LLL2EVT(p->param); + + p->ticks_at_expire = ticker_ticks_now_get() - lll_evt_offset_get(evt); + p->remainder = 0; + p->lazy = 0; + + return prepare_cb(p); +} + +static int is_abort_cb(void *next, int prio, void *curr, + lll_prepare_cb_t *resume_cb, int *resume_prio) +{ + struct lll_scan *lll = curr; + + /* TODO: check prio */ + if (next != curr) { + int err; + + /* wrap back after the pre-empter */ + *resume_cb = resume_prepare_cb; + *resume_prio = 0; /* TODO: */ + + /* Retain HF clk */ + err = lll_clk_on(); + LL_ASSERT(!err || err == -EINPROGRESS); + + return -EAGAIN; + } + + radio_isr_set(isr_done, lll); + radio_disable(); + + if 
(++lll->chan == 3) { + lll->chan = 0; + } + + lll_chan_set(37 + lll->chan); + + return 0; +} + +static void abort_cb(struct lll_prepare_param *prepare_param, void *param) +{ + int err; + + /* NOTE: This is not a prepare being cancelled */ + if (!prepare_param) { + /* Perform event abort here. + * After event has been cleanly aborted, clean up resources + * and dispatch event done. + */ + radio_isr_set(isr_abort, param); + radio_disable(); + return; + } + + /* NOTE: Else clean the top half preparations of the aborted event + * currently in preparation pipeline. + */ + err = lll_clk_off(); + LL_ASSERT(!err || err == -EBUSY); + + lll_done(param); +} + +static void ticker_stop_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param) +{ + radio_isr_set(isr_cleanup, param); + radio_disable(); +} + +static void ticker_op_start_cb(u32_t status, void *param) +{ + ARG_UNUSED(param); + + LL_ASSERT(status == TICKER_STATUS_SUCCESS); +} + +static void isr_rx(void *param) +{ + u8_t trx_done; + u8_t crc_ok; + u8_t devmatch_ok; + u8_t devmatch_id; + u8_t irkmatch_ok; + u8_t irkmatch_id; + u8_t rssi_ready; + u8_t rl_idx; + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_latency_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + /* Read radio status and events */ + trx_done = radio_is_done(); + if (trx_done) { + crc_ok = radio_crc_is_valid(); + devmatch_ok = radio_filter_has_match(); + devmatch_id = radio_filter_match_get(); + irkmatch_ok = radio_ar_has_match(); + irkmatch_id = radio_ar_match_get(); + rssi_ready = radio_rssi_is_ready(); + } else { + crc_ok = devmatch_ok = irkmatch_ok = rssi_ready = 0; + devmatch_id = irkmatch_id = 0xFF; + } + + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + radio_filter_status_reset(); + radio_ar_status_reset(); + radio_rssi_status_reset(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \ + defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_pa_lna_disable(); +#endif /* 
CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */ + + if (!trx_done) { + goto isr_rx_do_close; + } + +#if defined(CONFIG_BT_CTLR_PRIVACY) + rl_idx = devmatch_ok ? ctrl_rl_idx(!!(_radio.scanner.filter_policy & + 0x01), + devmatch_id) : + irkmatch_ok ? ctrl_rl_irk_idx(irkmatch_id) : + FILTER_IDX_NONE; +#else + rl_idx = FILTER_IDX_NONE; +#endif + if (crc_ok && isr_rx_scan_check(param, irkmatch_ok, devmatch_ok, + rl_idx)) { + u32_t err; + + err = isr_rx_pdu(param, devmatch_ok, devmatch_id, irkmatch_ok, + irkmatch_id, rl_idx, rssi_ready); + if (!err) { +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_send(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + return; + } + } + +isr_rx_do_close: + radio_isr_set(isr_done, param); + radio_disable(); +} + +static void isr_tx(void *param) +{ + struct node_rx_pdu *node_rx; + u32_t hcto; + + /* TODO: MOVE to a common interface, isr_lll_radio_status? */ + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \ + defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_pa_lna_disable(); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */ + /* TODO: MOVE ^^ */ + + node_rx = ull_pdu_rx_alloc_peek(1); + LL_ASSERT(node_rx); + + radio_isr_set(isr_rx, param); + radio_tmr_tifs_set(TIFS_US); + radio_switch_complete_and_tx(0, 0, 0, 0); + radio_pkt_rx_set(node_rx->pdu); + + /* assert if radio packet ptr is not set and radio started rx */ + LL_ASSERT(!radio_is_ready()); + +#if defined(CONFIG_BT_CTLR_PRIVACY) + if (ctrl_rl_enabled()) { + u8_t count, *irks = ctrl_irks_get(&count); + + radio_ar_configure(count, irks); + } +#endif /* CONFIG_BT_CTLR_PRIVACY */ + + /* +/- 2us active clock jitter, +1 us hcto compensation */ + hcto = radio_tmr_tifs_base_get() + TIFS_US + 4 + 1; + hcto += radio_rx_chain_delay_get(0, 0); + hcto += addr_us_get(0); + hcto -= radio_tx_chain_delay_get(0, 0); + + radio_tmr_hcto_configure(hcto); + + radio_rssi_measure(); + 
+#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_lna_setup(); + radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - 4 - + radio_tx_chain_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_LNA_OFFSET); +#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */ +} + +static void isr_done(void *param) +{ + struct node_rx_pdu *node_rx; + u32_t start_us; + + /* TODO: MOVE to a common interface, isr_lll_radio_status? */ + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + radio_filter_status_reset(); + radio_ar_status_reset(); + radio_rssi_status_reset(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \ + defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_pa_lna_disable(); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */ + /* TODO: MOVE ^^ */ + + node_rx = ull_pdu_rx_alloc_peek(1); + LL_ASSERT(node_rx); + + radio_tmr_tifs_set(TIFS_US); + radio_switch_complete_and_tx(0, 0, 0, 0); + radio_pkt_rx_set(node_rx->pdu); + radio_rssi_measure(); + +#if defined(CONFIG_BT_CTLR_PRIVACY) + if (ctrl_rl_enabled()) { + u8_t count, *irks = ctrl_irks_get(&count); + + radio_ar_configure(count, irks); + } +#endif /* CONFIG_BT_CTLR_PRIVACY */ + + radio_isr_set(isr_rx, param); + +#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + start_us = radio_tmr_start_now(0); + + radio_gpio_lna_setup(); + radio_gpio_pa_lna_enable(start_us + + radio_rx_ready_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_LNA_OFFSET); +#else /* !CONFIG_BT_CTLR_GPIO_LNA_PIN */ + ARG_UNUSED(start_us); + + radio_rx_enable(); +#endif /* !CONFIG_BT_CTLR_GPIO_LNA_PIN */ + + /* capture end of Rx-ed PDU, for initiator to calculate first + * master event. + */ + radio_tmr_end_capture(); +} + +static void isr_abort(void *param) +{ + /* Scanner stop can expire while here in this ISR. + * Deferred attempt to stop can fail as it would have + * expired, hence ignore failure. 
+ */ + ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_LLL, + TICKER_ID_SCAN_STOP, NULL, NULL); + + isr_cleanup(param); +} + +static void isr_cleanup(void *param) +{ + struct lll_scan *lll = param; + struct node_rx_hdr *node_rx; + int err; + + if (lll_is_done(param)) { + return; + } + + radio_filter_disable(); + + if (++lll->chan == 3) { + lll->chan = 0; + } + +#if defined(CONFIG_BT_HCI_MESH_EXT) + if (_radio.advertiser.is_enabled && _radio.advertiser.is_mesh && + !_radio.advertiser.retry) { + mayfly_mesh_stop(NULL); + } +#endif /* CONFIG_BT_HCI_MESH_EXT */ + +#if defined(CONFIG_BT_CTLR_SCAN_INDICATION) + node_rx = ull_pdu_rx_alloc_peek(3); + if (node_rx) { + ull_pdu_rx_alloc(); + + /* TODO: add other info by defining a payload struct */ + node_rx->type = NODE_RX_TYPE_SCAN_INDICATION; + + ull_rx_put(node_rx->link, node_rx); + ull_rx_sched(); + } +#else /* !CONFIG_BT_CTLR_SCAN_INDICATION */ + ARG_UNUSED(node_rx); +#endif /* !CONFIG_BT_CTLR_SCAN_INDICATION */ + + radio_isr_set(isr_race, param); + radio_tmr_stop(); + + err = lll_clk_off(); + LL_ASSERT(!err || err == -EBUSY); + + lll_done(NULL); +} + +static void isr_race(void *param) +{ + /* NOTE: lll_disable could have a race with ... 
*/ + radio_status_reset(); +} + +static inline bool isr_rx_scan_check(struct lll_scan *lll, u8_t irkmatch_ok, + u8_t devmatch_ok, u8_t rl_idx) +{ +#if defined(CONFIG_BT_CTLR_PRIVACY) + return (((_radio.scanner.filter_policy & 0x01) == 0) && + (!devmatch_ok || ctrl_rl_idx_allowed(irkmatch_ok, rl_idx))) || + (((_radio.scanner.filter_policy & 0x01) != 0) && + (devmatch_ok || ctrl_irk_whitelisted(rl_idx))); +#else + return ((lll->filter_policy & 0x01) == 0) || + devmatch_ok; +#endif /* CONFIG_BT_CTLR_PRIVACY */ +} + +static inline u32_t isr_rx_pdu(struct lll_scan *lll, u8_t devmatch_ok, + u8_t devmatch_id, u8_t irkmatch_ok, + u8_t irkmatch_id, u8_t rl_idx, u8_t rssi_ready) +{ + struct node_rx_pdu *node_rx; + struct pdu_adv *pdu_adv_rx; + bool dir_report = false; + + node_rx = ull_pdu_rx_alloc_peek(1); + LL_ASSERT(node_rx); + + pdu_adv_rx = (void *)node_rx->pdu; + + if (0) { +#if defined(CONFIG_BT_CENTRAL) + /* Initiator */ + } else if ((lll->conn) && + isr_scan_init_check(lll, pdu_adv_rx, rl_idx)) { + struct lll_conn *lll_conn; + struct node_rx_ftr *ftr; + struct node_rx_pdu *rx; + struct pdu_adv *pdu_tx; + u32_t conn_interval_us; + u32_t conn_offset_us; + u32_t conn_space_us; + struct evt_hdr *evt; + u32_t pdu_end_us; +#if defined(CONFIG_BT_CTLR_PRIVACY) + bt_addr_t *lrpa; +#endif /* CONFIG_BT_CTLR_PRIVACY */ + int ret; + + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + rx = ull_pdu_rx_alloc_peek(4); + } else { + rx = ull_pdu_rx_alloc_peek(3); + } + + if (!rx) { + return -ENOBUFS; + } + + pdu_end_us = radio_tmr_end_get(); + if (!lll->ticks_window) { + u32_t scan_interval_us; + + /* FIXME: is this correct for continuous scanning? 
*/ + scan_interval_us = lll->interval * 625; + pdu_end_us %= scan_interval_us; + } + evt = HDR_LLL2EVT(lll); + if (pdu_end_us > (HAL_TICKER_TICKS_TO_US(evt->ticks_slot) - + 502 - EVENT_OVERHEAD_START_US - + (EVENT_JITTER_US << 1))) { + return -ETIME; + } + + radio_isr_set(isr_cleanup, lll); + radio_switch_complete_and_disable(); + + /* Acquire the connection context */ + lll_conn = lll->conn; + + /* Tx the connect request packet */ + pdu_tx = (void *)radio_pkt_scratch_get(); + pdu_tx->type = PDU_ADV_TYPE_CONNECT_IND; + + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + pdu_tx->chan_sel = 1; + } else { + pdu_tx->chan_sel = 0; + } + + pdu_tx->rx_addr = pdu_adv_rx->tx_addr; + pdu_tx->len = sizeof(struct pdu_adv_connect_ind); +#if defined(CONFIG_BT_CTLR_PRIVACY) + lrpa = ctrl_lrpa_get(rl_idx); + if (_radio.scanner.rpa_gen && lrpa) { + pdu_tx->tx_addr = 1; + memcpy(&pdu_tx->connect_ind.init_addr[0], lrpa->val, + BDADDR_SIZE); + } else { +#else + if (1) { +#endif /* CONFIG_BT_CTLR_PRIVACY */ + pdu_tx->tx_addr = lll->init_addr_type; + memcpy(&pdu_tx->connect_ind.init_addr[0], + &lll->init_addr[0], BDADDR_SIZE); + } + memcpy(&pdu_tx->connect_ind.adv_addr[0], + &pdu_adv_rx->adv_ind.addr[0], BDADDR_SIZE); + memcpy(&pdu_tx->connect_ind.access_addr[0], + &lll_conn->access_addr[0], 4); + memcpy(&pdu_tx->connect_ind.crc_init[0], + &lll_conn->crc_init[0], 3); + pdu_tx->connect_ind.win_size = 1; + + conn_interval_us = (u32_t)lll_conn->interval * 1250; + conn_offset_us = radio_tmr_end_get() + 502 + 1250; + + if (!IS_ENABLED(CONFIG_BT_CTLR_SCHED_ADVANCED) || + lll->conn_win_offset_us == 0) { + conn_space_us = conn_offset_us; + pdu_tx->connect_ind.win_offset = 0; + } else { + conn_space_us = lll->conn_win_offset_us; + while ((conn_space_us & ((u32_t)1 << 31)) || + (conn_space_us < conn_offset_us)) { + conn_space_us += conn_interval_us; + } + pdu_tx->connect_ind.win_offset = + (conn_space_us - conn_offset_us) / 1250; + pdu_tx->connect_ind.win_size++; + } + + pdu_tx->connect_ind.interval 
= lll_conn->interval; + pdu_tx->connect_ind.latency = lll_conn->latency; + pdu_tx->connect_ind.timeout = lll->conn_timeout; + memcpy(&pdu_tx->connect_ind.chan_map[0], + &lll_conn->data_chan_map[0], + sizeof(pdu_tx->connect_ind.chan_map)); + pdu_tx->connect_ind.hop = lll_conn->data_chan_hop; + pdu_tx->connect_ind.sca = lll_conn_sca_local_get(); + + radio_pkt_tx_set(pdu_tx); + + /* assert if radio packet ptr is not set and radio started tx */ + LL_ASSERT(!radio_is_ready()); + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_cputime_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + /* PA/LNA enable is overwriting packet end used in ISR + * profiling, hence back it up for later use. + */ + lll_prof_radio_end_backup(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + radio_gpio_pa_setup(); + radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - + radio_rx_chain_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */ + + /* block CPU so that there is no CRC error on pdu tx, + * this is only needed if we want the CPU to sleep. 
+ * while(!radio_has_disabled()) + * {cpu_sleep();} + * radio_status_reset(); + */ + + /* Stop further LLL radio events */ + ret = lll_stop(lll); + LL_ASSERT(!ret); + + rx = ull_pdu_rx_alloc(); + + rx->hdr.type = NODE_RX_TYPE_CONNECTION; + rx->hdr.handle = 0xffff; + + ftr = (void *)((u8_t *)rx->pdu + + (offsetof(struct pdu_adv, connect_ind) + + sizeof(struct pdu_adv_connect_ind))); + + ftr->param = lll; + ftr->ticks_anchor = radio_tmr_start_get(); + ftr->us_radio_end = conn_space_us - + radio_tx_chain_delay_get(0, 0); + ftr->us_radio_rdy = radio_tx_ready_delay_get(0, 0); + + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + ftr->extra = ull_pdu_rx_alloc(); + } + + ull_rx_put(rx->hdr.link, rx); + ull_rx_sched(); + + return 0; +#endif /* CONFIG_BT_CENTRAL */ + + /* Active scanner */ + } else if (((pdu_adv_rx->type == PDU_ADV_TYPE_ADV_IND) || + (pdu_adv_rx->type == PDU_ADV_TYPE_SCAN_IND)) && + lll->type && +#if defined(CONFIG_BT_CENTRAL) + !lll->conn) { +#else /* !CONFIG_BT_CENTRAL */ + 1) { +#endif /* !CONFIG_BT_CENTRAL */ + struct pdu_adv *pdu_tx; +#if defined(CONFIG_BT_CTLR_PRIVACY) + bt_addr_t *lrpa; +#endif /* CONFIG_BT_CTLR_PRIVACY */ + u32_t err; + + /* save the adv packet */ + err = isr_rx_scan_report(lll, rssi_ready, + irkmatch_ok ? 
rl_idx : FILTER_IDX_NONE, + false); + if (err) { + return err; + } + + /* prepare the scan request packet */ + pdu_tx = (void *)radio_pkt_scratch_get(); + pdu_tx->type = PDU_ADV_TYPE_SCAN_REQ; + pdu_tx->rx_addr = pdu_adv_rx->tx_addr; + pdu_tx->len = sizeof(struct pdu_adv_scan_req); +#if defined(CONFIG_BT_CTLR_PRIVACY) + lrpa = ctrl_lrpa_get(rl_idx); + if (_radio.scanner.rpa_gen && lrpa) { + pdu_tx->tx_addr = 1; + memcpy(&pdu_tx->scan_req.scan_addr[0], lrpa->val, + BDADDR_SIZE); + } else { +#else + if (1) { +#endif /* CONFIG_BT_CTLR_PRIVACY */ + pdu_tx->tx_addr = lll->init_addr_type; + memcpy(&pdu_tx->scan_req.scan_addr[0], + &lll->init_addr[0], BDADDR_SIZE); + } + memcpy(&pdu_tx->scan_req.adv_addr[0], + &pdu_adv_rx->adv_ind.addr[0], BDADDR_SIZE); + + /* switch scanner state to active */ + lll->state = 1; + radio_isr_set(isr_tx, lll); + + radio_tmr_tifs_set(TIFS_US); + radio_switch_complete_and_rx(0); + radio_pkt_tx_set(pdu_tx); + + /* assert if radio packet ptr is not set and radio started tx */ + LL_ASSERT(!radio_is_ready()); + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + lll_prof_cputime_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + /* capture end of Tx-ed PDU, used to calculate HCTO. */ + radio_tmr_end_capture(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + /* PA/LNA enable is overwriting packet end used in ISR + * profiling, hence back it up for later use. 
+ */ + lll_prof_radio_end_backup(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + + radio_gpio_pa_setup(); + radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - + radio_rx_chain_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */ + + return 0; + } + /* Passive scanner or scan responses */ + else if (((pdu_adv_rx->type == PDU_ADV_TYPE_ADV_IND) || + ((pdu_adv_rx->type == PDU_ADV_TYPE_DIRECT_IND) && + (/* allow directed adv packets addressed to this device */ + isr_scan_tgta_check(lll, false, pdu_adv_rx, rl_idx, + &dir_report))) || + (pdu_adv_rx->type == PDU_ADV_TYPE_NONCONN_IND) || + (pdu_adv_rx->type == PDU_ADV_TYPE_SCAN_IND) || +#if defined(CONFIG_BT_CTLR_ADV_EXT) + ((pdu_adv_rx->type == PDU_ADV_TYPE_EXT_IND) && + (lll->phy)) || +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + ((pdu_adv_rx->type == PDU_ADV_TYPE_SCAN_RSP) && + (lll->state != 0) && + isr_scan_rsp_adva_matches(pdu_adv_rx))) && + (pdu_adv_rx->len != 0) && +#if defined(CONFIG_BT_CENTRAL) + !lll->conn) { +#else /* !CONFIG_BT_CENTRAL */ + 1) { +#endif /* !CONFIG_BT_CENTRAL */ + u32_t err; + + /* save the scan response packet */ + err = isr_rx_scan_report(lll, rssi_ready, + irkmatch_ok ? 
rl_idx : + FILTER_IDX_NONE, + dir_report); + if (err) { + return err; + } + } + /* invalid PDU */ + else { + /* ignore and close this rx/tx chain ( code below ) */ + return 1; + } + + return 1; +} + +static inline bool isr_scan_init_check(struct lll_scan *lll, + struct pdu_adv *pdu, u8_t rl_idx) +{ + return ((((lll->filter_policy & 0x01) != 0) || + isr_scan_init_adva_check(lll, pdu, rl_idx)) && + ((pdu->type == PDU_ADV_TYPE_ADV_IND) || + ((pdu->type == PDU_ADV_TYPE_DIRECT_IND) && + (/* allow directed adv packets addressed to this device */ + isr_scan_tgta_check(lll, true, pdu, rl_idx, NULL))))); +} + +static inline bool isr_scan_init_adva_check(struct lll_scan *lll, + struct pdu_adv *pdu, u8_t rl_idx) +{ +#if defined(CONFIG_BT_CTLR_PRIVACY) + /* Only applies to initiator with no whitelist */ + if (rl_idx != FILTER_IDX_NONE) { + return (rl_idx == lll->rl_idx); + } +#endif /* CONFIG_BT_CTLR_PRIVACY */ + return ((lll->adv_addr_type == pdu->tx_addr) && + !memcmp(lll->adv_addr, &pdu->adv_ind.addr[0], BDADDR_SIZE)); +} + +static inline bool isr_scan_tgta_check(struct lll_scan *lll, bool init, + struct pdu_adv *pdu, u8_t rl_idx, + bool *dir_report) +{ +#if defined(CONFIG_BT_CTLR_PRIVACY) + if (ctrl_rl_addr_resolve(pdu->rx_addr, + pdu->direct_ind.tgt_addr, rl_idx)) { + return true; + } else if (init && _radio.scanner.rpa_gen && ctrl_lrpa_get(rl_idx)) { + /* Initiator generating RPAs, and could not resolve TargetA: + * discard + */ + return false; + } +#endif /* CONFIG_BT_CTLR_PRIVACY */ + + return (((lll->init_addr_type == pdu->rx_addr) && + !memcmp(lll->init_addr, pdu->direct_ind.tgt_addr, + BDADDR_SIZE))) || + /* allow directed adv packets where TargetA address + * is resolvable private address (scanner only) + */ + isr_scan_tgta_rpa_check(lll, pdu, dir_report); +} + +static inline bool isr_scan_tgta_rpa_check(struct lll_scan *lll, + struct pdu_adv *pdu, + bool *dir_report) +{ + if (((lll->filter_policy & 0x02) != 0) && + (pdu->rx_addr != 0) && + 
((pdu->direct_ind.tgt_addr[5] & 0xc0) == 0x40)) { + + if (dir_report) { + *dir_report = true; + } + + return true; + } + + return false; +} + +static inline bool isr_scan_rsp_adva_matches(struct pdu_adv *srsp) +{ + struct pdu_adv *sreq = (void *)radio_pkt_scratch_get(); + + return ((sreq->rx_addr == srsp->tx_addr) && + (memcmp(&sreq->scan_req.adv_addr[0], + &srsp->scan_rsp.addr[0], BDADDR_SIZE) == 0)); +} + +static u32_t isr_rx_scan_report(struct lll_scan *lll, u8_t rssi_ready, + u8_t rl_idx, bool dir_report) +{ + struct node_rx_pdu *node_rx; + struct pdu_adv *pdu_adv_rx; + u8_t *extra; + + node_rx = ull_pdu_rx_alloc_peek(3); + if (!node_rx) { + return 1; + } + ull_pdu_rx_alloc(); + + /* Prepare the report (adv or scan resp) */ + node_rx->hdr.handle = 0xffff; + if (0) { + +#if defined(CONFIG_BT_HCI_MESH_EXT) + } else if (_radio.advertiser.is_enabled && + _radio.advertiser.is_mesh) { + node_rx->hdr.type = NODE_RX_TYPE_MESH_REPORT; +#endif /* CONFIG_BT_HCI_MESH_EXT */ + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + } else if (lll->phy) { + switch (lll->phy) { + case BIT(0): + node_rx->hdr.type = NODE_RX_TYPE_EXT_1M_REPORT; + break; + + case BIT(2): + node_rx->hdr.type = NODE_RX_TYPE_EXT_CODED_REPORT; + break; + + default: + LL_ASSERT(0); + break; + } +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + } else { + node_rx->hdr.type = NODE_RX_TYPE_REPORT; + } + + pdu_adv_rx = (void *)node_rx->pdu; + extra = &((u8_t *)pdu_adv_rx)[offsetof(struct pdu_adv, payload) + + pdu_adv_rx->len]; + /* save the RSSI value */ + *extra = (rssi_ready) ? (radio_rssi_get() & 0x7f) : 0x7f; + extra += PDU_AC_SIZE_RSSI; + +#if defined(CONFIG_BT_CTLR_PRIVACY) + /* save the resolving list index. */ + *extra = rl_idx; + extra += PDU_AC_SIZE_PRIV; +#endif /* CONFIG_BT_CTLR_PRIVACY */ +#if defined(CONFIG_BT_CTLR_EXT_SCAN_FP) + /* save the directed adv report flag */ + *extra = dir_report ? 
1 : 0; + extra += PDU_AC_SIZE_SCFP; +#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */ +#if defined(CONFIG_BT_HCI_MESH_EXT) + if (node_rx->hdr.type == NODE_RX_TYPE_MESH_REPORT) { + /* save the directed adv report flag */ + *extra = _radio.scanner.chan - 1; + extra++; + sys_put_le32(_radio.ticks_anchor, extra); + } +#endif /* CONFIG_BT_CTLR_EXT_SCAN_FP */ + + ull_rx_put(node_rx->hdr.link, node_rx); + ull_rx_sched(); + + return 0; +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_scan.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_scan.h new file mode 100644 index 00000000000..cc43ec4b4e1 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_scan.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +struct lll_scan { + struct lll_hdr hdr; + +#if defined(CONFIG_BT_CENTRAL) + /* NOTE: conn context has to be after lll_hdr */ + struct lll_conn *conn; + u32_t conn_ticks_slot; + u32_t conn_win_offset_us; + u16_t conn_timeout; +#endif /* CONFIG_BT_CENTRAL */ + + u8_t state:1; + u8_t chan:2; + u8_t filter_policy:2; + u8_t adv_addr_type:1; + u8_t init_addr_type:1; + u8_t type:1; + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + u8_t phy:3; +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + +#if defined(CONFIG_BT_CTLR_PRIVACY) + u8_t rpa_gen:1; + /* initiator only */ + u8_t rl_idx; +#endif /* CONFIG_BT_CTLR_PRIVACY */ + + u8_t init_addr[BDADDR_SIZE]; + u8_t adv_addr[BDADDR_SIZE]; + + u16_t interval; + u32_t ticks_window; +}; + +int lll_scan_init(void); +int lll_scan_reset(void); + +void lll_scan_prepare(void *param); diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_slave.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_slave.c new file mode 100644 index 00000000000..5b843fbe52f --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_slave.c @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + 
+#include + +#include +#include +#include + +#include "hal/ccm.h" +#include "hal/radio.h" +#include "hal/ticker.h" + +#include "util/memq.h" + +#include "pdu.h" + +#include "lll.h" +#include "lll_vendor.h" +#include "lll_conn.h" +#include "lll_slave.h" +#include "lll_chan.h" + +#include "lll_internal.h" +#include "lll_tim_internal.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_slave +#include "common/log.h" +#include +#include "hal/debug.h" + +static int init_reset(void); +static int prepare_cb(struct lll_prepare_param *prepare_param); + +int lll_slave_init(void) +{ + int err; + + err = init_reset(); + if (err) { + return err; + } + + return 0; +} + +int lll_slave_reset(void) +{ + int err; + + err = init_reset(); + if (err) { + return err; + } + + return 0; +} + +void lll_slave_prepare(void *param) +{ + struct lll_prepare_param *p = param; + int err; + + err = lll_clk_on(); + LL_ASSERT(!err || err == -EINPROGRESS); + + err = lll_prepare(lll_conn_is_abort_cb, lll_conn_abort_cb, prepare_cb, + 0, p); + LL_ASSERT(!err || err == -EINPROGRESS); +} + +static int init_reset(void) +{ + return 0; +} + +static int prepare_cb(struct lll_prepare_param *prepare_param) +{ + struct lll_conn *lll = prepare_param->param; + u32_t ticks_at_event; + struct evt_hdr *evt; + u16_t event_counter; + u32_t remainder_us; + u8_t data_chan_use; + u32_t remainder; + u32_t hcto; + u16_t lazy; + + DEBUG_RADIO_START_S(1); + + /* TODO: Do the below in ULL ? 
*/ + + lazy = prepare_param->lazy; + + /* Calc window widening */ + if (lll->role) { + lll->slave.window_widening_prepare_us += + lll->slave.window_widening_periodic_us * (lazy + 1); + if (lll->slave.window_widening_prepare_us > + lll->slave.window_widening_max_us) { + lll->slave.window_widening_prepare_us = + lll->slave.window_widening_max_us; + } + } + + /* save the latency for use in event */ + lll->latency_prepare += lazy; + + /* calc current event counter value */ + event_counter = lll->event_counter + lll->latency_prepare; + + /* store the next event counter value */ + lll->event_counter = event_counter + 1; + + /* TODO: Do the above in ULL ? */ + + /* Reset connection event global variables */ + lll_conn_prepare_reset(); + + /* TODO: can we do something in ULL? */ + lll->latency_event = lll->latency_prepare; + lll->latency_prepare = 0; + + if (lll->data_chan_sel) { +#if defined(CONFIG_BT_CTLR_CHAN_SEL_2) + data_chan_use = lll_chan_sel_2(lll->event_counter - 1, + lll->data_chan_id, + &lll->data_chan_map[0], + lll->data_chan_count); +#else /* !CONFIG_BT_CTLR_CHAN_SEL_2 */ + LL_ASSERT(0); +#endif /* !CONFIG_BT_CTLR_CHAN_SEL_2 */ + } else { + data_chan_use = lll_chan_sel_1(&lll->data_chan_use, + lll->data_chan_hop, + lll->latency_event, + &lll->data_chan_map[0], + lll->data_chan_count); + } + + /* current window widening */ + lll->slave.window_widening_event_us += + lll->slave.window_widening_prepare_us; + lll->slave.window_widening_prepare_us = 0; + if (lll->slave.window_widening_event_us > + lll->slave.window_widening_max_us) { + lll->slave.window_widening_event_us = + lll->slave.window_widening_max_us; + } + + /* current window size */ + lll->slave.window_size_event_us += + lll->slave.window_size_prepare_us; + lll->slave.window_size_prepare_us = 0; + + /* Start setting up Radio h/w */ + radio_reset(); + /* TODO: other Tx Power settings */ + radio_tx_power_set(RADIO_TXP_DEFAULT); + + lll_conn_rx_pkt_set(lll); + + radio_aa_set(lll->access_addr); + 
radio_crc_configure(((0x5bUL) | ((0x06UL) << 8) | ((0x00UL) << 16)), + (((u32_t)lll->crc_init[2] << 16) | + ((u32_t)lll->crc_init[1] << 8) | + ((u32_t)lll->crc_init[0]))); + + lll_chan_set(data_chan_use); + + radio_isr_set(lll_conn_isr_rx, lll); + + radio_tmr_tifs_set(TIFS_US); + +#if defined(CONFIG_BT_CTLR_PHY) + radio_switch_complete_and_tx(lll->phy_rx, 0, lll->phy_tx, + lll->phy_flags); +#else /* !CONFIG_BT_CTLR_PHY */ + radio_switch_complete_and_tx(0, 0, 0, 0); +#endif /* !CONFIG_BT_CTLR_PHY */ + + ticks_at_event = prepare_param->ticks_at_expire; + evt = HDR_LLL2EVT(lll); + ticks_at_event += lll_evt_offset_get(evt); + ticks_at_event += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US); + + remainder = prepare_param->remainder; + remainder_us = radio_tmr_start(0, ticks_at_event, remainder); + + radio_tmr_aa_capture(); + radio_tmr_aa_save(0); + + hcto = remainder_us + EVENT_JITTER_US + (EVENT_JITTER_US << 2) + + (lll->slave.window_widening_event_us << 1) + + lll->slave.window_size_event_us; + +#if defined(CONFIG_BT_CTLR_PHY) + hcto += radio_rx_ready_delay_get(lll->phy_rx, 1); + hcto += addr_us_get(lll->phy_rx); + hcto += radio_rx_chain_delay_get(lll->phy_rx, 1); +#else /* !CONFIG_BT_CTLR_PHY */ + hcto += radio_rx_ready_delay_get(0, 0); + hcto += addr_us_get(0); + hcto += radio_rx_chain_delay_get(0, 0); +#endif /* !CONFIG_BT_CTLR_PHY */ + + radio_tmr_hcto_configure(hcto); + +#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_lna_setup(); + +#if defined(CONFIG_BT_CTLR_PHY) + radio_gpio_pa_lna_enable(remainder_us + + radio_rx_ready_delay_get(conn->phy_rx, 1) - + CONFIG_BT_CTLR_GPIO_LNA_OFFSET); +#else /* !CONFIG_BT_CTLR_PHY */ + radio_gpio_pa_lna_enable(remainder_us + + radio_rx_ready_delay_get(0, 0) - + CONFIG_BT_CTLR_GPIO_LNA_OFFSET); +#endif /* !CONFIG_BT_CTLR_PHY */ +#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */ + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) || \ + defined(CONFIG_BT_CTLR_GPIO_PA_PIN) + radio_tmr_end_capture(); +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + 
+#if defined(CONFIG_BT_CTLR_CONN_RSSI) + radio_rssi_measure(); +#endif /* CONFIG_BT_CTLR_CONN_RSSI */ + +#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \ + (EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US) + /* check if preempt to start has changed */ + if (lll_preempt_calc(evt, TICKER_ID_CONN_BASE, ticks_at_event)) { + radio_isr_set(lll_conn_isr_abort, lll); + radio_disable(); + } else +#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */ + { + u32_t ret; + + ret = lll_prepare_done(lll); + LL_ASSERT(!ret); + } + + DEBUG_RADIO_START_S(1); + + return 0; +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_slave.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_slave.h new file mode 100644 index 00000000000..668f41fed6a --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_slave.h @@ -0,0 +1,9 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +int lll_slave_init(void); +int lll_slave_reset(void); +void lll_slave_prepare(void *param); diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_test.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_test.c new file mode 100644 index 00000000000..2875ac4dc50 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_test.c @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2017-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include +#include +#include +#include + +#include "hal/cpu.h" +#include "hal/cntr.h" +#include "hal/ccm.h" +#include "hal/radio.h" + +#include "util/memq.h" + +#include "lll.h" +#include "lll_internal.h" + +#include "ll_test.h" + +#define CNTR_MIN_DELTA 3 + +static const u32_t test_sync_word = 0x71764129; +static u8_t test_phy; +static u8_t test_phy_flags; +static u16_t test_num_rx; +static bool started; + +/* NOTE: The PRBS9 sequence used as packet payload. 
+ * The bytes in the sequence are in the right order, but the bits of each byte + * in the array are reverse from that found by running the PRBS9 algorithm. This + * is done to transmit MSbit first on air. + */ + +static const u8_t prbs9[] = { + 0xFF, 0xC1, 0xFB, 0xE8, 0x4C, 0x90, 0x72, 0x8B, + 0xE7, 0xB3, 0x51, 0x89, 0x63, 0xAB, 0x23, 0x23, + 0x02, 0x84, 0x18, 0x72, 0xAA, 0x61, 0x2F, 0x3B, + 0x51, 0xA8, 0xE5, 0x37, 0x49, 0xFB, 0xC9, 0xCA, + 0x0C, 0x18, 0x53, 0x2C, 0xFD, 0x45, 0xE3, 0x9A, + 0xE6, 0xF1, 0x5D, 0xB0, 0xB6, 0x1B, 0xB4, 0xBE, + 0x2A, 0x50, 0xEA, 0xE9, 0x0E, 0x9C, 0x4B, 0x5E, + 0x57, 0x24, 0xCC, 0xA1, 0xB7, 0x59, 0xB8, 0x87, + 0xFF, 0xE0, 0x7D, 0x74, 0x26, 0x48, 0xB9, 0xC5, + 0xF3, 0xD9, 0xA8, 0xC4, 0xB1, 0xD5, 0x91, 0x11, + 0x01, 0x42, 0x0C, 0x39, 0xD5, 0xB0, 0x97, 0x9D, + 0x28, 0xD4, 0xF2, 0x9B, 0xA4, 0xFD, 0x64, 0x65, + 0x06, 0x8C, 0x29, 0x96, 0xFE, 0xA2, 0x71, 0x4D, + 0xF3, 0xF8, 0x2E, 0x58, 0xDB, 0x0D, 0x5A, 0x5F, + 0x15, 0x28, 0xF5, 0x74, 0x07, 0xCE, 0x25, 0xAF, + 0x2B, 0x12, 0xE6, 0xD0, 0xDB, 0x2C, 0xDC, 0xC3, + 0x7F, 0xF0, 0x3E, 0x3A, 0x13, 0xA4, 0xDC, 0xE2, + 0xF9, 0x6C, 0x54, 0xE2, 0xD8, 0xEA, 0xC8, 0x88, + 0x00, 0x21, 0x86, 0x9C, 0x6A, 0xD8, 0xCB, 0x4E, + 0x14, 0x6A, 0xF9, 0x4D, 0xD2, 0x7E, 0xB2, 0x32, + 0x03, 0xC6, 0x14, 0x4B, 0x7F, 0xD1, 0xB8, 0xA6, + 0x79, 0x7C, 0x17, 0xAC, 0xED, 0x06, 0xAD, 0xAF, + 0x0A, 0x94, 0x7A, 0xBA, 0x03, 0xE7, 0x92, 0xD7, + 0x15, 0x09, 0x73, 0xE8, 0x6D, 0x16, 0xEE, 0xE1, + 0x3F, 0x78, 0x1F, 0x9D, 0x09, 0x52, 0x6E, 0xF1, + 0x7C, 0x36, 0x2A, 0x71, 0x6C, 0x75, 0x64, 0x44, + 0x80, 0x10, 0x43, 0x4E, 0x35, 0xEC, 0x65, 0x27, + 0x0A, 0xB5, 0xFC, 0x26, 0x69, 0x3F, 0x59, 0x99, + 0x01, 0x63, 0x8A, 0xA5, 0xBF, 0x68, 0x5C, 0xD3, + 0x3C, 0xBE, 0x0B, 0xD6, 0x76, 0x83, 0xD6, 0x57, + 0x05, 0x4A, 0x3D, 0xDD, 0x81, 0x73, 0xC9, 0xEB, + 0x8A, 0x84, 0x39, 0xF4, 0x36, 0x0B, 0xF7}; + +/* TODO: fill correct prbs15 */ +static const u8_t prbs15[255] = { 0x00, }; + +static u8_t tx_req; +static u8_t volatile tx_ack; + +static void isr_tx(void 
*param) +{ + u32_t l, i, s, t; + + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) + radio_gpio_pa_lna_disable(); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */ + + /* Exit if radio disabled */ + if (((tx_req - tx_ack) & 0x01) == 0) { + tx_ack = tx_req; + + return; + } + + /* LE Test Packet Interval */ + l = radio_tmr_end_get() - radio_tmr_ready_get(); + i = ((l + 249 + 624) / 625) * 625; + t = radio_tmr_end_get() - l + i; + t -= radio_tx_ready_delay_get(test_phy, test_phy_flags); + + /* Set timer capture in the future. */ + radio_tmr_sample(); + s = radio_tmr_sample_get(); + while (t < s) { + t += 625; + } + + /* Setup next Tx */ + radio_switch_complete_and_disable(); + radio_tmr_start_us(1, t); + radio_tmr_aa_capture(); + radio_tmr_end_capture(); + + /* TODO: check for probable stale timer capture being set */ + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) + radio_gpio_pa_setup(); + radio_gpio_pa_lna_enable(t + radio_tx_ready_delay_get(test_phy, + test_phy_flags) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */ +} + +static void isr_rx(void *param) +{ + u8_t crc_ok = 0; + u8_t trx_done; + + /* Read radio status and events */ + trx_done = radio_is_done(); + if (trx_done) { + crc_ok = radio_crc_is_valid(); + } + + /* Clear radio status and events */ + radio_status_reset(); + radio_tmr_status_reset(); + + /* Exit if radio disabled */ + if (!trx_done) { + return; + } + + /* Setup next Rx */ + radio_switch_complete_and_rx(test_phy); + + /* Count Rx-ed packets */ + if (crc_ok) { + test_num_rx++; + } +} + +static u32_t init(u8_t chan, u8_t phy, void (*isr)(void *)) +{ + int err; + + if (started) { + return 1; + } + + /* start coarse timer */ + cntr_start(); + + /* Setup resources required by Radio */ + err = lll_clk_on_wait(); + + /* Reset Radio h/w */ + radio_reset(); + radio_isr_set(isr, NULL); + + /* Store value needed in Tx/Rx ISR */ + if (phy < 0x04) { + test_phy = 
BIT(phy - 1); + test_phy_flags = 1; + } else { + test_phy = BIT(2); + test_phy_flags = 0; + } + + /* Setup Radio in Tx/Rx */ + /* NOTE: No whitening in test mode. */ + radio_phy_set(test_phy, test_phy_flags); + radio_tmr_tifs_set(150); + radio_tx_power_max_set(); + radio_freq_chan_set((chan << 1) + 2); + radio_aa_set((u8_t *)&test_sync_word); + radio_crc_configure(0x65b, 0x555555); + radio_pkt_configure(8, 255, (test_phy << 1)); + + return 0; +} + +u32_t ll_test_tx(u8_t chan, u8_t len, u8_t type, u8_t phy) +{ + u32_t start_us; + u8_t *payload; + u8_t *pdu; + u32_t err; + + if ((type > 0x07) || !phy || (phy > 0x04)) { + return 1; + } + + err = init(chan, phy, isr_tx); + if (err) { + return err; + } + + tx_req++; + + pdu = radio_pkt_scratch_get(); + payload = &pdu[2]; + + switch (type) { + case 0x00: + memcpy(payload, prbs9, len); + break; + + case 0x01: + memset(payload, 0x0f, len); + break; + + case 0x02: + memset(payload, 0x55, len); + break; + + case 0x03: + memcpy(payload, prbs15, len); + break; + + case 0x04: + memset(payload, 0xff, len); + break; + + case 0x05: + memset(payload, 0x00, len); + break; + + case 0x06: + memset(payload, 0xf0, len); + break; + + case 0x07: + memset(payload, 0xaa, len); + break; + } + + pdu[0] = type; + pdu[1] = len; + + radio_pkt_tx_set(pdu); + radio_switch_complete_and_disable(); + start_us = radio_tmr_start(1, cntr_cnt_get() + CNTR_MIN_DELTA, 0); + radio_tmr_aa_capture(); + radio_tmr_end_capture(); + +#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) + radio_gpio_pa_setup(); + radio_gpio_pa_lna_enable(start_us + + radio_tx_ready_delay_get(test_phy, + test_phy_flags) - + CONFIG_BT_CTLR_GPIO_PA_OFFSET); +#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + ARG_UNUSED(start_us); +#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */ + + started = true; + + return 0; +} + +u32_t ll_test_rx(u8_t chan, u8_t phy, u8_t mod_idx) +{ + u32_t err; + + if (!phy || (phy > 0x03)) { + return 1; + } + + err = init(chan, phy, isr_rx); + if (err) { + return err; + } + + 
radio_pkt_rx_set(radio_pkt_scratch_get()); + radio_switch_complete_and_rx(test_phy); + radio_tmr_start(0, cntr_cnt_get() + CNTR_MIN_DELTA, 0); + +#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_lna_on(); +#endif /* !CONFIG_BT_CTLR_GPIO_LNA_PIN */ + + started = true; + + return 0; +} + +u32_t ll_test_end(u16_t *num_rx) +{ + u8_t ack; + + if (!started) { + return 1; + } + + /* Return packets Rx-ed/Completed */ + *num_rx = test_num_rx; + test_num_rx = 0; + + /* Disable Radio, if in Rx test */ + ack = tx_ack; + if (tx_req == ack) { + radio_disable(); + } else { + /* Wait for Tx to complete */ + tx_req = ack + 2; + while (tx_req != tx_ack) { + cpu_sleep(); + } + } + + /* Stop packet timer */ + radio_tmr_stop(); + + /* Release resources acquired for Radio */ + lll_clk_off(); + + /* Stop coarse timer */ + cntr_stop(); + +#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN) + radio_gpio_lna_off(); +#endif /* !CONFIG_BT_CTLR_GPIO_LNA_PIN */ + + started = false; + + return 0; +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tim_internal.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tim_internal.h new file mode 100644 index 00000000000..4a54f817a57 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tim_internal.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#define TIFS_US 150 + +/* Macro to return PDU time */ +#if defined(CONFIG_BT_CTLR_PHY_CODED) +#define PKT_US(octets, phy) \ + (((phy) & BIT(2)) ? 
\ + (80 + 256 + 16 + 24 + ((((2 + (octets) + 4) * 8) + 24 + 3) * 8)) : \ + (((octets) + 14) * 8 / BIT(((phy) & 0x03) >> 1))) +#else /* !CONFIG_BT_CTLR_PHY_CODED */ +#define PKT_US(octets, phy) \ + (((octets) + 14) * 8 / BIT(((phy) & 0x03) >> 1)) +#endif /* !CONFIG_BT_CTLR_PHY_CODED */ + + +static inline u32_t addr_us_get(u8_t phy) +{ + switch (phy) { + default: + case BIT(0): + return 40; + case BIT(1): + return 24; + case BIT(2): + return 376; + } +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp.c b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp.c new file mode 100644 index 00000000000..d166967ef9e --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp.c @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include +#include + +#if defined(CONFIG_BT_CTLR_DEBUG_PINS) +#if defined(CONFIG_PRINTK) +#undef CONFIG_PRINTK +#endif +#endif + +#include "hal/ccm.h" + +#include "util/mfifo.h" +#include "util/memq.h" + +#include "ticker/ticker.h" + +#include "pdu.h" + +#include "lll.h" +#include "lll_conn.h" +#include "lll_tmp.h" +#include "lll_internal.h" + +#include "common/log.h" +#include +#include "hal/debug.h" + +static MFIFO_DEFINE(tmp_ack, sizeof(struct lll_tx), + CONFIG_BT_TMP_TX_COUNT_MAX); + +static int _init_reset(void); +static int _prepare_cb(struct lll_prepare_param *prepare_param); +static int _is_abort_cb(void *next, int prio, void *curr, + lll_prepare_cb_t *resume_cb, int *resume_prio); +static void _abort_cb(struct lll_prepare_param *prepare_param, void *param); +static int _emulate_tx_rx(void *param); + +int lll_tmp_init(void) +{ + int err; + + err = _init_reset(); + if (err) { + return err; + } + + return 0; +} + +int lll_tmp_reset(void) +{ + int err; + + MFIFO_INIT(tmp_ack); + + err = _init_reset(); + if (err) { + return err; + } + + return 0; +} + +void lll_tmp_prepare(void *param) +{ + struct lll_prepare_param *p = 
param; + int err; + + printk("\t\tlll_tmp_prepare (%p) enter.\n", p->param); + + err = lll_clk_on(); + printk("\t\tlll_clk_on: %d.\n", err); + + err = lll_prepare(_is_abort_cb, _abort_cb, _prepare_cb, 0, p); + + printk("\t\tlll_tmp_prepare (%p) exit (%d).\n", p->param, err); +} + +u8_t lll_tmp_ack_last_idx_get(void) +{ + return mfifo_tmp_ack.l; +} + +memq_link_t *lll_tmp_ack_peek(u16_t *handle, struct node_tx **node_tx) +{ + struct lll_tx *tx; + + tx = MFIFO_DEQUEUE_GET(tmp_ack); + if (!tx) { + return NULL; + } + + *handle = tx->handle; + *node_tx = tx->node; + + return (*node_tx)->link; +} + +memq_link_t *lll_tmp_ack_by_last_peek(u8_t last, u16_t *handle, + struct node_tx **node_tx) +{ + struct lll_tx *tx; + + tx = mfifo_dequeue_get(mfifo_tmp_ack.m, mfifo_tmp_ack.s, + mfifo_tmp_ack.f, last); + if (!tx) { + return NULL; + } + + *handle = tx->handle; + *node_tx = tx->node; + + return (*node_tx)->link; +} + +void *lll_tmp_ack_dequeue(void) +{ + return MFIFO_DEQUEUE(tmp_ack); +} + +static int _init_reset(void) +{ + return 0; +} + +static int _prepare_cb(struct lll_prepare_param *prepare_param) +{ + int err; + + printk("\t\t_prepare (%p) enter: expected %u, actual %u.\n", + prepare_param->param, prepare_param->ticks_at_expire, + ticker_ticks_now_get()); + DEBUG_RADIO_PREPARE_A(1); + + err = _emulate_tx_rx(prepare_param); + + DEBUG_RADIO_PREPARE_A(1); + printk("\t\t_prepare (%p) exit (%d).\n", prepare_param->param, err); + + return err; +} + +static int _is_abort_cb(void *next, int prio, void *curr, + lll_prepare_cb_t *resume_cb, int *resume_prio) +{ + static u8_t toggle; + + toggle++; + + return toggle & 0x01; +} + +static void _abort_cb(struct lll_prepare_param *prepare_param, void *param) +{ + int err; + + printk("\t\t_abort (%p) enter.\n", param); + + /* NOTE: This is not a prepare being cancelled */ + if (!prepare_param) { + /* Perform event abort here. + * After event has been cleanly aborted, clean up resources + * and dispatch event done. 
+ */ + + /* Current event is done, pass NULL to lll_done(). */ + param = NULL; + } + + /* NOTE: Else clean the top half preparations of the aborted event + * currently in preparation pipeline. + */ + err = lll_clk_off(); + printk("\t\tlll_clk_off: %d.\n", err); + + lll_done(param); + printk("\t\tlll_done (%p).\n", param); + + printk("\t\t_abort (%p) exit.\n", param); +} + +static int _emulate_tx_rx(void *param) +{ + struct lll_prepare_param *prepare_param = param; + struct lll_tmp *tmp = prepare_param->param; + struct node_tx *node_tx; + bool is_ull_rx = false; + memq_link_t *link; + void *free; + + /* Tx */ + link = memq_dequeue(tmp->memq_tx.tail, &tmp->memq_tx.head, + (void **)&node_tx); + while (link) { + struct lll_tx *tx; + u8_t idx; + + idx = MFIFO_ENQUEUE_GET(tmp_ack, (void **)&tx); + LL_ASSERT(tx); + + tx->handle = ull_tmp_handle_get(tmp); + tx->node = node_tx; + + node_tx->link = link; + + printk("\t\t_emulate_tx_rx: h= %u.\n", tx->handle); + + MFIFO_ENQUEUE(tmp_ack, idx); + + link = memq_dequeue(tmp->memq_tx.tail, &tmp->memq_tx.head, + (void **)&node_tx); + } + + /* Rx */ + free = ull_pdu_rx_alloc_peek(2); + if (free) { + struct node_rx_hdr *hdr = free; + void *_free; + + _free = ull_pdu_rx_alloc(); + LL_ASSERT(free == _free); + + hdr->type = NODE_RX_TYPE_DC_PDU; + + ull_rx_put(hdr->link, hdr); + + is_ull_rx = true; + } else { + printk("\t\tOUT OF PDU RX MEMORY.\n"); + } + + if (is_ull_rx) { + ull_rx_sched(); + } + + return 0; +} diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp.h new file mode 100644 index 00000000000..1c85f31facb --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +struct lll_tmp { + struct lll_hdr hdr; + + MEMQ_DECLARE(tx); + memq_link_t _link; /* Dedicated thread allocatable */ + memq_link_t *link_free; /* Thread 
allocatable reference */ +}; + +int lll_tmp_init(void); +void lll_tmp_prepare(void *param); + +u8_t lll_tmp_ack_last_idx_get(void); +memq_link_t *lll_tmp_ack_peek(u16_t *handle, struct node_tx **node_tx); +memq_link_t *lll_tmp_ack_by_last_peek(u8_t last, u16_t *handle, + struct node_tx **node_tx); +void *lll_tmp_ack_dequeue(void); + +extern u16_t ull_tmp_handle_get(struct lll_tmp *tmp); diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp_internal.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp_internal.h new file mode 100644 index 00000000000..0b55ef8b880 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_tmp_internal.h @@ -0,0 +1,5 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ diff --git a/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_vendor.h b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_vendor.h new file mode 100644 index 00000000000..37aca3c6d3c --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/nordic/lll/lll_vendor.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#define EVENT_OVERHEAD_XTAL_US 1500 +#define EVENT_OVERHEAD_PREEMPT_US 0 /* if <= min, then dynamic preempt */ +#define EVENT_OVERHEAD_PREEMPT_MIN_US 0 +#define EVENT_OVERHEAD_PREEMPT_MAX_US EVENT_OVERHEAD_XTAL_US +#define EVENT_OVERHEAD_START_US 200 +#define EVENT_JITTER_US 16 diff --git a/subsys/bluetooth/controller/ll_sw/ull.c b/subsys/bluetooth/controller/ll_sw/ull.c new file mode 100644 index 00000000000..bc16552ecb0 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull.c @@ -0,0 +1,1535 @@ +/* + * Copyright (c) 2017-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include "hal/cntr.h" +#include "hal/ccm.h" +#include "hal/ticker.h" + +#if defined(CONFIG_SOC_FAMILY_NRF) +#include "hal/radio.h" 
+#endif /* CONFIG_SOC_FAMILY_NRF */ + +#include "util/util.h" +#include "util/mem.h" +#include "util/mfifo.h" +#include "util/memq.h" +#include "util/mayfly.h" + +#include "ticker/ticker.h" + +#include "pdu.h" +#include "ll.h" +#include "lll.h" +#include "lll_filter.h" +#include "lll_adv.h" +#include "lll_scan.h" +#include "lll_conn.h" +#include "lll_tmp.h" +#include "ull_adv_types.h" +#include "ull_scan_types.h" +#include "ull_conn_types.h" +#include "ull_internal.h" +#include "ull_adv_internal.h" +#include "ull_scan_internal.h" +#include "ull_conn_internal.h" +#include "ull_tmp_internal.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_ull +#include "common/log.h" +#include "hal/debug.h" + +/* Define ticker nodes and user operations */ +#if defined(CONFIG_BT_CTLR_LOW_LAT) && \ + (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) +#define TICKER_USER_LLL_OPS (3 + 1) +#else +#define TICKER_USER_LLL_OPS (2 + 1) +#endif /* CONFIG_BT_CTLR_LOW_LAT */ +#define TICKER_USER_ULL_HIGH_OPS (3 + 1) +#define TICKER_USER_ULL_LOW_OPS (1 + 1) +#define TICKER_USER_THREAD_OPS (1 + 1) + +#if defined(CONFIG_BT_BROADCASTER) +#define BT_ADV_TICKER_NODES ((TICKER_ID_ADV_LAST) - (TICKER_ID_ADV_STOP) + 1) +#else +#define BT_ADV_TICKER_NODES 0 +#endif + +#if defined(CONFIG_BT_OBSERVER) +#define BT_SCAN_TICKER_NODES ((TICKER_ID_SCAN_LAST) - (TICKER_ID_SCAN_STOP) + 1) +#else +#define BT_SCAN_TICKER_NODES 0 +#endif + +#if defined(CONFIG_BT_CONN) +#define BT_CONN_TICKER_NODES ((TICKER_ID_CONN_LAST) - (TICKER_ID_CONN_BASE) + 1) +#else +#define BT_CONN_TICKER_NODES 0 +#endif + +#if defined(CONFIG_BT_TMP) +#define BT_TMP_TICKER_NODES ((TICKER_ID_TMP_LAST) - (TICKER_ID_TMP_BASE) + 1) +#else +#define BT_TMP_TICKER_NODES 0 +#endif + +#if defined(CONFIG_SOC_FLASH_NRF_RADIO_SYNC) +#define FLASH_TICKER_NODES 1 /* No. of tickers reserved for flashing */ +#define FLASH_TICKER_USER_APP_OPS 1 /* No. 
of additional ticker operations */ +#else +#define FLASH_TICKER_NODES 0 +#define FLASH_TICKER_USER_APP_OPS 0 +#endif + +#define TICKER_NODES (TICKER_ID_ULL_BASE + \ + BT_ADV_TICKER_NODES + \ + BT_SCAN_TICKER_NODES + \ + BT_CONN_TICKER_NODES + \ + BT_TMP_TICKER_NODES + \ + FLASH_TICKER_NODES) +#define TICKER_USER_APP_OPS (TICKER_USER_THREAD_OPS + \ + FLASH_TICKER_USER_APP_OPS) +#define TICKER_USER_OPS (TICKER_USER_LLL_OPS + \ + TICKER_USER_ULL_HIGH_OPS + \ + TICKER_USER_ULL_LOW_OPS + \ + TICKER_USER_THREAD_OPS + \ + FLASH_TICKER_USER_APP_OPS) + +/* Memory for ticker nodes/instances */ +static u8_t MALIGN(4) _ticker_nodes[TICKER_NODES][TICKER_NODE_T_SIZE]; + +/* Memory for users/contexts operating on ticker module */ +static u8_t MALIGN(4) _ticker_users[MAYFLY_CALLER_COUNT][TICKER_USER_T_SIZE]; + +/* Memory for user/context simultaneous API operations */ +static u8_t MALIGN(4) _ticker_user_ops[TICKER_USER_OPS][TICKER_USER_OP_T_SIZE]; + +/* Semaphire to wakeup thread on ticker API callback */ +static struct k_sem sem_ticker_api_cb; + +/* Semaphore to wakeup thread on Rx-ed objects */ +static struct k_sem *sem_recv; + +/* Entropy device */ +static struct device *dev_entropy; + +/* prepare and done event FIFOs */ +static MFIFO_DEFINE(prep, sizeof(struct lll_event), EVENT_PIPELINE_MAX); +static MFIFO_DEFINE(done, sizeof(void *), EVENT_PIPELINE_MAX); + +static struct { + void *free; + u8_t pool[sizeof(struct node_rx_event_done) * EVENT_PIPELINE_MAX]; +} mem_done; + +static struct { + void *free; + u8_t pool[sizeof(memq_link_t) * EVENT_PIPELINE_MAX]; +} mem_link_done; + +#define PDU_RX_CNT (CONFIG_BT_CTLR_RX_BUFFERS + 3) +#define LL_PDU_RX_CNT 1 +#define RX_CNT (PDU_RX_CNT + LL_PDU_RX_CNT) + +static MFIFO_DEFINE(pdu_rx_free, sizeof(void *), PDU_RX_CNT); +static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT); + +#define PDU_RX_SIZE_MIN MROUND(offsetof(struct node_rx_pdu, pdu) + \ + sizeof(struct node_rx_ftr) + \ + (PDU_AC_SIZE_MAX + PDU_AC_SIZE_EXTRA)) + +#if 
defined(CONFIG_BT_RX_BUF_LEN) +#define PDU_RX_OCTETS_MAX (CONFIG_BT_RX_BUF_LEN - 11) +#else +#define PDU_RX_OCTETS_MAX 0 +#endif + +#define PDU_RX_POOL_SIZE (MROUND(offsetof(struct node_rx_pdu, pdu) + \ + sizeof(struct node_rx_ftr) + \ + max((PDU_AC_SIZE_MAX + PDU_AC_SIZE_EXTRA), \ + (offsetof(struct pdu_data, lldata) + \ + PDU_RX_OCTETS_MAX))) * RX_CNT) + +static struct { + u8_t size; /* Runtime (re)sized info */ + + void *free; + u8_t pool[PDU_RX_POOL_SIZE]; +} mem_pdu_rx; + +#if defined(CONFIG_BT_MAX_CONN) +#define CONFIG_BT_CTLR_MAX_CONN CONFIG_BT_MAX_CONN +#else +#define CONFIG_BT_CTLR_MAX_CONN 0 +#endif + +#define LINK_RX_POOL_SIZE (sizeof(memq_link_t) * (RX_CNT + 2 + \ + CONFIG_BT_CTLR_MAX_CONN)) +static struct { + u8_t quota_pdu; + + void *free; + u8_t pool[LINK_RX_POOL_SIZE]; +} mem_link_rx; + +static MEMQ_DECLARE(ull_rx); +static MEMQ_DECLARE(ll_rx); + +#if defined(CONFIG_BT_CONN) +static MFIFO_DEFINE(tx_ack, sizeof(struct lll_tx), + CONFIG_BT_CTLR_TX_BUFFERS); +#endif /* CONFIG_BT_CONN */ + +static void *mark; + +static inline int _init_reset(void); +static inline void _done_alloc(void); +static inline void _rx_alloc(u8_t max); +static void _rx_demux(void *param); +#if defined(CONFIG_BT_TMP) +static inline void _rx_demux_tx_ack(u16_t handle, memq_link_t *link, + struct node_tx *node_tx); +#endif /* CONFIG_BT_TMP */ +static inline void _rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx); +static inline void _rx_demux_event_done(memq_link_t *link, + struct node_rx_hdr *rx); +static void _disabled_cb(void *param); + +#if defined(CONFIG_BT_CONN) +static u8_t tx_cmplt_get(u16_t *handle, u8_t *first, u8_t last); +#endif /* CONFIG_BT_CONN */ + +int ll_init(struct k_sem *sem_rx) +{ + int err; + + /* Store the semaphore to be used to wakeup Thread context */ + sem_recv = sem_rx; + /* Get reference to entropy device */ + dev_entropy = device_get_binding(CONFIG_ENTROPY_NAME); + if (!dev_entropy) { + return -ENODEV; + } + + /* Initialize counter */ + /* TODO: 
Bind and use counter driver? */ + cntr_init(); + + /* Initialize Mayfly */ + mayfly_init(); + + /* Initialize Ticker */ + _ticker_users[MAYFLY_CALL_ID_0][0] = TICKER_USER_LLL_OPS; + _ticker_users[MAYFLY_CALL_ID_1][0] = TICKER_USER_ULL_HIGH_OPS; + _ticker_users[MAYFLY_CALL_ID_2][0] = TICKER_USER_ULL_LOW_OPS; + _ticker_users[MAYFLY_CALL_ID_PROGRAM][0] = TICKER_USER_APP_OPS; + + err = ticker_init(TICKER_INSTANCE_ID_CTLR, + TICKER_NODES, &_ticker_nodes[0], + MAYFLY_CALLER_COUNT, &_ticker_users[0], + TICKER_USER_OPS, &_ticker_user_ops[0], + hal_ticker_instance0_caller_id_get, + hal_ticker_instance0_sched, + hal_ticker_instance0_trigger_set); + LL_ASSERT(!err); + + /* Initialize semaphore for ticker API blocking wait */ + k_sem_init(&sem_ticker_api_cb, 0, 1); + + /* Initialize LLL */ + err = lll_init(); + if (err) { + return err; + } + + /* Initialize ULL internals */ + /* TODO: globals? */ + + /* Common to init and reset */ + err = _init_reset(); + if (err) { + return err; + } + +#if defined(CONFIG_BT_BROADCASTER) + err = lll_adv_init(); + if (err) { + return err; + } + + err = ull_adv_init(); + if (err) { + return err; + } +#endif /* CONFIG_BT_BROADCASTER */ + +#if defined(CONFIG_BT_OBSERVER) + err = lll_scan_init(); + if (err) { + return err; + } + + err = ull_scan_init(); + if (err) { + return err; + } +#endif /* CONFIG_BT_OBSERVER */ + +#if defined(CONFIG_BT_CONN) + err = lll_conn_init(); + if (err) { + return err; + } + + err = ull_conn_init(); + if (err) { + return err; + } +#endif /* CONFIG_BT_CONN */ + + /* Initialize state/roles */ +#if defined(CONFIG_BT_TMP) + err = lll_tmp_init(); + if (err) { + return err; + } + + err = ull_tmp_init(); + if (err) { + return err; + } +#endif /* CONFIG_BT_TMP */ + + return 0; +} + +void ll_reset(void) +{ + int err; + +#if defined(CONFIG_BT_BROADCASTER) + /* Reset adv state */ + err = ull_adv_reset(); + LL_ASSERT(!err); +#endif /* CONFIG_BT_BROADCASTER */ + +#if defined(CONFIG_BT_OBSERVER) + /* Reset scan state */ + err = 
ull_scan_reset(); + LL_ASSERT(!err); +#endif /* CONFIG_BT_OBSERVER */ + +#if defined(CONFIG_BT_CONN) +#if defined(CONFIG_BT_CENTRAL) + /* Reset initiator */ + { + void *rx; + + err = ll_connect_disable(&rx); + if (!err) { + struct ll_scan_set *scan; + + scan = ull_scan_is_enabled_get(0); + LL_ASSERT(scan); + + scan->is_enabled = 0; + scan->lll.conn = NULL; + } + + ARG_UNUSED(rx); + } +#endif /* CONFIG_BT_CENTRAL */ + + /* Reset conn role */ + err = ull_conn_reset(); + LL_ASSERT(!err); + + MFIFO_INIT(tx_ack); +#endif /* CONFIG_BT_CONN */ + +#if defined(CONFIG_BT_TMP) + /* Reset tmp */ + err = ull_tmp_reset(); + LL_ASSERT(!err); +#endif /* CONFIG_BT_TMP */ + + /* Re-initialize ULL internals */ + + /* Re-initialize the prep mfifo */ + MFIFO_INIT(prep); + + /* Re-initialize the free done mfifo */ + MFIFO_INIT(done); + + /* Re-initialize the free rx mfifo */ + MFIFO_INIT(pdu_rx_free); + + /* Re-initialize the free ll rx mfifo */ + MFIFO_INIT(ll_pdu_rx_free); + + /* Common to init and reset */ + err = _init_reset(); + LL_ASSERT(!err); +} + +u8_t ll_rx_get(void **node_rx, u16_t *handle) +{ + struct node_rx_hdr *rx; + memq_link_t *link; + u8_t cmplt = 0; + +#if defined(CONFIG_BT_CONN) +ll_rx_get_again: +#endif /* CONFIG_BT_CONN */ + + link = memq_peek(memq_ll_rx.head, memq_ll_rx.tail, (void **)&rx); + if (link) { +#if defined(CONFIG_BT_CONN) + cmplt = tx_cmplt_get(handle, &mfifo_tx_ack.f, rx->ack_last); + if (!cmplt) { + u8_t f, cmplt_prev, cmplt_curr; + u16_t h; + + cmplt_curr = 0; + f = mfifo_tx_ack.f; + do { + cmplt_prev = cmplt_curr; + cmplt_curr = tx_cmplt_get(&h, &f, + mfifo_tx_ack.l); + } while ((cmplt_prev != 0) || + (cmplt_prev != cmplt_curr)); + + if (rx->type == NODE_RX_TYPE_DC_PDU_RELEASE) { + (void)memq_dequeue(memq_ll_rx.tail, + &memq_ll_rx.head, NULL); + mem_release(link, &mem_link_rx.free); + + LL_ASSERT(mem_link_rx.quota_pdu < RX_CNT); + mem_link_rx.quota_pdu++; + + mem_release(rx, &mem_pdu_rx.free); + + _rx_alloc(1); + + goto ll_rx_get_again; + } +#endif 
/* CONFIG_BT_CONN */ + + *node_rx = rx; + +#if defined(CONFIG_BT_CONN) + } else { + *node_rx = NULL; + } + } else { + cmplt = tx_cmplt_get(handle, &mfifo_tx_ack.f, mfifo_tx_ack.l); + *node_rx = NULL; +#endif /* CONFIG_BT_CONN */ + + } + + return cmplt; +} + +void ll_rx_dequeue(void) +{ + struct node_rx_hdr *rx = NULL; + memq_link_t *link; + +#if defined(CONFIG_BT_CONN) + struct node_rx_cc *cc = NULL; +#endif /* CONFIG_BT_CONN */ + + link = memq_dequeue(memq_ll_rx.tail, &memq_ll_rx.head, + (void **)&rx); + LL_ASSERT(link); + + mem_release(link, &mem_link_rx.free); + + /* handle object specific clean up */ + switch (rx->type) { +#if defined(CONFIG_BT_OBSERVER) || \ + defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \ + defined(CONFIG_BT_CTLR_PROFILE_ISR) || \ + defined(CONFIG_BT_CTLR_ADV_INDICATION) || \ + defined(CONFIG_BT_CTLR_SCAN_INDICATION) || \ + defined(CONFIG_BT_CONN) + +#if defined(CONFIG_BT_CONN) + /* fallthrough */ + case NODE_RX_TYPE_CONNECTION: + { + cc = (void *)((struct node_rx_pdu *)rx)->pdu; + if (cc->status) { + break; + } + } + + case NODE_RX_TYPE_DC_PDU: +#endif /* CONFIG_BT_CONN */ + +#if defined(CONFIG_BT_OBSERVER) + case NODE_RX_TYPE_REPORT: +#endif /* CONFIG_BT_OBSERVER */ + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + case NODE_RX_TYPE_EXT_1M_REPORT: + case NODE_RX_TYPE_EXT_CODED_REPORT: +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + +#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) + case NODE_RX_TYPE_SCAN_REQ: +#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */ + +#if defined(CONFIG_BT_CONN) + case NODE_RX_TYPE_CONN_UPDATE: + case NODE_RX_TYPE_ENC_REFRESH: + +#if defined(CONFIG_BT_CTLR_LE_PING) + case NODE_RX_TYPE_APTO: +#endif /* CONFIG_BT_CTLR_LE_PING */ + + case NODE_RX_TYPE_CHAN_SEL_ALGO: + +#if defined(CONFIG_BT_CTLR_PHY) + case NODE_RX_TYPE_PHY_UPDATE: +#endif /* CONFIG_BT_CTLR_PHY */ + +#if defined(CONFIG_BT_CTLR_CONN_RSSI) + case NODE_RX_TYPE_RSSI: +#endif /* CONFIG_BT_CTLR_CONN_RSSI */ +#endif /* CONFIG_BT_CONN */ + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + /* 
fallthrough */ + case NODE_RX_TYPE_PROFILE: +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + +#if defined(CONFIG_BT_CTLR_ADV_INDICATION) + case NODE_RX_TYPE_ADV_INDICATION: +#endif /* CONFIG_BT_CTLR_ADV_INDICATION */ + +#if defined(CONFIG_BT_CTLR_SCAN_INDICATION) + case NODE_RX_TYPE_SCAN_INDICATION: +#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */ + +#if defined(CONFIG_BT_HCI_MESH_EXT) + case NODE_RX_TYPE_MESH_ADV_CPLT: + case NODE_RX_TYPE_MESH_REPORT: +#endif /* CONFIG_BT_HCI_MESH_EXT */ + + LL_ASSERT(mem_link_rx.quota_pdu < RX_CNT); + + mem_link_rx.quota_pdu++; + break; +#endif /* CONFIG_BT_OBSERVER || + * CONFIG_BT_CTLR_SCAN_REQ_NOTIFY || + * CONFIG_BT_CTLR_PROFILE_ISR || + * CONFIG_BT_CTLR_ADV_INDICATION || + * CONFIG_BT_CTLR_SCAN_INDICATION || + * CONFIG_BT_CONN + */ + +#if defined(CONFIG_BT_CONN) + /* fallthrough */ + case NODE_RX_TYPE_TERMINATE: + /* Did not use data link quota */ + break; +#endif /* CONFIG_BT_CONN */ + + default: + LL_ASSERT(0); + break; + } + + if (0) { +#if defined(CONFIG_BT_CONN) + } else if (rx->type == NODE_RX_TYPE_CONNECTION) { + struct node_rx_ftr *ftr; + + ftr = (void *)((u8_t *)((struct node_rx_pdu *)rx)->pdu + + (offsetof(struct pdu_adv, connect_ind) + + sizeof(struct pdu_adv_connect_ind))); + + if (0) { +#if defined(CONFIG_BT_PERIPHERAL) + } else if ((cc->status == 0x3c) || cc->role) { + struct lll_adv *lll = ftr->param; + struct ll_adv_set *adv = (void *)HDR_LLL2EVT(lll); + + if (cc->status == 0x3c) { + struct lll_conn *conn_lll; + struct ll_conn *conn; + memq_link_t *link; + + conn_lll = lll->conn; + LL_ASSERT(conn_lll); + + LL_ASSERT(!conn_lll->link_tx_free); + link = memq_deinit(&conn_lll->memq_tx.head, + &conn_lll->memq_tx.tail); + LL_ASSERT(link); + conn_lll->link_tx_free = link; + + conn = (void *)HDR_LLL2EVT(conn_lll); + ll_conn_release(conn); + + lll->conn = NULL; + } else { + /* Release un-utilized node rx */ + if (adv->node_rx_cc_free) { + void *rx_free; + + rx_free = adv->node_rx_cc_free; + adv->node_rx_cc_free = NULL; + + 
ll_rx_release(rx_free); + } + } + + adv->is_enabled = 0; +#endif /* CONFIG_BT_PERIPHERAL */ +#if defined(CONFIG_BT_CENTRAL) + } else { + struct lll_scan *lll = ftr->param; + struct ll_scan_set *scan = (void *)HDR_LLL2EVT(lll); + + scan->is_enabled = 0; +#endif /* CONFIG_BT_CENTRAL */ + } + + if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) { + u8_t bm; + + bm = (ull_scan_is_enabled(0) << 1) | + ull_adv_is_enabled(0); + + if (!bm) { + ll_adv_scan_state_cb(0); + } + } +#endif /* CONFIG_BT_CONN */ + +#if defined(CONFIG_BT_HCI_MESH_EXT) + } else if (rx->type == NODE_RX_TYPE_MESH_ADV_CPLT) { + struct ll_adv_set *adv; + struct ll_scan_set *scan; + + adv = ull_adv_is_enabled_get(0); + LL_ASSERT(adv); + adv->is_enabled = 0; + + scan = ull_scan_is_enabled_get(0); + LL_ASSERT(scan); + + scan->is_enabled = 0; + + ll_adv_scan_state_cb(0); +#endif /* CONFIG_BT_HCI_MESH_EXT */ + } +} + +void ll_rx_mem_release(void **node_rx) +{ + struct node_rx_hdr *_node_rx; + + _node_rx = *node_rx; + while (_node_rx) { + struct node_rx_hdr *_node_rx_free; + + _node_rx_free = _node_rx; + _node_rx = _node_rx->next; + + switch (_node_rx_free->type) { +#if defined(CONFIG_BT_CONN) + case NODE_RX_TYPE_CONNECTION: +#if defined(CONFIG_BT_CENTRAL) + { + struct node_rx_pdu *rx = (void *)_node_rx_free; + + if (*((u8_t *)rx->pdu) == + BT_HCI_ERR_UNKNOWN_CONN_ID) { + struct lll_conn *conn_lll; + struct ll_scan_set *scan; + struct ll_conn *conn; + memq_link_t *link; + + scan = ull_scan_is_enabled_get(0); + LL_ASSERT(scan); + + conn_lll = scan->lll.conn; + LL_ASSERT(conn_lll); + + LL_ASSERT(!conn_lll->link_tx_free); + link = memq_deinit(&conn_lll->memq_tx.head, + &conn_lll->memq_tx.tail); + LL_ASSERT(link); + conn_lll->link_tx_free = link; + + conn = (void *)HDR_LLL2EVT(conn_lll); + ll_conn_release(conn); + + scan->is_enabled = 0; + + scan->lll.conn = NULL; + +#if defined(CONFIG_BT_CTLR_PRIVACY) +#if defined(CONFIG_BT_BROADCASTER) + if (!ull_adv_is_enabled_get(0)) +#endif + { + ll_adv_scan_state_cb(0); + } +#endif + 
break; + } + } +#endif /* CONFIG_BT_CENTRAL */ + /* passthrough */ + case NODE_RX_TYPE_DC_PDU: +#endif /* CONFIG_BT_CONN */ + +#if defined(CONFIG_BT_OBSERVER) + case NODE_RX_TYPE_REPORT: +#endif /* CONFIG_BT_OBSERVER */ + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + case NODE_RX_TYPE_EXT_1M_REPORT: + case NODE_RX_TYPE_EXT_CODED_REPORT: +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + +#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) + case NODE_RX_TYPE_SCAN_REQ: +#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */ + +#if defined(CONFIG_BT_CONN) + case NODE_RX_TYPE_CONN_UPDATE: + case NODE_RX_TYPE_ENC_REFRESH: + +#if defined(CONFIG_BT_CTLR_LE_PING) + case NODE_RX_TYPE_APTO: +#endif /* CONFIG_BT_CTLR_LE_PING */ + + case NODE_RX_TYPE_CHAN_SEL_ALGO: + +#if defined(CONFIG_BT_CTLR_PHY) + case NODE_RX_TYPE_PHY_UPDATE: +#endif /* CONFIG_BT_CTLR_PHY */ + +#if defined(CONFIG_BT_CTLR_CONN_RSSI) + case NODE_RX_TYPE_RSSI: +#endif /* CONFIG_BT_CTLR_CONN_RSSI */ +#endif /* CONFIG_BT_CONN */ + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + case NODE_RX_TYPE_PROFILE: +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + +#if defined(CONFIG_BT_CTLR_ADV_INDICATION) + case NODE_RX_TYPE_ADV_INDICATION: +#endif /* CONFIG_BT_CTLR_ADV_INDICATION */ + +#if defined(CONFIG_BT_CTLR_SCAN_INDICATION) + case NODE_RX_TYPE_SCAN_INDICATION: +#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */ + +#if defined(CONFIG_BT_HCI_MESH_EXT) + case NODE_RX_TYPE_MESH_ADV_CPLT: + case NODE_RX_TYPE_MESH_REPORT: +#endif /* CONFIG_BT_HCI_MESH_EXT */ + + mem_release(_node_rx_free, &mem_pdu_rx.free); + break; + +#if defined(CONFIG_BT_CONN) + case NODE_RX_TYPE_TERMINATE: + { + struct ll_conn *conn; + memq_link_t *link; + + conn = ll_conn_get(_node_rx_free->handle); + + LL_ASSERT(!conn->lll.link_tx_free); + link = memq_deinit(&conn->lll.memq_tx.head, + &conn->lll.memq_tx.tail); + LL_ASSERT(link); + conn->lll.link_tx_free = link; + + ll_conn_release(conn); + } + break; +#endif /* CONFIG_BT_CONN */ + + case NODE_RX_TYPE_NONE: + case NODE_RX_TYPE_EVENT_DONE: + default: + 
LL_ASSERT(0); + break; + } + } + + *node_rx = _node_rx; + + _rx_alloc(UINT8_MAX); +} + +void *ll_rx_link_alloc(void) +{ + return mem_acquire(&mem_link_rx.free); +} + +void ll_rx_link_release(void *link) +{ + mem_release(link, &mem_link_rx.free); +} + +void *ll_rx_alloc(void) +{ + return mem_acquire(&mem_pdu_rx.free); +} + +void ll_rx_release(void *node_rx) +{ + mem_release(node_rx, &mem_pdu_rx.free); +} + +void *ll_pdu_rx_alloc_peek(u8_t count) +{ + if (count > MFIFO_AVAIL_COUNT_GET(ll_pdu_rx_free)) { + return NULL; + } + + return MFIFO_DEQUEUE_PEEK(ll_pdu_rx_free); +} + +void *ll_pdu_rx_alloc(void) +{ + return MFIFO_DEQUEUE(ll_pdu_rx_free); +} + +void ll_rx_put(memq_link_t *link, void *rx) +{ + struct node_rx_hdr *rx_hdr = rx; + + /* Serialize Tx ack with Rx enqueue by storing reference to + * last element index in Tx ack FIFO. + */ +#if defined(CONFIG_BT_CONN) + rx_hdr->ack_last = mfifo_tx_ack.l; +#else /* !CONFIG_BT_CONN */ + ARG_UNUSED(rx_hdr); +#endif /* !CONFIG_BT_CONN */ + + /* Enqueue the Rx object */ + memq_enqueue(link, rx, &memq_ll_rx.tail); +} + +void ll_rx_sched(void) +{ + k_sem_give(sem_recv); +} + +void ll_timeslice_ticker_id_get(u8_t * const instance_index, + u8_t * const user_id) +{ + *instance_index = TICKER_INSTANCE_ID_CTLR; + *user_id = (TICKER_NODES - FLASH_TICKER_NODES); +} + +void ll_radio_state_abort(void) +{ + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, lll_disable}; + u32_t ret; + + ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0, + &_mfy); + LL_ASSERT(!ret); +} + +u32_t ll_radio_state_is_idle(void) +{ + return radio_is_idle(); +} + +void ull_ticker_status_give(u32_t status, void *param) +{ + *((u32_t volatile *)param) = status; + + k_sem_give(&sem_ticker_api_cb); +} + +u32_t ull_ticker_status_take(u32_t ret, u32_t volatile *ret_cb) +{ + if (ret == TICKER_STATUS_BUSY) { + /* TODO: Enable ticker job in case of CONFIG_BT_CTLR_LOW_LAT */ + } + + k_sem_take(&sem_ticker_api_cb, K_FOREVER); + + 
return *ret_cb; +} + +void *ull_disable_mark(void *param) +{ + if (!mark) { + mark = param; + } + + return mark; +} + +void *ull_disable_unmark(void *param) +{ + if (mark && mark == param) { + mark = NULL; + } + + return param; +} + +void *ull_disable_mark_get(void) +{ + return mark; +} + +int ull_disable(void *lll) +{ + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, lll_disable}; + struct ull_hdr *hdr; + struct k_sem sem; + u32_t ret; + + hdr = HDR_ULL(((struct lll_hdr *)lll)->parent); + if (!hdr || !hdr->ref) { + return ULL_STATUS_SUCCESS; + } + + k_sem_init(&sem, 0, 1); + hdr->disabled_param = &sem; + hdr->disabled_cb = _disabled_cb; + + _mfy.param = lll; + ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0, + &_mfy); + LL_ASSERT(!ret); + + return k_sem_take(&sem, K_FOREVER); +} + +void *ull_pdu_rx_alloc_peek(u8_t count) +{ + if (count > MFIFO_AVAIL_COUNT_GET(pdu_rx_free)) { + return NULL; + } + + return MFIFO_DEQUEUE_PEEK(pdu_rx_free); +} + +void *ull_pdu_rx_alloc_peek_iter(u8_t *idx) +{ + return *(void **)MFIFO_DEQUEUE_ITER_GET(pdu_rx_free, idx); +} + +void *ull_pdu_rx_alloc(void) +{ + return MFIFO_DEQUEUE(pdu_rx_free); +} + +void ull_rx_put(memq_link_t *link, void *rx) +{ + struct node_rx_hdr *rx_hdr = rx; + + /* Serialize Tx ack with Rx enqueue by storing reference to + * last element index in Tx ack FIFO. 
+ */ +#if defined(CONFIG_BT_CONN) + rx_hdr->ack_last = lll_conn_ack_last_idx_get(); +#elif defined(CONFIG_BT_TMP) + rx_hdr->ack_last = lll_tmp_ack_last_idx_get(); +#else /* !CONFIG_BT_TMP */ + ARG_UNUSED(rx_hdr); +#endif /* !CONFIG_BT_TMP */ + + /* Enqueue the Rx object */ + memq_enqueue(link, rx, &memq_ull_rx.tail); +} + +void ull_rx_sched(void) +{ + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, _rx_demux}; + + /* Kick the ULL (using the mayfly, tailchain it) */ + mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 1, &_mfy); +} + +#if defined(CONFIG_BT_CONN) +void ull_tx_ack_put(u16_t handle, struct node_tx *node_tx) +{ + struct lll_tx *tx; + u8_t idx; + + idx = MFIFO_ENQUEUE_GET(tx_ack, (void **)&tx); + LL_ASSERT(tx); + + tx->handle = handle; + tx->node = node_tx; + + MFIFO_ENQUEUE(tx_ack, idx); +} +#endif /* CONFIG_BT_CONN */ + +int ull_prepare_enqueue(lll_is_abort_cb_t is_abort_cb, + lll_abort_cb_t abort_cb, + struct lll_prepare_param *prepare_param, + lll_prepare_cb_t prepare_cb, int prio, + u8_t is_resume) +{ + struct lll_event *e; + u8_t idx; + + idx = MFIFO_ENQUEUE_GET(prep, (void **)&e); + if (!e) { + return -ENOBUFS; + } + + memcpy(&e->prepare_param, prepare_param, sizeof(e->prepare_param)); + e->prepare_cb = prepare_cb; + e->is_abort_cb = is_abort_cb; + e->abort_cb = abort_cb; + e->prio = prio; + e->is_resume = is_resume; + e->is_aborted = 0; + + MFIFO_ENQUEUE(prep, idx); + + return 0; +} + +void *ull_prepare_dequeue_get(void) +{ + return MFIFO_DEQUEUE_GET(prep); +} + +void *ull_prepare_dequeue_iter(u8_t *idx) +{ + return MFIFO_DEQUEUE_ITER_GET(prep, idx); +} + +void *ull_event_done_extra_get(void) +{ + struct node_rx_event_done *done; + + done = MFIFO_DEQUEUE_PEEK(done); + if (!done) { + return NULL; + } + + return &done->extra; +} + +void *ull_event_done(void *param) +{ + struct node_rx_event_done *done; + memq_link_t *link; + + done = MFIFO_DEQUEUE(done); + if (!done) { + return NULL; + } + + link = 
done->hdr.link; + done->hdr.link = NULL; + + done->hdr.type = NODE_RX_TYPE_EVENT_DONE; + done->param = param; + + ull_rx_put(link, done); + ull_rx_sched(); + + return done; +} + +u8_t ull_entropy_get(u8_t len, u8_t *rand) +{ + return entropy_get_entropy_isr(dev_entropy, rand, len, 0); +} + +static inline int _init_reset(void) +{ + memq_link_t *link; + + /* Initialize done pool. */ + mem_init(mem_done.pool, sizeof(struct node_rx_event_done), + EVENT_PIPELINE_MAX, &mem_done.free); + + /* Initialize done link pool. */ + mem_init(mem_link_done.pool, sizeof(memq_link_t), EVENT_PIPELINE_MAX, + &mem_link_done.free); + + /* Allocate done buffers */ + _done_alloc(); + + /* Initialize rx pool. */ + mem_pdu_rx.size = PDU_RX_SIZE_MIN; + mem_init(mem_pdu_rx.pool, mem_pdu_rx.size, + sizeof(mem_pdu_rx.pool) / mem_pdu_rx.size, + &mem_pdu_rx.free); + + /* Initialize rx link pool. */ + mem_init(mem_link_rx.pool, sizeof(memq_link_t), + sizeof(mem_link_rx.pool) / sizeof(memq_link_t), + &mem_link_rx.free); + + /* Acquire a link to initialize ull rx memq */ + link = mem_acquire(&mem_link_rx.free); + LL_ASSERT(link); + + /* Initialize ull rx memq */ + MEMQ_INIT(ull_rx, link); + + /* Acquire a link to initialize ll rx memq */ + link = mem_acquire(&mem_link_rx.free); + LL_ASSERT(link); + + /* Initialize ll rx memq */ + MEMQ_INIT(ll_rx, link); + + /* Allocate rx free buffers */ + mem_link_rx.quota_pdu = RX_CNT; + _rx_alloc(UINT8_MAX); + + return 0; +} + +static inline void _done_alloc(void) +{ + u8_t idx; + + while (MFIFO_ENQUEUE_IDX_GET(done, &idx)) { + memq_link_t *link; + struct node_rx_hdr *rx; + + link = mem_acquire(&mem_link_done.free); + if (!link) { + break; + } + + rx = mem_acquire(&mem_done.free); + if (!rx) { + mem_release(link, &mem_link_done.free); + break; + } + + rx->link = link; + + MFIFO_BY_IDX_ENQUEUE(done, idx, rx); + } +} + +static inline void *_done_release(memq_link_t *link, + struct node_rx_event_done *done) +{ + u8_t idx; + + done->hdr.link = link; + + if 
(!MFIFO_ENQUEUE_IDX_GET(done, &idx)) { + return NULL; + } + + MFIFO_BY_IDX_ENQUEUE(done, idx, done); + + return done; +} + +static inline void _rx_alloc(u8_t max) +{ + u8_t idx; + + while (mem_link_rx.quota_pdu && + MFIFO_ENQUEUE_IDX_GET(ll_pdu_rx_free, &idx)) { + memq_link_t *link; + struct node_rx_hdr *rx; + + link = mem_acquire(&mem_link_rx.free); + if (!link) { + break; + } + + rx = mem_acquire(&mem_pdu_rx.free); + if (!rx) { + mem_release(link, &mem_link_rx.free); + break; + } + + rx->link = link; + + MFIFO_BY_IDX_ENQUEUE(ll_pdu_rx_free, idx, rx); + + mem_link_rx.quota_pdu--; + } + + if (max > mem_link_rx.quota_pdu) { + max = mem_link_rx.quota_pdu; + } + + while ((max--) && MFIFO_ENQUEUE_IDX_GET(pdu_rx_free, &idx)) { + memq_link_t *link; + struct node_rx_hdr *rx; + + link = mem_acquire(&mem_link_rx.free); + if (!link) { + break; + } + + rx = mem_acquire(&mem_pdu_rx.free); + if (!rx) { + mem_release(link, &mem_link_rx.free); + break; + } + + rx->link = link; + + MFIFO_BY_IDX_ENQUEUE(pdu_rx_free, idx, rx); + + mem_link_rx.quota_pdu--; + } +} + +#if defined(CONFIG_BT_CONN) +static u8_t tx_cmplt_get(u16_t *handle, u8_t *first, u8_t last) +{ + struct lll_tx *tx; + u8_t cmplt; + + tx = mfifo_dequeue_iter_get(mfifo_tx_ack.m, mfifo_tx_ack.s, + mfifo_tx_ack.n, mfifo_tx_ack.f, last, + first); + if (!tx) { + return 0; + } + + *handle = tx->handle; + cmplt = 0; + do { + struct node_tx *node_tx; + struct pdu_data *p; + + node_tx = tx->node; + p = (void *)node_tx->pdu; + if (!node_tx || (node_tx == (void *)1) || + (((u32_t)node_tx & ~3) && + (p->ll_id == PDU_DATA_LLID_DATA_START || + p->ll_id == PDU_DATA_LLID_DATA_CONTINUE))) { + /* data packet, hence count num cmplt */ + tx->node = (void *)1; + cmplt++; + } else { + /* ctrl packet or flushed, hence dont count num cmplt */ + tx->node = (void *)2; + } + + if (((u32_t)node_tx & ~3)) { + ll_tx_mem_release(node_tx); + } + + tx = mfifo_dequeue_iter_get(mfifo_tx_ack.m, mfifo_tx_ack.s, + mfifo_tx_ack.n, mfifo_tx_ack.f, + last, 
first); + } while (tx && tx->handle == *handle); + + return cmplt; +} + +static inline void _rx_demux_conn_tx_ack(u8_t ack_last, u16_t handle, + memq_link_t *link, + struct node_tx *node_tx) +{ + do { + /* Dequeue node */ + lll_conn_ack_dequeue(); + + if (handle != 0xFFFF) { + struct ll_conn *conn; + + /* Get the conn instance */ + conn = ll_conn_get(handle); + + /* Process Tx ack */ + ull_conn_tx_ack(conn, link, node_tx); + + /* Release link mem */ + ull_conn_link_tx_release(link); + + /* De-mux 1 tx node from FIFO */ + ull_conn_tx_demux(1); + + /* Enqueue towards LLL */ + ull_conn_tx_lll_enqueue(conn, 1); + } else { + /* Pass through Tx ack */ + ull_tx_ack_put(0xFFFF, node_tx); + + /* Release link mem */ + ull_conn_link_tx_release(link); + + /* De-mux 1 tx node from FIFO */ + ull_conn_tx_demux(1); + } + + link = lll_conn_ack_by_last_peek(ack_last, &handle, &node_tx); + } while (link); + + /* trigger thread to call ll_rx_get() */ + ll_rx_sched(); +} +#endif /* CONFIG_BT_CONN */ + +#if defined(CONFIG_BT_TMP) +static inline void _rx_demux_tx_ack(u16_t handle, memq_link_t *link, + struct node_tx *node_tx) +{ + lll_tmp_ack_dequeue(); + + ull_tmp_link_tx_release(link); +} +#endif /* CONFIG_BT_TMP */ + +static void _rx_demux(void *param) +{ + memq_link_t *link; + + do { + struct node_rx_hdr *rx; + + link = memq_peek(memq_ull_rx.head, memq_ull_rx.tail, + (void **)&rx); + if (link) { +#if defined(CONFIG_BT_CONN) + struct node_tx *node_tx; + memq_link_t *link_tx; + u16_t handle; +#endif /* CONFIG_BT_CONN */ + + LL_ASSERT(rx); + +#if defined(CONFIG_BT_CONN) + link_tx = lll_conn_ack_by_last_peek(rx->ack_last, + &handle, &node_tx); + if (link_tx) { + _rx_demux_conn_tx_ack(rx->ack_last, handle, + link_tx, node_tx); + } else +#elif defined(CONFIG_BT_TMP) + link_tx = lll_tmp_ack_by_last_peek(rx->ack_last, + &handle, &node_tx); + if (link_tx) { + _rx_demux_tx_ack(handle, link_tx, node_tx); + } else +#endif /* CONFIG_BT_TMP */ + { + _rx_demux_rx(link, rx); + } +#if 
defined(CONFIG_BT_CONN) + } else { + struct node_tx *node_tx; + u8_t ack_last; + u16_t handle; + + link = lll_conn_ack_peek(&ack_last, &handle, &node_tx); + if (link) { + _rx_demux_conn_tx_ack(ack_last, handle, + link, node_tx); + } +#elif defined(CONFIG_BT_TMP) + } else { + struct node_tx *node_tx; + u16_t handle; + + link = lll_tmp_ack_peek(&handle, &node_tx); + if (link) { + _rx_demux_tx_ack(handle, link, node_tx); + } +#endif /* CONFIG_BT_TMP */ + } + } while (link); +} + +static inline void _rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx) +{ + /* Demux Rx objects */ + switch (rx->type) { + case NODE_RX_TYPE_EVENT_DONE: + { + memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL); + _rx_demux_event_done(link, rx); + } + break; + +#if defined(CONFIG_BT_OBSERVER) || \ + defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) || \ + defined(CONFIG_BT_CTLR_PROFILE_ISR) || \ + defined(CONFIG_BT_CTLR_ADV_INDICATION) || \ + defined(CONFIG_BT_CTLR_SCAN_INDICATION) +#if defined(CONFIG_BT_OBSERVER) + case NODE_RX_TYPE_REPORT: +#endif /* CONFIG_BT_OBSERVER */ + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + case NODE_RX_TYPE_EXT_1M_REPORT: + case NODE_RX_TYPE_EXT_CODED_REPORT: +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + +#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY) + case NODE_RX_TYPE_SCAN_REQ: +#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */ + +#if defined(CONFIG_BT_CTLR_PROFILE_ISR) + /* fallthrough */ + case NODE_RX_TYPE_PROFILE: +#endif /* CONFIG_BT_CTLR_PROFILE_ISR */ + +#if defined(CONFIG_BT_CTLR_ADV_INDICATION) + case NODE_RX_TYPE_ADV_INDICATION: +#endif /* CONFIG_BT_CTLR_ADV_INDICATION */ + +#if defined(CONFIG_BT_CTLR_SCAN_INDICATION) + case NODE_RX_TYPE_SCAN_INDICATION: +#endif /* CONFIG_BT_CTLR_SCAN_INDICATION */ + { + memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL); + ll_rx_put(link, rx); + ll_rx_sched(); + } + break; +#endif /* CONFIG_BT_OBSERVER || + * CONFIG_BT_CTLR_SCAN_REQ_NOTIFY || + * CONFIG_BT_CTLR_PROFILE_ISR || + * CONFIG_BT_CTLR_ADV_INDICATION || + * 
CONFIG_BT_CTLR_SCAN_INDICATION + */ + +#if defined(CONFIG_BT_CONN) + /* fallthrough */ + case NODE_RX_TYPE_CONNECTION: + { + memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL); + ull_conn_setup(link, rx); + } + break; + + case NODE_RX_TYPE_DC_PDU: + { + int nack; + + nack = ull_conn_rx(link, (void *)&rx); + if (!nack) { + memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL); + + if (rx) { + ll_rx_put(link, rx); + ll_rx_sched(); + } + } + } + break; +#endif /* CONFIG_BT_CONN */ + + default: + { + LL_ASSERT(0); + } + break; + } +} + +static inline void _rx_demux_event_done(memq_link_t *link, + struct node_rx_hdr *rx) +{ + struct node_rx_event_done *done = (void *)rx; + struct ull_hdr *ull_hdr; + struct lll_event *next; + + /* Get the ull instance */ + ull_hdr = done->param; + + /* Process role dependent event done */ + switch (done->extra.type) { +#if defined(CONFIG_BT_CONN) + case EVENT_DONE_EXTRA_TYPE_CONN: + ull_conn_done(done); + break; +#endif /* CONFIG_BT_CONN */ + case EVENT_DONE_EXTRA_TYPE_NONE: + /* ignore */ + break; + + default: + LL_ASSERT(0); + break; + } + + /* release done */ + done->extra.type = 0; + _done_release(link, done); + + /* dequeue prepare pipeline */ + next = ull_prepare_dequeue_get(); + while (next) { + u8_t is_resume = next->is_resume; + + if (!next->is_aborted) { + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, + lll_resume}; + u32_t ret; + + _mfy.param = next; + ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, + TICKER_USER_ID_LLL, 0, &_mfy); + LL_ASSERT(!ret); + } + + MFIFO_DEQUEUE(prep); + + next = ull_prepare_dequeue_get(); + + if (!next || next->is_resume || !is_resume) { + break; + } + } + + /* ull instance will resume, dont decrement ref */ + if (!ull_hdr) { + return; + } + + /* Decrement prepare reference */ + LL_ASSERT(ull_hdr->ref); + ull_hdr->ref--; + + /* If disable initiated, signal the semaphore */ + if (!ull_hdr->ref && ull_hdr->disabled_cb) { + 
ull_hdr->disabled_cb(ull_hdr->disabled_param); + } +} + +static void _disabled_cb(void *param) +{ + k_sem_give(param); +} diff --git a/subsys/bluetooth/controller/ll_sw/ull_adv.c b/subsys/bluetooth/controller/ll_sw/ull_adv.c new file mode 100644 index 00000000000..b45555d196b --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_adv.c @@ -0,0 +1,1141 @@ +/* + * Copyright (c) 2016-2019 Nordic Semiconductor ASA + * Copyright (c) 2016 Vinayak Kariappa Chettimada + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include +#include + +#include "hal/ccm.h" +#include "hal/ticker.h" + +#include "util/util.h" +#include "util/memq.h" +#include "util/mayfly.h" + +#include "ticker/ticker.h" + +#include "pdu.h" +#include "ll.h" +#include "ll_feat.h" +#include "lll.h" +#include "lll_vendor.h" +#include "lll_clock.h" +#include "lll_adv.h" +#include "lll_scan.h" +#include "lll_conn.h" +#include "lll_filter.h" + +#include "ull_adv_types.h" +#include "ull_scan_types.h" +#include "ull_conn_types.h" +#include "ull_adv_internal.h" +#include "ull_scan_internal.h" +#include "ull_conn_internal.h" +#include "ull_internal.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_ull_adv +#include "common/log.h" +#include +#include "hal/debug.h" + +inline struct ll_adv_set *ull_adv_set_get(u16_t handle); +inline u16_t ull_adv_handle_get(struct ll_adv_set *adv); + +static int _init_reset(void); +static inline struct ll_adv_set *is_disabled_get(u16_t handle); +static void ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param); +static void ticker_op_update_cb(u32_t status, void *params); + +#if defined(CONFIG_BT_PERIPHERAL) +static void ticker_stop_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param); +static void ticker_op_stop_cb(u32_t status, void *params); +static void disabled_cb(void *param); +static inline void _conn_release(struct ll_adv_set *adv); +#endif /* CONFIG_BT_PERIPHERAL */ + +static inline u8_t disable(u16_t handle); + +static struct 
ll_adv_set ll_adv[CONFIG_BT_ADV_MAX]; + +#if defined(CONFIG_BT_CTLR_ADV_EXT) +u8_t ll_adv_params_set(u8_t handle, u16_t evt_prop, u32_t interval, + u8_t adv_type, u8_t own_addr_type, + u8_t direct_addr_type, u8_t const *const direct_addr, + u8_t chan_map, u8_t filter_policy, u8_t *tx_pwr, + u8_t phy_p, u8_t skip, u8_t phy_s, u8_t sid, u8_t sreq) +{ + u8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND, + PDU_ADV_TYPE_DIRECT_IND, + PDU_ADV_TYPE_SCAN_IND, + PDU_ADV_TYPE_NONCONN_IND, + PDU_ADV_TYPE_DIRECT_IND, + PDU_ADV_TYPE_EXT_IND}; +#else /* !CONFIG_BT_CTLR_ADV_EXT */ +u8_t ll_adv_params_set(u16_t interval, u8_t adv_type, + u8_t own_addr_type, u8_t direct_addr_type, + u8_t const *const direct_addr, u8_t chan_map, + u8_t filter_policy) +{ + u8_t const pdu_adv_type[] = {PDU_ADV_TYPE_ADV_IND, + PDU_ADV_TYPE_DIRECT_IND, + PDU_ADV_TYPE_SCAN_IND, + PDU_ADV_TYPE_NONCONN_IND, + PDU_ADV_TYPE_DIRECT_IND}; + u16_t const handle = 0; +#endif /* !CONFIG_BT_CTLR_ADV_EXT */ + + struct ll_adv_set *adv; + struct pdu_adv *pdu; + + adv = is_disabled_get(handle); + if (!adv) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + /* TODO: check and fail (0x12, invalid HCI cmd param) if invalid + * evt_prop bits. 
+ */ + + adv->lll.phy_p = BIT(0); + + /* extended */ + if (adv_type > 0x04) { + /* legacy */ + if (evt_prop & BIT(4)) { + u8_t const leg_adv_type[] = { 0x03, 0x04, 0x02, 0x00}; + + adv_type = leg_adv_type[evt_prop & 0x03]; + + /* high duty cycle directed */ + if (evt_prop & BIT(3)) { + adv_type = 0x01; + } + } else { + /* - Connectable and scannable not allowed; + * - High duty cycle directed connectable not allowed + */ + if (((evt_prop & 0x03) == 0x03) || + ((evt_prop & 0x0C) == 0x0C)) { + return 0x12; /* invalid HCI cmd param */ + } + + adv_type = 0x05; /* PDU_ADV_TYPE_EXT_IND */ + + adv->lll.phy_p = phy_p; + } + } +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + + /* remember params so that set adv/scan data and adv enable + * interface can correctly update adv/scan data in the + * double buffer between caller and controller context. + */ + /* Set interval for Undirected or Low Duty Cycle Directed Advertising */ + if (adv_type != 0x01) { + adv->interval = interval; + } else { + adv->interval = 0; + } + adv->lll.chan_map = chan_map; + adv->lll.filter_policy = filter_policy; + + /* update the "current" primary adv data */ + pdu = lll_adv_data_peek(&adv->lll); + pdu->type = pdu_adv_type[adv_type]; + pdu->rfu = 0; + + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2) && + ((pdu->type == PDU_ADV_TYPE_ADV_IND) || + (pdu->type == PDU_ADV_TYPE_DIRECT_IND))) { + pdu->chan_sel = 1; + } else { + pdu->chan_sel = 0; + } + +#if defined(CONFIG_BT_CTLR_PRIVACY) + adv->own_addr_type = own_addr_type; + if (adv->own_addr_type == BT_ADDR_LE_PUBLIC_ID || + adv->own_addr_type == BT_ADDR_LE_RANDOM_ID) { + adv->id_addr_type = direct_addr_type; + memcpy(&adv->id_addr, direct_addr, BDADDR_SIZE); + } +#endif /* CONFIG_BT_CTLR_PRIVACY */ + pdu->tx_addr = own_addr_type & 0x1; + pdu->rx_addr = 0; + if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) { + pdu->rx_addr = direct_addr_type; + memcpy(&pdu->direct_ind.tgt_addr[0], direct_addr, BDADDR_SIZE); + pdu->len = sizeof(struct pdu_adv_direct_ind); + +#if 
defined(CONFIG_BT_CTLR_ADV_EXT) + } else if (pdu->type == PDU_ADV_TYPE_EXT_IND) { + struct pdu_adv_com_ext_adv *p; + struct ext_adv_hdr _h, *h; + u8_t *_ptr, *ptr; + u8_t len; + + p = (void *)&pdu->adv_ext_ind; + h = (void *)p->ext_hdr_adi_adv_data; + ptr = (u8_t *)h + sizeof(*h); + _ptr = ptr; + + /* No ACAD and no AdvData */ + p->ext_hdr_len = 0; + p->adv_mode = evt_prop & 0x03; + + /* Zero-init header flags */ + *(u8_t *)&_h = *(u8_t *)h; + *(u8_t *)h = 0; + + /* AdvA flag */ + if (_h.adv_addr) { + _ptr += BDADDR_SIZE; + } + if (!p->adv_mode && + (!_h.aux_ptr || + (!(evt_prop & BIT(5)) && (phy_p != BIT(2))))) { + /* TODO: optional on 1M with Aux Ptr */ + h->adv_addr = 1; + + /* NOTE: AdvA is filled at enable */ + ptr += BDADDR_SIZE; + } + + /* TODO: TargetA flag */ + + /* ADI flag */ + if (_h.adi) { + h->adi = 1; + ptr += sizeof(struct ext_adv_adi); + } + + /* AuxPtr flag */ + if (_h.aux_ptr) { + h->aux_ptr = 1; + ptr += sizeof(struct ext_adv_aux_ptr); + } + + /* No SyncInfo flag in primary channel PDU */ + + /* Tx Power flag */ + if (evt_prop & BIT(6) && + (!_h.aux_ptr || (phy_p != BIT(2)))) { + h->tx_pwr = 1; + ptr++; + } + + /* Calc primary PDU len */ + len = ptr - (u8_t *)p; + if (len > (offsetof(struct pdu_adv_com_ext_adv, + ext_hdr_adi_adv_data) + sizeof(*h))) { + p->ext_hdr_len = len - + offsetof(struct pdu_adv_com_ext_adv, + ext_hdr_adi_adv_data); + pdu->len = len; + } else { + pdu->len = offsetof(struct pdu_adv_com_ext_adv, + ext_hdr_adi_adv_data); + } + + /* Start filling primary PDU payload based on flags */ + + /* No AdvData in primary channel PDU */ + + /* No ACAD in primary channel PDU */ + + /* Tx Power */ + if (h->tx_pwr) { + u8_t _tx_pwr; + + _tx_pwr = 0; + if (tx_pwr) { + if (*tx_pwr != 0x7F) { + _tx_pwr = *tx_pwr; + } else { + *tx_pwr = _tx_pwr; + } + } + + ptr--; + *ptr = _tx_pwr; + } + + /* No SyncInfo in primary channel PDU */ + + /* AuxPtr */ + if (h->aux_ptr) { + struct ext_adv_aux_ptr *aux; + + ptr -= sizeof(struct ext_adv_aux_ptr); + + 
/* NOTE: Channel Index, CA, Offset Units and AUX Offset + * will be set in Advertiser Event. + */ + aux = (void *)ptr; + aux->phy = find_lsb_set(phy_s); + } + + /* ADI */ + if (h->adi) { + struct ext_adv_adi *adi; + + ptr -= sizeof(struct ext_adv_adi); + /* NOTE: memcpy shall handle overlapping buffers */ + memcpy(ptr, _ptr, sizeof(struct ext_adv_adi)); + + adi = (void *)ptr; + adi->sid = sid; + } + + /* NOTE: TargetA, filled at enable and RPA timeout */ + + /* NOTE: AdvA, filled at enable and RPA timeout */ +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + + } else if (pdu->len == 0) { + pdu->len = BDADDR_SIZE; + } + + /* update the current scan data */ + pdu = lll_adv_scan_rsp_peek(&adv->lll); + pdu->type = PDU_ADV_TYPE_SCAN_RSP; + pdu->rfu = 0; + pdu->chan_sel = 0; + pdu->tx_addr = own_addr_type & 0x1; + pdu->rx_addr = 0; + if (pdu->len == 0) { + pdu->len = BDADDR_SIZE; + } + + return 0; +} + +#if defined(CONFIG_BT_CTLR_ADV_EXT) +u8_t ll_adv_data_set(u16_t handle, u8_t len, u8_t const *const data) +{ +#else /* !CONFIG_BT_CTLR_ADV_EXT */ +u8_t ll_adv_data_set(u8_t len, u8_t const *const data) +{ + const u16_t handle = 0; +#endif /* !CONFIG_BT_CTLR_ADV_EXT */ + struct ll_adv_set *adv; + struct pdu_adv *prev; + struct pdu_adv *pdu; + u8_t idx; + + adv = ull_adv_set_get(handle); + if (!adv) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + /* Dont update data if directed or extended advertising. */ + prev = lll_adv_data_peek(&adv->lll); + if ((prev->type == PDU_ADV_TYPE_DIRECT_IND) || + (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) && + (prev->type == PDU_ADV_TYPE_EXT_IND))) { + /* TODO: remember data, to be used if type is changed using + * parameter set function ll_adv_params_set afterwards. + */ + return 0; + } + + /* update adv pdu fields. 
*/ + pdu = lll_adv_data_alloc(&adv->lll, &idx); + pdu->type = prev->type; + pdu->rfu = 0; + + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + pdu->chan_sel = prev->chan_sel; + } else { + pdu->chan_sel = 0; + } + + pdu->tx_addr = prev->tx_addr; + pdu->rx_addr = prev->rx_addr; + memcpy(&pdu->adv_ind.addr[0], &prev->adv_ind.addr[0], BDADDR_SIZE); + memcpy(&pdu->adv_ind.data[0], data, len); + pdu->len = BDADDR_SIZE + len; + + lll_adv_data_enqueue(&adv->lll, idx); + + return 0; +} + +#if defined(CONFIG_BT_CTLR_ADV_EXT) +u8_t ll_adv_scan_rsp_set(u16_t handle, u8_t len, u8_t const *const data) +{ +#else /* !CONFIG_BT_CTLR_ADV_EXT */ +u8_t ll_adv_scan_rsp_set(u8_t len, u8_t const *const data) +{ + const u16_t handle = 0; +#endif /* !CONFIG_BT_CTLR_ADV_EXT */ + struct ll_adv_set *adv; + struct pdu_adv *prev; + struct pdu_adv *pdu; + u8_t idx; + + adv = ull_adv_set_get(handle); + if (!adv) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + /* update scan pdu fields. */ + prev = lll_adv_scan_rsp_peek(&adv->lll); + pdu = lll_adv_scan_rsp_alloc(&adv->lll, &idx); + pdu->type = PDU_ADV_TYPE_SCAN_RSP; + pdu->rfu = 0; + pdu->chan_sel = 0; + pdu->tx_addr = prev->tx_addr; + pdu->rx_addr = 0; + pdu->len = BDADDR_SIZE + len; + memcpy(&pdu->scan_rsp.addr[0], &prev->scan_rsp.addr[0], BDADDR_SIZE); + memcpy(&pdu->scan_rsp.data[0], data, len); + + lll_adv_scan_rsp_enqueue(&adv->lll, idx); + + return 0; +} + +#if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_HCI_MESH_EXT) +#if defined(CONFIG_BT_HCI_MESH_EXT) +u8_t ll_adv_enable(u16_t handle, u8_t enable, + u8_t at_anchor, u32_t ticks_anchor, u8_t retry, + u8_t scan_window, u8_t scan_delay) +{ +#else /* !CONFIG_BT_HCI_MESH_EXT */ +u8_t ll_adv_enable(u16_t handle, u8_t enable) +{ + u32_t ticks_anchor; +#endif /* !CONFIG_BT_HCI_MESH_EXT */ +#else /* !CONFIG_BT_CTLR_ADV_EXT || !CONFIG_BT_HCI_MESH_EXT */ +u8_t ll_adv_enable(u8_t enable) +{ + u16_t const handle = 0; + u32_t ticks_anchor; +#endif /* !CONFIG_BT_CTLR_ADV_EXT || 
!CONFIG_BT_HCI_MESH_EXT */ + volatile u32_t ret_cb = TICKER_STATUS_BUSY; + u8_t rl_idx = FILTER_IDX_NONE; + u32_t ticks_slot_overhead; + struct pdu_adv *pdu_scan; + struct pdu_adv *pdu_adv; + u32_t ticks_slot_offset; + struct ll_adv_set *adv; + struct lll_adv *lll; + u16_t interval; + u32_t slot_us; + u8_t chan_map; + u8_t chan_cnt; + u32_t ret; + + if (!enable) { + return disable(handle); + } + + adv = is_disabled_get(handle); + if (!adv) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + /* remember addr to use and also update the addr in + * both adv and scan response PDUs. + */ + lll = &adv->lll; + pdu_adv = lll_adv_data_peek(lll); + pdu_scan = lll_adv_scan_rsp_peek(lll); + + if (0) { + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + } else if (pdu_adv->type == PDU_ADV_TYPE_EXT_IND) { + struct pdu_adv_com_ext_adv *p; + struct ext_adv_hdr *h; + u8_t *ptr; + + p = (void *)&pdu_adv->adv_ext_ind; + h = (void *)p->ext_hdr_adi_adv_data; + ptr = (u8_t *)h + sizeof(*h); + + /* AdvA, fill here at enable */ + if (h->adv_addr) { + memcpy(ptr, ll_addr_get(pdu_adv->tx_addr, NULL), + BDADDR_SIZE); + } + + /* TODO: TargetA, fill here at enable */ +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + } else { + bool priv = false; + +#if defined(CONFIG_BT_CTLR_PRIVACY) + /* Prepare whitelist and optionally resolving list */ + ll_filters_adv_update(lll->filter_policy); + + if (adv->own_addr_type == BT_ADDR_LE_PUBLIC_ID || + adv->own_addr_type == BT_ADDR_LE_RANDOM_ID) { + /* Look up the resolving list */ + rl_idx = ll_rl_find(adv->id_addr_type, adv->id_addr, + NULL); + + if (rl_idx != FILTER_IDX_NONE) { + /* Generate RPAs if required */ + ll_rl_rpa_update(false); + } + + ll_rl_pdu_adv_update(adv, rl_idx, pdu_adv); + ll_rl_pdu_adv_update(adv, rl_idx, pdu_scan); + priv = true; + } +#endif /* !CONFIG_BT_CTLR_PRIVACY */ + + if (!priv) { + memcpy(&pdu_adv->adv_ind.addr[0], + ll_addr_get(pdu_adv->tx_addr, NULL), + BDADDR_SIZE); + memcpy(&pdu_scan->scan_rsp.addr[0], + ll_addr_get(pdu_adv->tx_addr, NULL), + 
BDADDR_SIZE); + } + } + +#if defined(CONFIG_BT_HCI_MESH_EXT) + if (scan_delay) { + if (ull_scan_is_enabled(0)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + lll->is_mesh = 1; + } +#endif /* CONFIG_BT_HCI_MESH_EXT */ + +#if defined(CONFIG_BT_PERIPHERAL) + /* prepare connectable advertising */ + if ((pdu_adv->type == PDU_ADV_TYPE_ADV_IND) || + (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND)) { + struct node_rx_pdu *node_rx; + struct ll_conn *conn; + struct lll_conn *conn_lll; + void *link; + + if (lll->conn) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + link = ll_rx_link_alloc(); + if (!link) { + return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED; + } + + node_rx = ll_rx_alloc(); + if (!node_rx) { + ll_rx_link_release(link); + + return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED; + } + + conn = ll_conn_acquire(); + if (!conn) { + ll_rx_release(node_rx); + ll_rx_link_release(link); + + return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED; + } + + conn_lll = &conn->lll; + conn_lll->handle = 0xFFFF; + + if (!conn_lll->link_tx_free) { + conn_lll->link_tx_free = &conn_lll->link_tx; + } + + memq_init(conn_lll->link_tx_free, &conn_lll->memq_tx.head, + &conn_lll->memq_tx.tail); + conn_lll->link_tx_free = NULL; + + conn_lll->packet_tx_head_len = 0; + conn_lll->packet_tx_head_offset = 0; + + conn_lll->sn = 0; + conn_lll->nesn = 0; + conn_lll->empty = 0; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + conn_lll->enc_rx = 0; + conn_lll->enc_tx = 0; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +#if defined(CONFIG_BT_CTLR_PHY) + conn_lll->phy_tx = BIT(0); + conn_lll->phy_flags = 0; + conn_lll->phy_tx_time = BIT(0); + conn_lll->phy_rx = BIT(0); +#endif /* CONFIG_BT_CTLR_PHY */ + +#if defined(CONFIG_BT_CTLR_CONN_RSSI) + conn_lll->rssi_latest = 0x7F; + conn_lll->rssi_reported = 0x7F; + conn_lll->rssi_sample_count = 0; +#endif /* CONFIG_BT_CTLR_CONN_RSSI */ + + /* FIXME: BEGIN: Move to ULL? 
*/ + conn_lll->role = 1; + conn_lll->data_chan_sel = 0; + conn_lll->data_chan_use = 0; + conn_lll->event_counter = 0; + + conn_lll->latency_prepare = 0; + conn_lll->latency_event = 0; + conn_lll->slave.latency_enabled = 0; + conn_lll->slave.latency_cancel = 0; + conn_lll->slave.window_widening_prepare_us = 0; + conn_lll->slave.window_widening_event_us = 0; + conn_lll->slave.window_size_prepare_us = 0; + /* FIXME: END: Move to ULL? */ + + conn->connect_expire = 6; + conn->supervision_expire = 0; + conn->procedure_expire = 0; + + conn->common.fex_valid = 0; + + conn->llcp_req = conn->llcp_ack = conn->llcp_type = 0; + conn->llcp_rx = NULL; + conn->llcp_features = LL_FEAT; + conn->llcp_version.tx = conn->llcp_version.rx = 0; + conn->llcp_terminate.reason_peer = 0; + /* NOTE: use allocated link for generating dedicated + * terminate ind rx node + */ + conn->llcp_terminate.node_rx.hdr.link = link; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + conn->pause_tx = conn->pause_rx = conn->refresh = 0; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + conn->llcp_conn_param.req = 0; + conn->llcp_conn_param.ack = 0; + conn->llcp_conn_param.disabled = 0; +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + +#if defined(CONFIG_BT_CTLR_PHY) + conn->llcp_phy.req = conn->llcp_phy.ack = 0; + conn->phy_pref_tx = ull_conn_default_phy_tx_get(); + conn->phy_pref_rx = ull_conn_default_phy_rx_get(); + conn->phy_pref_flags = 0; +#endif /* CONFIG_BT_CTLR_PHY */ + + conn->tx_head = conn->tx_ctrl = conn->tx_ctrl_last = + conn->tx_data = conn->tx_data_last = 0; + + /* NOTE: using same link as supplied for terminate ind */ + adv->link_cc_free = link; + adv->node_rx_cc_free = node_rx; + lll->conn = conn_lll; + + ull_hdr_init(&conn->ull); + lll_hdr_init(&conn->lll, conn); + + /* wait for stable clocks */ + lll_clock_wait(); + } +#endif /* CONFIG_BT_PERIPHERAL */ + +#if defined(CONFIG_BT_CTLR_PRIVACY) + _radio.advertiser.rl_idx = rl_idx; +#else + ARG_UNUSED(rl_idx); +#endif /* 
CONFIG_BT_CTLR_PRIVACY */ + + interval = adv->interval; + chan_map = lll->chan_map; + chan_cnt = util_ones_count_get(&chan_map, sizeof(chan_map)); + + /* TODO: use adv data len in slot duration calculation, instead of + * hardcoded max. numbers used below. + */ + if (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND) { + /* Max. chain is DIRECT_IND * channels + CONNECT_IND */ + slot_us = ((EVENT_OVERHEAD_START_US + 176 + 152 + 40) * + chan_cnt) - 40 + 352; + } else if (pdu_adv->type == PDU_ADV_TYPE_NONCONN_IND) { + slot_us = (EVENT_OVERHEAD_START_US + 376) * chan_cnt; + } else { + /* Max. chain is ADV/SCAN_IND + SCAN_REQ + SCAN_RESP */ + slot_us = (EVENT_OVERHEAD_START_US + 376 + 152 + 176 + + 152 + 376) * chan_cnt; + } + +#if defined(CONFIG_BT_HCI_MESH_EXT) + if (lll->is_mesh) { + u16_t interval_min_us; + + _radio.advertiser.retry = retry; + _radio.advertiser.scan_delay_ms = scan_delay; + _radio.advertiser.scan_window_ms = scan_window; + + interval_min_us = slot_us + (scan_delay + scan_window) * 1000; + if ((interval * 625) < interval_min_us) { + interval = (interval_min_us + (625 - 1)) / 625; + } + + /* passive scanning */ + _radio.scanner.type = 0; + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + /* TODO: Coded PHY support */ + _radio.scanner.phy = 0; +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + +#if defined(CONFIG_BT_CTLR_PRIVACY) + /* TODO: Privacy support */ + _radio.scanner.rpa_gen = 0; + _radio.scanner.rl_idx = rl_idx; +#endif /* CONFIG_BT_CTLR_PRIVACY */ + + _radio.scanner.filter_policy = filter_policy; + } +#endif /* CONFIG_BT_HCI_MESH_EXT */ + + ull_hdr_init(&adv->ull); + lll_hdr_init(lll, adv); + + /* TODO: active_to_start feature port */ + adv->evt.ticks_active_to_start = 0; + adv->evt.ticks_xtal_to_start = + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US); + adv->evt.ticks_preempt_to_start = + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US); + adv->evt.ticks_slot = HAL_TICKER_US_TO_TICKS(slot_us); + + ticks_slot_offset = max(adv->evt.ticks_active_to_start, + 
adv->evt.ticks_xtal_to_start); + + if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) { + ticks_slot_overhead = ticks_slot_offset; + } else { + ticks_slot_overhead = 0; + } + +#if !defined(CONFIG_BT_HCI_MESH_EXT) + ticks_anchor = ticker_ticks_now_get(); +#else /* CONFIG_BT_HCI_MESH_EXT */ + if (!at_anchor) { + ticks_anchor = ticker_ticks_now_get(); + } +#endif /* !CONFIG_BT_HCI_MESH_EXT */ + + /* High Duty Cycle Directed Advertising if interval is 0. */ +#if defined(CONFIG_BT_PERIPHERAL) + lll->is_hdcd = !interval && (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND); + if (lll->is_hdcd) { + ret = ticker_start(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_THREAD, + (TICKER_ID_ADV_BASE + handle), + ticks_anchor, 0, + adv->evt.ticks_slot, + TICKER_NULL_REMAINDER, TICKER_NULL_LAZY, + (adv->evt.ticks_slot + ticks_slot_overhead), + ticker_cb, adv, + ull_ticker_status_give, (void *)&ret_cb); + + ret = ull_ticker_status_take(ret, &ret_cb); + if (ret != TICKER_STATUS_SUCCESS) { + goto failure_cleanup; + } + + ret_cb = TICKER_STATUS_BUSY; + ret = ticker_start(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_THREAD, + TICKER_ID_ADV_STOP, ticks_anchor, + HAL_TICKER_US_TO_TICKS(ticks_slot_offset + + (1280 * 1000)), + TICKER_NULL_PERIOD, TICKER_NULL_REMAINDER, + TICKER_NULL_LAZY, TICKER_NULL_SLOT, + ticker_stop_cb, adv, + ull_ticker_status_give, (void *)&ret_cb); + } else +#endif /* CONFIG_BT_PERIPHERAL */ + { + ret = ticker_start(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_THREAD, + (TICKER_ID_ADV_BASE + handle), + ticks_anchor, 0, + HAL_TICKER_US_TO_TICKS((u64_t)interval * + 625), + TICKER_NULL_REMAINDER, TICKER_NULL_LAZY, + (adv->evt.ticks_slot + ticks_slot_overhead), + ticker_cb, adv, + ull_ticker_status_give, (void *)&ret_cb); + } + + ret = ull_ticker_status_take(ret, &ret_cb); + if (ret != TICKER_STATUS_SUCCESS) { + goto failure_cleanup; + } + + adv->is_enabled = 1; + +#if defined(CONFIG_BT_CTLR_PRIVACY) +#if defined(CONFIG_BT_HCI_MESH_EXT) + if (_radio.advertiser.is_mesh) { + _radio.scanner.is_enabled 
= 1; + + ll_adv_scan_state_cb(BIT(0) | BIT(1)); + } +#else /* !CONFIG_BT_HCI_MESH_EXT */ + if (!ull_scan_is_enabled_get(0)) { + ll_adv_scan_state_cb(BIT(0)); + } +#endif /* !CONFIG_BT_HCI_MESH_EXT */ +#endif /* CONFIG_BT_CTLR_PRIVACY */ + + return 0; + +failure_cleanup: + +#if defined(CONFIG_BT_PERIPHERAL) + if (adv->lll.conn) { + _conn_release(adv); + } +#endif /* CONFIG_BT_PERIPHERAL */ + + return BT_HCI_ERR_CMD_DISALLOWED; +} + +int ull_adv_init(void) +{ + int err; + + err = _init_reset(); + if (err) { + return err; + } + + return 0; +} + +int ull_adv_reset(void) +{ + u16_t handle; + int err; + + for (handle = 0; handle < CONFIG_BT_ADV_MAX; handle++) { + (void)disable(handle); + } + + err = _init_reset(); + if (err) { + return err; + } + + return 0; +} + +inline struct ll_adv_set *ull_adv_set_get(u16_t handle) +{ + if (handle >= CONFIG_BT_ADV_MAX) { + return NULL; + } + + return &ll_adv[handle]; +} + +inline u16_t ull_adv_handle_get(struct ll_adv_set *adv) +{ + return ((u8_t *)adv - (u8_t *)ll_adv) / sizeof(*adv); +} + +inline struct ll_adv_set *ull_adv_is_enabled_get(u16_t handle) +{ + struct ll_adv_set *adv; + + adv = ull_adv_set_get(handle); + if (!adv || !adv->is_enabled) { + return NULL; + } + + return adv; +} + +u32_t ull_adv_is_enabled(u16_t handle) +{ + struct ll_adv_set *adv; + + adv = ull_adv_is_enabled_get(handle); + if (!adv) { + return 0; + } + + return BIT(0); +} + +u32_t ull_adv_filter_pol_get(u16_t handle) +{ + struct ll_adv_set *adv; + + adv = ull_adv_is_enabled_get(handle); + if (!adv) { + return 0; + } + + return adv->lll.filter_policy; +} + +static int _init_reset(void) +{ + return 0; +} + +static inline struct ll_adv_set *is_disabled_get(u16_t handle) +{ + struct ll_adv_set *adv; + + adv = ull_adv_set_get(handle); + if (!adv || adv->is_enabled) { + return NULL; + } + + return adv; +} + +static void ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param) +{ + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, 
&_link, NULL, lll_adv_prepare}; + static struct lll_prepare_param p; + struct ll_adv_set *adv = param; + struct lll_adv *lll; + u32_t ret; + u8_t ref; + + DEBUG_RADIO_PREPARE_A(1); + + /* Increment prepare reference count */ + ref = ull_ref_inc(&adv->ull); + LL_ASSERT(ref); + + lll = &adv->lll; + + /* Append timing parameters */ + p.ticks_at_expire = ticks_at_expire; + p.remainder = remainder; + p.lazy = lazy; + p.param = lll; + _mfy.param = &p; + + /* Kick LLL prepare */ + ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, + 0, &_mfy); + LL_ASSERT(!ret); + + /* Apply adv random delay */ +#if defined(CONFIG_BT_PERIPHERAL) + if (!lll->is_hdcd) +#endif /* CONFIG_BT_PERIPHERAL */ + { + u8_t random_delay; + u32_t ret; + + ull_entropy_get(sizeof(random_delay), &random_delay); + random_delay %= 10; + random_delay += 1; + + ret = ticker_update(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_ULL_HIGH, + (TICKER_ID_ADV_BASE + + ull_adv_handle_get(adv)), + HAL_TICKER_US_TO_TICKS(random_delay * 1000), + 0, 0, 0, 0, 0, + ticker_op_update_cb, adv); + LL_ASSERT((ret == TICKER_STATUS_SUCCESS) || + (ret == TICKER_STATUS_BUSY)); + } + + DEBUG_RADIO_PREPARE_A(1); +} + +static void ticker_op_update_cb(u32_t status, void *param) +{ + LL_ASSERT(status == TICKER_STATUS_SUCCESS || + param == ull_disable_mark_get()); +} + +#if defined(CONFIG_BT_PERIPHERAL) +static void ticker_stop_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param) +{ + struct ll_adv_set *adv = param; + u16_t handle; + u32_t ret; + +#if 0 + /* NOTE: abort the event, so as to permit ticker_job execution, if + * disabled inside events. 
+ */ + if (adv->ull.ref) { + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, lll_disable}; + + _mfy.param = &adv->lll; + ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, + TICKER_USER_ID_LLL, 0, &_mfy); + LL_ASSERT(!ret); + } +#endif + + handle = ull_adv_handle_get(adv); + LL_ASSERT(handle < CONFIG_BT_ADV_MAX); + + ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH, + TICKER_ID_ADV_BASE + handle, + ticker_op_stop_cb, adv); + LL_ASSERT((ret == TICKER_STATUS_SUCCESS) || + (ret == TICKER_STATUS_BUSY)); +} + +static void ticker_op_stop_cb(u32_t status, void *param) +{ + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, NULL}; + struct ll_adv_set *adv; + struct ull_hdr *hdr; + u32_t ret; + + /* Ignore if race between thread and ULL */ + if (status != TICKER_STATUS_SUCCESS) { + /* TODO: detect race */ + + return; + } + +#if defined(CONFIG_BT_HCI_MESH_EXT) + /* FIXME: why is this here for Mesh commands? */ + if (params) { + return; + } +#endif /* CONFIG_BT_HCI_MESH_EXT */ + + adv = param; + hdr = &adv->ull; + _mfy.param = &adv->lll; + if (hdr->ref) { + LL_ASSERT(!hdr->disabled_cb); + hdr->disabled_param = _mfy.param; + hdr->disabled_cb = disabled_cb; + + _mfy.fp = lll_disable; + ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, + TICKER_USER_ID_LLL, 0, &_mfy); + LL_ASSERT(!ret); + } else { + _mfy.fp = disabled_cb; + ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, + TICKER_USER_ID_ULL_HIGH, 0, &_mfy); + LL_ASSERT(!ret); + } +} + +static void disabled_cb(void *param) +{ + struct node_rx_ftr *ftr; + struct ll_adv_set *adv; + struct node_rx_pdu *rx; + struct node_rx_cc *cc; + memq_link_t *link; + + adv = ((struct lll_hdr *)param)->parent; + + LL_ASSERT(adv->link_cc_free); + link = adv->link_cc_free; + adv->link_cc_free = NULL; + + LL_ASSERT(adv->node_rx_cc_free); + rx = adv->node_rx_cc_free; + adv->node_rx_cc_free = NULL; + + rx->hdr.type = NODE_RX_TYPE_CONNECTION; + rx->hdr.handle = 0xffff; + + cc = (void 
*)rx->pdu; + memset(cc, 0x00, sizeof(struct node_rx_cc)); + cc->status = 0x3c; + + ftr = (void *)((u8_t *)rx->pdu + + (offsetof(struct pdu_adv, connect_ind) + + sizeof(struct pdu_adv_connect_ind))); + + ftr->param = param; + + ll_rx_put(link, rx); + ll_rx_sched(); +} + +static inline void _conn_release(struct ll_adv_set *adv) +{ + ll_conn_release(adv->lll.conn->hdr.parent); + adv->lll.conn = NULL; + ll_rx_release(adv->node_rx_cc_free); + adv->node_rx_cc_free = NULL; + ll_rx_link_release(adv->link_cc_free); + adv->link_cc_free = NULL; +} +#endif /* CONFIG_BT_PERIPHERAL */ + +static inline u8_t disable(u16_t handle) +{ + volatile u32_t ret_cb = TICKER_STATUS_BUSY; + struct ll_adv_set *adv; + void *mark; + u32_t ret; + + adv = ull_adv_is_enabled_get(handle); + if (!adv) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + mark = ull_disable_mark(adv); + LL_ASSERT(mark == adv); + + ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD, + TICKER_ID_ADV_BASE + handle, + ull_ticker_status_give, (void *)&ret_cb); + + ret = ull_ticker_status_take(ret, &ret_cb); + if (ret) { + mark = ull_disable_mark(adv); + LL_ASSERT(mark == adv); + + return BT_HCI_ERR_CMD_DISALLOWED; + } + + ret = ull_disable(&adv->lll); + LL_ASSERT(!ret); + + mark = ull_disable_unmark(adv); + LL_ASSERT(mark == adv); + +#if defined(CONFIG_BT_PERIPHERAL) + if (adv->lll.conn) { + _conn_release(adv); + } +#endif /* CONFIG_BT_PERIPHERAL */ + + adv->is_enabled = 0; + +#if defined(CONFIG_BT_CTLR_PRIVACY) + if (!ull_scan_is_enabled_get(0)) { + ll_adv_scan_state_cb(0); + } +#endif /* CONFIG_BT_CTLR_PRIVACY */ + + return 0; +} diff --git a/subsys/bluetooth/controller/ll_sw/ull_adv_aux.c b/subsys/bluetooth/controller/ll_sw/ull_adv_aux.c new file mode 100644 index 00000000000..f663c3e9fc8 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_adv_aux.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2017-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include +#include + 
+#include "hal/ccm.h"
+
+#include "util/util.h"
+#include "util/memq.h"
+
+#include "pdu.h"
+#include "lll.h"
+#include "lll_adv.h"
+#include "lll_conn.h"
+#include "ull_internal.h"
+#include "ull_adv_types.h"
+#include "ull_adv_internal.h"
+
+u8_t ll_adv_aux_random_addr_set(u8_t handle, u8_t *addr)
+{
+ /* TODO: store in adv set instance */
+ return 0;
+}
+
+u8_t *ll_adv_aux_random_addr_get(u8_t handle, u8_t *addr)
+{
+ /* TODO: copy adv set instance addr into addr and/or return reference */
+ return NULL;
+}
+
+/* HCI LE Set Extended Advertising Data: incomplete stub — only validates
+ * the adv set and PDU type, then swaps in a fresh PDU buffer.
+ */
+u8_t ll_adv_aux_ad_data_set(u8_t handle, u8_t op, u8_t frag_pref, u8_t len,
+ u8_t *data)
+{
+ struct pdu_adv_com_ext_adv *p;
+ struct ll_adv_set *adv;
+ struct ext_adv_hdr *h;
+ struct pdu_adv *prev;
+ struct pdu_adv *pdu;
+ u8_t idx;
+
+ /* TODO: */
+
+ adv = ull_adv_set_get(handle);
+ if (!adv) {
+ return BT_HCI_ERR_CMD_DISALLOWED;
+ }
+
+ /* Don't update data if not extended advertising. */
+ prev = lll_adv_data_peek(&adv->lll);
+ if (prev->type != PDU_ADV_TYPE_EXT_IND) {
+ return 0;
+ }
+
+ pdu = lll_adv_data_alloc(&adv->lll, &idx);
+ p = (void *)&pdu->adv_ext_ind;
+ h = (void *)p->ext_hdr_adi_adv_data;
+
+ if (!h->aux_ptr) {
+ if (!len) {
+ return 0;
+ }
+ }
+
+ /* NOTE(review): `data`/`len` are never copied into the newly
+ * allocated PDU before it is enqueued, and `op`/`frag_pref` are
+ * unused — the freshly allocated double-buffer entry is published
+ * without payload. Looks intentional only as a placeholder (see
+ * TODO above); confirm before relying on this path.
+ */
+ lll_adv_data_enqueue(&adv->lll, idx);
+
+ return 0;
+}
+
+u8_t ll_adv_aux_sr_data_set(u8_t handle, u8_t op, u8_t frag_pref, u8_t len,
+ u8_t *data)
+{
+ /* TODO: */
+ return 0;
+}
+
+u16_t ll_adv_aux_max_data_length_get(void)
+{
+ /* TODO: return a Kconfig value */
+ return 0;
+}
+
+u8_t ll_adv_aux_set_count_get(void)
+{
+ /* TODO: return a Kconfig value */
+ return 0;
+}
+
+u8_t ll_adv_aux_set_remove(u8_t handle)
+{
+ /* TODO: reset/release primary channel and Aux channel PDUs */
+ return 0;
+}
+
+u8_t ll_adv_aux_set_clear(void)
+{
+ /* TODO: reset/release all adv set primary channel and Aux channel
+ * PDUs
+ */
+ return 0;
+}
diff --git a/subsys/bluetooth/controller/ll_sw/ull_adv_aux.h b/subsys/bluetooth/controller/ll_sw/ull_adv_aux.h
new file mode 100644
index 00000000000..360af6b3d7c
--- 
/dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_adv_aux.h @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2017-2018 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +u8_t ll_adv_aux_random_addr_set(u8_t handle, u8_t *addr); +u8_t *ll_adv_aux_random_addr_get(u8_t handle, u8_t *addr); +u8_t ll_adv_aux_ad_data_set(u8_t handle, u8_t op, u8_t frag_pref, u8_t len, + u8_t *data); +u8_t ll_adv_aux_sr_data_set(u8_t handle, u8_t op, u8_t frag_pref, u8_t len, + u8_t *data); +u16_t ll_adv_aux_max_data_length_get(void); +u8_t ll_adv_aux_set_count_get(void); +u8_t ll_adv_aux_set_remove(u8_t handle); +u8_t ll_adv_aux_set_clear(void); diff --git a/subsys/bluetooth/controller/ll_sw/ull_adv_internal.h b/subsys/bluetooth/controller/ll_sw/ull_adv_internal.h new file mode 100644 index 00000000000..bf1e006ea67 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_adv_internal.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2017-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +int ull_adv_init(void); +int ull_adv_reset(void); + +/* Return ll_adv_set context (unconditional) */ +struct ll_adv_set *ull_adv_set_get(u16_t handle); + +/* Return the adv set handle given the adv set instance */ +u16_t ull_adv_handle_get(struct ll_adv_set *adv); + +/* Return ll_adv_set context if enabled */ +struct ll_adv_set *ull_adv_is_enabled_get(u16_t handle); + +/* Return flags, for now just: enabled */ +u32_t ull_adv_is_enabled(u16_t handle); + +/* Return filter policy used */ +u32_t ull_adv_filter_pol_get(u16_t handle); diff --git a/subsys/bluetooth/controller/ll_sw/ull_adv_types.h b/subsys/bluetooth/controller/ll_sw/ull_adv_types.h new file mode 100644 index 00000000000..d247d763841 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_adv_types.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2017-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +struct ll_adv_set { + struct evt_hdr evt; + struct ull_hdr ull; + struct 
lll_adv lll; + + u8_t is_enabled:1; + +#if defined(CONFIG_BT_PERIPHERAL) + memq_link_t *link_cc_free; + struct node_rx_pdu *node_rx_cc_free; +#endif /* CONFIG_BT_PERIPHERAL */ + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + u32_t interval; +#else /* !CONFIG_BT_CTLR_ADV_EXT */ + u16_t interval; +#endif /* !CONFIG_BT_CTLR_ADV_EXT */ + +#if defined(CONFIG_BT_CTLR_PRIVACY) + u8_t own_addr_type:2; + u8_t id_addr_type:1; + u8_t rl_idx; + u8_t id_addr[BDADDR_SIZE]; +#endif /* CONFIG_BT_CTLR_PRIVACY */ +}; diff --git a/subsys/bluetooth/controller/ll_sw/ull_conn.c b/subsys/bluetooth/controller/ll_sw/ull_conn.c new file mode 100644 index 00000000000..4dec8e67db5 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_conn.c @@ -0,0 +1,4764 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include + +#include "hal/ecb.h" +#include "hal/ccm.h" +#include "hal/ticker.h" + +#include "util/util.h" +#include "util/mem.h" +#include "util/memq.h" +#include "util/mfifo.h" +#include "util/mayfly.h" + +#include "ticker/ticker.h" + +#include "pdu.h" +#include "lll.h" +#include "lll_tim_internal.h" +#include "lll_conn.h" +#include "ull_conn_types.h" +#include "ull_internal.h" +#include "ull_sched_internal.h" +#include "ull_conn_internal.h" +#include "ull_slave_internal.h" +#include "ull_master_internal.h" + +#include "ll.h" +#include "ll_feat.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_ull_conn +#include "common/log.h" +#include +#include "hal/debug.h" + +static int _init_reset(void); +static void ticker_op_update_cb(u32_t status, void *param); +static inline void disable(u16_t handle); +static void conn_cleanup(struct ll_conn *conn); +static void ctrl_tx_enqueue(struct ll_conn *conn, struct node_tx *tx); +static inline void event_fex_prep(struct ll_conn *conn); +static inline void event_vex_prep(struct ll_conn *conn); +static inline int event_conn_upd_prep(struct ll_conn *conn, + u16_t 
event_counter, + u32_t ticks_at_expire); +static inline void event_ch_map_prep(struct ll_conn *conn, + u16_t event_counter); +static void terminate_ind_rx_enqueue(struct ll_conn *conn, u8_t reason); + +#if defined(CONFIG_BT_CTLR_LE_ENC) +static inline void event_enc_prep(struct ll_conn *conn); +static int enc_rsp_send(struct ll_conn *conn); +static int start_enc_rsp_send(struct ll_conn *conn, + struct pdu_data *pdu_ctrl_tx); +static inline bool ctrl_is_unexpected(struct ll_conn *conn, u8_t opcode); +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) +static inline void event_conn_param_prep(struct ll_conn *conn, + u16_t event_counter, + u32_t ticks_at_expire); +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + +#if defined(CONFIG_BT_CTLR_LE_PING) +static inline void event_ping_prep(struct ll_conn *conn); +#endif /* CONFIG_BT_CTLR_LE_PING */ + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) +static inline void event_len_prep(struct ll_conn *conn); +#endif /* CONFIG_BT_CTLR_DATA_LENGTH */ + +#if defined(CONFIG_BT_CTLR_PHY) +static inline void event_phy_req_prep(struct ll_conn *conn); +static inline void event_phy_upd_ind_prep(struct ll_conn *conn, + u16_t event_counter); +#endif /* CONFIG_BT_CTLR_PHY */ + +static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx, + struct pdu_data *pdu_tx); +static inline u8_t ctrl_rx(memq_link_t *link, struct node_rx_pdu **rx, + struct pdu_data *pdu_rx, struct ll_conn *conn); +static void ticker_op_cb(u32_t status, void *params); + +#define CONN_TX_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \ + offsetof(struct pdu_data, lldata) + \ + CONFIG_BT_CTLR_TX_BUFFER_SIZE) + +#define CONN_TX_CTRL_BUFFERS 2 +#define CONN_TX_CTRL_BUF_SIZE (MROUND(offsetof(struct node_tx, pdu) + \ + offsetof(struct pdu_data, llctrl) + \ + sizeof(struct pdu_data_llctrl)) * \ + CONN_TX_CTRL_BUFFERS) + +static MFIFO_DEFINE(conn_tx, sizeof(struct lll_tx), + CONFIG_BT_CTLR_TX_BUFFERS); + +static struct { + void *free; + u8_t 
pool[CONN_TX_BUF_SIZE * CONFIG_BT_CTLR_TX_BUFFERS]; +} mem_conn_tx; + +static struct { + void *free; + u8_t pool[CONN_TX_CTRL_BUF_SIZE * CONN_TX_CTRL_BUFFERS]; +} mem_conn_tx_ctrl; + +static struct { + void *free; + u8_t pool[sizeof(memq_link_t) * + (CONFIG_BT_CTLR_TX_BUFFERS + CONN_TX_CTRL_BUFFERS)]; +} mem_link_tx; + +static u8_t data_chan_map[5] = {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}; +static u8_t data_chan_count = 37U; + +#if defined(CONFIG_BT_CTLR_PHY) +static u8_t default_phy_tx; +static u8_t default_phy_rx; +#endif /* CONFIG_BT_CTLR_PHY */ + +static struct ll_conn conn_pool[CONFIG_BT_MAX_CONN]; +static struct ll_conn *conn_upd_curr; +static void *conn_free; + +static struct device *entropy; + +struct ll_conn *ll_conn_acquire(void) +{ + return mem_acquire(&conn_free); +} + +void ll_conn_release(struct ll_conn *conn) +{ + mem_release(conn, &conn_free); +} + +u16_t ll_conn_handle_get(struct ll_conn *conn) +{ + return mem_index_get(conn, conn_pool, sizeof(struct ll_conn)); +} + +struct ll_conn *ll_conn_get(u16_t handle) +{ + return mem_get(conn_pool, sizeof(struct ll_conn), handle); +} + +struct ll_conn *ll_connected_get(u16_t handle) +{ + struct ll_conn *conn; + + if (handle >= CONFIG_BT_MAX_CONN) { + return NULL; + } + + conn = ll_conn_get(handle); + if (conn->lll.handle != handle) { + return NULL; + } + + return conn; +} + +void *ll_tx_mem_acquire(void) +{ + return mem_acquire(&mem_conn_tx.free); +} + +void ll_tx_mem_release(void *tx) +{ + mem_release(tx, &mem_conn_tx.free); +} + +int ll_tx_mem_enqueue(u16_t handle, void *tx) +{ + struct lll_tx *lll_tx; + struct ll_conn *conn; + u8_t idx; + + conn = ll_connected_get(handle); + if (!conn) { + return -EINVAL; + } + + idx = MFIFO_ENQUEUE_GET(conn_tx, (void **) &lll_tx); + if (!lll_tx) { + return -ENOBUFS; + } + + lll_tx->handle = handle; + lll_tx->node = tx; + + MFIFO_ENQUEUE(conn_tx, idx); + + return 0; +} + +u8_t ll_conn_update(u16_t handle, u8_t cmd, u8_t status, u16_t interval_min, + u16_t interval_max, u16_t 
latency, u16_t timeout) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + if (!cmd) { +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + if (!conn->llcp_conn_param.disabled && + (!conn->common.fex_valid || + (conn->llcp_features & + BIT(BT_LE_FEAT_BIT_CONN_PARAM_REQ)))) { + cmd++; + } else if (conn->lll.role) { + return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE; + } +#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */ + if (conn->lll.role) { + return BT_HCI_ERR_CMD_DISALLOWED; + } +#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */ + } + + if (!cmd) { + if (conn->llcp_req != conn->llcp_ack) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn->llcp.conn_upd.win_size = 1; + conn->llcp.conn_upd.win_offset_us = 0; + conn->llcp.conn_upd.interval = interval_max; + conn->llcp.conn_upd.latency = latency; + conn->llcp.conn_upd.timeout = timeout; + /* conn->llcp.conn_upd.instant = 0; */ + conn->llcp.conn_upd.state = LLCP_CUI_STATE_USE; + conn->llcp.conn_upd.is_internal = 0; + + conn->llcp_type = LLCP_CONN_UPD; + conn->llcp_req++; + } else { +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + cmd--; + + if (cmd) { + if ((conn->llcp_conn_param.req == + conn->llcp_conn_param.ack) || + (conn->llcp_conn_param.state != + LLCP_CPR_STATE_APP_WAIT)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn->llcp_conn_param.status = status; + conn->llcp_conn_param.state = cmd; + conn->llcp_conn_param.cmd = 1; + } else { + if (conn->llcp_conn_param.req != + conn->llcp_conn_param.ack) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn->llcp_conn_param.status = 0; + conn->llcp_conn_param.interval_min = interval_min; + conn->llcp_conn_param.interval_max = interval_max; + conn->llcp_conn_param.latency = latency; + conn->llcp_conn_param.timeout = timeout; + conn->llcp_conn_param.state = cmd; + conn->llcp_conn_param.cmd = 1; + conn->llcp_conn_param.req++; + } + +#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */ + /* CPR feature not supported */ + return 
BT_HCI_ERR_CMD_DISALLOWED; +#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */ + } + + return 0; +} + +u8_t ll_chm_get(u16_t handle, u8_t *chm) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + /* Iterate until we are sure the ISR did not modify the value while + * we were reading it from memory. + */ + do { + conn->chm_updated = 0; + memcpy(chm, conn->lll.data_chan_map, + sizeof(conn->lll.data_chan_map)); + } while (conn->chm_updated); + + return 0; +} + +u8_t ll_terminate_ind_send(u16_t handle, u8_t reason) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn->llcp_terminate.reason_own = reason; + + conn->llcp_terminate.req++; + + return 0; +} + +u8_t ll_feature_req_send(u16_t handle) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn || (conn->llcp_req != conn->llcp_ack)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn->llcp_type = LLCP_FEATURE_EXCHANGE; + conn->llcp_req++; + + return 0; +} + +u8_t ll_version_ind_send(u16_t handle) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn || (conn->llcp_req != conn->llcp_ack)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn->llcp_type = LLCP_VERSION_EXCHANGE; + conn->llcp_req++; + + return 0; +} + +#if defined(CONFIG_BT_CTLR_PHY) +u8_t ll_phy_get(u16_t handle, u8_t *tx, u8_t *rx) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn) { + return BT_HCI_ERR_UNKNOWN_CONN_ID; + } + + /* TODO: context safe read */ + *tx = conn->lll.phy_tx; + *rx = conn->lll.phy_rx; + + return 0; +} + +u8_t ll_phy_default_set(u8_t tx, u8_t rx) +{ + /* TODO: validate against supported phy */ + + default_phy_tx = tx; + default_phy_rx = rx; + + return 0; +} + +u8_t ll_phy_req_send(u16_t handle, u8_t tx, u8_t flags, u8_t rx) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn) { + return 
BT_HCI_ERR_UNKNOWN_CONN_ID; + } + + if ((conn->llcp_req != conn->llcp_ack) || + (conn->llcp_phy.req != conn->llcp_phy.ack)) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn->llcp_phy.state = LLCP_PHY_STATE_REQ; + conn->llcp_phy.cmd = 1; + conn->llcp_phy.tx = tx; + conn->llcp_phy.flags = flags; + conn->llcp_phy.rx = rx; + conn->llcp_phy.req++; + + return 0; +} +#endif /* CONFIG_BT_CTLR_PHY */ + +#if defined(CONFIG_BT_CTLR_CONN_RSSI) +u8_t ll_rssi_get(u16_t handle, u8_t *rssi) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + *rssi = conn->lll.rssi_latest; + + return 0; +} +#endif /* CONFIG_BT_CTLR_CONN_RSSI */ + +#if defined(CONFIG_BT_CTLR_LE_PING) +u8_t ll_apto_get(u16_t handle, u16_t *apto) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn) { + return BT_HCI_ERR_UNKNOWN_CONN_ID; + } + + *apto = conn->apto_reload * conn->lll.interval * 125 / 1000; + + return 0; +} + +u8_t ll_apto_set(u16_t handle, u16_t apto) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn) { + return BT_HCI_ERR_UNKNOWN_CONN_ID; + } + + conn->apto_reload = RADIO_CONN_EVENTS(apto * 10 * 1000, + conn->lll.interval * 1250); + + return 0; +} +#endif /* CONFIG_BT_CTLR_LE_PING */ + +int ull_conn_init(void) +{ + int err; + + entropy = device_get_binding(CONFIG_ENTROPY_NAME); + if (!entropy) { + return -ENODEV; + } + + err = _init_reset(); + if (err) { + return err; + } + + return 0; +} + +int ull_conn_reset(void) +{ + u16_t handle; + int err; + + for (handle = 0; handle < CONFIG_BT_MAX_CONN; handle++) { + disable(handle); + } + + /* initialise connection channel map */ + data_chan_map[0] = 0xFF; + data_chan_map[1] = 0xFF; + data_chan_map[2] = 0xFF; + data_chan_map[3] = 0xFF; + data_chan_map[4] = 0x1F; + data_chan_count = 37U; + + /* Re-initialize the Tx mfifo */ + MFIFO_INIT(conn_tx); + + /* Reset the current conn update conn context pointer */ + conn_upd_curr = NULL; + + err = 
_init_reset(); + if (err) { + return err; + } + + return 0; +} + +u8_t ull_conn_chan_map_cpy(u8_t *chan_map) +{ + memcpy(chan_map, data_chan_map, sizeof(data_chan_map)); + + return data_chan_count; +} + +void ull_conn_chan_map_set(u8_t *chan_map) +{ + memcpy(data_chan_map, chan_map, sizeof(data_chan_map)); + data_chan_count = util_ones_count_get(data_chan_map, + sizeof(data_chan_map)); +} + +#if defined(CONFIG_BT_CTLR_PHY) +u8_t ull_conn_default_phy_tx_get(void) +{ + return default_phy_tx; +} + +u8_t ull_conn_default_phy_rx_get(void) +{ + return default_phy_rx; +} +#endif /* CONFIG_BT_CTLR_PHY */ + +void ull_conn_setup(memq_link_t *link, struct node_rx_hdr *rx) +{ + struct node_rx_ftr *ftr; + struct lll_conn *lll; + + ftr = (void *)((u8_t *)((struct node_rx_pdu *)rx)->pdu + + (offsetof(struct pdu_adv, connect_ind) + + sizeof(struct pdu_adv_connect_ind))); + + lll = *((struct lll_conn **)((u8_t *)ftr->param + + sizeof(struct lll_hdr))); + switch (lll->role) { +#if defined(CONFIG_BT_CENTRAL) + case 0: + ull_master_setup(link, rx, ftr, lll); + break; +#endif /* CONFIG_BT_CENTRAL */ + +#if defined(CONFIG_BT_PERIPHERAL) + case 1: + ull_slave_setup(link, rx, ftr, lll); + break; +#endif /* CONFIG_BT_PERIPHERAL */ + + default: + LL_ASSERT(0); + break; + } +} + +int ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx) +{ + struct pdu_data *pdu_rx; + struct ll_conn *conn; + + conn = ll_conn_get((*rx)->hdr.handle); + LL_ASSERT(conn); + + pdu_rx = (void *)(*rx)->pdu; + + switch (pdu_rx->ll_id) { + case PDU_DATA_LLID_CTRL: + { + int nack; + + nack = ctrl_rx(link, rx, pdu_rx, conn); + return nack; + } + + case PDU_DATA_LLID_DATA_CONTINUE: + case PDU_DATA_LLID_DATA_START: +#if defined(CONFIG_BT_CTLR_LE_ENC) + if (conn->pause_rx) { + conn->llcp_terminate.reason_peer = + BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL; + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + } +#endif /* CONFIG_BT_CTLR_LE_ENC */ + break; + + case PDU_DATA_LLID_RESV: + default: 
+#if defined(CONFIG_BT_CTLR_LE_ENC) + if (conn->pause_rx) { + conn->llcp_terminate.reason_peer = + BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL; + } +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + /* Invalid LL id, drop it. */ + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + break; + } + + + return 0; +} + +int ull_conn_llcp(struct ll_conn *conn, u32_t ticks_at_expire, u16_t lazy) +{ + LL_ASSERT(conn->lll.handle != 0xFFFF); + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) || defined(CONFIG_BT_CTLR_PHY) + /* Check if no other procedure with instant is requested and not in + * Encryption setup. + */ + if ((conn->llcp_ack == conn->llcp_req) && +#if defined(CONFIG_BT_CTLR_LE_ENC) + !conn->pause_rx) { +#else /* !CONFIG_BT_CTLR_LE_ENC */ + 1) { +#endif /* !CONFIG_BT_CTLR_LE_ENC */ + if (0) { +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + /* check if CPR procedure is requested */ + } else if (conn->llcp_conn_param.ack != + conn->llcp_conn_param.req) { + struct lll_conn *lll = &conn->lll; + u16_t event_counter; + + /* Calculate current event counter */ + event_counter = lll->event_counter + + lll->latency_prepare + lazy; + + /* handle CPR state machine */ + event_conn_param_prep(conn, event_counter, + ticks_at_expire); +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + +#if defined(CONFIG_BT_CTLR_PHY) + /* check if PHY Req procedure is requested */ + } else if (conn->llcp_phy.ack != conn->llcp_phy.req) { + /* handle PHY Upd state machine */ + event_phy_req_prep(conn); +#endif /* CONFIG_BT_CTLR_PHY */ + } + } +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ || CONFIG_BT_CTLR_PHY */ + + /* check if procedure is requested */ + if (conn->llcp_ack != conn->llcp_req) { + switch (conn->llcp_type) { + case LLCP_CONN_UPD: + { + struct lll_conn *lll = &conn->lll; + u16_t event_counter; + + /* Calculate current event counter */ + event_counter = lll->event_counter + + lll->latency_prepare + lazy; + + if (event_conn_upd_prep(conn, event_counter, + ticks_at_expire) == 0) { + return 
-ECANCELED; + } + } + break; + + case LLCP_CHAN_MAP: + { + struct lll_conn *lll = &conn->lll; + u16_t event_counter; + + /* Calculate current event counter */ + event_counter = lll->event_counter + + lll->latency_prepare + lazy; + + event_ch_map_prep(conn, event_counter); + } + break; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + case LLCP_ENCRYPTION: + event_enc_prep(conn); + break; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + case LLCP_FEATURE_EXCHANGE: + event_fex_prep(conn); + break; + + case LLCP_VERSION_EXCHANGE: + event_vex_prep(conn); + break; + +#if defined(CONFIG_BT_CTLR_LE_PING) + case LLCP_PING: + event_ping_prep(conn); + break; +#endif /* CONFIG_BT_CTLR_LE_PING */ + +#if defined(CONFIG_BT_CTLR_PHY) + case LLCP_PHY_UPD: + { + struct lll_conn *lll = &conn->lll; + u16_t event_counter; + + /* Calculate current event counter */ + event_counter = lll->event_counter + + lll->latency_prepare + lazy; + + event_phy_upd_ind_prep(conn, event_counter); + } + break; +#endif /* CONFIG_BT_CTLR_PHY */ + + default: + LL_ASSERT(0); + break; + } + } + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) + /* check if procedure is requested */ + if (conn->llcp_length.ack != conn->llcp_length.req) { + /* handle DLU state machine */ + event_len_prep(conn); + } +#endif /* CONFIG_BT_CTLR_DATA_LENGTH */ + + /* Terminate Procedure Request */ + if (conn->llcp_terminate.ack != conn->llcp_terminate.req) { + struct node_tx *tx; + + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (tx) { + struct pdu_data *pdu_tx = (void *)tx->pdu; + + /* Terminate Procedure acked */ + conn->llcp_terminate.ack = conn->llcp_terminate.req; + + /* place the terminate ind packet in tx queue */ + pdu_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_tx->len = offsetof(struct pdu_data_llctrl, + terminate_ind) + + sizeof(struct pdu_data_llctrl_terminate_ind); + pdu_tx->llctrl.opcode = + PDU_DATA_LLCTRL_TYPE_TERMINATE_IND; + pdu_tx->llctrl.terminate_ind.error_code = + conn->llcp_terminate.reason_own; + + ctrl_tx_enqueue(conn, tx); + } + + if 
(!conn->procedure_expire) { + /* Terminate Procedure timeout is started, will + * replace any other timeout running + */ + conn->procedure_expire = conn->supervision_reload; + + /* NOTE: if supervision timeout equals connection + * interval, dont timeout in current event. + */ + if (conn->procedure_expire <= 1) { + conn->procedure_expire++; + } + } + } + + return 0; +} + +void ull_conn_done(struct node_rx_event_done *done) +{ + struct lll_conn *lll = (void *)HDR_ULL2LLL(done->param); + struct ll_conn *conn = (void *)HDR_LLL2EVT(lll); + u32_t ticks_drift_minus; + u32_t ticks_drift_plus; + u16_t latency_event; + u16_t elapsed_event; + u8_t reason_peer; + u16_t lazy; + u8_t force; + + /* Skip if connection terminated by local host */ + if (lll->handle == 0xFFFF) { + return; + } + +#if defined(CONFIG_BT_CTLR_LE_ENC) + /* Check authenticated payload expiry or MIC failure */ + switch (done->extra.mic_state) { + case LLL_CONN_MIC_NONE: +#if defined(CONFIG_BT_CTLR_LE_PING) + if (lll->enc_rx || conn->pause_rx) { + u16_t appto_reload_new; + + /* check for change in apto */ + appto_reload_new = (conn->apto_reload > + (lll->latency + 6)) ? 
+ (conn->apto_reload - + (lll->latency + 6)) : + conn->apto_reload; + if (conn->appto_reload != appto_reload_new) { + conn->appto_reload = appto_reload_new; + conn->apto_expire = 0; + } + + /* start authenticated payload (pre) timeout */ + if (conn->apto_expire == 0) { + conn->appto_expire = conn->appto_reload; + conn->apto_expire = conn->apto_reload; + } + } +#endif /* CONFIG_BT_CTLR_LE_PING */ + break; + + case LLL_CONN_MIC_PASS: +#if defined(CONFIG_BT_CTLR_LE_PING) + conn->appto_expire = conn->apto_expire = 0; +#endif /* CONFIG_BT_CTLR_LE_PING */ + break; + + case LLL_CONN_MIC_FAIL: + conn->llcp_terminate.reason_peer = + BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL; + break; + } +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + /* Master transmitted ack for the received terminate ind or + * Slave received terminate ind or MIC failure + */ + reason_peer = conn->llcp_terminate.reason_peer; + if (reason_peer && (lll->role || lll->master.terminate_ack)) { + terminate_ind_rx_enqueue(conn, reason_peer); + conn_cleanup(conn); + + return; + } + + /* Slave drift compensation calc or master terminate acked */ + ticks_drift_plus = 0; + ticks_drift_minus = 0; + if (done->extra.trx_cnt) { + if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && lll->role) { + ull_slave_done(done, &ticks_drift_plus, + &ticks_drift_minus); + } else if (reason_peer) { + lll->master.terminate_ack = 1; + } + + /* Reset connection failed to establish countdown */ + conn->connect_expire = 0; + } + + /* Events elapsed used in timeout checks below */ + latency_event = lll->latency_event; + elapsed_event = latency_event + 1; + + /* Reset supervision countdown */ + if (done->extra.crc_valid) { + conn->supervision_expire = 0; + } + + /* check connection failed to establish */ + else if (conn->connect_expire) { + if (conn->connect_expire > elapsed_event) { + conn->connect_expire -= elapsed_event; + } else { + terminate_ind_rx_enqueue(conn, 0x3e); + + conn_cleanup(conn); + + return; + } + } + + /* if anchor point not sync-ed, start 
supervision timeout, and break + * latency if any. + */ + else { + /* Start supervision timeout, if not started already */ + if (!conn->supervision_expire) { + conn->supervision_expire = conn->supervision_reload; + } + } + + /* check supervision timeout */ + force = 0; + if (conn->supervision_expire) { + if (conn->supervision_expire > elapsed_event) { + conn->supervision_expire -= elapsed_event; + + /* break latency */ + lll->latency_event = 0; + + /* Force both master and slave when close to + * supervision timeout. + */ + if (conn->supervision_expire <= 6) { + force = 1; + } + /* use randomness to force slave role when anchor + * points are being missed. + */ + else if (lll->role) { + if (latency_event) { + force = 1; + } else { + /* FIXME:*/ + #if 0 + force = lll->slave.force & 0x01; + + /* rotate force bits */ + lll->slave.force >>= 1; + if (force) { + lll->slave.force |= BIT(31); + } + #endif + } + } + } else { + terminate_ind_rx_enqueue(conn, 0x08); + + conn_cleanup(conn); + + return; + } + } + + /* check procedure timeout */ + if (conn->procedure_expire != 0) { + if (conn->procedure_expire > elapsed_event) { + conn->procedure_expire -= elapsed_event; + } else { + terminate_ind_rx_enqueue(conn, 0x22); + + conn_cleanup(conn); + + return; + } + } + +#if defined(CONFIG_BT_CTLR_LE_PING) + /* check apto */ + if (conn->apto_expire != 0) { + if (conn->apto_expire > elapsed_event) { + conn->apto_expire -= elapsed_event; + } else { + struct node_rx_hdr *rx; + + rx = ll_pdu_rx_alloc(); + if (rx) { + conn->apto_expire = 0; + + rx->handle = lll->handle; + rx->type = NODE_RX_TYPE_APTO; + + /* enqueue apto event into rx queue */ + ll_rx_put(rx->link, rx); + ll_rx_sched(); + } else { + conn->apto_expire = 1; + } + } + } + + /* check appto */ + if (conn->appto_expire != 0) { + if (conn->appto_expire > elapsed_event) { + conn->appto_expire -= elapsed_event; + } else { + conn->appto_expire = 0; + + if ((conn->procedure_expire == 0) && + (conn->llcp_req == conn->llcp_ack)) { + 
conn->llcp_type = LLCP_PING; + conn->llcp_ack--; + } + } + } +#endif /* CONFIG_BT_CTLR_LE_PING */ + +#if defined(CONFIG_BT_CTLR_CONN_RSSI) + /* generate RSSI event */ + if (lll->rssi_sample_count == 0) { + struct node_rx_pdu *rx; + struct pdu_data *pdu_data_rx; + + rx = ll_pdu_rx_alloc(); + if (rx) { + lll->rssi_reported = lll->rssi_latest; + lll->rssi_sample_count = LLL_CONN_RSSI_SAMPLE_COUNT; + + /* Prepare the rx packet structure */ + rx->hdr.handle = lll->handle; + rx->hdr.type = NODE_RX_TYPE_RSSI; + + /* prepare connection RSSI structure */ + pdu_data_rx = (void *)rx->pdu; + pdu_data_rx->rssi = lll->rssi_reported; + + /* enqueue connection RSSI structure into queue */ + ll_rx_put(rx->hdr.link, rx); + ll_rx_sched(); + } + } +#endif /* CONFIG_BT_CTLR_CONN_RSSI */ + + /* break latency based on ctrl procedure pending */ + if ((conn->llcp_ack != conn->llcp_req) && + ((conn->llcp_type == LLCP_CONN_UPD) || + (conn->llcp_type == LLCP_CHAN_MAP))) { + lll->latency_event = 0; + } + + /* check if latency needs update */ + lazy = 0; + if ((force) || (latency_event != lll->latency_event)) { + lazy = lll->latency_event + 1; + } + + /* update conn ticker */ + if ((ticks_drift_plus != 0) || (ticks_drift_minus != 0) || + (lazy != 0) || (force != 0)) { + u8_t ticker_id = TICKER_ID_CONN_BASE + lll->handle; + struct ll_conn *conn = lll->hdr.parent; + u32_t ticker_status; + + /* Call to ticker_update can fail under the race + * condition where in the Slave role is being stopped but + * at the same time it is preempted by Slave event that + * gets into close state. Accept failure when Slave role + * is being stopped. 
+ */ + ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_ULL_HIGH, + ticker_id, + ticks_drift_plus, + ticks_drift_minus, 0, 0, + lazy, force, + ticker_op_update_cb, + conn); + LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) || + (ticker_status == TICKER_STATUS_BUSY) || + ((void *)conn == ull_disable_mark_get())); + } +} + +void ull_conn_tx_demux(u8_t count) +{ + do { + struct ll_conn *conn; + struct lll_tx *lll_tx; + + lll_tx = MFIFO_DEQUEUE_GET(conn_tx); + if (!lll_tx) { + break; + } + + conn = ll_conn_get(lll_tx->handle); + if (conn->lll.handle == lll_tx->handle) { + struct node_tx *tx = lll_tx->node; + + tx->next = NULL; + if (!conn->tx_data) { + conn->tx_data = tx; + if (!conn->tx_head) { + conn->tx_head = tx; + conn->tx_data_last = NULL; + } + } + + if (conn->tx_data_last) { + conn->tx_data_last->next = tx; + } + + conn->tx_data_last = tx; + } else { + struct node_tx *tx = lll_tx->node; + struct pdu_data *p = (void *)tx->pdu; + + p->ll_id = PDU_DATA_LLID_RESV; + ull_tx_ack_put(0xFFFF, tx); + } + + MFIFO_DEQUEUE(conn_tx); + } while (--count); +} + +void ull_conn_tx_lll_enqueue(struct ll_conn *conn, u8_t count) +{ + struct node_tx *tx; + + tx = conn->tx_head; +#if defined(CONFIG_BT_CTLR_LE_ENC) + while (tx && ((tx == conn->tx_ctrl) || !conn->pause_tx) && count--) { +#else /* !CONFIG_BT_CTLR_LE_ENC */ + while (tx && (tx == conn->tx_ctrl) && count--) { +#endif /* !CONFIG_BT_CTLR_LE_ENC */ + struct node_tx *tx_lll; + memq_link_t *link; + + tx_lll = tx; + + if (tx == conn->tx_ctrl) { + tx = conn->tx_head = conn->tx_head->next; + if (conn->tx_ctrl == conn->tx_ctrl_last) { + conn->tx_ctrl = NULL; + conn->tx_ctrl_last = NULL; + } else { + conn->tx_ctrl = tx; + } + + /* point to self to indicate a control PDU mem alloc */ + tx_lll->next = tx_lll; + } else { + if (tx == conn->tx_data) { + conn->tx_data = conn->tx_data->next; + } + tx = conn->tx_head = conn->tx_head->next; + } + + link = mem_acquire(&mem_link_tx.free); + LL_ASSERT(link); + + 
memq_enqueue(link, tx_lll, &conn->lll.memq_tx.tail);
	}
}

/* Return a tx link element to the shared tx-link free pool. */
void ull_conn_link_tx_release(void *link)
{
	mem_release(link, &mem_link_tx.free);
}

/* Process a transmitted-and-acknowledged tx node.
 *
 * Control PDUs are first dispatched to ctrl_tx_ack(); a link whose
 * next pointer refers back to the tx node marks a control-pool
 * allocation (the "points to self" convention used when the ctrl PDU
 * was enqueued towards LLL) and is released back to the ctrl pool
 * here instead of being reported upwards.  All other nodes are
 * handed to the Thread context via the tx-ack FIFO.
 */
void ull_conn_tx_ack(struct ll_conn *conn, memq_link_t *link,
		     struct node_tx *tx)
{
	struct pdu_data *pdu_tx;

	pdu_tx = (void *)tx->pdu;
	LL_ASSERT(pdu_tx->len);

	if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
		ctrl_tx_ack(conn, &tx, pdu_tx);

		/* release mem if points to itself */
		if (link->next == (void *)tx) {
			mem_release(tx, &mem_conn_tx_ctrl.free);
			return;
		} else if (!tx) {
			/* ctrl_tx_ack consumed the node; nothing to
			 * report towards Thread.
			 */
			return;
		}
	}

	ull_tx_ack_put(conn->lll.handle, tx);
}

/* One-time (init/reset) setup: carve out the connection context pool,
 * the data/ctrl tx PDU pools and the tx link pool, and set the
 * compile-time dependent DLE and PHY defaults.
 *
 * Returns 0 (kept int for the common init/reset callback shape).
 */
static int _init_reset(void)
{
	/* Initialize conn pool. */
	mem_init(conn_pool, sizeof(struct ll_conn),
		 sizeof(conn_pool) / sizeof(struct ll_conn), &conn_free);

	/* Initialize tx pool. */
	mem_init(mem_conn_tx.pool, CONN_TX_BUF_SIZE, CONFIG_BT_CTLR_TX_BUFFERS,
		 &mem_conn_tx.free);

	/* Initialize tx ctrl pool. */
	mem_init(mem_conn_tx_ctrl.pool, CONN_TX_CTRL_BUF_SIZE,
		 CONN_TX_CTRL_BUFFERS, &mem_conn_tx_ctrl.free);

	/* Initialize tx link pool: one link per data tx buffer plus one
	 * per ctrl tx buffer.
	 */
	mem_init(mem_link_tx.pool, sizeof(memq_link_t),
		 CONFIG_BT_CTLR_TX_BUFFERS + CONN_TX_CTRL_BUFFERS,
		 &mem_link_tx.free);

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	/* Initialize the DLE defaults */
	/* NOTE(review): writes the legacy `_radio` context — looks like a
	 * leftover from the pre-split controller; confirm it is still the
	 * intended home for the DLE defaults in the ULL/LLL split.
	 */
	_radio.default_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
	_radio.default_tx_time = RADIO_PKT_TIME(PDU_DC_PAYLOAD_SIZE_MIN, 0);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
	/* Initialize the PHY defaults: 1M always, plus 2M/Coded when
	 * enabled in Kconfig.
	 */
	default_phy_tx = BIT(0);
	default_phy_rx = BIT(0);

#if defined(CONFIG_BT_CTLR_PHY_2M)
	default_phy_tx |= BIT(1);
	default_phy_rx |= BIT(1);
#endif /* CONFIG_BT_CTLR_PHY_2M */

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	default_phy_tx |= BIT(2);
	default_phy_rx |= BIT(2);
#endif /* CONFIG_BT_CTLR_PHY_CODED */
#endif /* CONFIG_BT_CTLR_PHY */

	return 0;
}

/* Ticker update completion callback: update may legitimately fail when
 * the connection is concurrently being disabled (param equals the
 * disable-marked context); assert otherwise.
 */
static void ticker_op_update_cb(u32_t status, void *param)
{
	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_disable_mark_get());
}

/* Ticker stop completion callback: once the role ticker has stopped,
 * flush the LLL tx queue from LLL context via a mayfly.
 */
static void ticker_op_stop_cb(u32_t status, void *param)
{
	static memq_link_t _link;
	static struct mayfly _mfy = {0, 0, &_link, NULL, lll_conn_tx_flush};

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	_mfy.param = param;

	/* Flush pending tx PDUs in LLL (using a mayfly) */
	mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_LLL, 1, &_mfy);
}

/* Synchronously stop a connection from Thread context: mark the
 * context as being disabled, stop its ticker, wait for the stop to
 * complete and then disable the LLL events for the connection.
 */
static inline void disable(u16_t handle)
{
	volatile u32_t ret_cb = TICKER_STATUS_BUSY;
	struct ll_conn *conn;
	void *mark;
	u32_t ret;

	conn = ll_conn_get(handle);

	/* Publish the disable intent so racing ticker callbacks accept
	 * failure (see ticker_op_update_cb).
	 */
	mark = ull_disable_mark(conn);
	LL_ASSERT(mark == conn);

	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			  TICKER_ID_CONN_BASE + handle,
			  ull_ticker_status_give, (void *)&ret_cb);

	ret = ull_ticker_status_take(ret, &ret_cb);
	if (!ret) {
		ret = ull_disable(&conn->lll);
		LL_ASSERT(!ret);
	}

	conn->lll.link_tx_free = NULL;

	mark = ull_disable_unmark(conn);
	LL_ASSERT(mark == conn);
}

static void conn_cleanup(struct ll_conn *conn)
{
	struct
lll_conn *lll = &conn->lll;
	struct node_rx_pdu *rx;
	u32_t ticker_status;

	/* release any llcp reserved rx node */
	rx = conn->llcp_rx;
	if (rx) {
		/* Mark for buffer for release */
		rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE;

		/* enqueue rx node towards Thread */
		ll_rx_put(rx->hdr.link, rx);
	}

	/* Enable Ticker Job, we are in a radio event which disabled it if
	 * worker0 and job0 priority were same.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);

	/* Stop Master or Slave role ticker; LLL tx flush is chained from
	 * the stop callback (ticker_op_stop_cb).
	 */
	ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				    TICKER_USER_ID_ULL_HIGH,
				    TICKER_ID_CONN_BASE + lll->handle,
				    ticker_op_stop_cb, (void *)lll);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

	/* Invalidate the connection context */
	lll->handle = 0xFFFF;
}

/* Append a ctrl tx node after the current last ctrl node, preserving
 * whatever follows the ctrl run, and advance the last-ctrl pointer.
 */
static void ctrl_tx_data_last_enqueue(struct ll_conn *conn,
				      struct node_tx *tx)
{
	tx->next = conn->tx_ctrl_last->next;
	conn->tx_ctrl_last->next = tx;
	conn->tx_ctrl_last = tx;
}

/* Insert a ctrl PDU into the connection tx list.
 *
 * Ctrl PDUs are kept as a contiguous run: either right after the head
 * node (when the head may already have been transmitted once and must
 * not be displaced) or at the head itself (when nothing is awaiting
 * an ACK, or data tx is paused in the LE_ENC build).
 */
static void ctrl_tx_enqueue(struct ll_conn *conn, struct node_tx *tx)
{
	/* check if a packet was tx-ed and not acked by peer */
	if (
	    /* data/ctrl packet is in the head */
	    conn->tx_head &&
#if defined(CONFIG_BT_CTLR_LE_ENC)
	    /* data PDU tx is not paused */
	    !conn->pause_tx) {
#else /* !CONFIG_BT_CTLR_LE_ENC */
	    1) {
#endif /* !CONFIG_BT_CTLR_LE_ENC */
		/* data or ctrl may have been transmitted once, but not acked
		 * by peer, hence place this new ctrl after head
		 */

		/* if data transmitted once, keep it at head of the tx list,
		 * as we will insert a ctrl after it, hence advance the
		 * data pointer
		 */
		if (conn->tx_head == conn->tx_data) {
			conn->tx_data = conn->tx_data->next;
		}

		/* if no ctrl packet already queued, new ctrl added will be
		 * the ctrl pointer and is inserted after head.
		 */
		if (!conn->tx_ctrl) {
			tx->next = conn->tx_head->next;
			conn->tx_head->next = tx;
			conn->tx_ctrl = tx;
			conn->tx_ctrl_last = tx;
		} else {
			ctrl_tx_data_last_enqueue(conn, tx);
		}
	} else {
		/* No packet needing ACK. */

		/* If first ctrl packet then add it as head else add it to the
		 * tail of the ctrl packets.
		 */
		if (!conn->tx_ctrl) {
			tx->next = conn->tx_head;
			conn->tx_head = tx;
			conn->tx_ctrl = tx;
			conn->tx_ctrl_last = tx;
		} else {
			ctrl_tx_data_last_enqueue(conn, tx);
		}
	}

	/* Update last pointer if ctrl added at end of tx list */
	if (tx->next == 0) {
		conn->tx_data_last = tx;
	}
}

/* Enqueue a security-related ctrl PDU.  While data tx is paused during
 * encryption setup (LE_ENC builds), the PDU is threaded directly at the
 * head / after the ctrl run without the normal head-preservation logic;
 * otherwise it goes through ctrl_tx_enqueue().
 */
static void ctrl_tx_sec_enqueue(struct ll_conn *conn, struct node_tx *tx)
{
#if defined(CONFIG_BT_CTLR_LE_ENC)
	if (conn->pause_tx) {
		if (!conn->tx_ctrl) {
			tx->next = conn->tx_head;
			conn->tx_head = tx;
		} else {
			tx->next = conn->tx_ctrl_last->next;
			conn->tx_ctrl_last->next = tx;
		}
	} else
#endif /* CONFIG_BT_CTLR_LE_ENC */

	{
		ctrl_tx_enqueue(conn, tx);
	}
}

/* Start the connection update procedure: fill in the
 * LL_CONNECTION_UPDATE_IND PDU from the pending llcp.conn_upd
 * parameters and, with advanced scheduling, defer the win_offset
 * computation to ULL_LOW via a mayfly.
 */
static inline void event_conn_upd_init(struct ll_conn *conn,
				       u16_t event_counter,
				       u32_t ticks_at_expire,
				       struct pdu_data *pdu_ctrl_tx,
				       struct mayfly *mfy_sched_offset,
				       void (*fp_mfy_select_or_use)(void *))
{
	/* move to in progress */
	conn->llcp.conn_upd.state = LLCP_CUI_STATE_INPROG;

	/* set instant: current event + slave latency + 6 events margin */
	conn->llcp.conn_upd.instant = event_counter + conn->lll.latency + 6;

	/* place the conn update req packet as next in tx queue */
	pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
	pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, conn_update_ind) +
		sizeof(struct pdu_data_llctrl_conn_update_ind);
	pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND;
	pdu_ctrl_tx->llctrl.conn_update_ind.win_size =
		conn->llcp.conn_upd.win_size;
	pdu_ctrl_tx->llctrl.conn_update_ind.win_offset =
		conn->llcp.conn_upd.win_offset_us / 1250;
	pdu_ctrl_tx->llctrl.conn_update_ind.interval =
		conn->llcp.conn_upd.interval;
pdu_ctrl_tx->llctrl.conn_update_ind.latency = + conn->llcp.conn_upd.latency; + pdu_ctrl_tx->llctrl.conn_update_ind.timeout = + conn->llcp.conn_upd.timeout; + pdu_ctrl_tx->llctrl.conn_update_ind.instant = + conn->llcp.conn_upd.instant; + +#if defined(CONFIG_BT_CTLR_SCHED_ADVANCED) + { + u32_t retval; + + /* calculate window offset that places the connection in the + * next available slot after existing masters. + */ + conn->llcp.conn_upd.ticks_anchor = ticks_at_expire; + +#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) + if (conn->evt.ticks_xtal_to_start & XON_BITMASK) { + u32_t ticks_prepare_to_start = + max(conn->evt.ticks_active_to_start, + conn->evt.ticks_preempt_to_start); + + conn->llcp.conn_upd.ticks_anchor -= + (conn->evt.ticks_xtal_to_start & + ~XON_BITMASK) - ticks_prepare_to_start; + } +#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */ + + conn->llcp.conn_upd.pdu_win_offset = (u16_t *) + &pdu_ctrl_tx->llctrl.conn_update_ind.win_offset; + + mfy_sched_offset->fp = fp_mfy_select_or_use; + mfy_sched_offset->param = (void *)conn; + + retval = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, + TICKER_USER_ID_ULL_LOW, 1, + mfy_sched_offset); + LL_ASSERT(!retval); + } +#else /* !CONFIG_BT_CTLR_SCHED_ADVANCED */ + ARG_UNUSED(ticks_at_expire); + ARG_UNUSED(mfy_sched_offset); + ARG_UNUSED(fp_mfy_select_or_use); +#endif /* !CONFIG_BT_CTLR_SCHED_ADVANCED */ +} + +static inline int event_conn_upd_prep(struct ll_conn *conn, + u16_t event_counter, + u32_t ticks_at_expire) +{ + struct ll_conn *conn_upd; + u16_t instant_latency; + + conn_upd = conn_upd_curr; + + /* set mutex */ + if (!conn_upd) { + conn_upd_curr = conn; + } + + instant_latency = (event_counter - conn->llcp.conn_upd.instant) & + 0xffff; + if (conn->llcp.conn_upd.state != LLCP_CUI_STATE_INPROG) { +#if defined(CONFIG_BT_CTLR_SCHED_ADVANCED) + static memq_link_t s_link; + static struct mayfly s_mfy_sched_offset = {0, 0, + &s_link, 0, 0 }; + void (*fp_mfy_select_or_use)(void *) = NULL; +#endif /* CONFIG_BT_CTLR_SCHED_ADVANCED */ + 
struct pdu_data *pdu_ctrl_tx; + struct node_rx_pdu *rx; + struct node_tx *tx; + + LL_ASSERT(!conn->llcp_rx); + + rx = ll_pdu_rx_alloc_peek(1); + if (!rx) { + return -ENOBUFS; + } + + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + + (void)ll_pdu_rx_alloc(); + conn->llcp_rx = rx; + + pdu_ctrl_tx = (void *)tx->pdu; + +#if defined(CONFIG_BT_CTLR_SCHED_ADVANCED) + switch (conn->llcp.conn_upd.state) { + case LLCP_CUI_STATE_USE: + fp_mfy_select_or_use = ull_sched_mfy_win_offset_use; + break; + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + case LLCP_CUI_STATE_SELECT: + fp_mfy_select_or_use = ull_sched_mfy_win_offset_select; + break; +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + + default: + LL_ASSERT(0); + break; + } + + event_conn_upd_init(conn, event_counter, ticks_at_expire, + pdu_ctrl_tx, &s_mfy_sched_offset, + fp_mfy_select_or_use); +#else /* !CONFIG_BT_CTLR_SCHED_ADVANCED */ + event_conn_upd_init(conn, event_counter, ticks_at_expire, + pdu_ctrl_tx, NULL, NULL); +#endif /* !CONFIG_BT_CTLR_SCHED_ADVANCED */ + + ctrl_tx_enqueue(conn, tx); + + } else if (instant_latency <= 0x7FFF) { + u32_t mayfly_was_enabled; + u16_t conn_interval_old; + u16_t conn_interval_new; + u32_t ticks_slot_offset; + u32_t ticks_win_offset; + u32_t conn_interval_us; + struct node_rx_pdu *rx; + struct lll_conn *lll; + u8_t ticker_id_conn; + u32_t ticker_status; + u32_t periodic_us; + u16_t latency; + + /* procedure request acked */ + conn->llcp_ack = conn->llcp_req; + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + if ((conn->llcp_conn_param.req != conn->llcp_conn_param.ack) && + (conn->llcp_conn_param.state == LLCP_CPR_STATE_UPD)) { + conn->llcp_conn_param.ack = conn->llcp_conn_param.req; + + /* Stop procedure timeout */ + conn->procedure_expire = 0; + } +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + + /* reset mutex */ + if (conn_upd_curr == conn) { + conn_upd_curr = NULL; + } + + lll = &conn->lll; + + /* Acquire Rx node */ + rx = conn->llcp_rx; + conn->llcp_rx = 
NULL; + + LL_ASSERT(rx && rx->hdr.link); + + /* Prepare the rx packet structure */ + if ((conn->llcp.conn_upd.interval != lll->interval) || + (conn->llcp.conn_upd.latency != lll->latency) || + (RADIO_CONN_EVENTS(conn->llcp.conn_upd.timeout * 10000, + lll->interval * 1250) != + conn->supervision_reload)) { + struct node_rx_cu *cu; + + rx->hdr.handle = lll->handle; + rx->hdr.type = NODE_RX_TYPE_CONN_UPDATE; + + /* prepare connection update complete structure */ + cu = (void *)rx->pdu; + cu->status = 0x00; + cu->interval = conn->llcp.conn_upd.interval; + cu->latency = conn->llcp.conn_upd.latency; + cu->timeout = conn->llcp.conn_upd.timeout; + } else { + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + } + + /* enqueue rx node towards Thread */ + ll_rx_put(rx->hdr.link, rx); + ll_rx_sched(); + +#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) + /* restore to normal prepare */ + if (conn->evt.ticks_xtal_to_start & XON_BITMASK) { + u32_t ticks_prepare_to_start = + max(conn->evt.ticks_active_to_start, + conn->evt.ticks_preempt_to_start); + + conn->evt.ticks_xtal_to_start &= ~XON_BITMASK; + ticks_at_expire -= (conn->evt.ticks_xtal_to_start - + ticks_prepare_to_start); + } +#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */ + + /* compensate for instant_latency due to laziness */ + conn_interval_old = instant_latency * lll->interval; + latency = conn_interval_old / + conn->llcp.conn_upd.interval; + conn_interval_new = latency * + conn->llcp.conn_upd.interval; + if (conn_interval_new > conn_interval_old) { + ticks_at_expire += HAL_TICKER_US_TO_TICKS( + (conn_interval_new - conn_interval_old) * 1250); + } else { + ticks_at_expire -= HAL_TICKER_US_TO_TICKS( + (conn_interval_old - conn_interval_new) * 1250); + } + lll->latency_prepare -= (instant_latency - latency); + + /* calculate the offset, window widening and interval */ + ticks_slot_offset = max(conn->evt.ticks_active_to_start, + conn->evt.ticks_xtal_to_start); + conn_interval_us = 
conn->llcp.conn_upd.interval * 1250; + periodic_us = conn_interval_us; + if (lll->role) { + lll->slave.window_widening_prepare_us -= + lll->slave.window_widening_periodic_us * + instant_latency; + + lll->slave.window_widening_periodic_us = + (((lll_conn_ppm_local_get() + + lll_conn_ppm_get(lll->slave.sca)) * + conn_interval_us) + (1000000 - 1)) / 1000000; + lll->slave.window_widening_max_us = + (conn_interval_us >> 1) - TIFS_US; + lll->slave.window_size_prepare_us = + conn->llcp.conn_upd.win_size * 1250; + conn->slave.ticks_to_offset = 0; + + lll->slave.window_widening_prepare_us += + lll->slave.window_widening_periodic_us * + latency; + if (lll->slave.window_widening_prepare_us > + lll->slave.window_widening_max_us) { + lll->slave.window_widening_prepare_us = + lll->slave.window_widening_max_us; + } + + ticks_at_expire -= HAL_TICKER_US_TO_TICKS( + lll->slave.window_widening_periodic_us * + latency); + ticks_win_offset = HAL_TICKER_US_TO_TICKS( + (conn->llcp.conn_upd.win_offset_us / 1250) * + 1250); + periodic_us -= lll->slave.window_widening_periodic_us; + } else { + ticks_win_offset = HAL_TICKER_US_TO_TICKS( + conn->llcp.conn_upd.win_offset_us); + + /* Workaround: Due to the missing remainder param in + * ticker_start function for first interval; add a + * tick so as to use the ceiled value. + */ + ticks_win_offset += 1; + } + lll->interval = conn->llcp.conn_upd.interval; + lll->latency = conn->llcp.conn_upd.latency; + conn->supervision_reload = + RADIO_CONN_EVENTS((conn->llcp.conn_upd.timeout + * 10 * 1000), conn_interval_us); + conn->procedure_reload = + RADIO_CONN_EVENTS((40 * 1000 * 1000), conn_interval_us); + +#if defined(CONFIG_BT_CTLR_LE_PING) + /* APTO in no. 
of connection events */ + conn->apto_reload = RADIO_CONN_EVENTS((30 * 1000 * 1000), + conn_interval_us); + /* Dispatch LE Ping PDU 6 connection events (that peer would + * listen to) before 30s timeout + * TODO: "peer listens to" is greater than 30s due to latency + */ + conn->appto_reload = (conn->apto_reload > (lll->latency + 6)) ? + (conn->apto_reload - (lll->latency + 6)) : + conn->apto_reload; +#endif /* CONFIG_BT_CTLR_LE_PING */ + + if (!conn->llcp.conn_upd.is_internal) { + conn->supervision_expire = 0; + } + + /* disable ticker job, in order to chain stop and start + * to avoid RTC being stopped if no tickers active. + */ + mayfly_was_enabled = mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH, + TICKER_USER_ID_ULL_LOW); + mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, + 0); + + /* start slave/master with new timings */ + ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn); + ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_ULL_HIGH, + ticker_id_conn, ticker_op_cb, + (void *)__LINE__); + LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) || + (ticker_status == TICKER_STATUS_BUSY)); + ticker_status = + ticker_start(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_ULL_HIGH, + ticker_id_conn, + ticks_at_expire, ticks_win_offset, + HAL_TICKER_US_TO_TICKS(periodic_us), + HAL_TICKER_REMAINDER(periodic_us), + TICKER_NULL_LAZY, + (ticks_slot_offset + conn->evt.ticks_slot), +#if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CENTRAL) + lll->role ? 
ull_slave_ticker_cb : + ull_master_ticker_cb, +#elif defined(CONFIG_BT_PERIPHERAL) + ull_slave_ticker_cb, +#else + ull_master_ticker_cb, +#endif + conn, ticker_op_cb, (void *)__LINE__); + LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) || + (ticker_status == TICKER_STATUS_BUSY)); + + /* enable ticker job, if disabled in this function */ + if (mayfly_was_enabled) { + mayfly_enable(TICKER_USER_ID_ULL_HIGH, + TICKER_USER_ID_ULL_LOW, 1); + } + + return 0; + } + + return -EINPROGRESS; +} + +static inline void event_ch_map_prep(struct ll_conn *conn, + u16_t event_counter) +{ + if (conn->llcp.chan_map.initiate) { + struct node_tx *tx; + + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (tx) { + struct pdu_data *pdu_ctrl_tx = (void *)tx->pdu; + + /* reset initiate flag */ + conn->llcp.chan_map.initiate = 0; + + /* set instant */ + conn->llcp.chan_map.instant = event_counter + + conn->lll.latency + 6; + + /* place the channel map req packet as next in + * tx queue + */ + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, + chan_map_ind) + + sizeof(struct pdu_data_llctrl_chan_map_ind); + pdu_ctrl_tx->llctrl.opcode = + PDU_DATA_LLCTRL_TYPE_CHAN_MAP_IND; + memcpy(&pdu_ctrl_tx->llctrl.chan_map_ind.chm[0], + &conn->llcp.chan_map.chm[0], + sizeof(pdu_ctrl_tx->llctrl.chan_map_ind.chm)); + pdu_ctrl_tx->llctrl.chan_map_ind.instant = + conn->llcp.chan_map.instant; + + ctrl_tx_enqueue(conn, tx); + } + } else if (((event_counter - conn->llcp.chan_map.instant) & 0xFFFF) + <= 0x7FFF) { + struct lll_conn *lll = &conn->lll; + + /* procedure request acked */ + conn->llcp_ack = conn->llcp_req; + + /* copy to active channel map */ + memcpy(&lll->data_chan_map[0], + &conn->llcp.chan_map.chm[0], + sizeof(lll->data_chan_map)); + lll->data_chan_count = + util_ones_count_get(&lll->data_chan_map[0], + sizeof(lll->data_chan_map)); + conn->chm_updated = 1; + } + +} + +#if defined(CONFIG_BT_CTLR_LE_ENC) +static inline void event_enc_reject_prep(struct 
ll_conn *conn, + struct pdu_data *pdu) +{ + pdu->ll_id = PDU_DATA_LLID_CTRL; + + if (conn->common.fex_valid && + (conn->llcp_features & BIT(BT_LE_FEAT_BIT_EXT_REJ_IND))) { + struct pdu_data_llctrl_reject_ext_ind *p; + + pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND; + + p = (void *)&pdu->llctrl.reject_ext_ind; + p->reject_opcode = PDU_DATA_LLCTRL_TYPE_ENC_REQ; + p->error_code = conn->llcp.encryption.error_code; + + pdu->len = sizeof(struct pdu_data_llctrl_reject_ext_ind); + } else { + struct pdu_data_llctrl_reject_ind *p; + + pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_IND; + + p = (void *)&pdu->llctrl.reject_ind; + p->error_code = conn->llcp.encryption.error_code; + + pdu->len = sizeof(struct pdu_data_llctrl_reject_ind); + } + + pdu->len += offsetof(struct pdu_data_llctrl, reject_ind); + + conn->llcp.encryption.error_code = 0; +} + +static inline void event_enc_prep(struct ll_conn *conn) +{ + struct pdu_data *pdu_ctrl_tx; + struct node_tx *tx; + struct lll_conn *lll; + + if (conn->llcp.encryption.initiate) { + return; + } + + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return; + } + + lll = &conn->lll; + + pdu_ctrl_tx = (void *)tx->pdu; + + /* master sends encrypted enc start rsp in control priority */ + if (!lll->role) { + /* calc the Session Key */ + ecb_encrypt(&conn->llcp.encryption.ltk[0], + &conn->llcp.encryption.skd[0], + NULL, &lll->ccm_rx.key[0]); + + /* copy the Session Key */ + memcpy(&lll->ccm_tx.key[0], &lll->ccm_rx.key[0], + sizeof(lll->ccm_tx.key)); + + /* copy the IV */ + memcpy(&lll->ccm_tx.iv[0], &lll->ccm_rx.iv[0], + sizeof(lll->ccm_tx.iv)); + + /* initialise counter */ + lll->ccm_rx.counter = 0; + lll->ccm_tx.counter = 0; + + /* set direction: slave to master = 0, + * master to slave = 1 + */ + lll->ccm_rx.direction = 0; + lll->ccm_tx.direction = 1; + + /* enable receive encryption */ + lll->enc_rx = 1; + + /* send enc start resp */ + start_enc_rsp_send(conn, pdu_ctrl_tx); + } + + /* slave send reject ind or start 
enc req at control priority */ + +#if defined(CONFIG_BT_CTLR_FAST_ENC) + else { +#else /* !CONFIG_BT_CTLR_FAST_ENC */ + else if (!conn->pause_tx || conn->refresh) { +#endif /* !CONFIG_BT_CTLR_FAST_ENC */ + + /* place the reject ind packet as next in tx queue */ + if (conn->llcp.encryption.error_code) { + event_enc_reject_prep(conn, pdu_ctrl_tx); + } + /* place the start enc req packet as next in tx queue */ + else { + +#if !defined(CONFIG_BT_CTLR_FAST_ENC) + u8_t err; + + /* TODO BT Spec. text: may finalize the sending + * of additional data channel PDUs queued in the + * controller. + */ + err = enc_rsp_send(conn); + if (err) { + mem_release(tx, &mem_conn_tx_ctrl.free); + + return; + } +#endif /* !CONFIG_BT_CTLR_FAST_ENC */ + + /* calc the Session Key */ + ecb_encrypt(&conn->llcp.encryption.ltk[0], + &conn->llcp.encryption.skd[0], NULL, + &lll->ccm_rx.key[0]); + + /* copy the Session Key */ + memcpy(&lll->ccm_tx.key[0], + &lll->ccm_rx.key[0], + sizeof(lll->ccm_tx.key)); + + /* copy the IV */ + memcpy(&lll->ccm_tx.iv[0], &lll->ccm_rx.iv[0], + sizeof(lll->ccm_tx.iv)); + + /* initialise counter */ + lll->ccm_rx.counter = 0; + lll->ccm_tx.counter = 0; + + /* set direction: slave to master = 0, + * master to slave = 1 + */ + lll->ccm_rx.direction = 1; + lll->ccm_tx.direction = 0; + + /* enable receive encryption (transmit turned + * on when start enc resp from master is + * received) + */ + lll->enc_rx = 1; + + /* prepare the start enc req */ + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, + start_enc_req) + + sizeof(struct pdu_data_llctrl_start_enc_req); + pdu_ctrl_tx->llctrl.opcode = + PDU_DATA_LLCTRL_TYPE_START_ENC_REQ; + } + +#if !defined(CONFIG_BT_CTLR_FAST_ENC) + } else { + start_enc_rsp_send(conn, pdu_ctrl_tx); + + /* resume data packet rx and tx */ + conn->pause_rx = 0; + conn->pause_tx = 0; +#endif /* !CONFIG_BT_CTLR_FAST_ENC */ + + } + + ctrl_tx_enqueue(conn, tx); + + /* procedure request acked */ + 
	conn->llcp_ack = conn->llcp_req;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */

/* Prepare and enqueue a feature exchange request.
 *
 * Allocation failure of the ctrl tx buffer is not an error: the
 * procedure simply stays pending (llcp_ack not advanced) and is
 * retried on a later event preparation.
 */
static inline void event_fex_prep(struct ll_conn *conn)
{
	struct node_tx *tx;

	tx = mem_acquire(&mem_conn_tx_ctrl.free);
	if (tx) {
		struct pdu_data *pdu = (void *)tx->pdu;

		/* procedure request acked */
		conn->llcp_ack = conn->llcp_req;

		/* use initial feature bitmap */
		conn->llcp_features = LL_FEAT;

		/* place the feature exchange req packet as next in tx queue;
		 * master sends LL_FEATURE_REQ, slave LL_SLAVE_FEATURE_REQ.
		 */
		pdu->ll_id = PDU_DATA_LLID_CTRL;
		pdu->len = offsetof(struct pdu_data_llctrl, feature_req) +
			   sizeof(struct pdu_data_llctrl_feature_req);
		pdu->llctrl.opcode = !conn->lll.role ?
				     PDU_DATA_LLCTRL_TYPE_FEATURE_REQ :
				     PDU_DATA_LLCTRL_TYPE_SLAVE_FEATURE_REQ;
		(void)memset(&pdu->llctrl.feature_req.features[0],
			     0x00,
			     sizeof(pdu->llctrl.feature_req.features));
		/* serialize the 24 feature bits little-endian */
		pdu->llctrl.feature_req.features[0] =
			conn->llcp_features & 0xFF;
		pdu->llctrl.feature_req.features[1] =
			(conn->llcp_features >> 8) & 0xFF;
		pdu->llctrl.feature_req.features[2] =
			(conn->llcp_features >> 16) & 0xFF;

		ctrl_tx_enqueue(conn, tx);

		/* Start Procedure Timeout (TODO: this shall not replace
		 * terminate procedure)
		 */
		conn->procedure_expire = conn->procedure_reload;
	}

}

/* Prepare the version exchange procedure: transmit our LL_VERSION_IND
 * if not yet sent, or, when the peer's version was already received,
 * emulate a version-ind rx node towards the Thread context.
 */
static inline void event_vex_prep(struct ll_conn *conn)
{
	if (conn->llcp_version.tx == 0) {
		struct node_tx *tx;

		tx = mem_acquire(&mem_conn_tx_ctrl.free);
		if (tx) {
			struct pdu_data *pdu = (void *)tx->pdu;

			/* procedure request acked */
			conn->llcp_ack = conn->llcp_req;

			/* set version ind tx-ed flag */
			conn->llcp_version.tx = 1;

			/* place the version ind packet as next in tx queue */
			pdu->ll_id = PDU_DATA_LLID_CTRL;
			pdu->len =
				offsetof(struct pdu_data_llctrl, version_ind) +
				sizeof(struct pdu_data_llctrl_version_ind);
			pdu->llctrl.opcode =
				PDU_DATA_LLCTRL_TYPE_VERSION_IND;
			pdu->llctrl.version_ind.version_number =
				LL_VERSION_NUMBER;
			pdu->llctrl.version_ind.company_id =
				CONFIG_BT_CTLR_COMPANY_ID;
pdu->llctrl.version_ind.sub_version_number = + CONFIG_BT_CTLR_SUBVERSION_NUMBER; + + ctrl_tx_enqueue(conn, tx); + + /* Start Procedure Timeout (TODO: this shall not + * replace terminate procedure) + */ + conn->procedure_expire = conn->procedure_reload; + } + } else if (conn->llcp_version.rx) { + struct node_rx_pdu *rx; + struct pdu_data *pdu; + + /* get a rx node for ULL->LL */ + rx = ll_pdu_rx_alloc(); + if (!rx) { + return; + }; + + /* procedure request acked */ + conn->llcp_ack = conn->llcp_req; + + rx->hdr.handle = conn->lll.handle; + rx->hdr.type = NODE_RX_TYPE_DC_PDU; + + /* prepare version ind structure */ + pdu = (void *)rx->pdu; + pdu->ll_id = PDU_DATA_LLID_CTRL; + pdu->len = offsetof(struct pdu_data_llctrl, version_ind) + + sizeof(struct pdu_data_llctrl_version_ind); + pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND; + pdu->llctrl.version_ind.version_number = + conn->llcp_version.version_number; + pdu->llctrl.version_ind.company_id = + conn->llcp_version.company_id; + pdu->llctrl.version_ind.sub_version_number = + conn->llcp_version.sub_version_number; + + /* enqueue version ind structure into rx queue */ + ll_rx_put(rx->hdr.link, rx); + ll_rx_sched(); + } else { + /* tx-ed but no rx, and new request placed */ + LL_ASSERT(0); + } +} + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) +static inline void event_conn_param_req(struct ll_conn *conn, + u16_t event_counter, + u32_t ticks_at_expire) +{ + struct pdu_data_llctrl_conn_param_req *p; + struct pdu_data *pdu_ctrl_tx; + struct node_tx *tx; + + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return; + } + + /* move to wait for conn_update/rsp/rej */ + conn->llcp_conn_param.state = LLCP_CPR_STATE_RSP_WAIT; + + /* place the conn param req packet as next in tx queue */ + pdu_ctrl_tx = (void *)tx->pdu; + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, conn_param_req) + + sizeof(struct pdu_data_llctrl_conn_param_req); + pdu_ctrl_tx->llctrl.opcode = 
PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ; + p = (void *)&pdu_ctrl_tx->llctrl.conn_param_req; + p->interval_min = conn->llcp_conn_param.interval_min; + p->interval_max = conn->llcp_conn_param.interval_max; + p->latency = conn->llcp_conn_param.latency; + p->timeout = conn->llcp_conn_param.timeout; + p->preferred_periodicity = 0; + p->reference_conn_event_count = event_counter; + p->offset0 = 0x0000; + p->offset1 = 0xffff; + p->offset2 = 0xffff; + p->offset3 = 0xffff; + p->offset4 = 0xffff; + p->offset5 = 0xffff; + + ctrl_tx_enqueue(conn, tx); + + /* set CUI/CPR mutex */ + conn_upd_curr = conn; + + /* Start Procedure Timeout (TODO: this shall not replace + * terminate procedure). + */ + conn->procedure_expire = conn->procedure_reload; + +#if defined(CONFIG_BT_CTLR_SCHED_ADVANCED) + { + static memq_link_t s_link; + static struct mayfly s_mfy_sched_offset = {0, 0, &s_link, NULL, + ull_sched_mfy_free_win_offset_calc}; + u32_t retval; + + conn->llcp_conn_param.ticks_ref = ticks_at_expire; + +#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) + if (conn->evt.ticks_xtal_to_start & XON_BITMASK) { + u32_t ticks_prepare_to_start = + max(conn->evt.ticks_active_to_start, + conn->evt.ticks_preempt_to_start); + + conn->llcp_conn_param.ticks_ref -= + (conn->evt.ticks_xtal_to_start & + ~XON_BITMASK) - ticks_prepare_to_start; + } +#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */ + + conn->llcp_conn_param.pdu_win_offset0 = (u16_t *)&p->offset0; + + s_mfy_sched_offset.param = (void *)conn; + + retval = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, + TICKER_USER_ID_ULL_LOW, 1, + &s_mfy_sched_offset); + LL_ASSERT(!retval); + } +#else /* !CONFIG_BT_CTLR_SCHED_ADVANCED */ + ARG_UNUSED(ticks_at_expire); +#endif /* !CONFIG_BT_CTLR_SCHED_ADVANCED */ +} + +static inline void event_conn_param_rsp(struct ll_conn *conn) +{ + struct pdu_data_llctrl_conn_param_rsp *rsp; + struct node_tx *tx; + struct pdu_data *pdu; + + /* handle rejects */ + if (conn->llcp_conn_param.status) { + struct pdu_data_llctrl_reject_ext_ind *rej; + 
+ tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return; + } + + /* master/slave response with reject ext ind */ + pdu = (void *)tx->pdu; + pdu->ll_id = PDU_DATA_LLID_CTRL; + pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND; + pdu->len = offsetof(struct pdu_data_llctrl, reject_ext_ind) + + sizeof(struct pdu_data_llctrl_reject_ext_ind); + + rej = (void *)&pdu->llctrl.reject_ext_ind; + rej->reject_opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ; + rej->error_code = conn->llcp_conn_param.status; + + ctrl_tx_enqueue(conn, tx); + + /* procedure request acked */ + conn->llcp_conn_param.ack = conn->llcp_conn_param.req; + + /* reset mutex */ + conn_upd_curr = NULL; + + return; + } + + /* master respond with connection update */ + if (!conn->lll.role) { + if (conn->llcp_req != conn->llcp_ack) { + return; + } + + /* Move to waiting for connection update completion */ + conn->llcp_conn_param.state = LLCP_CPR_STATE_UPD; + + /* Initiate connection update procedure */ + conn->llcp.conn_upd.win_size = 1; + conn->llcp.conn_upd.win_offset_us = 0; + if (conn->llcp_conn_param.preferred_periodicity) { + conn->llcp.conn_upd.interval = + ((conn->llcp_conn_param.interval_min / + conn->llcp_conn_param.preferred_periodicity) + + 1) * + conn->llcp_conn_param.preferred_periodicity; + } else { + conn->llcp.conn_upd.interval = + conn->llcp_conn_param.interval_max; + } + conn->llcp.conn_upd.latency = conn->llcp_conn_param.latency; + conn->llcp.conn_upd.timeout = conn->llcp_conn_param.timeout; + /* conn->llcp.conn_upd.instant = 0; */ + conn->llcp.conn_upd.state = LLCP_CUI_STATE_SELECT; + conn->llcp.conn_upd.is_internal = !conn->llcp_conn_param.cmd; + conn->llcp_type = LLCP_CONN_UPD; + conn->llcp_ack--; + + return; + } + + /* slave response with connection parameter response */ + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return; + } + + /* place the conn param rsp packet as next in tx queue */ + pdu = (void *)tx->pdu; + pdu->ll_id = PDU_DATA_LLID_CTRL; + pdu->len = 
offsetof(struct pdu_data_llctrl, conn_param_rsp) + + sizeof(struct pdu_data_llctrl_conn_param_rsp); + pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP; + rsp = (void *)&pdu->llctrl.conn_param_rsp; + rsp->interval_min = conn->llcp_conn_param.interval_min; + rsp->interval_max = conn->llcp_conn_param.interval_max; + rsp->latency = conn->llcp_conn_param.latency; + rsp->timeout = conn->llcp_conn_param.timeout; + rsp->preferred_periodicity = + conn->llcp_conn_param.preferred_periodicity; + rsp->reference_conn_event_count = + conn->llcp_conn_param.reference_conn_event_count; + rsp->offset0 = conn->llcp_conn_param.offset0; + rsp->offset1 = conn->llcp_conn_param.offset1; + rsp->offset2 = conn->llcp_conn_param.offset2; + rsp->offset3 = conn->llcp_conn_param.offset3; + rsp->offset4 = conn->llcp_conn_param.offset4; + rsp->offset5 = conn->llcp_conn_param.offset5; + + ctrl_tx_enqueue(conn, tx); + + /* procedure request acked */ + conn->llcp_conn_param.ack = conn->llcp_conn_param.req; + + /* reset mutex */ + conn_upd_curr = NULL; +} + +static inline void event_conn_param_app_req(struct ll_conn *conn) +{ + struct pdu_data_llctrl_conn_param_req *p; + struct node_rx_pdu *rx; + struct pdu_data *pdu; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + /* defer until encryption setup is complete */ + if (conn->pause_tx) { + return; + } +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + /* wait for free rx buffer */ + rx = ll_pdu_rx_alloc(); + if (!rx) { + return; + } + + /* move to wait for conn_update/rsp/rej */ + conn->llcp_conn_param.state = LLCP_CPR_STATE_APP_WAIT; + + /* Emulate as Rx-ed CPR data channel PDU */ + rx->hdr.handle = conn->lll.handle; + rx->hdr.type = NODE_RX_TYPE_DC_PDU; + + /* place the conn param req packet as next in rx queue */ + pdu = (void *)rx->pdu; + pdu->ll_id = PDU_DATA_LLID_CTRL; + pdu->len = offsetof(struct pdu_data_llctrl, conn_param_req) + + sizeof(struct pdu_data_llctrl_conn_param_req); + pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ; + p = (void *) 
&pdu->llctrl.conn_param_req; + p->interval_min = conn->llcp_conn_param.interval_min; + p->interval_max = conn->llcp_conn_param.interval_max; + p->latency = conn->llcp_conn_param.latency; + p->timeout = conn->llcp_conn_param.timeout; + + /* enqueue connection parameter request into rx queue */ + ll_rx_put(rx->hdr.link, rx); + ll_rx_sched(); +} + +static inline void event_conn_param_prep(struct ll_conn *conn, + u16_t event_counter, + u32_t ticks_at_expire) +{ + struct ll_conn *conn_upd; + + conn_upd = conn_upd_curr; + if (conn_upd && (conn_upd != conn)) { + return; + } + + switch (conn->llcp_conn_param.state) { + case LLCP_CPR_STATE_REQ: + event_conn_param_req(conn, event_counter, ticks_at_expire); + break; + + case LLCP_CPR_STATE_RSP: + event_conn_param_rsp(conn); + break; + + case LLCP_CPR_STATE_APP_REQ: + event_conn_param_app_req(conn); + break; + + case LLCP_CPR_STATE_APP_WAIT: + case LLCP_CPR_STATE_RSP_WAIT: + case LLCP_CPR_STATE_UPD: + /* Do nothing */ + break; + + default: + LL_ASSERT(0); + break; + } +} +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + +#if defined(CONFIG_BT_CTLR_LE_PING) +static inline void event_ping_prep(struct ll_conn *conn) +{ + struct node_tx *tx; + + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (tx) { + struct pdu_data *pdu_ctrl_tx = (void *)tx->pdu; + + /* procedure request acked */ + conn->llcp_ack = conn->llcp_req; + + /* place the ping req packet as next in tx queue */ + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, ping_req) + + sizeof(struct pdu_data_llctrl_ping_req); + pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_PING_REQ; + + ctrl_tx_enqueue(conn, tx); + + /* Start Procedure Timeout (TODO: this shall not replace + * terminate procedure) + */ + conn->procedure_expire = conn->procedure_reload; + } + +} +#endif /* CONFIG_BT_CTLR_LE_PING */ + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) +static inline void event_len_prep(struct ll_conn *conn) +{ + switch (conn->llcp_length.state) { + 
case LLCP_LENGTH_STATE_REQ: + { + struct pdu_data_llctrl_length_req *lr; + struct pdu_data *pdu_ctrl_tx; + struct node_tx *tx; + struct lll_conn *lll; + u16_t free_count_rx; + + free_count_rx = ll_rx_acquired_count_get() + + ll_rx_free_count_get(); + LL_ASSERT(free_count_rx <= 0xFF); + + if (ll_rx_total_count_get() != free_count_rx) { + break; + } + + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + break; + } + + /* wait for resp before completing the procedure */ + conn->llcp_length.state = LLCP_LENGTH_STATE_ACK_WAIT; + + lll = &conn->lll; + + /* set the default tx octets/time to requested value */ + lll->default_tx_octets = conn->llcp_length.tx_octets; + +#if defined(CONFIG_BT_CTLR_PHY) + lll->default_tx_time = conn->llcp_length.tx_time; +#endif /* CONFIG_BT_CTLR_PHY */ + + /* place the length req packet as next in tx queue */ + pdu_ctrl_tx = (void *) tx->pdu; + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = + offsetof(struct pdu_data_llctrl, length_req) + + sizeof(struct pdu_data_llctrl_length_req); + pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_REQ; + + lr = &pdu_ctrl_tx->llctrl.length_req; + lr->max_rx_octets = LL_LENGTH_OCTETS_RX_MAX; + lr->max_tx_octets = lll->default_tx_octets; + lr->max_rx_time = PKT_US(LL_LENGTH_OCTETS_RX_MAX, BIT(2)); +#if !defined(CONFIG_BT_CTLR_PHY) + lr->max_tx_time = PKT_US(lll->default_tx_octets, 0); +#else /* CONFIG_BT_CTLR_PHY */ + lr->max_tx_time = lll->default_tx_time; +#endif /* CONFIG_BT_CTLR_PHY */ + + ctrl_tx_enqueue(conn, tx); + + /* Start Procedure Timeout (TODO: this shall not replace + * terminate procedure). + */ + conn->procedure_expire = conn->procedure_reload; + } + break; + + case LLCP_LENGTH_STATE_RESIZE: + { + struct pdu_data_llctrl_length_rsp *lr; + struct pdu_data *pdu_ctrl_rx; + struct node_rx_pdu *rx; + u16_t packet_rx_data_size; + u16_t free_count_conn; + struct lll_conn *lll; + u16_t free_count_rx; + + /* Ensure the rx pool is not in use. 
+ * This is important to be able to re-size the pool + * ensuring there is no chance that an operation on + * the pool is pre-empted causing memory corruption. + */ + free_count_rx = ll_rx_acquired_count_get() + + ll_rx_free_count_get(); + LL_ASSERT(free_count_rx <= 0xFF); + + if (ll_rx_total_count_get() != free_count_rx) { + /** TODO another role instance has obtained + * memory from rx pool. + */ + LL_ASSERT(0); + } + + /* Procedure complete */ + conn->llcp_length.ack = conn->llcp_length.req; + conn->llcp_length.pause_tx = 0; + conn->procedure_expire = 0; + + lll = &conn->lll; + + /* Use the new rx octets/time in the connection */ + lll->max_rx_octets = conn->llcp_length.rx_octets; + +#if defined(CONFIG_BT_CTLR_PHY) + lll->max_rx_time = conn->llcp_length.rx_time; +#endif /* CONFIG_BT_CTLR_PHY */ + + /** TODO This design is exception as memory initialization + * and allocation is done in radio context here, breaking the + * rule that the rx buffers are allocated in application + * context. + * Design mem_* such that mem_init could interrupt mem_acquire, + * when the pool is full? + */ + free_count_conn = mem_free_count_get(conn_free); + /* TODO:*/ + #if 0 + if (_radio.advertiser.conn) { + free_count_conn++; + } + if (_radio.scanner.conn) { + free_count_conn++; + } + #endif + packet_rx_data_size = MROUND(offsetof(struct node_rx_pdu, pdu) + + offsetof(struct pdu_data, lldata) + + lll->max_rx_octets); + /* Resize to lower or higher size if this is the only active + * connection, or resize to only higher sizes as there may be + * other connections using the current size. + */ + if (((free_count_conn + 1) == CONFIG_BT_MAX_CONN) || + (packet_rx_data_size > ll_rx_data_size_get())) { + /* as rx mem is to be re-sized, release acquired + * memq link. 
+ */ + /* TODO: */ + #if 0 + while (_radio.packet_rx_acquire != + _radio.packet_rx_last) { + if (_radio.packet_rx_acquire == 0) { + _radio.packet_rx_acquire = + _radio.packet_rx_count - 1; + } else { + _radio.packet_rx_acquire -= 1; + } + + node_rx = _radio.packet_rx[ + _radio.packet_rx_acquire]; + mem_release(node_rx->hdr.link, + &_radio.link_rx_free); + + LL_ASSERT(_radio.link_rx_data_quota < + (_radio.packet_rx_count - 1)); + _radio.link_rx_data_quota++; + + /* no need to release node_rx as we mem_init + * later down in code. + */ + } + + /* calculate the new rx node size and new count */ + if (lll->max_rx_octets < (PDU_AC_SIZE_MAX + + PDU_AC_SIZE_EXTRA)) { + _radio.packet_rx_data_size = + MROUND(offsetof(struct node_rx_pdu, pdu) + + (PDU_AC_SIZE_MAX + + PDU_AC_SIZE_EXTRA)); + } else { + _radio.packet_rx_data_size = + packet_rx_data_size; + } + _radio.packet_rx_data_count = + _radio.packet_rx_data_pool_size / + _radio.packet_rx_data_size; + LL_ASSERT(_radio.packet_rx_data_count); + + /* re-size (re-init) the free rx pool */ + mem_init(_radio.pkt_rx_data_pool, + _radio.packet_rx_data_size, + _radio.packet_rx_data_count, + &_radio.pkt_rx_data_free); + + /* allocate the rx queue include one extra for + * generating event in following lines. 
+ */ + packet_rx_allocate(4); + #endif + } + + /* Prepare the rx packet structure */ + /* TODO: */ + LL_ASSERT(rx); + rx->hdr.handle = conn->lll.handle; + rx->hdr.type = NODE_RX_TYPE_DC_PDU; + + /* prepare length rsp structure */ + pdu_ctrl_rx = (void *)rx->pdu; + pdu_ctrl_rx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_rx->len = + offsetof(struct pdu_data_llctrl, length_rsp) + + sizeof(struct pdu_data_llctrl_length_rsp); + pdu_ctrl_rx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_RSP; + + lr = &pdu_ctrl_rx->llctrl.length_rsp; + lr->max_rx_octets = lll->max_rx_octets; + lr->max_tx_octets = lll->max_tx_octets; +#if !defined(CONFIG_BT_CTLR_PHY) + lr->max_rx_time = PKT_US(lll->max_rx_octets, 0); + lr->max_tx_time = PKT_US(lll->max_tx_octets, 0); +#else /* CONFIG_BT_CTLR_PHY */ + lr->max_rx_time = lll->max_rx_time; + lr->max_tx_time = lll->max_tx_time; +#endif /* CONFIG_BT_CTLR_PHY */ + + /* enqueue length rsp structure into rx queue */ + /* TODO: */ + } + break; + + case LLCP_LENGTH_STATE_ACK_WAIT: + case LLCP_LENGTH_STATE_RSP_WAIT: + /* do nothing */ + break; + + default: + LL_ASSERT(0); + break; + } +} +#endif /* CONFIG_BT_CTLR_DATA_LENGTH */ + +#if defined(CONFIG_BT_CTLR_PHY) +static inline void event_phy_req_prep(struct ll_conn *conn) +{ + switch (conn->llcp_phy.state) { + case LLCP_PHY_STATE_REQ: + { + struct pdu_data_llctrl_phy_req *pr; + struct pdu_data *pdu_ctrl_tx; + struct node_tx *tx; + + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + break; + } + + conn->llcp_phy.state = LLCP_PHY_STATE_ACK_WAIT; + + /* update preferred phy */ + conn->phy_pref_tx = conn->llcp_phy.tx; + conn->phy_pref_rx = conn->llcp_phy.rx; + conn->phy_pref_flags = conn->llcp_phy.flags; + + /* place the phy req packet as next in tx queue */ + pdu_ctrl_tx = (void *)tx->pdu; + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = + offsetof(struct pdu_data_llctrl, phy_req) + + sizeof(struct pdu_data_llctrl_phy_req); + pdu_ctrl_tx->llctrl.opcode = 
PDU_DATA_LLCTRL_TYPE_PHY_REQ; + + pr = &pdu_ctrl_tx->llctrl.phy_req; + pr->tx_phys = conn->llcp_phy.tx; + pr->rx_phys = conn->llcp_phy.rx; + + ctrl_tx_enqueue(conn, tx); + + /* Start Procedure Timeout (TODO: this shall not replace + * terminate procedure). + */ + conn->procedure_expire = conn->procedure_reload; + } + break; + + case LLCP_PHY_STATE_UPD: + { + /* Procedure complete */ + conn->llcp_phy.ack = conn->llcp_phy.req; + + /* select only one tx phy, prefer 2M */ + if (conn->llcp_phy.tx & BIT(1)) { + conn->llcp_phy.tx = BIT(1); + } else if (conn->llcp_phy.tx & BIT(0)) { + conn->llcp_phy.tx = BIT(0); + } else if (conn->llcp_phy.tx & BIT(2)) { + conn->llcp_phy.tx = BIT(2); + } else { + conn->llcp_phy.tx = 0; + } + + /* select only one rx phy, prefer 2M */ + if (conn->llcp_phy.rx & BIT(1)) { + conn->llcp_phy.rx = BIT(1); + } else if (conn->llcp_phy.rx & BIT(0)) { + conn->llcp_phy.rx = BIT(0); + } else if (conn->llcp_phy.rx & BIT(2)) { + conn->llcp_phy.rx = BIT(2); + } else { + conn->llcp_phy.rx = 0; + } + + /* Initiate PHY Update Ind */ + if (conn->llcp_phy.tx != conn->lll.phy_tx) { + conn->llcp.phy_upd_ind.tx = conn->llcp_phy.tx; + } else { + conn->llcp.phy_upd_ind.tx = 0; + } + if (conn->llcp_phy.rx != conn->lll.phy_rx) { + conn->llcp.phy_upd_ind.rx = conn->llcp_phy.rx; + } else { + conn->llcp.phy_upd_ind.rx = 0; + } + /* conn->llcp.phy_upd_ind.instant = 0; */ + conn->llcp.phy_upd_ind.initiate = 1; + conn->llcp.phy_upd_ind.cmd = conn->llcp_phy.cmd; + + conn->llcp_type = LLCP_PHY_UPD; + conn->llcp_ack--; + } + break; + + case LLCP_PHY_STATE_ACK_WAIT: + case LLCP_PHY_STATE_RSP_WAIT: + /* no nothing */ + break; + + default: + LL_ASSERT(0); + break; + } +} + +static inline void event_phy_upd_ind_prep(struct ll_conn *conn, + u16_t event_counter) +{ + struct node_rx_pu *upd; + + if (conn->llcp.phy_upd_ind.initiate) { + struct pdu_data_llctrl_phy_upd_ind *ind; + struct pdu_data *pdu_ctrl_tx; + struct node_rx_pdu *rx; + struct node_tx *tx; + + 
LL_ASSERT(!conn->llcp_rx); + + rx = ll_pdu_rx_alloc_peek(1); + if (!rx) { + return; + } + + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return; + } + + /* reset initiate flag */ + conn->llcp.phy_upd_ind.initiate = 0; + + /* Check if both tx and rx PHY unchanged */ + if (!((conn->llcp.phy_upd_ind.tx | + conn->llcp.phy_upd_ind.rx) & 0x07)) { + /* Procedure complete */ + conn->llcp_ack = conn->llcp_req; + + /* 0 instant */ + conn->llcp.phy_upd_ind.instant = 0; + + /* generate phy update event */ + if (conn->llcp.phy_upd_ind.cmd) { + struct lll_conn *lll = &conn->lll; + + (void)ll_pdu_rx_alloc(); + + rx->hdr.handle = lll->handle; + rx->hdr.type = NODE_RX_TYPE_PHY_UPDATE; + + upd = (void *)rx->pdu; + upd->status = 0; + upd->tx = lll->phy_tx; + upd->rx = lll->phy_rx; + + /* Enqueue Rx node */ + ll_rx_put(rx->hdr.link, rx); + ll_rx_sched(); + } + } else { + struct lll_conn *lll = &conn->lll; + + /* set instant */ + conn->llcp.phy_upd_ind.instant = event_counter + + lll->latency + + 6; + /* reserve rx node for event generation at instant */ + (void)ll_pdu_rx_alloc(); + conn->llcp_rx = rx; + } + + /* place the phy update ind packet as next in + * tx queue + */ + pdu_ctrl_tx = (void *)tx->pdu; + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = + offsetof(struct pdu_data_llctrl, phy_upd_ind) + + sizeof(struct pdu_data_llctrl_phy_upd_ind); + pdu_ctrl_tx->llctrl.opcode = + PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND; + ind = &pdu_ctrl_tx->llctrl.phy_upd_ind; + ind->m_to_s_phy = conn->llcp.phy_upd_ind.tx; + ind->s_to_m_phy = conn->llcp.phy_upd_ind.rx; + ind->instant = conn->llcp.phy_upd_ind.instant; + + ctrl_tx_enqueue(conn, tx); + } else if (((event_counter - conn->llcp.phy_upd_ind.instant) & 0xFFFF) + <= 0x7FFF) { + struct lll_conn *lll = &conn->lll; + struct node_rx_pdu *rx; + u8_t old_tx, old_rx; + + /* procedure request acked */ + conn->llcp_ack = conn->llcp_req; + + /* apply new phy */ + old_tx = lll->phy_tx; + old_rx = lll->phy_rx; + if 
(conn->llcp.phy_upd_ind.tx) { + lll->phy_tx = conn->llcp.phy_upd_ind.tx; + } + if (conn->llcp.phy_upd_ind.rx) { + lll->phy_rx = conn->llcp.phy_upd_ind.rx; + } + lll->phy_flags = conn->phy_pref_flags; + + /* Acquire Rx node */ + rx = conn->llcp_rx; + conn->llcp_rx = NULL; + + LL_ASSERT(rx && rx->hdr.link); + + /* generate event if phy changed or initiated by cmd */ + if (conn->llcp.phy_upd_ind.cmd || (lll->phy_tx != old_tx) || + (lll->phy_rx != old_rx)) { + rx->hdr.handle = lll->handle; + rx->hdr.type = NODE_RX_TYPE_PHY_UPDATE; + + upd = (void *)rx->pdu; + upd->status = 0; + upd->tx = lll->phy_tx; + upd->rx = lll->phy_rx; + } else { + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + } + + /* enqueue rx node towards Thread */ + ll_rx_put(rx->hdr.link, rx); + ll_rx_sched(); + } +} +#endif /* CONFIG_BT_CTLR_PHY */ + +static u8_t conn_upd_recv(struct ll_conn *conn, memq_link_t *link, + struct node_rx_pdu **rx, struct pdu_data *pdu) +{ + if (((pdu->llctrl.conn_update_ind.instant - conn->lll.event_counter) & + 0xFFFF) > 0x7FFF) { + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return BT_HCI_ERR_INSTANT_PASSED; + } + + /* different transaction collision */ + if (conn->llcp_req != conn->llcp_ack) { + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return BT_HCI_ERR_DIFF_TRANS_COLLISION; + } + + /* set mutex, if only not already set. As a master the mutex shall + * be set, but a slave we accept it as new 'set' of mutex. 
+ */ + if (!conn_upd_curr) { + LL_ASSERT(conn->lll.role); + + conn_upd_curr = conn; + } + + conn->llcp.conn_upd.win_size = pdu->llctrl.conn_update_ind.win_size; + conn->llcp.conn_upd.win_offset_us = + pdu->llctrl.conn_update_ind.win_offset * 1250; + conn->llcp.conn_upd.interval = pdu->llctrl.conn_update_ind.interval; + conn->llcp.conn_upd.latency = pdu->llctrl.conn_update_ind.latency; + conn->llcp.conn_upd.timeout = pdu->llctrl.conn_update_ind.timeout; + conn->llcp.conn_upd.instant = pdu->llctrl.conn_update_ind.instant; + conn->llcp.conn_upd.state = LLCP_CUI_STATE_INPROG; + conn->llcp.conn_upd.is_internal = 0; + + LL_ASSERT(!conn->llcp_rx); + + (*rx)->hdr.link = link; + conn->llcp_rx = *rx; + *rx = NULL; + + conn->llcp_type = LLCP_CONN_UPD; + conn->llcp_ack--; + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + if ((conn->llcp_conn_param.req != conn->llcp_conn_param.ack) && + (conn->llcp_conn_param.state == LLCP_CPR_STATE_RSP_WAIT)) { + conn->llcp_conn_param.ack = conn->llcp_conn_param.req; + } +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + + return 0; +} + +static u8_t chan_map_upd_recv(struct ll_conn *conn, struct node_rx_pdu *rx, + struct pdu_data *pdu) +{ + u8_t err = 0; + + if (((pdu->llctrl.chan_map_ind.instant - conn->lll.event_counter) & + 0xffff) > 0x7fff) { + err = BT_HCI_ERR_INSTANT_PASSED; + + goto chan_map_upd_recv_exit; + } + + /* different transaction collision */ + if (conn->llcp_req != conn->llcp_ack) { + err = BT_HCI_ERR_DIFF_TRANS_COLLISION; + + goto chan_map_upd_recv_exit; + } + + + memcpy(&conn->llcp.chan_map.chm[0], &pdu->llctrl.chan_map_ind.chm[0], + sizeof(conn->llcp.chan_map.chm)); + conn->llcp.chan_map.instant = pdu->llctrl.chan_map_ind.instant; + conn->llcp.chan_map.initiate = 0; + + conn->llcp_type = LLCP_CHAN_MAP; + conn->llcp_ack--; + +chan_map_upd_recv_exit: + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return err; +} + +static void terminate_ind_recv(struct ll_conn *conn, struct node_rx_pdu *rx, + 
struct pdu_data *pdu) +{ + /* Ack and then terminate */ + conn->llcp_terminate.reason_peer = pdu->llctrl.terminate_ind.error_code; + + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; +} + +static void terminate_ind_rx_enqueue(struct ll_conn *conn, u8_t reason) +{ + struct node_rx_pdu *rx; + memq_link_t *link; + + /* Prepare the rx packet structure */ + rx = (void *)&conn->llcp_terminate.node_rx; + LL_ASSERT(rx->hdr.link); + + rx->hdr.handle = conn->lll.handle; + rx->hdr.type = NODE_RX_TYPE_TERMINATE; + *((u8_t *)rx->pdu) = reason; + + /* Get the link mem reserved in the connection context */ + link = rx->hdr.link; + rx->hdr.link = NULL; + + ll_rx_put(link, rx); + ll_rx_sched(); +} + +#if defined(CONFIG_BT_CTLR_LE_ENC) +static void enc_req_reused_send(struct ll_conn *conn, struct node_tx **tx) +{ + struct pdu_data *pdu_ctrl_tx; + + pdu_ctrl_tx = (void *)(*tx)->pdu; + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, enc_req) + + sizeof(struct pdu_data_llctrl_enc_req); + pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_ENC_REQ; + memcpy(&pdu_ctrl_tx->llctrl.enc_req.rand[0], + &conn->llcp.encryption.rand[0], + sizeof(pdu_ctrl_tx->llctrl.enc_req.rand)); + pdu_ctrl_tx->llctrl.enc_req.ediv[0] = + conn->llcp.encryption.ediv[0]; + pdu_ctrl_tx->llctrl.enc_req.ediv[1] = + conn->llcp.encryption.ediv[1]; + + /* + * Take advantage of the fact that ivm and skdm fields, which both have + * to be filled with random data, are adjacent and use single call to + * the entropy driver. 
+ */ + BUILD_ASSERT(offsetof(__typeof(pdu_ctrl_tx->llctrl.enc_req), ivm) == + (offsetof(__typeof(pdu_ctrl_tx->llctrl.enc_req), skdm) + + sizeof(pdu_ctrl_tx->llctrl.enc_req.skdm))); + + /* NOTE: if not sufficient random numbers, ignore waiting */ + entropy_get_entropy_isr(entropy, pdu_ctrl_tx->llctrl.enc_req.skdm, + sizeof(pdu_ctrl_tx->llctrl.enc_req.skdm) + + sizeof(pdu_ctrl_tx->llctrl.enc_req.ivm), 0); + + ctrl_tx_enqueue(conn, *tx); + + /* dont release ctrl PDU memory */ + *tx = NULL; +} + +static int enc_rsp_send(struct ll_conn *conn) +{ + struct pdu_data *pdu_ctrl_tx; + struct node_tx *tx; + + /* acquire tx mem */ + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + + pdu_ctrl_tx = (void *)tx->pdu; + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, enc_rsp) + + sizeof(struct pdu_data_llctrl_enc_rsp); + pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_ENC_RSP; + + /* + * Take advantage of the fact that ivs and skds fields, which both have + * to be filled with random data, are adjacent and use single call to + * the entropy driver. 
+ */ + BUILD_ASSERT(offsetof(__typeof(pdu_ctrl_tx->llctrl.enc_rsp), ivs) == + (offsetof(__typeof(pdu_ctrl_tx->llctrl.enc_rsp), skds) + + sizeof(pdu_ctrl_tx->llctrl.enc_rsp.skds))); + + /* NOTE: if not sufficient random numbers, ignore waiting */ + entropy_get_entropy_isr(entropy, pdu_ctrl_tx->llctrl.enc_rsp.skds, + sizeof(pdu_ctrl_tx->llctrl.enc_rsp.skds) + + sizeof(pdu_ctrl_tx->llctrl.enc_rsp.ivs), 0); + + /* things from slave stored for session key calculation */ + memcpy(&conn->llcp.encryption.skd[8], + &pdu_ctrl_tx->llctrl.enc_rsp.skds[0], 8); + memcpy(&conn->lll.ccm_rx.iv[4], + &pdu_ctrl_tx->llctrl.enc_rsp.ivs[0], 4); + + ctrl_tx_enqueue(conn, tx); + + return 0; +} + +static int start_enc_rsp_send(struct ll_conn *conn, + struct pdu_data *pdu_ctrl_tx) +{ + struct node_tx *tx = NULL; + + if (!pdu_ctrl_tx) { + /* acquire tx mem */ + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + + pdu_ctrl_tx = (void *)tx->pdu; + } + + /* enable transmit encryption */ + conn->lll.enc_tx = 1; + + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, enc_rsp); + pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_START_ENC_RSP; + + if (tx) { + ctrl_tx_enqueue(conn, tx); + } + + return 0; +} + +static inline bool ctrl_is_unexpected(struct ll_conn *conn, u8_t opcode) +{ + return (!conn->lll.role && + ((!conn->refresh && + (opcode != PDU_DATA_LLCTRL_TYPE_TERMINATE_IND) && + (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_REQ) && + (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_RSP) && + (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_IND) && + (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND)) || + (conn->refresh && + (opcode != PDU_DATA_LLCTRL_TYPE_TERMINATE_IND) && + (opcode != PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP) && + (opcode != PDU_DATA_LLCTRL_TYPE_ENC_RSP) && + (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_REQ) && + (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_RSP) && + (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_IND) && + (opcode != 
PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND)))) || + (conn->lll.role && + ((!conn->refresh && + (opcode != PDU_DATA_LLCTRL_TYPE_TERMINATE_IND) && + (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_RSP) && + (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_IND) && + (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND)) || + (conn->refresh && + (opcode != PDU_DATA_LLCTRL_TYPE_TERMINATE_IND) && + (opcode != PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP) && + (opcode != PDU_DATA_LLCTRL_TYPE_ENC_REQ) && + (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_RSP) && + (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_IND) && + (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND)))); +} + +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +static int unknown_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx, + u8_t type) +{ + struct node_tx *tx; + struct pdu_data *pdu; + + /* acquire ctrl tx mem */ + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + + pdu = (void *)tx->pdu; + pdu->ll_id = PDU_DATA_LLID_CTRL; + pdu->len = offsetof(struct pdu_data_llctrl, unknown_rsp) + + sizeof(struct pdu_data_llctrl_unknown_rsp); + pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP; + pdu->llctrl.unknown_rsp.type = type; + + ctrl_tx_enqueue(conn, tx); + + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return 0; +} + +static inline u32_t feat_get(u8_t *features) +{ + u32_t feat; + + feat = ~LL_FEAT_BIT_MASK_VALID | features[0] | + (features[1] << 8) | (features[2] << 16); + feat &= LL_FEAT_BIT_MASK; + + return feat; +} + +static int feature_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx, + struct pdu_data *pdu_rx) +{ + struct pdu_data_llctrl_feature_req *req; + struct node_tx *tx; + struct pdu_data *pdu_tx; + + /* acquire tx mem */ + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + + /* AND the feature set to get Feature USED */ + req = &pdu_rx->llctrl.feature_req; + conn->llcp_features &= feat_get(&req->features[0]); + + /* features exchanged */ + 
conn->common.fex_valid = 1; + + /* Enqueue feature response */ + pdu_tx = (void *)tx->pdu; + pdu_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_tx->len = offsetof(struct pdu_data_llctrl, feature_rsp) + + sizeof(struct pdu_data_llctrl_feature_rsp); + pdu_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_FEATURE_RSP; + (void)memset(&pdu_tx->llctrl.feature_rsp.features[0], 0x00, + sizeof(pdu_tx->llctrl.feature_rsp.features)); + pdu_tx->llctrl.feature_rsp.features[0] = + conn->llcp_features & 0xFF; + pdu_tx->llctrl.feature_rsp.features[1] = + (conn->llcp_features >> 8) & 0xFF; + pdu_tx->llctrl.feature_rsp.features[2] = + (conn->llcp_features >> 16) & 0xFF; + + ctrl_tx_sec_enqueue(conn, tx); + + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return 0; +} + +static void feature_rsp_recv(struct ll_conn *conn, struct pdu_data *pdu_rx) +{ + struct pdu_data_llctrl_feature_rsp *rsp; + + rsp = &pdu_rx->llctrl.feature_rsp; + + /* AND the feature set to get Feature USED */ + conn->llcp_features &= feat_get(&rsp->features[0]); + + /* features exchanged */ + conn->common.fex_valid = 1; + + /* Procedure complete */ + conn->procedure_expire = 0; +} + +#if defined(CONFIG_BT_CTLR_LE_ENC) +static int pause_enc_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx, + u8_t req) +{ + struct pdu_data *pdu_ctrl_tx; + struct node_tx *tx; + + if (req) { + /* acquire tx mem */ + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + + /* key refresh */ + conn->refresh = 1; + } else if (!conn->lll.role) { + /* acquire tx mem */ + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + + /* disable transmit encryption */ + conn->lll.enc_tx = 0; + } else { + /* disable transmit encryption */ + conn->lll.enc_tx = 0; + + goto pause_enc_rsp_send_exit; + } + + /* pause data packet rx */ + conn->pause_rx = 1; + + /* disable receive encryption */ + conn->lll.enc_rx = 0; + + /* Enqueue pause enc rsp */ + pdu_ctrl_tx = (void *)tx->pdu; + 
pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, enc_rsp); + pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP; + + ctrl_tx_enqueue(conn, tx); + +pause_enc_rsp_send_exit: + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return 0; +} +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +static int version_ind_send(struct ll_conn *conn, struct node_rx_pdu *rx, + struct pdu_data *pdu_rx) +{ + struct pdu_data_llctrl_version_ind *v; + struct pdu_data *pdu_tx; + struct node_tx *tx; + + if (!conn->llcp_version.tx) { + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + conn->llcp_version.tx = 1; + + pdu_tx = (void *)tx->pdu; + pdu_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_tx->len = + offsetof(struct pdu_data_llctrl, version_ind) + + sizeof(struct pdu_data_llctrl_version_ind); + pdu_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND; + v = &pdu_tx->llctrl.version_ind; + v->version_number = LL_VERSION_NUMBER; + v->company_id = CONFIG_BT_CTLR_COMPANY_ID; + v->sub_version_number = CONFIG_BT_CTLR_SUBVERSION_NUMBER; + + ctrl_tx_sec_enqueue(conn, tx); + + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + } else if (!conn->llcp_version.rx) { + /* Procedure complete */ + conn->procedure_expire = 0; + } else { + /* Tx-ed and Rx-ed before, ignore this invalid Rx. 
*/ + + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return 0; + } + + v = &pdu_rx->llctrl.version_ind; + conn->llcp_version.version_number = v->version_number; + conn->llcp_version.company_id = v->company_id; + conn->llcp_version.sub_version_number = v->sub_version_number; + conn->llcp_version.rx = 1; + + return 0; +} + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) || defined(CONFIG_BT_CTLR_PHY) +static int reject_ext_ind_send(struct ll_conn *conn, struct node_rx_pdu *rx, + u8_t reject_opcode, u8_t error_code) +{ + struct pdu_data *pdu_ctrl_tx; + struct node_tx *tx; + + /* acquire tx mem */ + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + + pdu_ctrl_tx = (void *)tx->pdu; + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, reject_ext_ind) + + sizeof(struct pdu_data_llctrl_reject_ext_ind); + pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND; + pdu_ctrl_tx->llctrl.reject_ext_ind.reject_opcode = reject_opcode; + pdu_ctrl_tx->llctrl.reject_ext_ind.error_code = error_code; + + ctrl_tx_enqueue(conn, tx); + + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return 0; +} +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ || PHY */ + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) +static inline void reject_ind_conn_upd_recv(struct ll_conn *conn, + struct node_rx_pdu *rx, + struct pdu_data *pdu_rx) +{ + struct pdu_data_llctrl_reject_ext_ind *rej_ext_ind; + struct node_rx_cu *cu; + struct lll_conn *lll; + + rej_ext_ind = (void *)&pdu_rx->llctrl.reject_ext_ind; + if (rej_ext_ind->reject_opcode != PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ) { + goto reject_ind_conn_upd_recv_exit; + } + + /* Unsupported remote feature */ + lll = &conn->lll; + if (!lll->role && (rej_ext_ind->error_code == + BT_HCI_ERR_UNSUPP_REMOTE_FEATURE)) { + LL_ASSERT(conn->llcp_req == conn->llcp_ack); + + conn->llcp_conn_param.state = LLCP_CPR_STATE_UPD; + + 
conn->llcp.conn_upd.win_size = 1; + conn->llcp.conn_upd.win_offset_us = 0; + conn->llcp.conn_upd.interval = + conn->llcp_conn_param.interval_max; + conn->llcp.conn_upd.latency = conn->llcp_conn_param.latency; + conn->llcp.conn_upd.timeout = conn->llcp_conn_param.timeout; + /* conn->llcp.conn_upd.instant = 0; */ + conn->llcp.conn_upd.state = LLCP_CUI_STATE_USE; + conn->llcp.conn_upd.is_internal = !conn->llcp_conn_param.cmd; + conn->llcp_type = LLCP_CONN_UPD; + conn->llcp_ack--; + + goto reject_ind_conn_upd_recv_exit; + } + /* Same Procedure or Different Procedure Collision */ + + /* If not same procedure, stop procedure timeout, else + * continue timer until phy upd ind is received. + */ + else if (rej_ext_ind->error_code != BT_HCI_ERR_LL_PROC_COLLISION) { + LL_ASSERT(conn_upd_curr == conn); + + /* reset mutex */ + conn_upd_curr = NULL; + + /* Procedure complete */ + conn->llcp_conn_param.ack = + conn->llcp_conn_param.req; + + /* Stop procedure timeout */ + conn->procedure_expire = 0; + + /* update to next ticks offsets */ + if (lll->role) { + conn->slave.ticks_to_offset = + conn->llcp_conn_param.ticks_to_offset_next; + } + } + + /* skip event generation if not cmd initiated */ + if (!conn->llcp_conn_param.cmd) { + goto reject_ind_conn_upd_recv_exit; + } + + /* generate conn update complete event with error code */ + rx->hdr.type = NODE_RX_TYPE_CONN_UPDATE; + + /* prepare connection update complete structure */ + cu = (void *)pdu_rx; + cu->status = rej_ext_ind->error_code; + cu->interval = lll->interval; + cu->latency = lll->latency; + cu->timeout = conn->supervision_reload * + lll->interval * 125 / 1000; + + return; + +reject_ind_conn_upd_recv_exit: + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; +} +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + +#if defined(CONFIG_BT_CTLR_PHY) +static inline void reject_ind_phy_upd_recv(struct ll_conn *conn, + struct node_rx_pdu *rx, + struct pdu_data *pdu_rx) +{ + struct 
pdu_data_llctrl_reject_ext_ind *rej_ext_ind; + + rej_ext_ind = (void *)&pdu_rx->llctrl.reject_ext_ind; + if (rej_ext_ind->reject_opcode == PDU_DATA_LLCTRL_TYPE_PHY_REQ) { + struct node_rx_pu *p; + + /* Same Procedure or Different Procedure Collision */ + + /* If not same procedure, stop procedure timeout, else + * continue timer until phy upd ind is received. + */ + if (rej_ext_ind->error_code != BT_HCI_ERR_LL_PROC_COLLISION) { + /* Procedure complete */ + conn->llcp_phy.ack = conn->llcp_phy.req; + + /* Reset packet timing restrictions */ + conn->lll.phy_tx_time = conn->lll.phy_tx; + + /* Stop procedure timeout */ + conn->procedure_expire = 0; + } + + /* skip event generation if not cmd initiated */ + if (!conn->llcp_phy.cmd) { + goto reject_ind_phy_upd_recv_exit; + } + + /* generate phy update complete event with error code */ + rx->hdr.type = NODE_RX_TYPE_PHY_UPDATE; + + p = (void *)pdu_rx; + p->status = rej_ext_ind->error_code; + p->tx = conn->lll.phy_tx; + p->rx = conn->lll.phy_rx; + + return; + } + +reject_ind_phy_upd_recv_exit: + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; +} +#endif /* CONFIG_BT_CTLR_PHY */ + +static void reject_ext_ind_recv(struct ll_conn *conn, struct node_rx_pdu *rx, + struct pdu_data *pdu_rx) +{ + if (0) { +#if defined(CONFIG_BT_CTLR_PHY) + } else if (conn->llcp_phy.ack != conn->llcp_phy.req) { + reject_ind_phy_upd_recv(conn, rx, pdu_rx); +#endif /* CONFIG_BT_CTLR_PHY */ + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + } else if (conn->llcp_conn_param.ack != conn->llcp_conn_param.req) { + reject_ind_conn_upd_recv(conn, rx, pdu_rx); +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) + } else if (conn->llcp_length.ack != conn->llcp_length.req) { + isr_rx_conn_pkt_ctrl_rej_dle(rx, rx_enqueue); +#endif /* CONFIG_BT_CTLR_DATA_LENGTH */ + +#if defined(CONFIG_BT_CTLR_LE_ENC) + } else { + struct pdu_data_llctrl_reject_ext_ind *rej_ext_ind; + + rej_ext_ind = (void 
*)&pdu_rx->llctrl.reject_ext_ind; + + switch (rej_ext_ind->reject_opcode) { + case PDU_DATA_LLCTRL_TYPE_ENC_REQ: + /* resume data packet rx and tx */ + conn->pause_rx = 0; + conn->pause_tx = 0; + + /* Procedure complete */ + conn->procedure_expire = 0; + + /* enqueue as if it were a reject ind */ + pdu_rx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_IND; + pdu_rx->llctrl.reject_ind.error_code = + rej_ext_ind->error_code; + break; + + default: + /* Ignore */ + + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + break; + } +#endif /* CONFIG_BT_CTLR_LE_ENC */ + } +} + +#if defined(CONFIG_BT_CTLR_LE_PING) +static int ping_resp_send(struct ll_conn *conn, struct node_rx_pdu *rx) +{ + struct node_tx *tx; + struct pdu_data *pdu_tx; + + /* acquire tx mem */ + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + + pdu_tx = (void *)tx->pdu; + pdu_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_tx->len = offsetof(struct pdu_data_llctrl, ping_rsp) + + sizeof(struct pdu_data_llctrl_ping_rsp); + pdu_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_PING_RSP; + + ctrl_tx_enqueue(conn, tx); + + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return 0; +} +#endif /* CONFIG_BT_CTLR_LE_PING */ + +#if defined(CONFIG_BT_CTLR_PHY) +static int phy_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx, + struct pdu_data *pdu_rx) +{ + struct pdu_data_llctrl_phy_req *p; + struct pdu_data *pdu_ctrl_tx; + struct node_tx *tx; + + /* acquire tx mem */ + tx = mem_acquire(&mem_conn_tx_ctrl.free); + if (!tx) { + return -ENOBUFS; + } + + /* Wait for peer master to complete the procedure */ + conn->llcp_phy.state = LLCP_PHY_STATE_RSP_WAIT; + if (conn->llcp_phy.ack == + conn->llcp_phy.req) { + conn->llcp_phy.ack--; + + conn->llcp_phy.cmd = 0; + + conn->llcp_phy.tx = + conn->phy_pref_tx; + conn->llcp_phy.rx = + conn->phy_pref_rx; + + /* Start Procedure Timeout (TODO: this shall not + * replace terminate procedure). 
+ */ + conn->procedure_expire = + conn->procedure_reload; + } + + p = &pdu_rx->llctrl.phy_req; + + conn->llcp_phy.tx &= p->rx_phys; + conn->llcp_phy.rx &= p->tx_phys; + + pdu_ctrl_tx = (void *)tx->pdu; + pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, phy_rsp) + + sizeof(struct pdu_data_llctrl_phy_rsp); + pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_PHY_RSP; + pdu_ctrl_tx->llctrl.phy_rsp.tx_phys = conn->phy_pref_tx; + pdu_ctrl_tx->llctrl.phy_rsp.rx_phys = conn->phy_pref_rx; + + ctrl_tx_enqueue(conn, tx); + + /* Mark for buffer for release */ + rx->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return 0; +} + +static inline u8_t phy_upd_ind_recv(struct ll_conn *conn, memq_link_t *link, + struct node_rx_pdu **rx, + struct pdu_data *pdu_rx) +{ + struct pdu_data_llctrl_phy_upd_ind *ind = &pdu_rx->llctrl.phy_upd_ind; + + /* Both tx and rx PHY unchanged */ + if (!((ind->m_to_s_phy | ind->s_to_m_phy) & 0x07)) { + struct node_rx_pu *p; + + /* Not in PHY Update Procedure or PDU in wrong state */ + if ((conn->llcp_phy.ack == conn->llcp_phy.req) || + (conn->llcp_phy.state != LLCP_PHY_STATE_RSP_WAIT)) { + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return 0; + } + + /* Procedure complete */ + conn->llcp_phy.ack = conn->llcp_phy.req; + conn->procedure_expire = 0; + + /* Ignore event generation if not local cmd initiated */ + if (!conn->llcp_phy.cmd) { + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return 0; + } + + /* generate phy update complete event */ + (*rx)->hdr.type = NODE_RX_TYPE_PHY_UPDATE; + + p = (void *)pdu_rx; + p->status = 0; + p->tx = conn->lll.phy_tx; + p->rx = conn->lll.phy_rx; + + return 0; + } + + /* instant passed */ + if (((ind->instant - conn->lll.event_counter) & 0xffff) > 0x7fff) { + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return BT_HCI_ERR_INSTANT_PASSED; + } + + /* different 
transaction collision */ + if (conn->llcp_req != conn->llcp_ack) { + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return BT_HCI_ERR_DIFF_TRANS_COLLISION; + } + + if ((conn->llcp_phy.ack != conn->llcp_phy.req) && + (conn->llcp_phy.state == LLCP_PHY_STATE_RSP_WAIT)) { + conn->llcp_phy.ack = conn->llcp_phy.req; + conn->llcp.phy_upd_ind.cmd = conn->llcp_phy.cmd; + + /* Procedure complete, just wait for instant */ + conn->procedure_expire = 0; + } + + conn->llcp.phy_upd_ind.tx = ind->s_to_m_phy; + conn->llcp.phy_upd_ind.rx = ind->m_to_s_phy; + conn->llcp.phy_upd_ind.instant = ind->instant; + conn->llcp.phy_upd_ind.initiate = 0; + + LL_ASSERT(!conn->llcp_rx); + + (*rx)->hdr.link = link; + conn->llcp_rx = *rx; + *rx = NULL; + + conn->llcp_type = LLCP_PHY_UPD; + conn->llcp_ack--; + + if (conn->llcp.phy_upd_ind.tx) { + conn->lll.phy_tx_time = conn->llcp.phy_upd_ind.tx; + } + + return 0; +} +#endif /* CONFIG_BT_CTLR_PHY */ + +static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx, + struct pdu_data *pdu_tx) +{ + switch (pdu_tx->llctrl.opcode) { + case PDU_DATA_LLCTRL_TYPE_TERMINATE_IND: + { + u8_t reason = (pdu_tx->llctrl.terminate_ind.error_code == + BT_HCI_ERR_REMOTE_USER_TERM_CONN) ? + BT_HCI_ERR_LOCALHOST_TERM_CONN : + pdu_tx->llctrl.terminate_ind.error_code; + + terminate_ind_rx_enqueue(conn, reason); + conn_cleanup(conn); + } + break; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + case PDU_DATA_LLCTRL_TYPE_ENC_REQ: + /* things from master stored for session key calculation */ + memcpy(&conn->llcp.encryption.skd[0], + &pdu_tx->llctrl.enc_req.skdm[0], 8); + memcpy(&conn->lll.ccm_rx.iv[0], + &pdu_tx->llctrl.enc_req.ivm[0], 4); + + /* pause data packet tx */ + conn->pause_tx = 1; + + /* Start Procedure Timeout (this will not replace terminate + * procedure which always gets place before any packets + * going out, hence safe by design). 
+ */ + conn->procedure_expire = conn->procedure_reload; + break; + + case PDU_DATA_LLCTRL_TYPE_ENC_RSP: + /* pause data packet tx */ + conn->pause_tx = 1; + break; + + case PDU_DATA_LLCTRL_TYPE_START_ENC_REQ: + /* Nothing to do. + * Remember that we may have received encrypted START_ENC_RSP + * alongwith this tx ack at this point in time. + */ + break; + + case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ: + /* pause data packet tx */ + conn->pause_tx = 1; + + /* key refresh */ + conn->refresh = 1; + + /* Start Procedure Timeout (this will not replace terminate + * procedure which always gets place before any packets + * going out, hence safe by design). + */ + conn->procedure_expire = conn->procedure_reload; + break; + + case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP: + if (!conn->lll.role) { + /* reused tx-ed PDU and send enc req */ + enc_req_reused_send(conn, tx); + } else { + /* pause data packet tx */ + conn->pause_tx = 1; + } + break; + + case PDU_DATA_LLCTRL_TYPE_REJECT_IND: + /* resume data packet rx and tx */ + conn->pause_rx = 0; + conn->pause_tx = 0; + + /* Procedure complete */ + conn->procedure_expire = 0; + break; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) + case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ: + if ((conn->llcp_length.req != conn->llcp_length.ack) && + (conn->llcp_length.state == LLCP_LENGTH_STATE_ACK_WAIT)) { + /* pause data packet tx */ + conn->llcp_length.pause_tx = 1; + + /* wait for response */ + conn->llcp_length.state = LLCP_LENGTH_STATE_RSP_WAIT; + } + break; +#endif /* CONFIG_BT_CTLR_DATA_LENGTH */ + +#if defined(CONFIG_BT_CTLR_PHY) + case PDU_DATA_LLCTRL_TYPE_PHY_REQ: + conn->llcp_phy.state = LLCP_PHY_STATE_RSP_WAIT; + /* fall through */ + + case PDU_DATA_LLCTRL_TYPE_PHY_RSP: + if (conn->lll.role) { + /* select the probable PHY with longest Tx time, which + * will be restricted to fit current + * connEffectiveMaxTxTime. 
+ */ + u8_t phy_tx_time[8] = {BIT(0), BIT(0), BIT(1), BIT(0), + BIT(2), BIT(2), BIT(2), BIT(2)}; + struct lll_conn *lll = &conn->lll; + u8_t phys; + + phys = conn->llcp_phy.tx | lll->phy_tx; + lll->phy_tx_time = phy_tx_time[phys]; + } + break; + + case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND: + conn->lll.phy_tx_time = conn->llcp.phy_upd_ind.tx; + break; +#endif /* CONFIG_BT_CTLR_PHY */ + + default: + /* Do nothing for other ctrl packet ack */ + break; + } +} + +static inline bool pdu_len_cmp(u8_t opcode, u8_t len) +{ + const u8_t ctrl_len_lut[] = { + (offsetof(struct pdu_data_llctrl, conn_update_ind) + + sizeof(struct pdu_data_llctrl_conn_update_ind)), + (offsetof(struct pdu_data_llctrl, chan_map_ind) + + sizeof(struct pdu_data_llctrl_chan_map_ind)), + (offsetof(struct pdu_data_llctrl, terminate_ind) + + sizeof(struct pdu_data_llctrl_terminate_ind)), + (offsetof(struct pdu_data_llctrl, enc_req) + + sizeof(struct pdu_data_llctrl_enc_req)), + (offsetof(struct pdu_data_llctrl, enc_rsp) + + sizeof(struct pdu_data_llctrl_enc_rsp)), + (offsetof(struct pdu_data_llctrl, start_enc_req) + + sizeof(struct pdu_data_llctrl_start_enc_req)), + (offsetof(struct pdu_data_llctrl, start_enc_rsp) + + sizeof(struct pdu_data_llctrl_start_enc_rsp)), + (offsetof(struct pdu_data_llctrl, unknown_rsp) + + sizeof(struct pdu_data_llctrl_unknown_rsp)), + (offsetof(struct pdu_data_llctrl, feature_req) + + sizeof(struct pdu_data_llctrl_feature_req)), + (offsetof(struct pdu_data_llctrl, feature_rsp) + + sizeof(struct pdu_data_llctrl_feature_rsp)), + (offsetof(struct pdu_data_llctrl, pause_enc_req) + + sizeof(struct pdu_data_llctrl_pause_enc_req)), + (offsetof(struct pdu_data_llctrl, pause_enc_rsp) + + sizeof(struct pdu_data_llctrl_pause_enc_rsp)), + (offsetof(struct pdu_data_llctrl, version_ind) + + sizeof(struct pdu_data_llctrl_version_ind)), + (offsetof(struct pdu_data_llctrl, reject_ind) + + sizeof(struct pdu_data_llctrl_reject_ind)), + (offsetof(struct pdu_data_llctrl, slave_feature_req) + + 
sizeof(struct pdu_data_llctrl_slave_feature_req)), + (offsetof(struct pdu_data_llctrl, conn_param_req) + + sizeof(struct pdu_data_llctrl_conn_param_req)), + (offsetof(struct pdu_data_llctrl, conn_param_rsp) + + sizeof(struct pdu_data_llctrl_conn_param_rsp)), + (offsetof(struct pdu_data_llctrl, reject_ext_ind) + + sizeof(struct pdu_data_llctrl_reject_ext_ind)), + (offsetof(struct pdu_data_llctrl, ping_req) + + sizeof(struct pdu_data_llctrl_ping_req)), + (offsetof(struct pdu_data_llctrl, ping_rsp) + + sizeof(struct pdu_data_llctrl_ping_rsp)), + (offsetof(struct pdu_data_llctrl, length_req) + + sizeof(struct pdu_data_llctrl_length_req)), + (offsetof(struct pdu_data_llctrl, length_rsp) + + sizeof(struct pdu_data_llctrl_length_rsp)), + (offsetof(struct pdu_data_llctrl, phy_req) + + sizeof(struct pdu_data_llctrl_phy_req)), + (offsetof(struct pdu_data_llctrl, phy_rsp) + + sizeof(struct pdu_data_llctrl_phy_rsp)), + (offsetof(struct pdu_data_llctrl, phy_upd_ind) + + sizeof(struct pdu_data_llctrl_phy_upd_ind)), + (offsetof(struct pdu_data_llctrl, min_used_chans_ind) + + sizeof(struct pdu_data_llctrl_min_used_chans_ind)), + }; + + return ctrl_len_lut[opcode] == len; +} + +static inline u8_t ctrl_rx(memq_link_t *link, struct node_rx_pdu **rx, + struct pdu_data *pdu_rx, struct ll_conn *conn) +{ + u8_t nack = 0; + u8_t opcode; + + opcode = pdu_rx->llctrl.opcode; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + /* FIXME: do check in individual case to reduce CPU time */ + if (conn->pause_rx && ctrl_is_unexpected(conn, opcode)) { + conn->llcp_terminate.reason_peer = + BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL; + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + return 0; + } +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + switch (opcode) { + case PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND: + { + u8_t err; + + if (!conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + err = conn_upd_recv(conn, 
link, rx, pdu_rx); + if (err) { + conn->llcp_terminate.reason_peer = err; +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + } else { + /* conn param req procedure, if any, is complete */ + conn->procedure_expire = 0; +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + } + } + break; + + case PDU_DATA_LLCTRL_TYPE_CHAN_MAP_IND: + { + u8_t err; + + if (!conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_CHAN_MAP_IND, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + err = chan_map_upd_recv(conn, *rx, pdu_rx); + if (err) { + conn->llcp_terminate.reason_peer = err; + } + } + break; + + case PDU_DATA_LLCTRL_TYPE_TERMINATE_IND: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_TERMINATE_IND, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + terminate_ind_recv(conn, *rx, pdu_rx); + break; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + case PDU_DATA_LLCTRL_TYPE_ENC_REQ: + if (!conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_ENC_REQ, pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + +#if defined(CONFIG_BT_CTLR_FAST_ENC) + /* TODO: BT Spec. text: may finalize the sending of additional + * data channel PDUs queued in the controller. + */ + nack = enc_rsp_send(conn); + if (nack) { + break; + } +#endif /* CONFIG_BT_CTLR_FAST_ENC */ + + /* things from master stored for session key calculation */ + memcpy(&conn->llcp.encryption.skd[0], + &pdu_rx->llctrl.enc_req.skdm[0], 8); + memcpy(&conn->lll.ccm_rx.iv[0], + &pdu_rx->llctrl.enc_req.ivm[0], 4); + + /* pause rx data packets */ + conn->pause_rx = 1; + + /* Start Procedure Timeout (TODO: this shall not replace + * terminate procedure). 
+ */ + conn->procedure_expire = conn->procedure_reload; + + break; + + case PDU_DATA_LLCTRL_TYPE_ENC_RSP: + if (conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_ENC_RSP, pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + /* things sent by slave stored for session key calculation */ + memcpy(&conn->llcp.encryption.skd[8], + &pdu_rx->llctrl.enc_rsp.skds[0], 8); + memcpy(&conn->lll.ccm_rx.iv[4], + &pdu_rx->llctrl.enc_rsp.ivs[0], 4); + + /* pause rx data packets */ + conn->pause_rx = 1; + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + break; + + case PDU_DATA_LLCTRL_TYPE_START_ENC_REQ: + LL_ASSERT((conn->llcp_req == conn->llcp_ack) || + (conn->llcp_type == LLCP_ENCRYPTION)); + + if (conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_START_ENC_REQ, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + /* start enc rsp to be scheduled in master prepare */ + conn->llcp.encryption.initiate = 0; + conn->llcp_type = LLCP_ENCRYPTION; + conn->llcp_ack--; + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + break; + + case PDU_DATA_LLCTRL_TYPE_START_ENC_RSP: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_START_ENC_RSP, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + if (conn->lll.role) { +#if !defined(CONFIG_BT_CTLR_FAST_ENC) + LL_ASSERT((conn->llcp_req == conn->llcp_ack) || + (conn->llcp_type == LLCP_ENCRYPTION)); + + /* start enc rsp to be scheduled in slave prepare */ + conn->llcp.encryption.initiate = 0; + conn->llcp_type = LLCP_ENCRYPTION; + conn->llcp_ack--; +#else /* CONFIG_BT_CTLR_FAST_ENC */ + nack = start_enc_rsp_send(conn, NULL); + if (nack) { + break; + } + + /* resume data packet rx and tx */ + conn->pause_rx = 0; + conn->pause_tx = 0; +#endif /* CONFIG_BT_CTLR_FAST_ENC */ + + } else { + /* resume data packet rx and tx */ + conn->pause_rx = 0; + conn->pause_tx = 0; + } + + /* enqueue the start enc resp (encryption change/refresh) */ + if 
(conn->refresh) { + conn->refresh = 0; + + /* key refresh event */ + (*rx)->hdr.type = NODE_RX_TYPE_ENC_REFRESH; + } + + /* Procedure complete */ + conn->procedure_expire = 0; + + break; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + case PDU_DATA_LLCTRL_TYPE_FEATURE_REQ: + if (!conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_FEATURE_REQ, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + nack = feature_rsp_send(conn, *rx, pdu_rx); + break; + +#if defined(CONFIG_BT_CTLR_SLAVE_FEAT_REQ) + case PDU_DATA_LLCTRL_TYPE_SLAVE_FEATURE_REQ: + if (conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_SLAVE_FEATURE_REQ, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + nack = feature_rsp_send(conn, *rx, pdu_rx); + break; +#endif /* CONFIG_BT_CTLR_SLAVE_FEAT_REQ */ + + case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_FEATURE_RSP, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + feature_rsp_recv(conn, pdu_rx); + break; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ: + if (!conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + nack = pause_enc_rsp_send(conn, *rx, 1); + break; + + case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + nack = pause_enc_rsp_send(conn, *rx, 0); + break; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + case PDU_DATA_LLCTRL_TYPE_VERSION_IND: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_VERSION_IND, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + nack = version_ind_send(conn, *rx, pdu_rx); + break; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + case PDU_DATA_LLCTRL_TYPE_REJECT_IND: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_REJECT_IND, pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + /* resume data packet rx and tx */ + conn->pause_rx = 0; + conn->pause_tx = 0; + + /* 
Procedure complete */ + conn->procedure_expire = 0; + + break; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + + /* check CUI/CPR mutex for other connections having CPR in + * progress. + */ + if (conn_upd_curr && (conn_upd_curr != conn)) { + /* Unsupported LL Parameter Value */ + nack = reject_ext_ind_send(conn, *rx, + PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ, + BT_HCI_ERR_UNSUPP_LL_PARAM_VAL); + break; + } + + if (!conn->lll.role) { + if ((conn->llcp_conn_param.req != + conn->llcp_conn_param.ack) && + ((conn->llcp_conn_param.state == + LLCP_CPR_STATE_REQ) || + (conn->llcp_conn_param.state == + LLCP_CPR_STATE_RSP_WAIT) || + (conn->llcp_conn_param.state == + LLCP_CPR_STATE_UPD))) { + /* Same procedure collision */ + nack = reject_ext_ind_send(conn, *rx, + PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ, + BT_HCI_ERR_LL_PROC_COLLISION); +#if defined(CONFIG_BT_CTLR_PHY) +#if defined(CONFIG_BT_CTLR_LE_ENC) + } else if (((conn->llcp_req != conn->llcp_ack) && + (conn->llcp_type != LLCP_ENCRYPTION)) || + (conn->llcp_phy.req != conn->llcp_phy.ack)) { +#else /* !CONFIG_BT_CTLR_LE_ENC */ + } else if ((conn->llcp_req != conn->llcp_ack) || + (conn->llcp_phy.req != conn->llcp_phy.ack)) { +#endif /* !CONFIG_BT_CTLR_LE_ENC */ +#else /* !CONFIG_BT_CTLR_PHY */ +#if defined(CONFIG_BT_CTLR_LE_ENC) + } else if ((conn->llcp_req != conn->llcp_ack) && + (conn->llcp_type != LLCP_ENCRYPTION)) { +#else /* !CONFIG_BT_CTLR_LE_ENC */ + } else if (conn->llcp_req != conn->llcp_ack) { +#endif /* !CONFIG_BT_CTLR_LE_ENC */ +#endif /* !CONFIG_BT_CTLR_PHY */ + /* Different procedure collision */ + nack = reject_ext_ind_send(conn, *rx, + PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ, + BT_HCI_ERR_DIFF_TRANS_COLLISION); + } else { + struct pdu_data_llctrl_conn_param_req *cpr = (void *) + &pdu_rx->llctrl.conn_param_req; + struct 
lll_conn *lll = &conn->lll; + + /* Invalid parameters */ + if ((cpr->interval_min < 6) || + (cpr->interval_max > 3200) || + (cpr->interval_min > cpr->interval_max) || + (cpr->latency > 499) || + (cpr->timeout < 10) || + (cpr->timeout > 3200) || + ((cpr->timeout * 4) <= + ((cpr->latency + 1) * + cpr->interval_max)) || + (cpr->preferred_periodicity > + cpr->interval_max)) { + nack = reject_ext_ind_send(conn, *rx, + PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ, + BT_HCI_ERR_INVALID_LL_PARAM); + break; + } + + /* save parameters to be used to select offset + */ + conn->llcp_conn_param.interval_min = + cpr->interval_min; + conn->llcp_conn_param.interval_max = + cpr->interval_max; + conn->llcp_conn_param.latency = cpr->latency; + conn->llcp_conn_param.timeout = cpr->timeout; + conn->llcp_conn_param.preferred_periodicity = + cpr->preferred_periodicity; + conn->llcp_conn_param.reference_conn_event_count = + cpr->reference_conn_event_count; + conn->llcp_conn_param.offset0 = cpr->offset0; + conn->llcp_conn_param.offset1 = cpr->offset1; + conn->llcp_conn_param.offset2 = cpr->offset2; + conn->llcp_conn_param.offset3 = cpr->offset3; + conn->llcp_conn_param.offset4 = cpr->offset4; + conn->llcp_conn_param.offset5 = cpr->offset5; + + /* enqueue the conn param req, if parameters + * changed, else respond. 
+ */ + if ((conn->llcp_conn_param.interval_max != + lll->interval) || + (conn->llcp_conn_param.latency != + lll->latency) || + (RADIO_CONN_EVENTS(conn->llcp_conn_param.timeout * + 10000, + lll->interval * + 1250) != + conn->supervision_reload)) { +#if defined(CONFIG_BT_CTLR_LE_ENC) + /* postpone CP request event if under + * encryption setup + */ + if (conn->pause_tx) { + conn->llcp_conn_param.state = + LLCP_CPR_STATE_APP_REQ; + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + } else +#endif /* CONFIG_BT_CTLR_LE_ENC */ + { + conn->llcp_conn_param.state = + LLCP_CPR_STATE_APP_WAIT; + } + } else { + conn->llcp_conn_param.status = 0; + conn->llcp_conn_param.cmd = 0; + conn->llcp_conn_param.state = + LLCP_CPR_STATE_RSP; + + /* Mark for buffer for release */ + (*rx)->hdr.type = + NODE_RX_TYPE_DC_PDU_RELEASE; + } + + conn->llcp_conn_param.ack--; + + /* set mutex */ + if (!conn_upd_curr) { + conn_upd_curr = conn; + } + } + } else if ((conn->llcp_conn_param.req == + conn->llcp_conn_param.ack) || + (conn->llcp_conn_param.state == + LLCP_CPR_STATE_REQ) || + (conn->llcp_conn_param.state == + LLCP_CPR_STATE_RSP_WAIT)) { + struct pdu_data_llctrl_conn_param_req *cpr = (void *) + &pdu_rx->llctrl.conn_param_req; + struct lll_conn *lll = &conn->lll; + + /* Invalid parameters */ + if ((cpr->interval_min < 6) || + (cpr->interval_max > 3200) || + (cpr->interval_min > cpr->interval_max) || + (cpr->latency > 499) || + (cpr->timeout < 10) || (cpr->timeout > 3200) || + ((cpr->timeout * 4) <= ((cpr->latency + 1) * + cpr->interval_max)) || + (cpr->preferred_periodicity > cpr->interval_max)) { + nack = reject_ext_ind_send(conn, *rx, + PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ, + BT_HCI_ERR_INVALID_LL_PARAM); + break; + } + + /* resp to be generated by app, for now save + * parameters + */ + conn->llcp_conn_param.interval_min = cpr->interval_min; + conn->llcp_conn_param.interval_max = cpr->interval_max; + conn->llcp_conn_param.latency = cpr->latency; + 
conn->llcp_conn_param.timeout = cpr->timeout; + conn->llcp_conn_param.preferred_periodicity = + cpr->preferred_periodicity; + conn->llcp_conn_param.reference_conn_event_count = + cpr->reference_conn_event_count; + conn->llcp_conn_param.offset0 = cpr->offset0; + conn->llcp_conn_param.offset1 = cpr->offset1; + conn->llcp_conn_param.offset2 = cpr->offset2; + conn->llcp_conn_param.offset3 = cpr->offset3; + conn->llcp_conn_param.offset4 = cpr->offset4; + conn->llcp_conn_param.offset5 = cpr->offset5; + + /* enqueue the conn param req, if parameters changed, + * else respond + */ + if ((conn->llcp_conn_param.interval_max != + lll->interval) || + (conn->llcp_conn_param.latency != lll->latency) || + (RADIO_CONN_EVENTS(conn->llcp_conn_param.timeout * + 10000, + lll->interval * + 1250) != + conn->supervision_reload)) { + conn->llcp_conn_param.state = + LLCP_CPR_STATE_APP_WAIT; + } else { + conn->llcp_conn_param.status = 0; + conn->llcp_conn_param.cmd = 0; + conn->llcp_conn_param.state = + LLCP_CPR_STATE_RSP; + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + } + + conn->llcp_conn_param.ack--; + + /* set mutex */ + if (!conn_upd_curr) { + conn_upd_curr = conn; + } + } else { + LL_ASSERT(0); + } + break; + + case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP: + if (conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + if (!conn->lll.role && + (conn->llcp_conn_param.req != + conn->llcp_conn_param.ack) && + (conn->llcp_conn_param.state == + LLCP_CPR_STATE_RSP_WAIT)) { + struct pdu_data_llctrl_conn_param_req *cpr = (void *) + &pdu_rx->llctrl.conn_param_req; + + /* Invalid parameters */ + if ((cpr->interval_min < 6) || + (cpr->interval_max > 3200) || + (cpr->interval_min > cpr->interval_max) || + (cpr->latency > 499) || + (cpr->timeout < 10) || (cpr->timeout > 3200) || + ((cpr->timeout * 4) <= ((cpr->latency + 1) * + cpr->interval_max)) || + (cpr->preferred_periodicity > 
cpr->interval_max)) { + nack = reject_ext_ind_send(conn, *rx, + PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP, + BT_HCI_ERR_INVALID_LL_PARAM); + break; + } + + /* Stop procedure timeout */ + conn->procedure_expire = 0; + + /* save parameters to be used to select offset + */ + conn->llcp_conn_param.interval_min = cpr->interval_min; + conn->llcp_conn_param.interval_max = cpr->interval_max; + conn->llcp_conn_param.latency = cpr->latency; + conn->llcp_conn_param.timeout = cpr->timeout; + conn->llcp_conn_param.preferred_periodicity = + cpr->preferred_periodicity; + conn->llcp_conn_param.reference_conn_event_count = + cpr->reference_conn_event_count; + conn->llcp_conn_param.offset0 = cpr->offset0; + conn->llcp_conn_param.offset1 = cpr->offset1; + conn->llcp_conn_param.offset2 = cpr->offset2; + conn->llcp_conn_param.offset3 = cpr->offset3; + conn->llcp_conn_param.offset4 = cpr->offset4; + conn->llcp_conn_param.offset5 = cpr->offset5; + + /* Perform connection update */ + conn->llcp_conn_param.state = LLCP_CPR_STATE_RSP; + } + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + break; +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + + case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + reject_ext_ind_recv(conn, *rx, pdu_rx); + break; + +#if defined(CONFIG_BT_CTLR_LE_PING) + case PDU_DATA_LLCTRL_TYPE_PING_REQ: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_PING_REQ, pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + nack = ping_resp_send(conn, *rx); + break; + + case PDU_DATA_LLCTRL_TYPE_PING_RSP: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_PING_RSP, pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + /* Procedure complete */ + conn->procedure_expire = 0; + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + break; +#endif /* CONFIG_BT_CTLR_LE_PING */ + + case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP: + if 
(!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + if (0) { +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + } else if (conn->llcp_conn_param.ack != + conn->llcp_conn_param.req) { + struct lll_conn *lll = &conn->lll; + struct node_rx_cu *cu; + + /* Mark CPR as unsupported */ + conn->llcp_conn_param.disabled = 1; + + /* TODO: check for unsupported remote feature reason */ + if (!conn->lll.role) { + LL_ASSERT(conn->llcp_req == conn->llcp_ack); + + conn->llcp_conn_param.state = + LLCP_CPR_STATE_UPD; + + conn->llcp.conn_upd.win_size = 1; + conn->llcp.conn_upd.win_offset_us = 0; + conn->llcp.conn_upd.interval = + conn->llcp_conn_param.interval_max; + conn->llcp.conn_upd.latency = + conn->llcp_conn_param.latency; + conn->llcp.conn_upd.timeout = + conn->llcp_conn_param.timeout; + /* conn->llcp.conn_upd.instant = 0; */ + conn->llcp.conn_upd.state = LLCP_CUI_STATE_USE; + conn->llcp.conn_upd.is_internal = + !conn->llcp_conn_param.cmd; + conn->llcp_type = LLCP_CONN_UPD; + conn->llcp_ack--; + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + break; + } + + LL_ASSERT(conn_upd_curr == conn); + + /* reset mutex */ + conn_upd_curr = NULL; + + /* Procedure complete */ + conn->llcp_conn_param.ack = conn->llcp_conn_param.req; + + /* skip event generation if not cmd initiated */ + if (!conn->llcp_conn_param.cmd) { + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + break; + } + + /* generate conn upd complete event with error code */ + (*rx)->hdr.type = NODE_RX_TYPE_CONN_UPDATE; + + /* prepare connection update complete structure */ + cu = (void *)pdu_rx; + cu->status = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE; + cu->interval = lll->interval; + cu->latency = lll->latency; + cu->timeout = conn->supervision_reload * + lll->interval * 125 / 1000; +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) + } else if 
(conn->llcp_length.req != conn->llcp_length.ack) { + /* Procedure complete */ + conn->llcp_length.ack = conn->llcp_length.req; + conn->llcp_length.pause_tx = 0; + + /* propagate the data length procedure to + * host + */ +#endif /* CONFIG_BT_CTLR_DATA_LENGTH */ + +#if defined(CONFIG_BT_CTLR_PHY) + } else if (conn->llcp_phy.req != + conn->llcp_phy.ack) { + struct lll_conn *lll = &conn->lll; + + /* Procedure complete */ + conn->llcp_phy.ack = conn->llcp_phy.req; + + /* Reset packet timing restrictions */ + lll->phy_tx_time = lll->phy_tx; + + /* skip event generation is not cmd initiated */ + if (conn->llcp_phy.cmd) { + struct node_rx_pu *p; + + /* generate phy update complete event */ + (*rx)->hdr.type = NODE_RX_TYPE_PHY_UPDATE; + + p = (void *)pdu_rx; + p->status = 0; + p->tx = lll->phy_tx; + p->rx = lll->phy_rx; + } else { + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + } +#endif /* CONFIG_BT_CTLR_PHY */ + + } else { + struct pdu_data_llctrl *llctrl; + + llctrl = (void *)&pdu_rx->llctrl; + switch (llctrl->unknown_rsp.type) { + +#if defined(CONFIG_BT_CTLR_LE_PING) + case PDU_DATA_LLCTRL_TYPE_PING_REQ: + /* unknown rsp to LE Ping Req completes the + * procedure; nothing to do here. + */ + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + break; +#endif /* CONFIG_BT_CTLR_LE_PING */ + + default: + /* TODO: enqueue the error and let HCI handle + * it. 
+ */ + break; + } + } + + /* Procedure complete */ + conn->procedure_expire = 0; + break; + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) + case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP: + case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_LENGTH_REQ, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + nack = isr_rx_conn_pkt_ctrl_dle(pdu, rx_enqueue); + break; +#endif /* CONFIG_BT_CTLR_DATA_LENGTH */ + +#if defined(CONFIG_BT_CTLR_PHY) + case PDU_DATA_LLCTRL_TYPE_PHY_REQ: + if (!pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_PHY_REQ, pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + if (!conn->lll.role) { + if ((conn->llcp_phy.ack != + conn->llcp_phy.req) && + ((conn->llcp_phy.state == + LLCP_PHY_STATE_ACK_WAIT) || + (conn->llcp_phy.state == + LLCP_PHY_STATE_RSP_WAIT) || + (conn->llcp_phy.state == + LLCP_PHY_STATE_UPD))) { + /* Same procedure collision */ + nack = reject_ext_ind_send(conn, *rx, + PDU_DATA_LLCTRL_TYPE_PHY_REQ, + BT_HCI_ERR_LL_PROC_COLLISION); +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) +#if defined(CONFIG_BT_CTLR_LE_ENC) + } else if (((conn->llcp_req != + conn->llcp_ack) && + (conn->llcp_type != + LLCP_ENCRYPTION)) || + (conn->llcp_conn_param.req != + conn->llcp_conn_param.ack)) { +#else /* !CONFIG_BT_CTLR_LE_ENC */ + } else if ((conn->llcp_req != + conn->llcp_ack) || + (conn->llcp_conn_param.req != + conn->llcp_conn_param.ack)) { +#endif /* !CONFIG_BT_CTLR_LE_ENC */ +#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */ +#if defined(CONFIG_BT_CTLR_LE_ENC) + } else if ((conn->llcp_req != + conn->llcp_ack) && + (conn->llcp_type != + LLCP_ENCRYPTION)) { +#else /* !CONFIG_BT_CTLR_LE_ENC */ + } else if (conn->llcp_req != + conn->llcp_ack) { +#endif /* !CONFIG_BT_CTLR_LE_ENC */ +#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */ + /* Different procedure collision */ + nack = reject_ext_ind_send(conn, *rx, + PDU_DATA_LLCTRL_TYPE_PHY_REQ, + BT_HCI_ERR_DIFF_TRANS_COLLISION); + } else { + struct pdu_data_llctrl *c = &pdu_rx->llctrl; + struct 
pdu_data_llctrl_phy_req *p = + &c->phy_req; + + conn->llcp_phy.state = + LLCP_PHY_STATE_UPD; + + if (conn->llcp_phy.ack == + conn->llcp_phy.req) { + conn->llcp_phy.ack--; + + conn->llcp_phy.cmd = 0; + + conn->llcp_phy.tx = + conn->phy_pref_tx; + conn->llcp_phy.rx = + conn->phy_pref_rx; + } + + conn->llcp_phy.tx &= p->rx_phys; + conn->llcp_phy.rx &= p->tx_phys; + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + } + } else { + nack = phy_rsp_send(conn, *rx, pdu_rx); + } + break; + + case PDU_DATA_LLCTRL_TYPE_PHY_RSP: + if (conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_PHY_RSP, pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + if (!conn->lll.role && + (conn->llcp_phy.ack != conn->llcp_phy.req) && + (conn->llcp_phy.state == LLCP_PHY_STATE_RSP_WAIT)) { + struct pdu_data_llctrl_phy_rsp *p = + &pdu_rx->llctrl.phy_rsp; + + conn->llcp_phy.state = LLCP_PHY_STATE_UPD; + + conn->llcp_phy.tx &= p->rx_phys; + conn->llcp_phy.rx &= p->tx_phys; + + /* Procedure timeout is stopped */ + conn->procedure_expire = 0; + } + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + break; + + case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND: + { + u8_t err; + + if (!conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + err = phy_upd_ind_recv(conn, link, rx, pdu_rx); + if (err) { + conn->llcp_terminate.reason_peer = err; + } + } + break; +#endif /* CONFIG_BT_CTLR_PHY */ + +#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) + case PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND: + if (conn->lll.role || + !pdu_len_cmp(PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND, + pdu_rx->len)) { + goto ull_conn_rx_unknown_rsp_send; + } + + if (!conn->lll.role) { + struct pdu_data_llctrl_min_used_chans_ind *p = + &pdu_rx->llctrl.min_used_chans_ind; + +#if defined(CONFIG_BT_CTLR_PHY) + if (!(p->phys & (conn->lll.phy_tx | + conn->lll.phy_rx))) { +#else /* !CONFIG_BT_CTLR_PHY */ + if 
(!(p->phys & 0x01)) { +#endif /* !CONFIG_BT_CTLR_PHY */ + break; + } + + if (conn->llcp_req != conn->llcp_ack) { + break; + } + + memcpy(&conn->llcp.chan_map.chm[0], data_chan_map, + sizeof(conn->llcp.chan_map.chm)); + /* conn->llcp.chan_map.instant = 0; */ + conn->llcp.chan_map.initiate = 1; + + conn->llcp_type = LLCP_CHAN_MAP; + conn->llcp_ack--; + } + + /* Mark for buffer for release */ + (*rx)->hdr.type = NODE_RX_TYPE_DC_PDU_RELEASE; + + break; +#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */ + + default: +ull_conn_rx_unknown_rsp_send: + nack = unknown_rsp_send(conn, *rx, opcode); + break; + } + + return nack; +} + +static void ticker_op_cb(u32_t status, void *params) +{ + ARG_UNUSED(params); + + LL_ASSERT(status == TICKER_STATUS_SUCCESS); +} diff --git a/subsys/bluetooth/controller/ll_sw/ull_conn_internal.h b/subsys/bluetooth/controller/ll_sw/ull_conn_internal.h new file mode 100644 index 00000000000..55bd57d3a17 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_conn_internal.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* Macro to convert time in us to connection interval units */ +#define RADIO_CONN_EVENTS(x, y) ((u16_t)(((x) + (y) - 1) / (y))) + +struct ll_conn *ll_conn_acquire(void); +void ll_conn_release(struct ll_conn *conn); +u16_t ll_conn_handle_get(struct ll_conn *conn); +struct ll_conn *ll_conn_get(u16_t handle); +struct ll_conn *ll_connected_get(u16_t handle); +int ull_conn_init(void); +int ull_conn_reset(void); +u8_t ull_conn_chan_map_cpy(u8_t *chan_map); +void ull_conn_chan_map_set(u8_t *chan_map); +u8_t ull_conn_default_phy_tx_get(void); +u8_t ull_conn_default_phy_rx_get(void); +void ull_conn_setup(memq_link_t *link, struct node_rx_hdr *rx); +int ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx); +int ull_conn_llcp(struct ll_conn *conn, u32_t ticks_at_expire, u16_t lazy); +void ull_conn_done(struct node_rx_event_done *done); +void ull_conn_tx_demux(u8_t 
count); +void ull_conn_tx_lll_enqueue(struct ll_conn *conn, u8_t count); +void ull_conn_link_tx_release(void *link); +void ull_conn_tx_ack(struct ll_conn *conn, memq_link_t *link, + struct node_tx *tx); diff --git a/subsys/bluetooth/controller/ll_sw/ull_conn_types.h b/subsys/bluetooth/controller/ll_sw/ull_conn_types.h index 90ee65e1c40..b0c32d02978 100644 --- a/subsys/bluetooth/controller/ll_sw/ull_conn_types.h +++ b/subsys/bluetooth/controller/ll_sw/ull_conn_types.h @@ -1,9 +1,202 @@ /* - * Copyright (c) 2018 Nordic Semiconductor ASA + * Copyright (c) 2018-2019 Nordic Semiconductor ASA * * SPDX-License-Identifier: Apache-2.0 */ +struct ll_conn { + struct evt_hdr evt; + struct ull_hdr ull; + struct lll_conn lll; + + u16_t connect_expire; + u16_t supervision_reload; + u16_t supervision_expire; + u16_t procedure_reload; + u16_t procedure_expire; + +#if defined(CONFIG_BT_CTLR_LE_PING) + u16_t appto_reload; + u16_t appto_expire; + u16_t apto_reload; + u16_t apto_expire; +#endif /* CONFIG_BT_CTLR_LE_PING */ + + union { + struct { + u8_t fex_valid:1; + } common; + + struct { + u8_t fex_valid:1; + u32_t ticks_to_offset; + } slave; + + struct { + u8_t fex_valid:1; + } master; + }; + + u8_t llcp_req; + u8_t llcp_ack; + u8_t llcp_type; + + union { + struct { + enum { + LLCP_CUI_STATE_INPROG, + LLCP_CUI_STATE_USE, + LLCP_CUI_STATE_SELECT + } state:2 __packed; + u8_t is_internal:1; + u16_t interval; + u16_t latency; + u16_t timeout; + u16_t instant; + u32_t win_offset_us; + u8_t win_size; + u16_t *pdu_win_offset; + u32_t ticks_anchor; + } conn_upd; + + struct { + u8_t initiate; + u8_t chm[5]; + u16_t instant; + } chan_map; + +#if defined(CONFIG_BT_CTLR_PHY) + struct { + u8_t initiate:1; + u8_t cmd:1; + u8_t tx:3; + u8_t rx:3; + u16_t instant; + } phy_upd_ind; +#endif /* CONFIG_BT_CTLR_PHY */ + +#if defined(CONFIG_BT_CTLR_LE_ENC) + struct { + u8_t initiate; + u8_t error_code; + u8_t rand[8]; + u8_t ediv[2]; + u8_t ltk[16]; + u8_t skd[16]; + } encryption; +#endif /* 
CONFIG_BT_CTLR_LE_ENC */ + } llcp; + + struct node_rx_pdu *llcp_rx; + + u32_t llcp_features; + + struct { + u8_t tx:1; + u8_t rx:1; + u8_t version_number; + u16_t company_id; + u16_t sub_version_number; + } llcp_version; + + struct { + u8_t req; + u8_t ack; + u8_t reason_own; + u8_t reason_peer; + struct { + struct node_rx_hdr hdr; + u8_t reason; + } node_rx; + } llcp_terminate; + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + struct { + u8_t req; + u8_t ack; + enum { + LLCP_CPR_STATE_REQ, + LLCP_CPR_STATE_RSP, + LLCP_CPR_STATE_APP_REQ, + LLCP_CPR_STATE_APP_WAIT, + LLCP_CPR_STATE_RSP_WAIT, + LLCP_CPR_STATE_UPD + } state:3 __packed; + u8_t cmd:1; + u8_t disabled:1; + u8_t status; + u16_t interval_min; + u16_t interval_max; + u16_t latency; + u16_t timeout; + u8_t preferred_periodicity; + u16_t reference_conn_event_count; + u16_t offset0; + u16_t offset1; + u16_t offset2; + u16_t offset3; + u16_t offset4; + u16_t offset5; + u16_t *pdu_win_offset0; + u32_t ticks_ref; + u32_t ticks_to_offset_next; + } llcp_conn_param; +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + +#if defined(CONFIG_BT_CTLR_DATA_LENGTH) + struct { + u8_t req; + u8_t ack; + u8_t state:2; +#define LLCP_LENGTH_STATE_REQ 0 +#define LLCP_LENGTH_STATE_ACK_WAIT 1 +#define LLCP_LENGTH_STATE_RSP_WAIT 2 +#define LLCP_LENGTH_STATE_RESIZE 3 + u8_t pause_tx:1; + u16_t rx_octets; + u16_t tx_octets; +#if defined(CONFIG_BT_CTLR_PHY) + u16_t rx_time; + u16_t tx_time; +#endif /* CONFIG_BT_CTLR_PHY */ + } llcp_length; +#endif /* CONFIG_BT_CTLR_DATA_LENGTH */ + +#if defined(CONFIG_BT_CTLR_PHY) + struct { + u8_t req; + u8_t ack; + u8_t state:2; +#define LLCP_PHY_STATE_REQ 0 +#define LLCP_PHY_STATE_ACK_WAIT 1 +#define LLCP_PHY_STATE_RSP_WAIT 2 +#define LLCP_PHY_STATE_UPD 3 + u8_t tx:3; + u8_t rx:3; + u8_t flags:1; + u8_t cmd:1; + } llcp_phy; + + u8_t phy_pref_tx:3; + u8_t phy_pref_flags:1; + u8_t phy_pref_rx:3; +#endif /* CONFIG_BT_CTLR_PHY */ + +#if defined(CONFIG_BT_CTLR_LE_ENC) + u8_t pause_rx:1; + u8_t pause_tx:1; + u8_t 
refresh:1; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + + struct node_tx *tx_head; + struct node_tx *tx_ctrl; + struct node_tx *tx_ctrl_last; + struct node_tx *tx_data; + struct node_tx *tx_data_last; + + u8_t chm_updated; +}; + struct node_rx_cc { u8_t status; u8_t role; diff --git a/subsys/bluetooth/controller/ll_sw/ull_internal.h b/subsys/bluetooth/controller/ll_sw/ull_internal.h new file mode 100644 index 00000000000..edddaab4441 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_internal.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2017-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +static inline u8_t ull_ref_inc(struct ull_hdr *hdr) +{ + return ++hdr->ref; +} + +static inline void ull_hdr_init(struct ull_hdr *hdr) +{ + hdr->disabled_cb = hdr->disabled_param = NULL; +} + +void *ll_rx_link_alloc(void); +void ll_rx_link_release(void *link); +void *ll_rx_alloc(void); +void ll_rx_release(void *node_rx); +void *ll_pdu_rx_alloc_peek(u8_t count); +void *ll_pdu_rx_alloc(void); +void ll_rx_put(memq_link_t *link, void *rx); +void ll_rx_sched(void); +void ull_tx_ack_put(u16_t handle, struct node_tx *node_tx); +void ull_ticker_status_give(u32_t status, void *param); +u32_t ull_ticker_status_take(u32_t ret, u32_t volatile *ret_cb); +void *ull_disable_mark(void *param); +void *ull_disable_unmark(void *param); +void *ull_disable_mark_get(void); +int ull_disable(void *param); +u8_t ull_entropy_get(u8_t len, u8_t *rand); diff --git a/subsys/bluetooth/controller/ll_sw/ull_master.c b/subsys/bluetooth/controller/ll_sw/ull_master.c new file mode 100644 index 00000000000..dd20da3871c --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_master.c @@ -0,0 +1,894 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include "util/util.h" +#include "util/memq.h" + +#include "hal/ticker.h" +#include "hal/ccm.h" +#include "util/mayfly.h" +#include "ticker/ticker.h" + 
+#include "pdu.h" +#include "ll.h" +#include "ll_feat.h" + +#include "lll.h" +#include "lll_vendor.h" +#include "lll_clock.h" +#include "lll_scan.h" +#include "lll_conn.h" +#include "lll_master.h" +#include "lll_tim_internal.h" + +#include "ull_scan_types.h" +#include "ull_conn_types.h" + +#include "ull_internal.h" +#include "ull_scan_internal.h" +#include "ull_conn_internal.h" +#include "ull_master_internal.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_ull_master +#include "common/log.h" +#include +#include "hal/debug.h" + +static void ticker_op_stop_scan_cb(u32_t status, void *params); +static void ticker_op_cb(u32_t status, void *params); +static u32_t access_addr_get(void); + +u8_t ll_create_connection(u16_t scan_interval, u16_t scan_window, + u8_t filter_policy, u8_t peer_addr_type, + u8_t *p_peer_addr, u8_t own_addr_type, + u16_t interval, u16_t latency, u16_t timeout) +{ + struct lll_conn *conn_lll; + struct ll_scan_set *scan; + u32_t conn_interval_us; + struct lll_scan *lll; + struct ll_conn *conn; + memq_link_t *link; + u32_t access_addr; + u32_t err; + u8_t hop; + + scan = ull_scan_is_disabled_get(0); + if (!scan) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + lll = &scan->lll; + if (lll->conn) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + link = ll_rx_link_alloc(); + if (!link) { + return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED; + } + + conn = ll_conn_acquire(); + if (!conn) { + ll_rx_link_release(link); + + return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED; + } + + err = ull_scan_params_set(scan, 0, scan_interval, scan_window, + own_addr_type, filter_policy); + if (err) { + ll_conn_release(conn); + ll_rx_link_release(link); + return err; + } + + lll->adv_addr_type = peer_addr_type; + memcpy(lll->adv_addr, p_peer_addr, BDADDR_SIZE); + lll->conn_timeout = timeout; + lll->conn_ticks_slot = 0; /* TODO: */ + + conn_lll = &conn->lll; + + access_addr = access_addr_get(); + memcpy(conn_lll->access_addr, &access_addr, + sizeof(conn_lll->access_addr)); + 
bt_rand(&conn_lll->crc_init[0], 3); + + conn_lll->handle = 0xFFFF; + conn_lll->interval = interval; + conn_lll->latency = latency; + + if (!conn_lll->link_tx_free) { + conn_lll->link_tx_free = &conn_lll->link_tx; + } + + memq_init(conn_lll->link_tx_free, &conn_lll->memq_tx.head, + &conn_lll->memq_tx.tail); + conn_lll->link_tx_free = NULL; + + conn_lll->packet_tx_head_len = 0; + conn_lll->packet_tx_head_offset = 0; + + conn_lll->sn = 0; + conn_lll->nesn = 0; + conn_lll->empty = 0; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + conn_lll->enc_rx = 0; + conn_lll->enc_tx = 0; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +#if defined(CONFIG_BT_CTLR_PHY) + conn_lll->phy_tx = BIT(0); + conn_lll->phy_flags = 0; + conn_lll->phy_tx_time = BIT(0); + conn_lll->phy_rx = BIT(0); +#endif /* CONFIG_BT_CTLR_PHY */ + +#if defined(CONFIG_BT_CTLR_CONN_RSSI) + conn_lll->rssi_latest = 0x7F; + conn_lll->rssi_reported = 0x7F; + conn_lll->rssi_sample_count = 0; +#endif /* CONFIG_BT_CTLR_CONN_RSSI */ + + /* FIXME: BEGIN: Move to ULL? */ + conn_lll->latency_prepare = 0; + conn_lll->latency_event = 0; + conn_lll->event_counter = 0; + + conn_lll->data_chan_count = + ull_conn_chan_map_cpy(conn_lll->data_chan_map); + bt_rand(&hop, sizeof(u8_t)); + conn_lll->data_chan_hop = 5 + (hop % 12); + conn_lll->data_chan_sel = 0; + conn_lll->data_chan_use = 0; + conn_lll->role = 0; + /* FIXME: END: Move to ULL? */ + + conn->connect_expire = 6; + conn->supervision_expire = 0; + conn_interval_us = (u32_t)interval * 1250; + conn->supervision_reload = RADIO_CONN_EVENTS(timeout * 10000, + conn_interval_us); + + conn->procedure_expire = 0; + conn->procedure_reload = RADIO_CONN_EVENTS(40000000, + conn_interval_us); + +#if defined(CONFIG_BT_CTLR_LE_PING) + conn->apto_expire = 0; + /* APTO in no. 
of connection events */ + conn->apto_reload = RADIO_CONN_EVENTS((30000000), conn_interval_us); + conn->appto_expire = 0; + /* Dispatch LE Ping PDU 6 connection events (that peer would listen to) + * before 30s timeout + * TODO: "peer listens to" is greater than 30s due to latency + */ + conn->appto_reload = (conn->apto_reload > (conn_lll->latency + 6)) ? + (conn->apto_reload - (conn_lll->latency + 6)) : + conn->apto_reload; +#endif /* CONFIG_BT_CTLR_LE_PING */ + + conn->common.fex_valid = 0; + + conn->llcp_req = conn->llcp_ack = conn->llcp_type = 0; + conn->llcp_rx = NULL; + conn->llcp_features = LL_FEAT; + conn->llcp_version.tx = conn->llcp_version.rx = 0; + conn->llcp_terminate.reason_peer = 0; + /* NOTE: use allocated link for generating dedicated + * terminate ind rx node + */ + conn->llcp_terminate.node_rx.hdr.link = link; + +#if defined(CONFIG_BT_CTLR_LE_ENC) + conn->pause_tx = conn->pause_rx = conn->refresh = 0; +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) + conn->llcp_conn_param.req = 0; + conn->llcp_conn_param.ack = 0; + conn->llcp_conn_param.disabled = 0; +#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */ + +#if defined(CONFIG_BT_CTLR_PHY) + conn->llcp_phy.req = conn->llcp_phy.ack = 0; + conn->phy_pref_tx = ull_conn_default_phy_tx_get(); + conn->phy_pref_rx = ull_conn_default_phy_rx_get(); + conn->phy_pref_flags = 0; +#endif /* CONFIG_BT_CTLR_PHY */ + + conn->tx_head = conn->tx_ctrl = conn->tx_ctrl_last = + conn->tx_data = conn->tx_data_last = 0; + + lll->conn = conn_lll; + + ull_hdr_init(&conn->ull); + lll_hdr_init(&conn->lll, conn); + +#if defined(CONFIG_BT_CTLR_PRIVACY) + ll_filters_scan_update(filter_policy); + + if (!filter_policy && ctrl_rl_enabled()) { + /* Look up the resolving list */ + rl_idx = ll_rl_find(peer_addr_type, peer_addr, NULL); + } + + if (own_addr_type == BT_ADDR_LE_PUBLIC_ID || + own_addr_type == BT_ADDR_LE_RANDOM_ID) { + + /* Generate RPAs if required */ + ll_rl_rpa_update(false); + own_addr_type &= 
0x1; + rpa_gen = 1; + } +#endif + + /* wait for stable clocks */ + lll_clock_wait(); + + return ull_scan_enable(scan); +} + +u8_t ll_connect_disable(void **rx) +{ + struct lll_conn *conn_lll; + struct ll_scan_set *scan; + u8_t status; + + scan = ull_scan_is_enabled_get(0); + if (!scan) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn_lll = scan->lll.conn; + if (!conn_lll) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + status = ull_scan_disable(0, scan); + if (!status) { + struct ll_conn *conn = (void *)HDR_LLL2EVT(conn_lll); + struct node_rx_pdu *cc; + memq_link_t *link; + + cc = (void *)&conn->llcp_terminate.node_rx; + link = cc->hdr.link; + LL_ASSERT(link); + + /* free the memq link early, as caller could overwrite it */ + ll_rx_link_release(link); + + cc->hdr.type = NODE_RX_TYPE_CONNECTION; + cc->hdr.handle = 0xffff; + *((u8_t *)cc->pdu) = BT_HCI_ERR_UNKNOWN_CONN_ID; + *rx = cc; + } + + return status; +} + +u8_t ll_chm_update(u8_t *chm) +{ + u16_t handle; + + ull_conn_chan_map_set(chm); + + handle = CONFIG_BT_MAX_CONN; + while (handle--) { + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn || conn->lll.role) { + continue; + } + + if (conn->llcp_req != conn->llcp_ack) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + memcpy(conn->llcp.chan_map.chm, chm, + sizeof(conn->llcp.chan_map.chm)); + /* conn->llcp.chan_map.instant = 0; */ + conn->llcp.chan_map.initiate = 1; + + conn->llcp_type = LLCP_CHAN_MAP; + conn->llcp_req++; + } + + return 0; +} + +#if defined(CONFIG_BT_CTLR_LE_ENC) +u8_t ll_enc_req_send(u16_t handle, u8_t *rand, u8_t *ediv, u8_t *ltk) +{ + struct ll_conn *conn; + struct node_tx *tx; + + conn = ll_connected_get(handle); + if (!conn) { + return BT_HCI_ERR_UNKNOWN_CONN_ID; + } + + if (conn->llcp_req != conn->llcp_ack) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + tx = ll_tx_mem_acquire(); + if (tx) { + struct pdu_data *pdu_data_tx; + + pdu_data_tx = (void *)tx->pdu; + + memcpy(&conn->llcp.encryption.ltk[0], ltk, + 
sizeof(conn->llcp.encryption.ltk)); + + if ((conn->lll.enc_rx == 0) && (conn->lll.enc_tx == 0)) { + struct pdu_data_llctrl_enc_req *enc_req; + + pdu_data_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_data_tx->len = + offsetof(struct pdu_data_llctrl, enc_rsp) + + sizeof(struct pdu_data_llctrl_enc_req); + pdu_data_tx->llctrl.opcode = + PDU_DATA_LLCTRL_TYPE_ENC_REQ; + enc_req = (void *) + &pdu_data_tx->llctrl.enc_req; + memcpy(enc_req->rand, rand, sizeof(enc_req->rand)); + enc_req->ediv[0] = ediv[0]; + enc_req->ediv[1] = ediv[1]; + bt_rand(enc_req->skdm, sizeof(enc_req->skdm)); + bt_rand(enc_req->ivm, sizeof(enc_req->ivm)); + } else if ((conn->lll.enc_rx != 0) && (conn->lll.enc_tx != 0)) { + memcpy(&conn->llcp.encryption.rand[0], rand, + sizeof(conn->llcp.encryption.rand)); + + conn->llcp.encryption.ediv[0] = ediv[0]; + conn->llcp.encryption.ediv[1] = ediv[1]; + + pdu_data_tx->ll_id = PDU_DATA_LLID_CTRL; + pdu_data_tx->len = offsetof(struct pdu_data_llctrl, + enc_req); + pdu_data_tx->llctrl.opcode = + PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ; + } else { + ll_tx_mem_release(tx); + + return BT_HCI_ERR_CMD_DISALLOWED; + } + + if (ll_tx_mem_enqueue(handle, tx)) { + ll_tx_mem_release(tx); + + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn->llcp.encryption.initiate = 1; + + conn->llcp_type = LLCP_ENCRYPTION; + conn->llcp_req++; + + return 0; + } + + return BT_HCI_ERR_CMD_DISALLOWED; +} +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +void ull_master_setup(memq_link_t *link, struct node_rx_hdr *rx, + struct node_rx_ftr *ftr, struct lll_conn *lll) +{ + u32_t conn_offset_us, conn_interval_us; + u8_t ticker_id_scan, ticker_id_conn; + u32_t ticks_slot_overhead; + u32_t mayfly_was_enabled; + u32_t ticks_slot_offset; + struct ll_scan_set *scan; + struct node_rx_cc *cc; + struct ll_conn *conn; + struct pdu_adv *pdu; + u32_t ticker_status; + u8_t chan_sel; + + ((struct lll_scan *)ftr->param)->conn = NULL; + + scan = ((struct lll_scan *)ftr->param)->hdr.parent; + conn = lll->hdr.parent; + + pdu = (void 
*)((struct node_rx_pdu *)rx)->pdu; + chan_sel = pdu->chan_sel; + + cc = (void *)pdu; + cc->status = 0; + cc->role = 0; + cc->peer_addr_type = scan->lll.adv_addr_type; + memcpy(cc->peer_addr, scan->lll.adv_addr, BDADDR_SIZE); + cc->interval = lll->interval; + cc->latency = lll->latency; + cc->timeout = scan->lll.conn_timeout; + cc->sca = lll_conn_sca_local_get(); + + lll->handle = ll_conn_handle_get(conn); + rx->handle = lll->handle; + + /* Use Channel Selection Algorithm #2 if peer too supports it */ + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + struct node_rx_pdu *rx_csa; + struct node_rx_cs *cs; + + /* pick the rx node instance stored within the connection + * rx node. + */ + rx_csa = (void *)ftr->extra; + + /* Enqueue the connection event */ + ll_rx_put(link, rx); + + /* use the rx node for CSA event */ + rx = (void *)rx_csa; + link = rx->link; + + rx->handle = lll->handle; + rx->type = NODE_RX_TYPE_CHAN_SEL_ALGO; + + cs = (void *)rx_csa->pdu; + + if (chan_sel) { + u16_t aa_ls = ((u16_t)lll->access_addr[1] << 8) | + lll->access_addr[0]; + u16_t aa_ms = ((u16_t)lll->access_addr[3] << 8) | + lll->access_addr[2]; + + lll->data_chan_sel = 1; + lll->data_chan_id = aa_ms ^ aa_ls; + + cs->csa = 0x01; + } else { + cs->csa = 0x00; + } + } + + ll_rx_put(link, rx); + ll_rx_sched(); + + /* TODO: active_to_start feature port */ + conn->evt.ticks_active_to_start = 0; + conn->evt.ticks_xtal_to_start = + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US); + conn->evt.ticks_preempt_to_start = + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US); + conn->evt.ticks_slot = + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US + + ftr->us_radio_rdy + 328 + TIFS_US + + 328); + + ticks_slot_offset = max(conn->evt.ticks_active_to_start, + conn->evt.ticks_xtal_to_start); + + if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) { + ticks_slot_overhead = ticks_slot_offset; + } else { + ticks_slot_overhead = 0; + } + + conn_interval_us = lll->interval * 1250; + conn_offset_us = ftr->us_radio_end; + 
conn_offset_us += HAL_TICKER_TICKS_TO_US(1); + conn_offset_us -= EVENT_OVERHEAD_START_US; + conn_offset_us -= ftr->us_radio_rdy; + + /* disable ticker job, in order to chain stop and start to avoid RTC + * being stopped if no tickers active. + */ +#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) + mayfly_was_enabled = mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH, + TICKER_USER_ID_ULL_LOW); + mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0); +#endif + + /* Stop Scanner */ + ticker_id_scan = TICKER_ID_SCAN_BASE + ull_scan_handle_get(scan); + ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_ULL_HIGH, + ticker_id_scan, ticker_op_stop_scan_cb, + (void *)(u32_t)ticker_id_scan); + ticker_op_stop_scan_cb(ticker_status, (void *)(u32_t)ticker_id_scan); + + /* Scanner stop can expire while here in this ISR. + * Deferred attempt to stop can fail as it would have + * expired, hence ignore failure. + */ + ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH, + TICKER_ID_SCAN_STOP, NULL, NULL); + + /* Start master */ + ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn); + ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_ULL_HIGH, + ticker_id_conn, + ftr->ticks_anchor - ticks_slot_offset, + HAL_TICKER_US_TO_TICKS(conn_offset_us), + HAL_TICKER_US_TO_TICKS(conn_interval_us), + HAL_TICKER_REMAINDER(conn_interval_us), + TICKER_NULL_LAZY, + (conn->evt.ticks_slot + + ticks_slot_overhead), + ull_master_ticker_cb, conn, ticker_op_cb, + (void *)__LINE__); + LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) || + (ticker_status == TICKER_STATUS_BUSY)); + +#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) + /* enable ticker job, if disabled in this function */ + if (mayfly_was_enabled) { + mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, + 1); + } +#else + ARG_UNUSED(mayfly_was_enabled); +#endif + +#if 0 + /* Populate the master context */ + conn->handle = 
mem_index_get(conn, _radio.conn_pool, + CONNECTION_T_SIZE); + + /* Prepare the rx packet structure */ + node_rx->hdr.handle = conn->handle; + node_rx->hdr.type = NODE_RX_TYPE_CONNECTION; + + /* prepare connection complete structure */ + pdu_data = (void *)node_rx->pdu; + cc = (void *)pdu_data->lldata; + cc->status = 0x00; + cc->role = 0x00; +#if defined(CONFIG_BT_CTLR_PRIVACY) + cc->own_addr_type = pdu_adv_tx->tx_addr; + memcpy(&cc->own_addr[0], &pdu_adv_tx->connect_ind.init_addr[0], + BDADDR_SIZE); + + if (irkmatch_ok && rl_idx != FILTER_IDX_NONE) { + /* TODO: store rl_idx instead if safe */ + /* Store identity address */ + ll_rl_id_addr_get(rl_idx, &cc->peer_addr_type, + &cc->peer_addr[0]); + /* Mark it as identity address from RPA (0x02, 0x03) */ + cc->peer_addr_type += 2; + + /* Store peer RPA */ + memcpy(&cc->peer_rpa[0], + &pdu_adv_tx->connect_ind.adv_addr[0], + BDADDR_SIZE); + } else { + memset(&cc->peer_rpa[0], 0x0, BDADDR_SIZE); +#else + if (1) { +#endif /* CONFIG_BT_CTLR_PRIVACY */ + cc->peer_addr_type = pdu_adv_tx->rx_addr; + memcpy(&cc->peer_addr[0], + &pdu_adv_tx->connect_ind.adv_addr[0], + BDADDR_SIZE); + } + + cc->interval = _radio.scanner.conn_interval; + cc->latency = _radio.scanner.conn_latency; + cc->timeout = _radio.scanner.conn_timeout; + cc->mca = pdu_adv_tx->connect_ind.sca; + + /* enqueue connection complete structure into queue */ + rx_fc_lock(conn->handle); + packet_rx_enqueue(); + + /* Use Channel Selection Algorithm #2 if peer too supports it */ + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + struct node_rx_cs *cs; + + /* Generate LE Channel Selection Algorithm event */ + node_rx = packet_rx_reserve_get(3); + LL_ASSERT(node_rx); + + node_rx->hdr.handle = conn->handle; + node_rx->hdr.type = NODE_RX_TYPE_CHAN_SEL_ALGO; + + pdu_data = (void *)node_rx->pdu; + cs = (void *)pdu_data->lldata; + + if (pdu_adv_rx->chan_sel) { + u16_t aa_ls = + ((u16_t)conn->access_addr[1] << 8) | + conn->access_addr[0]; + u16_t aa_ms = + 
((u16_t)conn->access_addr[3] << 8) | + conn->access_addr[2]; + + conn->data_chan_sel = 1; + conn->data_chan_id = aa_ms ^ aa_ls; + + cs->csa = 0x01; + } else { + cs->csa = 0x00; + } + + packet_rx_enqueue(); + } + + /* Calculate master slot */ + conn->hdr.ticks_active_to_start = _radio.ticks_active_to_start; + conn->hdr.ticks_xtal_to_start = HAL_TICKER_US_TO_TICKS( + EVENT_OVERHEAD_XTAL_US); + conn->hdr.ticks_preempt_to_start = HAL_TICKER_US_TO_TICKS( + EVENT_OVERHEAD_PREEMPT_MIN_US); + conn->hdr.ticks_slot = _radio.scanner.ticks_conn_slot; + ticks_slot_offset = max(conn->hdr.ticks_active_to_start, + conn->hdr.ticks_xtal_to_start); + + /* Stop Scanner */ + ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_LLL, + TICKER_ID_SCAN_BASE, + ticker_stop_scan_assert, + (void *)__LINE__); + ticker_stop_scan_assert(ticker_status, (void *)__LINE__); + + /* Scanner stop can expire while here in this ISR. + * Deferred attempt to stop can fail as it would have + * expired, hence ignore failure. 
+ */ + ticker_stop(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_LLL, + TICKER_ID_SCAN_STOP, NULL, NULL); + + /* Start master */ + ticker_status = + ticker_start(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_LLL, + TICKER_ID_CONN_BASE + + conn->handle, + (_radio.ticks_anchor - ticks_slot_offset), + HAL_TICKER_US_TO_TICKS(conn_space_us), + HAL_TICKER_US_TO_TICKS(conn_interval_us), + HAL_TICKER_REMAINDER(conn_interval_us), + TICKER_NULL_LAZY, + (ticks_slot_offset + conn->hdr.ticks_slot), + event_master_prepare, conn, + ticker_success_assert, (void *)__LINE__); + LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) || + (ticker_status == TICKER_STATUS_BUSY)); +#endif +} + +void ull_master_ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param) +{ + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, lll_master_prepare}; + static struct lll_prepare_param p; + struct ll_conn *conn = param; + u32_t err; + u8_t ref; + int ret; + + DEBUG_RADIO_PREPARE_M(1); + + /* Handle any LL Control Procedures */ + ret = ull_conn_llcp(conn, ticks_at_expire, lazy); + if (ret) { + return; + } + + /* Increment prepare reference count */ + ref = ull_ref_inc(&conn->ull); + LL_ASSERT(ref); + + /* De-mux 1 tx node from FIFO */ + ull_conn_tx_demux(1); + + /* Enqueue towards LLL */ + ull_conn_tx_lll_enqueue(conn, 1); + + /* Append timing parameters */ + p.ticks_at_expire = ticks_at_expire; + p.remainder = remainder; + p.lazy = lazy; + p.param = &conn->lll; + _mfy.param = &p; + + /* Kick LLL prepare */ + err = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, + 0, &_mfy); + LL_ASSERT(!err); + + /* De-mux remaining tx nodes from FIFO */ + ull_conn_tx_demux(UINT8_MAX); + + /* Enqueue towards LLL */ + ull_conn_tx_lll_enqueue(conn, UINT8_MAX); + + DEBUG_RADIO_PREPARE_M(1); +} + +static void ticker_op_stop_scan_cb(u32_t status, void *params) +{ + /* TODO: */ +} + +static void ticker_op_cb(u32_t status, void *params) +{ + ARG_UNUSED(params); + + 
LL_ASSERT(status == TICKER_STATUS_SUCCESS); +} + +/** @brief Prepare access address as per BT Spec. + * + * - It shall have no more than six consecutive zeros or ones. + * - It shall not be the advertising channel packets' Access Address. + * - It shall not be a sequence that differs from the advertising channel + * packets Access Address by only one bit. + * - It shall not have all four octets equal. + * - It shall have no more than 24 transitions. + * - It shall have a minimum of two transitions in the most significant six + * bits. + * + * LE Coded PHY requirements: + * - It shall have at least three ones in the least significant 8 bits. + * - It shall have no more than eleven transitions in the least significant 16 + * bits. + */ +static u32_t access_addr_get(void) +{ +#if defined(CONFIG_BT_CTLR_PHY_CODED) + u8_t transitions_lsb16; + u8_t ones_count_lsb8; +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + u8_t consecutive_cnt; + u8_t consecutive_bit; + u32_t adv_aa_check; + u32_t access_addr; + u8_t transitions; + u8_t bit_idx; + u8_t retry; + + retry = 3; +again: + LL_ASSERT(retry); + retry--; + + bt_rand(&access_addr, sizeof(u32_t)); + + bit_idx = 31; + transitions = 0; + consecutive_cnt = 1; +#if defined(CONFIG_BT_CTLR_PHY_CODED) + ones_count_lsb8 = 0; + transitions_lsb16 = 0; +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + consecutive_bit = (access_addr >> bit_idx) & 0x01; + while (bit_idx--) { +#if defined(CONFIG_BT_CTLR_PHY_CODED) + u8_t transitions_lsb16_prev = transitions_lsb16; +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + u8_t consecutive_cnt_prev = consecutive_cnt; + u8_t transitions_prev = transitions; + u8_t bit; + + bit = (access_addr >> bit_idx) & 0x01; + if (bit == consecutive_bit) { + consecutive_cnt++; + } else { + consecutive_cnt = 1; + consecutive_bit = bit; + transitions++; + +#if defined(CONFIG_BT_CTLR_PHY_CODED) + if (bit_idx < 15) { + transitions_lsb16++; + } +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + } + +#if defined(CONFIG_BT_CTLR_PHY_CODED) + if ((bit_idx < 
8) && consecutive_bit) { + ones_count_lsb8++; + } +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + + /* It shall have no more than six consecutive zeros or ones. */ + /* It shall have a minimum of two transitions in the most + * significant six bits. + */ + if ((consecutive_cnt > 6) || +#if defined(CONFIG_BT_CTLR_PHY_CODED) + (!consecutive_bit && (((bit_idx < 6) && + (ones_count_lsb8 < 1)) || + ((bit_idx < 5) && + (ones_count_lsb8 < 2)) || + ((bit_idx < 4) && + (ones_count_lsb8 < 3)))) || +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + ((consecutive_cnt < 6) && + (((bit_idx < 29) && (transitions < 1)) || + ((bit_idx < 28) && (transitions < 2))))) { + if (consecutive_bit) { + consecutive_bit = 0; + access_addr &= ~BIT(bit_idx); +#if defined(CONFIG_BT_CTLR_PHY_CODED) + if (bit_idx < 8) { + ones_count_lsb8--; + } +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + } else { + consecutive_bit = 1; + access_addr |= BIT(bit_idx); +#if defined(CONFIG_BT_CTLR_PHY_CODED) + if (bit_idx < 8) { + ones_count_lsb8++; + } +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + } + + if (transitions != transitions_prev) { + consecutive_cnt = consecutive_cnt_prev; + transitions = transitions_prev; + } else { + consecutive_cnt = 1; + transitions++; + } + +#if defined(CONFIG_BT_CTLR_PHY_CODED) + if (bit_idx < 15) { + if (transitions_lsb16 != + transitions_lsb16_prev) { + transitions_lsb16 = + transitions_lsb16_prev; + } else { + transitions_lsb16++; + } + } +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + } + + /* It shall have no more than 24 transitions + * It shall have no more than eleven transitions in the least + * significant 16 bits. + */ + if ((transitions > 24) || +#if defined(CONFIG_BT_CTLR_PHY_CODED) + (transitions_lsb16 > 11) || +#endif /* CONFIG_BT_CTLR_PHY_CODED */ + 0) { + if (consecutive_bit) { + access_addr &= ~(BIT(bit_idx + 1) - 1); + } else { + access_addr |= (BIT(bit_idx + 1) - 1); + } + + break; + } + } + + /* It shall not be the advertising channel packets Access Address. 
+ * It shall not be a sequence that differs from the advertising channel + * packets Access Address by only one bit. + */ + adv_aa_check = access_addr ^ 0x8e89bed6; + if (util_ones_count_get((u8_t *)&adv_aa_check, + sizeof(adv_aa_check)) <= 1) { + goto again; + } + + /* It shall not have all four octets equal. */ + if (!((access_addr & 0xFFFF) ^ (access_addr >> 16)) && + !((access_addr & 0xFF) ^ (access_addr >> 24))) { + goto again; + } + + return access_addr; +} diff --git a/subsys/bluetooth/controller/ll_sw/ull_master_internal.h b/subsys/bluetooth/controller/ll_sw/ull_master_internal.h new file mode 100644 index 00000000000..eb2454fbb45 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_master_internal.h @@ -0,0 +1,10 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +void ull_master_setup(memq_link_t *link, struct node_rx_hdr *rx, + struct node_rx_ftr *ftr, struct lll_conn *lll); +void ull_master_ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param); diff --git a/subsys/bluetooth/controller/ll_sw/ull_scan.c b/subsys/bluetooth/controller/ll_sw/ull_scan.c new file mode 100644 index 00000000000..ccd422e6f84 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_scan.c @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2016-2019 Nordic Semiconductor ASA + * Copyright (c) 2016 Vinayak Kariappa Chettimada + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include "hal/ccm.h" +#include "hal/ticker.h" + +#include "util/util.h" +#include "util/memq.h" +#include "util/mayfly.h" + +#include "ticker/ticker.h" + +#include "pdu.h" +#include "ll.h" + +#include "lll.h" +#include "lll_vendor.h" +#include "lll_adv.h" +#include "lll_scan.h" +#include "lll_conn.h" +#include "lll_filter.h" + +#include "ull_adv_types.h" +#include "ull_scan_types.h" + +#include "ull_internal.h" +#include "ull_adv_internal.h" +#include "ull_scan_internal.h" +#include "ull_sched_internal.h" + 
+#define LOG_MODULE_NAME bt_ctlr_llsw_ull_scan +#include "common/log.h" +#include +#include "hal/debug.h" + +static int _init_reset(void); +static void ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param); +static u8_t disable(u16_t handle); + +#define CONFIG_BT_SCAN_MAX 1 +static struct ll_scan_set ll_scan[CONFIG_BT_SCAN_MAX]; + +u8_t ll_scan_params_set(u8_t type, u16_t interval, u16_t window, + u8_t own_addr_type, u8_t filter_policy) +{ + struct ll_scan_set *scan; + + scan = ull_scan_is_disabled_get(0); + if (!scan) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + return ull_scan_params_set(scan, type, interval, window, own_addr_type, + filter_policy); +} + +u8_t ll_scan_enable(u8_t enable) +{ + struct ll_scan_set *scan; + + if (!enable) { + return disable(0); + } + + scan = ull_scan_is_disabled_get(0); + if (!scan) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + return ull_scan_enable(scan); +} + +int ull_scan_init(void) +{ + int err; + + err = _init_reset(); + if (err) { + return err; + } + + return 0; +} + +int ull_scan_reset(void) +{ + u16_t handle; + int err; + + for (handle = 0; handle < CONFIG_BT_SCAN_MAX; handle++) { + (void)disable(handle); + } + + err = _init_reset(); + if (err) { + return err; + } + + return 0; +} + +u8_t ull_scan_params_set(struct ll_scan_set *scan, u8_t type, + u16_t interval, u16_t window, + u8_t own_addr_type, u8_t filter_policy) +{ + struct lll_scan *lll = &scan->lll; + + /* type value: + * 0000b - legacy 1M passive + * 0001b - legacy 1M active + * 0010b - Ext. 1M passive + * 0011b - Ext. 1M active + * 0100b - invalid + * 0101b - invalid + * 0110b - invalid + * 0111b - invalid + * 1000b - Ext. Coded passive + * 1001b - Ext. 
Coded active + */ + lll->type = type; + +#if defined(CONFIG_BT_CTLR_ADV_EXT) + lll->phy = type >> 1; +#endif /* CONFIG_BT_CTLR_ADV_EXT */ + + lll->filter_policy = filter_policy; + lll->interval = interval; + lll->ticks_window = HAL_TICKER_US_TO_TICKS((u64_t)window * 625); + + scan->own_addr_type = own_addr_type; + + return 0; +} + +u8_t ull_scan_enable(struct ll_scan_set *scan) +{ + volatile u32_t ret_cb = TICKER_STATUS_BUSY; + struct lll_scan *lll = &scan->lll; + u32_t ticks_slot_overhead; + u32_t ticks_slot_offset; + u32_t ticks_interval; + u32_t ticks_anchor; + u32_t ret; + +#if defined(CONFIG_BT_CTLR_PRIVACY) + ll_filters_scan_update(scan->filter_policy); + + if ((scan->type & 0x1) && + (scan->own_addr_type == BT_ADDR_LE_PUBLIC_ID || + scan->own_addr_type == BT_ADDR_LE_RANDOM_ID)) { + /* Generate RPAs if required */ + ll_rl_rpa_update(false); + lll->rpa_gen = 1; + lll->rl_idx = FILTER_IDX_NONE; + } +#endif + + lll->init_addr_type = scan->own_addr_type; + ll_addr_get(lll->init_addr_type, lll->init_addr); + + ull_hdr_init(&scan->ull); + lll_hdr_init(lll, scan); + + ticks_interval = HAL_TICKER_US_TO_TICKS((u64_t)lll->interval * 625); + + /* TODO: active_to_start feature port */ + scan->evt.ticks_active_to_start = 0; + scan->evt.ticks_xtal_to_start = + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US); + scan->evt.ticks_preempt_to_start = + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US); + if ((lll->ticks_window + + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) < + (ticks_interval - + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US))) { + scan->evt.ticks_slot = + (lll->ticks_window + + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)); + } else { + scan->evt.ticks_slot = + (ticks_interval - + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US)); + lll->ticks_window = 0; + } + + ticks_slot_offset = max(scan->evt.ticks_active_to_start, + scan->evt.ticks_xtal_to_start); + + if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) { + ticks_slot_overhead = ticks_slot_offset; + } else { 
+ ticks_slot_overhead = 0; + } + + ticks_anchor = ticker_ticks_now_get(); + +#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_SCHED_ADVANCED) + if (!lll->conn) { + u32_t ticks_ref = 0; + u32_t offset_us = 0; + + ull_sched_after_mstr_slot_get(TICKER_USER_ID_THREAD, + (ticks_slot_offset + + scan->evt.ticks_slot), + &ticks_ref, &offset_us); + + /* Use the ticks_ref as scanner's anchor if a free time space + * after any master role is available (indicated by a non-zero + * offset_us value). + */ + if (offset_us) { + ticks_anchor = ticks_ref + + HAL_TICKER_US_TO_TICKS(offset_us); + } + } +#endif /* CONFIG_BT_CENTRAL && CONFIG_BT_CTLR_SCHED_ADVANCED */ + + ret = ticker_start(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_THREAD, TICKER_ID_SCAN_BASE, + ticks_anchor, 0, ticks_interval, + HAL_TICKER_REMAINDER((u64_t)lll->interval * 625), + TICKER_NULL_LAZY, + (scan->evt.ticks_slot + ticks_slot_overhead), + ticker_cb, scan, + ull_ticker_status_give, (void *)&ret_cb); + + ret = ull_ticker_status_take(ret, &ret_cb); + if (ret != TICKER_STATUS_SUCCESS) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + scan->is_enabled = 1; + +#if defined(CONFIG_BT_CTLR_PRIVACY) +#if defined(CONFIG_BT_BROADCASTER) + if (!ull_adv_is_enabled_get(0)) +#endif + { + ll_adv_scan_state_cb(BIT(1)); + } +#endif + + return 0; +} + +u8_t ull_scan_disable(u16_t handle, struct ll_scan_set *scan) +{ + volatile u32_t ret_cb = TICKER_STATUS_BUSY; + void *mark; + u32_t ret; + + mark = ull_disable_mark(scan); + LL_ASSERT(mark == scan); + + ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD, + TICKER_ID_SCAN_BASE + handle, + ull_ticker_status_give, (void *)&ret_cb); + + ret = ull_ticker_status_take(ret, &ret_cb); + if (ret) { + mark = ull_disable_unmark(scan); + LL_ASSERT(mark == scan); + + return BT_HCI_ERR_CMD_DISALLOWED; + } + + ret = ull_disable(&scan->lll); + LL_ASSERT(!ret); + + mark = ull_disable_unmark(scan); + LL_ASSERT(mark == scan); + + return 0; +} + +struct ll_scan_set 
*ull_scan_set_get(u16_t handle) +{ + if (handle >= CONFIG_BT_SCAN_MAX) { + return NULL; + } + + return &ll_scan[handle]; +} + +u16_t ull_scan_handle_get(struct ll_scan_set *scan) +{ + return ((u8_t *)scan - (u8_t *)ll_scan) / sizeof(*scan); +} + +struct ll_scan_set *ull_scan_is_enabled_get(u16_t handle) +{ + struct ll_scan_set *scan; + + scan = ull_scan_set_get(handle); + if (!scan || !scan->is_enabled) { + return NULL; + } + + return scan; +} + +struct ll_scan_set *ull_scan_is_disabled_get(u16_t handle) +{ + struct ll_scan_set *scan; + + scan = ull_scan_set_get(handle); + if (!scan || scan->is_enabled) { + return NULL; + } + + return scan; +} + +u32_t ull_scan_is_enabled(u16_t handle) +{ + struct ll_scan_set *scan; + + scan = ull_scan_is_enabled_get(handle); + if (!scan) { + return 0; + } + + /* NOTE: BIT(0) - passive scanning enabled + * BIT(1) - active scanning enabled + * BIT(2) - initiator enabled + */ + return (((u32_t)scan->is_enabled << scan->lll.type) | +#if defined(CONFIG_BT_CENTRAL) + (scan->lll.conn ? 
BIT(2) : 0) | +#endif + 0); +} + +u32_t ull_scan_filter_pol_get(u16_t handle) +{ + struct ll_scan_set *scan; + + scan = ull_scan_is_enabled_get(handle); + if (!scan) { + return 0; + } + + return scan->lll.filter_policy; +} + +static int _init_reset(void) +{ + return 0; +} + +static void ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param) +{ + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, lll_scan_prepare}; + static struct lll_prepare_param p; + struct ll_scan_set *scan = param; + u32_t ret; + u8_t ref; + + DEBUG_RADIO_PREPARE_O(1); + + /* Increment prepare reference count */ + ref = ull_ref_inc(&scan->ull); + LL_ASSERT(ref); + + /* Append timing parameters */ + p.ticks_at_expire = ticks_at_expire; + p.remainder = remainder; + p.lazy = lazy; + p.param = &scan->lll; + _mfy.param = &p; + + /* Kick LLL prepare */ + ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, + 0, &_mfy); + LL_ASSERT(!ret); + +#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_SCHED_ADVANCED) + /* calc next group in us for the anchor where first connection event + * to be placed + */ + if (scan->lll.conn) { + static memq_link_t s_link; + static struct mayfly s_mfy_sched_after_mstr_offset_get = { + 0, 0, &s_link, NULL, + ull_sched_mfy_after_mstr_offset_get}; + u32_t retval; + + s_mfy_sched_after_mstr_offset_get.param = (void *)scan; + + retval = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, + TICKER_USER_ID_ULL_LOW, 1, + &s_mfy_sched_after_mstr_offset_get); + LL_ASSERT(!retval); + } +#endif /* CONFIG_BT_CENTRAL && CONFIG_BT_CTLR_SCHED_ADVANCED */ + + DEBUG_RADIO_PREPARE_O(1); +} + +static u8_t disable(u16_t handle) +{ + struct ll_scan_set *scan; + u8_t ret; + + scan = ull_scan_is_enabled_get(handle); + if (!scan || scan->lll.conn) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + ret = ull_scan_disable(handle, scan); + if (ret) { + return ret; + } + + scan->is_enabled = 0; + +#if defined(CONFIG_BT_CTLR_PRIVACY) +#if 
defined(CONFIG_BT_BROADCASTER) + if (!ull_adv_is_enabled_get(0)) +#endif + { + ll_adv_scan_state_cb(0); + } +#endif + + return 0; +} diff --git a/subsys/bluetooth/controller/ll_sw/ull_scan_internal.h b/subsys/bluetooth/controller/ll_sw/ull_scan_internal.h new file mode 100644 index 00000000000..454c26281a9 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_scan_internal.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* NOTE: Definitions used internal to ULL implementations */ + +int ull_scan_init(void); +int ull_scan_reset(void); + +/* Set scan parameters */ +u8_t ull_scan_params_set(struct ll_scan_set *scan, u8_t type, + u16_t interval, u16_t window, + u8_t own_addr_type, u8_t filter_policy); + +/* Enable and start scanning/initiating role */ +u8_t ull_scan_enable(struct ll_scan_set *scan); + +/* Disable scanning/initiating role */ +u8_t ull_scan_disable(u16_t handle, struct ll_scan_set *scan); + +/* Return ll_scan_set context (unconditional) */ +struct ll_scan_set *ull_scan_set_get(u16_t handle); + +/* Return the scan set handle given the scan set instance */ +u16_t ull_scan_handle_get(struct ll_scan_set *scan); + +/* Return ll_scan_set context if enabled */ +struct ll_scan_set *ull_scan_is_enabled_get(u16_t handle); + +/* Return ll_scan_set context if disabled */ +struct ll_scan_set *ull_scan_is_disabled_get(u16_t handle); + +/* Return flags if enabled */ +u32_t ull_scan_is_enabled(u16_t handle); + +/* Return filter policy used */ +u32_t ull_scan_filter_pol_get(u16_t handle); diff --git a/subsys/bluetooth/controller/ll_sw/ull_scan_types.h b/subsys/bluetooth/controller/ll_sw/ull_scan_types.h new file mode 100644 index 00000000000..8a5495b02ea --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_scan_types.h @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +struct ll_scan_set { + struct evt_hdr
evt; + struct ull_hdr ull; + struct lll_scan lll; + + u8_t is_enabled:1; + u8_t own_addr_type:2; +}; diff --git a/subsys/bluetooth/controller/ll_sw/ull_sched.c b/subsys/bluetooth/controller/ll_sw/ull_sched.c new file mode 100644 index 00000000000..665853519b3 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_sched.c @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include "util/memq.h" + +#include "pdu.h" + +#include "lll.h" +#include "lll_scan.h" + +#include "ull_scan_types.h" + +void ull_sched_after_mstr_slot_get(u8_t user_id, u32_t ticks_slot_abs, + u32_t *ticks_anchor, u32_t *us_offset) +{ + /* TODO: */ +} + +void ull_sched_mfy_after_mstr_offset_get(void *param) +{ + struct ll_scan_set *scan = param; + + /* TODO: */ + scan->lll.conn_win_offset_us = 0; +} + +void ull_sched_mfy_free_win_offset_calc(void *param) +{ + /* TODO: */ +} + +void ull_sched_mfy_win_offset_use(void *param) +{ + /* TODO: */ +} + +void ull_sched_mfy_win_offset_select(void *param) +{ + /* TODO: */ +} diff --git a/subsys/bluetooth/controller/ll_sw/ull_sched_internal.h b/subsys/bluetooth/controller/ll_sw/ull_sched_internal.h new file mode 100644 index 00000000000..bb452a049b4 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_sched_internal.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +void ull_sched_after_mstr_slot_get(u8_t user_id, u32_t ticks_slot_abs, + u32_t *ticks_anchor, u32_t *us_offset); +void ull_sched_mfy_after_mstr_offset_get(void *param); +void ull_sched_mfy_free_win_offset_calc(void *param); +void ull_sched_mfy_win_offset_use(void *param); +void ull_sched_mfy_win_offset_select(void *param); diff --git a/subsys/bluetooth/controller/ll_sw/ull_slave.c b/subsys/bluetooth/controller/ll_sw/ull_slave.c new file mode 100644 index 00000000000..7bbb66d00a7 --- /dev/null +++ 
b/subsys/bluetooth/controller/ll_sw/ull_slave.c @@ -0,0 +1,488 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include + +#include "hal/ticker.h" +#include "hal/ccm.h" +#include "util/memq.h" +#include "util/mayfly.h" +#include "ticker/ticker.h" + +#include "util/util.h" + +#include "pdu.h" + +#include "lll.h" +#include "lll_vendor.h" +#include "lll_adv.h" +#include "lll_conn.h" +#include "lll_slave.h" +#include "lll_tim_internal.h" + +#include "ull_adv_types.h" +#include "ull_conn_types.h" + +#include "ull_internal.h" +#include "ull_adv_internal.h" +#include "ull_conn_internal.h" +#include "ull_slave_internal.h" + +#define LOG_MODULE_NAME bt_ctlr_llsw_ull_slave +#include "common/log.h" +#include +#include "hal/debug.h" + +static void ticker_op_stop_adv_cb(u32_t status, void *param); +static void ticker_op_cb(u32_t status, void *param); + +void ull_slave_setup(memq_link_t *link, struct node_rx_hdr *rx, + struct node_rx_ftr *ftr, struct lll_conn *lll) +{ + u32_t conn_offset_us, conn_interval_us; + u8_t ticker_id_adv, ticker_id_conn; + u8_t peer_addr[BDADDR_SIZE]; + u32_t ticks_slot_overhead; + u32_t mayfly_was_enabled; + u32_t ticks_slot_offset; + struct pdu_adv *pdu_adv; + struct ll_adv_set *adv; + struct node_rx_cc *cc; + struct ll_conn *conn; + u32_t ticker_status; + u8_t peer_addr_type; + u16_t win_offset; + u16_t timeout; + u8_t chan_sel; + + ((struct lll_adv *)ftr->param)->conn = NULL; + + adv = ((struct lll_adv *)ftr->param)->hdr.parent; + conn = lll->hdr.parent; + + /* Populate the slave context */ + pdu_adv = (void *)((struct node_rx_pdu *)rx)->pdu; + memcpy(&lll->crc_init[0], &pdu_adv->connect_ind.crc_init[0], 3); + memcpy(&lll->access_addr[0], &pdu_adv->connect_ind.access_addr[0], 4); + memcpy(&lll->data_chan_map[0], &pdu_adv->connect_ind.chan_map[0], + sizeof(lll->data_chan_map)); + lll->data_chan_count = util_ones_count_get(&lll->data_chan_map[0], + 
sizeof(lll->data_chan_map)); + lll->data_chan_hop = pdu_adv->connect_ind.hop; + lll->interval = pdu_adv->connect_ind.interval; + lll->latency = pdu_adv->connect_ind.latency; + + win_offset = pdu_adv->connect_ind.win_offset; + conn_interval_us = pdu_adv->connect_ind.interval * 1250; + + /* calculate the window widening */ + lll->slave.sca = pdu_adv->connect_ind.sca; + lll->slave.window_widening_periodic_us = + (((lll_conn_ppm_local_get() + + lll_conn_ppm_get(lll->slave.sca)) * + conn_interval_us) + (1000000 - 1)) / 1000000; + lll->slave.window_widening_max_us = (conn_interval_us >> 1) - TIFS_US; + lll->slave.window_size_event_us = pdu_adv->connect_ind.win_size * 1250; + + /* procedure timeouts */ + conn->supervision_reload = + RADIO_CONN_EVENTS((pdu_adv->connect_ind.timeout * 10 * 1000), + conn_interval_us); + conn->procedure_reload = + RADIO_CONN_EVENTS((40 * 1000 * 1000), conn_interval_us); + +#if defined(CONFIG_BT_CTLR_LE_PING) + /* APTO in no. of connection events */ + conn->apto_reload = RADIO_CONN_EVENTS((30 * 1000 * 1000), + conn_interval_us); + /* Dispatch LE Ping PDU 6 connection events (that peer would + * listen to) before 30s timeout + * TODO: "peer listens to" is greater than 30s due to latency + */ + conn->appto_reload = (conn->apto_reload > (lll->latency + 6)) ? 
+ (conn->apto_reload - (lll->latency + 6)) : + conn->apto_reload; +#endif /* CONFIG_BT_CTLR_LE_PING */ + + /* FIXME: */ + #if 0 + memcpy((void *)&lll->slave.force, &lll->access_addr[0], + sizeof(lll->slave.force)); + #endif + + chan_sel = pdu_adv->chan_sel; + peer_addr_type = pdu_adv->tx_addr; + memcpy(peer_addr, pdu_adv->connect_ind.init_addr, BDADDR_SIZE); + timeout = pdu_adv->connect_ind.timeout; + + cc = (void *)pdu_adv; + cc->status = 0; + cc->role = 1; + cc->peer_addr_type = peer_addr_type; + memcpy(cc->peer_addr, peer_addr, BDADDR_SIZE); + cc->interval = lll->interval; + cc->latency = lll->latency; + cc->timeout = timeout; + cc->sca = lll->slave.sca; + + lll->handle = ll_conn_handle_get(conn); + rx->handle = lll->handle; + + /* Use Channel Selection Algorithm #2 if peer too supports it */ + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + struct node_rx_pdu *rx_csa; + struct node_rx_cs *cs; + + /* pick the rx node instance stored within the connection + * rx node. + */ + rx_csa = (void *)ftr->extra; + + /* Enqueue the connection event */ + ll_rx_put(link, rx); + + /* use the rx node for CSA event */ + rx = (void *)rx_csa; + link = rx->link; + + rx->handle = lll->handle; + rx->type = NODE_RX_TYPE_CHAN_SEL_ALGO; + + cs = (void *)rx_csa->pdu; + + if (chan_sel) { + u16_t aa_ls = ((u16_t)lll->access_addr[1] << 8) | + lll->access_addr[0]; + u16_t aa_ms = ((u16_t)lll->access_addr[3] << 8) | + lll->access_addr[2]; + + lll->data_chan_sel = 1; + lll->data_chan_id = aa_ms ^ aa_ls; + + cs->csa = 0x01; + } else { + cs->csa = 0x00; + } + } + + ll_rx_put(link, rx); + ll_rx_sched(); +#if 0 + /* Prepare the rx packet structure */ + node_rx->hdr.handle = conn->handle; + node_rx->hdr.type = NODE_RX_TYPE_CONNECTION; + + /* prepare connection complete structure */ + pdu_data = (void *)node_rx->pdu_data; + radio_le_conn_cmplt = (void *)pdu_data->lldata; + radio_le_conn_cmplt->status = 0x00; + radio_le_conn_cmplt->role = 0x01; +#if defined(CONFIG_BT_CTLR_PRIVACY) + 
radio_le_conn_cmplt->own_addr_type = pdu_adv->rx_addr; + memcpy(&radio_le_conn_cmplt->own_addr[0], + &pdu_adv->connect_ind.adv_addr[0], BDADDR_SIZE); + if (rl_idx != FILTER_IDX_NONE) { + /* TODO: store rl_idx instead if safe */ + /* Store identity address */ + ll_rl_id_addr_get(rl_idx, + &radio_le_conn_cmplt->peer_addr_type, + &radio_le_conn_cmplt->peer_addr[0]); + /* Mark it as identity address from RPA (0x02, 0x03) */ + radio_le_conn_cmplt->peer_addr_type += 2; + + /* Store peer RPA */ + memcpy(&radio_le_conn_cmplt->peer_rpa[0], + &pdu_adv->connect_ind.init_addr[0], + BDADDR_SIZE); + } else { + memset(&radio_le_conn_cmplt->peer_rpa[0], 0x0, + BDADDR_SIZE); +#else + if (1) { +#endif /* CONFIG_BT_CTLR_PRIVACY */ + radio_le_conn_cmplt->peer_addr_type = pdu_adv->tx_addr; + memcpy(&radio_le_conn_cmplt->peer_addr[0], + &pdu_adv->connect_ind.init_addr[0], + BDADDR_SIZE); + } + + radio_le_conn_cmplt->interval = + pdu_adv->connect_ind.interval; + radio_le_conn_cmplt->latency = + pdu_adv->connect_ind.latency; + radio_le_conn_cmplt->timeout = + pdu_adv->connect_ind.timeout; + radio_le_conn_cmplt->mca = + pdu_adv->connect_ind.sca; + + /* enqueue connection complete structure into queue */ + rx_fc_lock(conn->handle); + packet_rx_enqueue(); + + /* Use Channel Selection Algorithm #2 if peer too supports it */ + if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) { + struct radio_le_chan_sel_algo *le_chan_sel_algo; + + /* Generate LE Channel Selection Algorithm event */ + node_rx = packet_rx_reserve_get(3); + LL_ASSERT(node_rx); + + node_rx->hdr.handle = conn->handle; + node_rx->hdr.type = NODE_RX_TYPE_CHAN_SEL_ALGO; + + pdu_data = (void *)node_rx->pdu_data; + le_chan_sel_algo = (void *)pdu_data->lldata; + + if (pdu_adv->chan_sel) { + u16_t aa_ls = + ((u16_t)conn->access_addr[1] << 8) | + conn->access_addr[0]; + u16_t aa_ms = + ((u16_t)conn->access_addr[3] << 8) | + conn->access_addr[2]; + + conn->data_chan_sel = 1; + conn->data_chan_id = aa_ms ^ aa_ls; + + 
le_chan_sel_algo->chan_sel_algo = 0x01; + } else { + le_chan_sel_algo->chan_sel_algo = 0x00; + } + + packet_rx_enqueue(); + } +#endif + + /* TODO: active_to_start feature port */ + conn->evt.ticks_active_to_start = 0; + conn->evt.ticks_xtal_to_start = + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US); + conn->evt.ticks_preempt_to_start = + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US); + conn->evt.ticks_slot = + HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US + + ftr->us_radio_rdy + 328 + TIFS_US + + 328); + + ticks_slot_offset = max(conn->evt.ticks_active_to_start, + conn->evt.ticks_xtal_to_start); + + if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) { + ticks_slot_overhead = ticks_slot_offset; + } else { + ticks_slot_overhead = 0; + } + + conn_interval_us -= lll->slave.window_widening_periodic_us; + + conn_offset_us = ftr->us_radio_end; + conn_offset_us += ((u64_t)win_offset + 1) * 1250; + conn_offset_us -= EVENT_OVERHEAD_START_US; + conn_offset_us -= EVENT_JITTER_US << 1; + conn_offset_us -= EVENT_JITTER_US; + conn_offset_us -= ftr->us_radio_rdy; + + /* disable ticker job, in order to chain stop and start to avoid RTC + * being stopped if no tickers active. + */ +#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) + mayfly_was_enabled = mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH, + TICKER_USER_ID_ULL_LOW); + mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0); +#endif + + /* Stop Advertiser */ + ticker_id_adv = TICKER_ID_ADV_BASE + ull_adv_handle_get(adv); + ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_ULL_HIGH, + ticker_id_adv, ticker_op_stop_adv_cb, adv); + ticker_op_stop_adv_cb(ticker_status, adv); + + /* Stop Direct Adv Stop */ + if (adv->lll.is_hdcd) { + /* Advertiser stop can expire while here in this ISR. + * Deferred attempt to stop can fail as it would have + * expired, hence ignore failure. 
+ */ + ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH, + TICKER_ID_ADV_STOP, NULL, NULL); + } + + /* Start Slave */ + ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn); + ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR, + TICKER_USER_ID_ULL_HIGH, + ticker_id_conn, + ftr->ticks_anchor - ticks_slot_offset, + HAL_TICKER_US_TO_TICKS(conn_offset_us), + HAL_TICKER_US_TO_TICKS(conn_interval_us), + HAL_TICKER_REMAINDER(conn_interval_us), + TICKER_NULL_LAZY, + (conn->evt.ticks_slot + + ticks_slot_overhead), + ull_slave_ticker_cb, conn, ticker_op_cb, + (void *)__LINE__); + LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) || + (ticker_status == TICKER_STATUS_BUSY)); + +#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO) + /* enable ticker job, if disabled in this function */ + if (mayfly_was_enabled) { + mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, + 1); + } +#else + ARG_UNUSED(mayfly_was_enabled); +#endif +} + +void ull_slave_done(struct node_rx_event_done *done, u32_t *ticks_drift_plus, + u32_t *ticks_drift_minus) +{ + u32_t start_to_address_expected_us; + u32_t start_to_address_actual_us; + u32_t window_widening_event_us; + u32_t preamble_to_addr_us; + + start_to_address_actual_us = + done->extra.slave.start_to_address_actual_us; + window_widening_event_us = + done->extra.slave.window_widening_event_us; + preamble_to_addr_us = + done->extra.slave.preamble_to_addr_us; + + start_to_address_expected_us = EVENT_JITTER_US + + (EVENT_JITTER_US << 1) + + window_widening_event_us + + preamble_to_addr_us; + + if (start_to_address_actual_us <= start_to_address_expected_us) { + *ticks_drift_plus = + HAL_TICKER_US_TO_TICKS(window_widening_event_us); + *ticks_drift_minus = + HAL_TICKER_US_TO_TICKS((start_to_address_expected_us - + start_to_address_actual_us)); + } else { + *ticks_drift_plus = + HAL_TICKER_US_TO_TICKS(start_to_address_actual_us); + *ticks_drift_minus = + HAL_TICKER_US_TO_TICKS(EVENT_JITTER_US + + 
(EVENT_JITTER_US << 1) + + preamble_to_addr_us); + } +} + +void ull_slave_ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param) +{ + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, lll_slave_prepare}; + static struct lll_prepare_param p; + struct ll_conn *conn = param; + u32_t err; + u8_t ref; + int ret; + + DEBUG_RADIO_PREPARE_S(1); + + /* Handle any LL Control Procedures */ + ret = ull_conn_llcp(conn, ticks_at_expire, lazy); + if (ret) { + return; + } + + /* Increment prepare reference count */ + ref = ull_ref_inc(&conn->ull); + LL_ASSERT(ref); + + /* Append timing parameters */ + p.ticks_at_expire = ticks_at_expire; + p.remainder = remainder; + p.lazy = lazy; + p.param = &conn->lll; + _mfy.param = &p; + + /* Kick LLL prepare */ + err = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, + 0, &_mfy); + LL_ASSERT(!err); + + /* De-mux remaining tx nodes from FIFO */ + ull_conn_tx_demux(UINT8_MAX); + + /* Enqueue towards LLL */ + ull_conn_tx_lll_enqueue(conn, UINT8_MAX); + + DEBUG_RADIO_PREPARE_S(1); +} + +#if defined(CONFIG_BT_CTLR_LE_ENC) +u8_t ll_start_enc_req_send(u16_t handle, u8_t error_code, + u8_t const *const ltk) +{ + struct ll_conn *conn; + + conn = ll_connected_get(handle); + if (!conn) { + return BT_HCI_ERR_UNKNOWN_CONN_ID; + } + + if (error_code) { + if (conn->refresh == 0) { + if (conn->llcp_req != conn->llcp_ack) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn->llcp.encryption.error_code = error_code; + conn->llcp.encryption.initiate = 0; + + conn->llcp_type = LLCP_ENCRYPTION; + conn->llcp_req++; + } else { + if (conn->llcp_terminate.ack != + conn->llcp_terminate.req) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + conn->llcp_terminate.reason_own = error_code; + + conn->llcp_terminate.req++; + } + } else { + memcpy(&conn->llcp.encryption.ltk[0], ltk, + sizeof(conn->llcp.encryption.ltk)); + + if (conn->llcp_req != conn->llcp_ack) { + return BT_HCI_ERR_CMD_DISALLOWED; + } + + 
conn->llcp.encryption.error_code = 0; + conn->llcp.encryption.initiate = 0; + + conn->llcp_type = LLCP_ENCRYPTION; + conn->llcp_req++; + } + + return 0; +} +#endif /* CONFIG_BT_CTLR_LE_ENC */ + +static void ticker_op_stop_adv_cb(u32_t status, void *param) +{ + LL_ASSERT(status != TICKER_STATUS_FAILURE || + param == ull_disable_mark_get()); +} + +static void ticker_op_cb(u32_t status, void *param) +{ + ARG_UNUSED(param); + + LL_ASSERT(status == TICKER_STATUS_SUCCESS); +} diff --git a/subsys/bluetooth/controller/ll_sw/ull_slave_internal.h b/subsys/bluetooth/controller/ll_sw/ull_slave_internal.h new file mode 100644 index 00000000000..ead5e20a8ec --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_slave_internal.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +void ull_slave_setup(memq_link_t *link, struct node_rx_hdr *rx, + struct node_rx_ftr *ftr, struct lll_conn *lll); +void ull_slave_done(struct node_rx_event_done *done, u32_t *ticks_drift_plus, + u32_t *ticks_drift_minus); +void ull_slave_ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy, + void *param); diff --git a/subsys/bluetooth/controller/ll_sw/ull_tmp.c b/subsys/bluetooth/controller/ll_sw/ull_tmp.c new file mode 100644 index 00000000000..dde19cefc35 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_tmp.c @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +#include +#include + +#if defined(CONFIG_BT_CTLR_DEBUG_PINS) +#if defined(CONFIG_PRINTK) +#undef CONFIG_PRINTK +#endif +#endif + +#include "hal/ccm.h" + +#include "util/mem.h" +#include "util/mfifo.h" +#include "util/memq.h" +#include "util/mayfly.h" + +#include "ticker/ticker.h" + +#include "pdu.h" + +#include "lll.h" +#include "lll_conn.h" +#include "lll_tmp.h" +#include "ull_internal.h" +#include "ull_tmp.h" +#include "ull_tmp_internal.h" + 
+#define LOG_MODULE_NAME bt_ctlr_llsw_ull_tmp +#include "common/log.h" +#include +#include "hal/debug.h" + +#define TMP_TICKER_TICKS_PERIOD 32768 +#define TMP_TICKER_TICKS_SLOT 327 + +#define TMP_TX_POOL_SIZE ((CONFIG_BT_TMP_TX_SIZE_MAX) * \ + (CONFIG_BT_TMP_TX_COUNT_MAX)) + +/* NOTE: structure accessed by Thread and ULL */ +struct ull_tmp { + struct ull_hdr hdr; + + u8_t is_enabled:1; +}; + +struct tmp { + struct ull_tmp ull; + struct lll_tmp lll; +}; + +static struct tmp tmp_inst[CONFIG_BT_TMP_MAX]; + +static MFIFO_DEFINE(tmp_tx, sizeof(struct lll_tx), + CONFIG_BT_TMP_TX_COUNT_MAX); + +static struct { + void *free; + u8_t pool[TMP_TX_POOL_SIZE]; +} mem_tmp_tx; + +static struct { + void *free; + u8_t pool[sizeof(memq_link_t) * CONFIG_BT_TMP_TX_COUNT_MAX]; +} mem_link_tx; + +static int _init_reset(void); +static void _ticker_cb(u32_t ticks_at_expire, u32_t remainder, + u16_t lazy, void *param); +static void _tx_demux(void); + +int ull_tmp_init(void) +{ + int err; + + err = _init_reset(); + if (err) { + return err; + } + + return 0; +} + +int ull_tmp_reset(void) +{ + u16_t handle; + int err; + + handle = CONFIG_BT_TMP_MAX; + while (handle--) { + ull_tmp_disable(handle); + } + + /* Re-initialize the Tx mfifo */ + MFIFO_INIT(tmp_tx); + + err = _init_reset(); + if (err) { + return err; + } + + return 0; +} + +u16_t ull_tmp_handle_get(struct lll_tmp *tmp) +{ + return ((u8_t *)CONTAINER_OF(tmp, struct tmp, lll) - + (u8_t *)&tmp_inst[0]) / sizeof(struct tmp); + +} + +int ull_tmp_enable(u16_t handle) +{ + u32_t tmp_ticker_anchor; + u8_t tmp_ticker_id; + struct tmp *inst; + int ret; + + if (handle >= CONFIG_BT_TMP_MAX) { + return -EINVAL; + } + + inst = &tmp_inst[handle]; + if (inst->ull.is_enabled) { + return -EALREADY; + } + + ull_hdr_init(&inst->ull.hdr); + lll_hdr_init(&inst->lll, inst); + + if (!inst->lll.link_free) { + inst->lll.link_free = &inst->lll._link; + } + + memq_init(inst->lll.link_free, &inst->lll.memq_tx.head, + &inst->lll.memq_tx.tail); + + tmp_ticker_id = 
TICKER_ID_TMP_BASE + handle; + tmp_ticker_anchor = ticker_ticks_now_get(); + + ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD, + tmp_ticker_id, + tmp_ticker_anchor, + 0, + TMP_TICKER_TICKS_PERIOD, + TICKER_NULL_REMAINDER, + TICKER_NULL_LAZY, + TMP_TICKER_TICKS_SLOT, + _ticker_cb, inst, + NULL, NULL); + if (ret) { + goto enable_cleanup; + } + + inst->lll.link_free = NULL; + inst->ull.is_enabled = 1; + +enable_cleanup: + + return ret; +} + +int ull_tmp_disable(u16_t handle) +{ + u8_t tmp_ticker_id; + struct tmp *inst; + int ret; + + if (handle >= CONFIG_BT_TMP_MAX) { + return -EINVAL; + } + + inst = &tmp_inst[handle]; + if (!inst->ull.is_enabled) { + return -EALREADY; + } + + tmp_ticker_id = TICKER_ID_TMP_BASE + handle; + + ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD, + tmp_ticker_id, + NULL, NULL); + if (ret) { + return ret; + } + + ret = ull_disable(&inst->lll); + if (ret) { + return ret; + } + + inst->ull.is_enabled = 0; + inst->lll.link_free = memq_deinit(&inst->lll.memq_tx.head, + &inst->lll.memq_tx.tail); + + return ret; +} + +int ull_tmp_data_send(u16_t handle, u8_t size, u8_t *data) +{ + struct lll_tx *tx; + struct node_tx *node_tx; + struct tmp *inst; + u8_t idx; + + if (handle >= CONFIG_BT_TMP_MAX) { + return -EINVAL; + } + + inst = &tmp_inst[handle]; + if (!inst->ull.is_enabled) { + return -EINVAL; + } + + if (size > CONFIG_BT_TMP_TX_SIZE_MAX) { + return -EMSGSIZE; + } + + idx = MFIFO_ENQUEUE_GET(tmp_tx, (void **) &tx); + if (!tx) { + return -ENOBUFS; + } + + tx->node = mem_acquire(&mem_tmp_tx.free); + if (!tx->node) { + return -ENOMEM; + } + + tx->handle = handle; + + node_tx = tx->node; + memcpy(node_tx->pdu, data, size); + + MFIFO_ENQUEUE(tmp_tx, idx); + + return 0; +} + +void ull_tmp_link_tx_release(memq_link_t *link) +{ + mem_release(link, &mem_link_tx.free); +} + +static int _init_reset(void) +{ + /* Initialize tx pool. 
*/ + mem_init(mem_tmp_tx.pool, CONFIG_BT_TMP_TX_SIZE_MAX, + CONFIG_BT_TMP_TX_COUNT_MAX, &mem_tmp_tx.free); + + /* Initialize tx link pool. */ + mem_init(mem_link_tx.pool, sizeof(memq_link_t), + CONFIG_BT_TMP_TX_COUNT_MAX, &mem_link_tx.free); + + return 0; +} + +static void _ticker_cb(u32_t ticks_at_expire, u32_t remainder, + u16_t lazy, void *param) +{ + static memq_link_t _link; + static struct mayfly _mfy = {0, 0, &_link, NULL, lll_tmp_prepare}; + static struct lll_prepare_param p; + struct tmp *inst = param; + u32_t ret; + u8_t ref; + + printk("\t_ticker_cb (%p) enter: %u, %u, %u.\n", param, + ticks_at_expire, remainder, lazy); + DEBUG_RADIO_PREPARE_A(1); + + /* Increment prepare reference count */ + ref = ull_ref_inc(&inst->ull.hdr); + LL_ASSERT(ref); + + /* Append timing parameters */ + p.ticks_at_expire = ticks_at_expire; + p.remainder = remainder; + p.lazy = lazy; + p.param = &inst->lll; + _mfy.param = &p; + + ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, + 0, &_mfy); + LL_ASSERT(!ret); + + /* De-mux tx FIFO */ + _tx_demux(); + + DEBUG_RADIO_PREPARE_A(1); + printk("\t_ticker_cb (%p) exit.\n", param); +} + +static void _tx_demux(void) +{ + struct lll_tx *tx; + + tx = MFIFO_DEQUEUE_GET(tmp_tx); + while (tx) { + memq_link_t *link; + struct tmp *inst; + + inst = &tmp_inst[tx->handle]; + + printk("\t_ticker_cb (%p) tx_demux (%p): h = 0x%x, n=%p.\n", + inst, tx, tx->handle, tx->node); + + link = mem_acquire(&mem_link_tx.free); + LL_ASSERT(link); + + memq_enqueue(link, tx->node, &inst->lll.memq_tx.tail); + + MFIFO_DEQUEUE(tmp_tx); + + tx = MFIFO_DEQUEUE_GET(tmp_tx); + } +} diff --git a/subsys/bluetooth/controller/ll_sw/ull_tmp.h b/subsys/bluetooth/controller/ll_sw/ull_tmp.h new file mode 100644 index 00000000000..fd8c5f56bae --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_tmp.h @@ -0,0 +1,10 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +int ull_tmp_enable(u16_t handle); 
+int ull_tmp_disable(u16_t handle); + +int ull_tmp_data_send(u16_t handle, u8_t size, u8_t *data); diff --git a/subsys/bluetooth/controller/ll_sw/ull_tmp_internal.h b/subsys/bluetooth/controller/ll_sw/ull_tmp_internal.h new file mode 100644 index 00000000000..c3ceeeaa1a3 --- /dev/null +++ b/subsys/bluetooth/controller/ll_sw/ull_tmp_internal.h @@ -0,0 +1,9 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +int ull_tmp_init(void); +int ull_tmp_reset(void); +void ull_tmp_link_tx_release(memq_link_t *link); diff --git a/subsys/bluetooth/controller/util/mfifo.h b/subsys/bluetooth/controller/util/mfifo.h new file mode 100644 index 00000000000..eef65fb85d6 --- /dev/null +++ b/subsys/bluetooth/controller/util/mfifo.h @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2018-2019 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#define MFIFO_DEFINE(name, sz, cnt) \ + struct { \ + u8_t const s; /* TODO: const, optimise RAM use */ \ + u8_t const n; /* TODO: const, optimise RAM use */ \ + u8_t f; \ + u8_t l; \ + u8_t m[sz * ((cnt) + 1)]; \ + } mfifo_##name = { \ + .n = ((cnt) + 1), \ + .s = (sz), \ + .f = 0, \ + .l = 0, \ + } + +#define MFIFO_INIT(name) \ + mfifo_##name.f = mfifo_##name.l = 0 + +static inline bool mfifo_enqueue_idx_get(u8_t count, u8_t first, u8_t last, + u8_t *idx) +{ + last = last + 1; + if (last == count) { + last = 0; + } + + if (last == first) { + return false; + } + + *idx = last; + + return true; +} + +#define MFIFO_ENQUEUE_IDX_GET(name, i) \ + mfifo_enqueue_idx_get(mfifo_##name.n, mfifo_##name.f, \ + mfifo_##name.l, (i)) + +static inline void mfifo_by_idx_enqueue(u8_t *fifo, u8_t size, u8_t idx, + void *mem, u8_t *last) +{ + void **p = (void **)(fifo + (*last) * size); + + *p = mem; + *last = idx; +} + +#define MFIFO_BY_IDX_ENQUEUE(name, i, mem) \ + mfifo_by_idx_enqueue(mfifo_##name.m, mfifo_##name.s, (i), \ + (mem), &mfifo_##name.l) + +static inline u8_t mfifo_enqueue_get(u8_t 
*fifo, u8_t size, u8_t count, + u8_t first, u8_t last, void **mem) +{ + u8_t idx; + + if (!mfifo_enqueue_idx_get(count, first, last, &idx)) { + *mem = NULL; + return 0; + } + + *mem = (void *)(fifo + last * size); + + return idx; +} + +#define MFIFO_ENQUEUE_GET(name, mem) \ + mfifo_enqueue_get(mfifo_##name.m, mfifo_##name.s, \ + mfifo_##name.n, mfifo_##name.f, \ + mfifo_##name.l, (mem)) + +static inline void mfifo_enqueue(u8_t idx, u8_t *last) +{ + *last = idx; +} + +#define MFIFO_ENQUEUE(name, idx) \ + mfifo_enqueue((idx), &mfifo_##name.l) + +static inline u8_t mfifo_avail_count_get(u8_t count, u8_t first, u8_t last) +{ + if (last >= first) { + return last - first; + } else { + return count - first + last; + } +} + +#define MFIFO_AVAIL_COUNT_GET(name) \ + mfifo_avail_count_get(mfifo_##name.n, mfifo_##name.f, \ + mfifo_##name.l) + +static inline void *mfifo_dequeue_get(u8_t *fifo, u8_t size, u8_t first, + u8_t last) +{ + if (first == last) { + return NULL; + } + + return (void *)(fifo + first * size); +} + +#define MFIFO_DEQUEUE_GET(name) \ + mfifo_dequeue_get(mfifo_##name.m, mfifo_##name.s, \ + mfifo_##name.f, mfifo_##name.l) + +static inline void *mfifo_dequeue_peek(u8_t *fifo, u8_t size, u8_t first, + u8_t last) +{ + if (first == last) { + return NULL; + } + + return *((void **)(fifo + first * size)); +} + +#define MFIFO_DEQUEUE_PEEK(name) \ + mfifo_dequeue_peek(mfifo_##name.m, mfifo_##name.s, \ + mfifo_##name.f, mfifo_##name.l) + +static inline void *mfifo_dequeue_iter_get(u8_t *fifo, u8_t size, u8_t count, + u8_t first, u8_t last, u8_t *idx) +{ + void *p; + u8_t i; + + if (*idx >= count) { + *idx = first; + } + + if (*idx == last) { + return NULL; + } + + i = *idx + 1; + if (i == count) { + i = 0; + } + + p = (void *)(fifo + (*idx) * size); + + *idx = i; + + return p; +} + +#define MFIFO_DEQUEUE_ITER_GET(name, idx) \ + mfifo_dequeue_iter_get(mfifo_##name.m, mfifo_##name.s, \ + mfifo_##name.n, mfifo_##name.f, \ + mfifo_##name.l, (idx)) + +static inline void 
*mfifo_dequeue(u8_t *fifo, u8_t size, u8_t count, + u8_t last, u8_t *first) +{ + u8_t _first = *first; + void *mem; + + if (_first == last) { + return NULL; + } + + mem = *((void **)(fifo + _first * size)); + + _first += 1; + if (_first == count) { + _first = 0; + } + *first = _first; + + return mem; +} + +#define MFIFO_DEQUEUE(name) \ + mfifo_dequeue(mfifo_##name.m, mfifo_##name.s, \ + mfifo_##name.n, mfifo_##name.l, \ + &mfifo_##name.f)