Bluetooth: controller: Introduce ULL LLL architecture

This is a squash merge of commits introducing the new split
Upper Link Layer and Lower Link Layer architecture of the
Bluetooth Low Energy controller.

This introduces a new, improved Link Layer based on the
concept of split responsibilities: the Upper Link Layer
(ULL) is in charge of control procedures, inter-event
scheduling and overall role management. The code for the
ULL is shared among all hardware implementations. The
Lower Link Layer (LLL) is responsible for intra-event
scheduling and vendor-specific radio hardware access.

The communication between ULL and LLL is achieved through
a set of FIFOs that contain both control and data packets.
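
For illustration, a minimal sketch of the LLL-to-ULL receive
path built on these FIFO primitives, mirroring the ISR code
added later in this commit (the node type shown is only an
example):

/* In a radio ISR (LLL): reserve a pre-allocated Rx node, fill
 * it, and hand it over to the ULL through the Rx FIFO.
 */
struct node_rx_pdu *rx = ull_pdu_rx_alloc_peek(1);

if (rx) {
	ull_pdu_rx_alloc();                   /* commit the allocation */
	rx->hdr.type = NODE_RX_TYPE_SCAN_REQ; /* example node type */
	rx->hdr.handle = 0xffff;
	ull_rx_put(rx->hdr.link, rx);         /* enqueue towards the ULL */
	ull_rx_sched();                       /* wake the ULL context */
}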

Signed-off-by: Vinayak Kariappa Chettimada <vich@nordicsemi.no>
Signed-off-by: Alberto Escolar Piedras <alpi@oticon.com>
Signed-off-by: Wolfgang Puffitsch <wopu@oticon.com>
Signed-off-by: Morten Priess <mtpr@oticon.com>
Vinayak Kariappa Chettimada 2018-12-18 05:48:20 +01:00 committed by Carles Cufí
commit 1475402d41
57 changed files with 16899 additions and 6 deletions

View file

@@ -8,7 +8,7 @@ add_subdirectory_ifdef(CONFIG_BT_SHELL shell)
add_subdirectory_ifdef(CONFIG_BT_CONN services)
if(CONFIG_BT_CTLR)
-if(CONFIG_BT_LL_SW)
+if(CONFIG_BT_LL_SW OR CONFIG_BT_LL_SW_SPLIT)
add_subdirectory(controller)
endif()
endif()

View file

@@ -51,6 +51,101 @@ if(CONFIG_BT_LL_SW)
)
endif()
if(CONFIG_BT_LL_SW_SPLIT)
zephyr_library_sources(
ll_sw/ull.c
ll_sw/lll_chan.c
)
zephyr_library_sources_ifdef(
CONFIG_BT_LLL_VENDOR_NORDIC
ll_sw/nordic/lll/lll.c
)
if(CONFIG_BT_BROADCASTER)
zephyr_library_sources(
ll_sw/ull_adv.c
)
zephyr_library_sources_ifdef(
CONFIG_BT_CTLR_ADV_EXT
ll_sw/ull_adv_aux.c
)
zephyr_library_sources_ifdef(
CONFIG_BT_LLL_VENDOR_NORDIC
ll_sw/nordic/lll/lll_adv.c
)
endif()
if(CONFIG_BT_OBSERVER)
zephyr_library_sources(
ll_sw/ull_scan.c
)
zephyr_library_sources_ifdef(
CONFIG_BT_LLL_VENDOR_NORDIC
ll_sw/nordic/lll/lll_scan.c
)
endif()
if(CONFIG_BT_CONN)
zephyr_library_sources(
ll_sw/ull_conn.c
)
zephyr_library_sources_ifdef(
CONFIG_BT_LLL_VENDOR_NORDIC
ll_sw/nordic/lll/lll_clock.c
ll_sw/nordic/lll/lll_conn.c
)
if(CONFIG_BT_PERIPHERAL)
zephyr_library_sources(
ll_sw/ull_slave.c
)
zephyr_library_sources_ifdef(
CONFIG_BT_LLL_VENDOR_NORDIC
ll_sw/nordic/lll/lll_slave.c
)
endif()
if(CONFIG_BT_CENTRAL)
zephyr_library_sources(
ll_sw/ull_master.c
)
zephyr_library_sources_ifdef(
CONFIG_BT_LLL_VENDOR_NORDIC
ll_sw/nordic/lll/lll_master.c
)
endif()
if(CONFIG_BT_CTLR_SCHED_ADVANCED)
zephyr_library_sources(
ll_sw/ull_sched.c
)
endif()
endif()
zephyr_library_sources_ifdef(
CONFIG_BT_CTLR_FILTER
ll_sw/nordic/lll/lll_filter.c
)
zephyr_library_sources_ifdef(
CONFIG_BT_HCI_MESH_EXT
ll_sw/ll_mesh.c
)
zephyr_library_sources_ifdef(
CONFIG_BT_CTLR_DTM
ll_sw/nordic/lll/lll_test.c
)
if(CONFIG_BT_TMP)
zephyr_library_sources(
ll_sw/ull_tmp.c
)
zephyr_library_sources_ifdef(
CONFIG_BT_LLL_VENDOR_NORDIC
ll_sw/nordic/lll/lll_tmp.c
)
endif()
if(CONFIG_BT_LLL_VENDOR_NORDIC)
zephyr_library_include_directories(
ll_sw/nordic/lll
)
zephyr_library_sources_ifdef(
CONFIG_BT_CTLR_PROFILE_ISR
ll_sw/nordic/lll/lll_prof.c
)
endif()
endif()
zephyr_library_sources_ifdef(
CONFIG_SOC_COMPATIBLE_NRF

View file

@@ -29,8 +29,24 @@ config BT_LL_SW
help
Use Zephyr software BLE Link Layer implementation.
config BT_LL_SW_SPLIT
bool "Software-based BLE Link Layer (ULL/LLL split)"
select BT_RECV_IS_RX_THREAD
select ENTROPY_GENERATOR
help
Use the Zephyr software BLE Link Layer ULL/LLL split implementation.
endchoice
config BT_LLL_VENDOR_NORDIC
bool "Use Nordic LLL"
depends on BT_LL_SW_SPLIT && SOC_COMPATIBLE_NRF
select ENTROPY_NRF5_RNG
select ENTROPY_NRF5_BIAS_CORRECTION
default y
help
Use Nordic Lower Link Layer implementation.
comment "BLE Controller configuration" comment "BLE Controller configuration"
config BT_CTLR_CRYPTO config BT_CTLR_CRYPTO
@@ -321,6 +337,14 @@ config BT_CTLR_ADV_EXT
Enable support for Bluetooth 5.0 LE Advertising Extensions in the
Controller.
config BT_ADV_SET
prompt "LE Advertising Extensions Sets"
depends on BT_CTLR_ADV_EXT
int
default 1
help
Maximum supported advertising sets.
config BT_CTLR_DTM
bool
help
@@ -451,6 +475,57 @@ config BT_CTLR_SCHED_ADVANCED
Disabling this feature will lead to overlapping roles in timespace,
leading to skipped events amongst active roles.
if BT_LL_SW_SPLIT
config BT_CTLR_LLL_PRIO
prompt "Lower Link Layer (Radio) IRQ priority"
int
range 0 3 if SOC_SERIES_NRF51X
range 0 6 if SOC_SERIES_NRF52X
default 0
help
The interrupt priority for event preparation and radio IRQ.
config BT_CTLR_ULL_HIGH_PRIO
prompt "Upper Link Layer High IRQ priority"
int
range BT_CTLR_LLL_PRIO 3 if SOC_SERIES_NRF51X
range BT_CTLR_LLL_PRIO 6 if SOC_SERIES_NRF52X
default 0
help
The interrupt priority for Ticker's Worker IRQ and Upper Link Layer
higher priority functions.
config BT_CTLR_ULL_LOW_PRIO
prompt "Upper Link Layer Low IRQ priority"
int
range BT_CTLR_ULL_HIGH_PRIO 3 if SOC_SERIES_NRF51X
range BT_CTLR_ULL_HIGH_PRIO 6 if SOC_SERIES_NRF52X
default 0
help
The interrupt priority for Ticker's Job IRQ and Upper Link Layer
lower priority functions.
config BT_CTLR_LOWEST_PRIO
prompt "Link Layer Lowest IRQ priority"
int
range BT_CTLR_ULL_LOW_PRIO 3 if SOC_SERIES_NRF51X
range BT_CTLR_ULL_LOW_PRIO 6 if SOC_SERIES_NRF52X
default 0
help
The interrupt priority for RNG and other non-critical functions.
config BT_CTLR_LOW_LAT
prompt "Low latency non-negotiating event pre-emption"
bool
default y if SOC_SERIES_NRF51X
help
Use low latency non-negotiating event pre-emption. This reduces
Radio ISR latencies introduced by the controller's event scheduling
framework and, consequently, reduces on-air radio utilization caused
by redundant radio state switches.
endif # BT_LL_SW_SPLIT
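
The Kconfig ranges above constrain the three priorities so that
LLL <= ULL_HIGH <= ULL_LOW (numerically lower is a higher NVIC
priority). A hypothetical compile-time guard expressing the same
invariant in C, a sketch and not part of this commit (BUILD_ASSERT
is Zephyr's static assertion macro from toolchain.h):

/* Radio (LLL) work must never be starved by ULL work; the Kconfig
 * ranges already enforce this, the asserts would catch manual edits.
 */
BUILD_ASSERT(CONFIG_BT_CTLR_LLL_PRIO <= CONFIG_BT_CTLR_ULL_HIGH_PRIO);
BUILD_ASSERT(CONFIG_BT_CTLR_ULL_HIGH_PRIO <= CONFIG_BT_CTLR_ULL_LOW_PRIO);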
config BT_CTLR_RADIO_ENABLE_FAST
bool "Use tTXEN/RXEN,FAST ramp-up"
depends on SOC_COMPATIBLE_NRF52X
@@ -612,6 +687,39 @@ config BT_CTLR_PA_LNA_GPIOTE_CHAN
help
Select the nRF5 GPIOTE channel to use for PA/LNA GPIO feature.
if BT_LL_SW_SPLIT
config BT_TMP
prompt "Temporary Role"
depends on BT_SHELL
bool
default y
help
Temporary role to manually test the ULL/LLL split architecture.
if BT_TMP
config BT_TMP_MAX
prompt "Temporary Role Max. Instances"
int
default 3
help
Maximum supported Temporary role instances.
config BT_TMP_TX_SIZE_MAX
prompt "Temporary Role Max. Tx buffer size"
int
default 10
help
Temporary role's maximum transmit buffer size in bytes.
config BT_TMP_TX_COUNT_MAX
prompt "Temporary Role Max. Tx buffers"
int
default 1
help
Temporary role's maximum transmit buffer count.
endif # BT_TMP
endif # BT_LL_SW_SPLIT
comment "BLE Controller debug configuration" comment "BLE Controller debug configuration"
config BT_CTLR_ASSERT_HANDLER config BT_CTLR_ASSERT_HANDLER

View file

@@ -20,6 +20,13 @@
#if defined(CONFIG_BT_LL_SW)
#include <misc/slist.h>
#include "ctrl.h"
#define ull_adv_is_enabled ll_adv_is_enabled
#define ull_scan_is_enabled ll_scan_is_enabled
#elif defined(CONFIG_BT_LL_SW_SPLIT)
#include "lll_scan.h"
#include "ull_scan_types.h"
#include "ull_adv_internal.h"
#include "ull_scan_internal.h"
#endif
static u8_t pub_addr[BDADDR_SIZE];
@@ -49,12 +56,12 @@ u8_t *ll_addr_get(u8_t addr_type, u8_t *bdaddr)
u32_t ll_addr_set(u8_t addr_type, u8_t const *const bdaddr)
{
if (IS_ENABLED(CONFIG_BT_BROADCASTER) &&
-ll_adv_is_enabled(0)) {
+ull_adv_is_enabled(0)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
if (IS_ENABLED(CONFIG_BT_OBSERVER) &&
-(ll_scan_is_enabled(0) & (BIT(1) | BIT(2)))) {
+(ull_scan_is_enabled(0) & (BIT(1) | BIT(2)))) {
return BT_HCI_ERR_CMD_DISALLOWED;
}

View file

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018 Nordic Semiconductor ASA
+* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/

View file

@@ -0,0 +1,199 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/types.h>
#include "hal/ccm.h"
#include "hal/radio.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_chan
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
#if defined(CONFIG_BT_CONN)
static u8_t chan_sel_remap(u8_t *chan_map, u8_t chan_index);
#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
static u16_t chan_prn(u16_t counter, u16_t chan_id);
#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
#endif /* CONFIG_BT_CONN */
void lll_chan_set(u32_t chan)
{
switch (chan) {
case 37:
radio_freq_chan_set(2);
break;
case 38:
radio_freq_chan_set(26);
break;
case 39:
radio_freq_chan_set(80);
break;
default:
if (chan < 11) {
radio_freq_chan_set(4 + (2 * chan));
} else if (chan < 40) {
radio_freq_chan_set(28 + (2 * (chan - 11)));
} else {
LL_ASSERT(0);
}
break;
}
radio_whiten_iv_set(chan);
}
#if defined(CONFIG_BT_CONN)
u8_t lll_chan_sel_1(u8_t *chan_use, u8_t hop, u16_t latency, u8_t *chan_map,
u8_t chan_count)
{
u8_t chan_next;
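/* Channel Selection Algorithm #1: advance the last unmapped channel
 * by hop once per elapsed connection event, i.e. (1 + latency) times,
 * modulo the 37 data channels.
 */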
chan_next = ((*chan_use) + (hop * (1 + latency))) % 37;
*chan_use = chan_next;
if ((chan_map[chan_next >> 3] & (1 << (chan_next % 8))) == 0) {
u8_t chan_index;
chan_index = chan_next % chan_count;
chan_next = chan_sel_remap(chan_map, chan_index);
} else {
/* channel can be used, return it */
}
return chan_next;
}
#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
u8_t lll_chan_sel_2(u16_t counter, u16_t chan_id, u8_t *chan_map,
u8_t chan_count)
{
u8_t chan_next;
u16_t prn_e;
prn_e = chan_prn(counter, chan_id);
chan_next = prn_e % 37;
if ((chan_map[chan_next >> 3] & (1 << (chan_next % 8))) == 0) {
u8_t chan_index;
chan_index = ((u32_t)chan_count * prn_e) >> 16;
chan_next = chan_sel_remap(chan_map, chan_index);
} else {
/* channel can be used, return it */
}
return chan_next;
}
#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
static u8_t chan_sel_remap(u8_t *chan_map, u8_t chan_index)
{
u8_t chan_next;
u8_t byte_count;
chan_next = 0;
byte_count = 5;
while (byte_count--) {
u8_t bite;
u8_t bit_count;
bite = *chan_map;
bit_count = 8;
while (bit_count--) {
if (bite & 0x01) {
if (chan_index == 0) {
break;
}
chan_index--;
}
chan_next++;
bite >>= 1;
}
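/* bit_count is a u8_t: a natural exit of the inner loop wraps it to
 * 255, so a value below 8 means the loop broke at the selected
 * channel and the search is complete.
 */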
if (bit_count < 8) {
break;
}
chan_map++;
}
return chan_next;
}
#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
#if defined(RADIO_UNIT_TEST)
void lll_chan_sel_2_ut(void)
{
u8_t chan_map_1[] = {0xFF, 0xFF, 0xFF, 0xFF, 0x1F};
u8_t chan_map_2[] = {0x00, 0x06, 0xE0, 0x00, 0x1E};
u8_t m;
m = lll_chan_sel_2(1, 0x305F, chan_map_1, 37);
LL_ASSERT(m == 20);
m = lll_chan_sel_2(2, 0x305F, chan_map_1, 37);
LL_ASSERT(m == 6);
m = lll_chan_sel_2(3, 0x305F, chan_map_1, 37);
LL_ASSERT(m == 21);
m = lll_chan_sel_2(6, 0x305F, chan_map_2, 9);
LL_ASSERT(m == 23);
m = lll_chan_sel_2(7, 0x305F, chan_map_2, 9);
LL_ASSERT(m == 9);
m = lll_chan_sel_2(8, 0x305F, chan_map_2, 9);
LL_ASSERT(m == 34);
}
#endif /* RADIO_UNIT_TEST */
/* Attribution:
* http://graphics.stanford.edu/%7Eseander/bithacks.html#ReverseByteWith32Bits
*/
static u8_t chan_rev_8(u8_t b)
{
b = (((u32_t)b * 0x0802LU & 0x22110LU) |
((u32_t)b * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
return b;
}
static u16_t chan_perm(u16_t i)
{
return (chan_rev_8((i >> 8) & 0xFF) << 8) | chan_rev_8(i & 0xFF);
}
static u16_t chan_mam(u16_t a, u16_t b)
{
return ((u32_t)a * 17 + b) & 0xFFFF;
}
static u16_t chan_prn(u16_t counter, u16_t chan_id)
{
u8_t iterate;
u16_t prn_e;
prn_e = counter ^ chan_id;
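/* Three rounds of permutation plus multiply-add-modulo (MAM), per
 * the Channel Selection Algorithm #2 pseudo-random number generator
 * in the Bluetooth Core specification.
 */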
for (iterate = 0; iterate < 3; iterate++) {
prn_e = chan_perm(prn_e);
prn_e = chan_mam(prn_e, chan_id);
}
prn_e ^= chan_id;
return prn_e;
}
#endif /* CONFIG_BT_CTLR_CHAN_SEL_2 */
#endif /* CONFIG_BT_CONN */

View file

@@ -0,0 +1,11 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
void lll_chan_set(u32_t chan);
u8_t lll_chan_sel_1(u8_t *chan_use, u8_t hop, u16_t latency, u8_t *chan_map,
u8_t chan_count);
u8_t lll_chan_sel_2(u16_t counter, u16_t chan_id, u8_t *chan_map,
u8_t chan_count);
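
As a usage illustration, a sketch driving both selection algorithms
(the channel map and CSA#2 inputs reuse the sample data exercised by
lll_chan_sel_2_ut; chan_use and the hop value are hypothetical):

u8_t chan_map[5] = {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}; /* all 37 channels */
u8_t chan_use = 0; /* last unmapped channel, CSA#1 state */
u8_t chan;

/* CSA#1: hop increment 7, no latency applied */
chan = lll_chan_sel_1(&chan_use, 7, 0, chan_map, 37);

/* CSA#2: event counter 1, channel id 0x305F selects channel 20 */
chan = lll_chan_sel_2(1, 0x305F, chan_map, 37);

/* Tune the radio to the selected data channel */
lll_chan_set(chan);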

View file

@@ -1,9 +1,21 @@
/*
-* Copyright (c) 2018 Nordic Semiconductor ASA
+* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#define LLL_CONN_RSSI_SAMPLE_COUNT 10
#define LLL_CONN_RSSI_THRESHOLD 4
#define LLL_CONN_MIC_NONE 0
#define LLL_CONN_MIC_PASS 1
#define LLL_CONN_MIC_FAIL 2
struct lll_tx {
u16_t handle;
void *node;
};
struct node_tx {
union {
void *next;
@@ -12,3 +24,141 @@ struct node_tx {
u8_t pdu[];
};
enum llcp {
LLCP_NONE,
LLCP_CONN_UPD,
LLCP_CHAN_MAP,
#if defined(CONFIG_BT_CTLR_LE_ENC)
LLCP_ENCRYPTION,
#endif /* CONFIG_BT_CTLR_LE_ENC */
LLCP_FEATURE_EXCHANGE,
LLCP_VERSION_EXCHANGE,
/* LLCP_TERMINATE, */
LLCP_CONNECTION_PARAM_REQ,
#if defined(CONFIG_BT_CTLR_LE_PING)
LLCP_PING,
#endif /* CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_CTLR_PHY)
LLCP_PHY_UPD,
#endif /* CONFIG_BT_CTLR_PHY */
};
struct lll_conn {
struct lll_hdr hdr;
u8_t access_addr[4];
u8_t crc_init[3];
u16_t handle;
u16_t interval;
u16_t latency;
/* FIXME: BEGIN: Move to ULL? */
u16_t latency_prepare;
u16_t latency_event;
u16_t event_counter;
u8_t data_chan_map[5];
u8_t data_chan_count:6;
u8_t data_chan_sel:1;
u8_t role:1;
union {
struct {
u8_t data_chan_hop;
u8_t data_chan_use;
};
u16_t data_chan_id;
};
union {
struct {
u8_t terminate_ack:1;
} master;
struct {
u8_t latency_enabled:1;
u8_t latency_cancel:1;
u8_t sca:3;
u32_t window_widening_periodic_us;
u32_t window_widening_max_us;
u32_t window_widening_prepare_us;
u32_t window_widening_event_us;
u32_t window_size_prepare_us;
u32_t window_size_event_us;
} slave;
};
/* FIXME: END: Move to ULL? */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
u16_t default_tx_octets;
u16_t max_tx_octets;
u16_t max_rx_octets;
#if defined(CONFIG_BT_CTLR_PHY)
u16_t default_tx_time;
u16_t max_tx_time;
u16_t max_rx_time;
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
u8_t phy_tx:3;
u8_t phy_flags:1;
u8_t phy_tx_time:3;
u8_t phy_rx:3;
#endif /* CONFIG_BT_CTLR_PHY */
MEMQ_DECLARE(tx);
memq_link_t link_tx;
memq_link_t *link_tx_free;
u8_t packet_tx_head_len;
u8_t packet_tx_head_offset;
u8_t sn:1;
u8_t nesn:1;
u8_t empty:1;
#if defined(CONFIG_BT_CTLR_LE_ENC)
u8_t enc_rx:1;
u8_t enc_tx:1;
struct ccm ccm_rx;
struct ccm ccm_tx;
#endif /* CONFIG_BT_CTLR_LE_ENC */
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
u8_t rssi_latest;
u8_t rssi_reported;
u8_t rssi_sample_count;
#endif /* CONFIG_BT_CTLR_CONN_RSSI */
};
int lll_conn_init(void);
int lll_conn_reset(void);
u8_t lll_conn_sca_local_get(void);
u32_t lll_conn_ppm_local_get(void);
u32_t lll_conn_ppm_get(u8_t sca);
void lll_conn_prepare_reset(void);
int lll_conn_is_abort_cb(void *next, int prio, void *curr,
lll_prepare_cb_t *resume_cb, int *resume_prio);
void lll_conn_abort_cb(struct lll_prepare_param *prepare_param, void *param);
void lll_conn_isr_rx(void *param);
void lll_conn_isr_tx(void *param);
void lll_conn_isr_abort(void *param);
void lll_conn_rx_pkt_set(struct lll_conn *lll);
void lll_conn_tx_pkt_set(struct lll_conn *lll, struct pdu_data *pdu_data_tx);
void lll_conn_pdu_tx_prep(struct lll_conn *lll, struct pdu_data **pdu_data_tx);
u8_t lll_conn_ack_last_idx_get(void);
memq_link_t *lll_conn_ack_peek(u8_t *ack_last, u16_t *handle,
struct node_tx **node_tx);
memq_link_t *lll_conn_ack_by_last_peek(u8_t last, u16_t *handle,
struct node_tx **node_tx);
void *lll_conn_ack_dequeue(void);
void lll_conn_tx_flush(void *param);
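
The ack declarations above form the LLL-to-ULL half of the Tx FIFO
pair. A sketch of how an ULL context could drain acknowledged Tx
nodes, under assumed dequeue semantics (the real consumer is the
ull_conn.c code added by this commit):

memq_link_t *link;
struct node_tx *tx;
u16_t handle;
u8_t ack_last;

/* Peek at the oldest acknowledged Tx node, release it, repeat */
link = lll_conn_ack_peek(&ack_last, &handle, &tx);
while (link) {
	/* ... return tx to its pool, notify the Host if required ... */
	lll_conn_ack_dequeue();
	link = lll_conn_ack_peek(&ack_last, &handle, &tx);
}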

View file

@@ -0,0 +1,17 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#define WL_SIZE 8
#define FILTER_IDX_NONE 0xFF
struct ll_filter {
u8_t enable_bitmask;
u8_t addr_type_bitmask;
u8_t bdaddr[WL_SIZE][BDADDR_SIZE];
};
struct ll_filter *ctrl_filter_get(bool whitelist);
void ll_adv_scan_state_cb(u8_t bm);

View file

@@ -18,6 +18,11 @@
#if defined(CONFIG_BT_LL_SW)
#define MAYFLY_CALL_ID_WORKER MAYFLY_CALL_ID_0
#define MAYFLY_CALL_ID_JOB MAYFLY_CALL_ID_1
#elif defined(CONFIG_BT_LL_SW_SPLIT)
#include "ll_sw/lll.h"
#define MAYFLY_CALL_ID_LLL TICKER_USER_ID_LLL
#define MAYFLY_CALL_ID_WORKER TICKER_USER_ID_ULL_HIGH
#define MAYFLY_CALL_ID_JOB TICKER_USER_ID_ULL_LOW
#else
#error Unknown LL variant.
#endif
@@ -40,6 +45,11 @@ u32_t mayfly_is_enabled(u8_t caller_id, u8_t callee_id)
(void)caller_id;
switch (callee_id) {
#if defined(CONFIG_BT_LL_SW_SPLIT)
case MAYFLY_CALL_ID_LLL:
return irq_is_enabled(SWI4_IRQn);
#endif /* CONFIG_BT_LL_SW_SPLIT */
case MAYFLY_CALL_ID_WORKER:
return irq_is_enabled(RTC0_IRQn);
@@ -64,6 +74,25 @@ u32_t mayfly_prio_is_equal(u8_t caller_id, u8_t callee_id)
((caller_id == MAYFLY_CALL_ID_JOB) &&
(callee_id == MAYFLY_CALL_ID_WORKER)) ||
#endif
#elif defined(CONFIG_BT_LL_SW_SPLIT)
#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_HIGH_PRIO)
((caller_id == MAYFLY_CALL_ID_LLL) &&
(callee_id == MAYFLY_CALL_ID_WORKER)) ||
((caller_id == MAYFLY_CALL_ID_WORKER) &&
(callee_id == MAYFLY_CALL_ID_LLL)) ||
#endif
#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
((caller_id == MAYFLY_CALL_ID_LLL) &&
(callee_id == MAYFLY_CALL_ID_JOB)) ||
((caller_id == MAYFLY_CALL_ID_JOB) &&
(callee_id == MAYFLY_CALL_ID_LLL)) ||
#endif
#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
((caller_id == MAYFLY_CALL_ID_WORKER) &&
(callee_id == MAYFLY_CALL_ID_JOB)) ||
((caller_id == MAYFLY_CALL_ID_JOB) &&
(callee_id == MAYFLY_CALL_ID_WORKER)) ||
#endif
#endif
0;
}
@@ -73,6 +102,12 @@ void mayfly_pend(u8_t caller_id, u8_t callee_id)
(void)caller_id;
switch (callee_id) {
#if defined(CONFIG_BT_LL_SW_SPLIT)
case MAYFLY_CALL_ID_LLL:
NVIC_SetPendingIRQ(SWI4_IRQn);
break;
#endif /* CONFIG_BT_LL_SW_SPLIT */
case MAYFLY_CALL_ID_WORKER:
NVIC_SetPendingIRQ(RTC0_IRQn);
break;

View file

@@ -31,6 +31,19 @@ static u8_t const caller_id_lut[] = {
TICKER_CALL_ID_NONE,
TICKER_CALL_ID_PROGRAM
};
#elif defined(CONFIG_BT_LL_SW_SPLIT)
#include "ll_sw/lll.h"
#define TICKER_MAYFLY_CALL_ID_ISR TICKER_USER_ID_LLL
#define TICKER_MAYFLY_CALL_ID_TRIGGER TICKER_USER_ID_ULL_HIGH
#define TICKER_MAYFLY_CALL_ID_WORKER TICKER_USER_ID_ULL_HIGH
#define TICKER_MAYFLY_CALL_ID_JOB TICKER_USER_ID_ULL_LOW
#define TICKER_MAYFLY_CALL_ID_PROGRAM TICKER_USER_ID_THREAD
static u8_t const caller_id_lut[] = {
TICKER_CALL_ID_ISR,
TICKER_CALL_ID_WORKER,
TICKER_CALL_ID_JOB,
TICKER_CALL_ID_PROGRAM
};
#else
#error Unknown LL variant.
#endif
@@ -55,6 +68,32 @@ void hal_ticker_instance0_sched(u8_t caller_id, u8_t callee_id, u8_t chain,
* schedule.
*/
switch (caller_id) {
#if defined(CONFIG_BT_LL_SW_SPLIT)
case TICKER_CALL_ID_ISR:
switch (callee_id) {
case TICKER_CALL_ID_JOB:
{
static memq_link_t link;
static struct mayfly m = {0, 0, &link, NULL,
ticker_job};
m.param = instance;
/* TODO: scheduler lock, if preemptive threads used */
mayfly_enqueue(TICKER_MAYFLY_CALL_ID_ISR,
TICKER_MAYFLY_CALL_ID_JOB,
chain,
&m);
}
break;
default:
LL_ASSERT(0);
break;
}
break;
#endif /* CONFIG_BT_LL_SW_SPLIT */
case TICKER_CALL_ID_TRIGGER:
switch (callee_id) {
case TICKER_CALL_ID_WORKER:

View file

@@ -0,0 +1,552 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <errno.h>
#include <zephyr/types.h>
#include <device.h>
#include <clock_control.h>
#include <drivers/clock_control/nrf_clock_control.h>
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "ticker/ticker.h"
#include "lll.h"
#include "lll_internal.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
static struct {
struct {
void *param;
lll_is_abort_cb_t is_abort_cb;
lll_abort_cb_t abort_cb;
} curr;
} event;
static struct {
struct device *clk_hf;
} lll;
static int _init_reset(void);
static int prepare(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
lll_prepare_cb_t prepare_cb, int prio,
struct lll_prepare_param *prepare_param, u8_t is_resume);
static int resume_enqueue(lll_prepare_cb_t resume_cb, int resume_prio);
#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static void _preempt_ticker_cb(u32_t ticks_at_expire, u32_t remainder,
u16_t lazy, void *param);
static void _preempt(void *param);
#else /* CONFIG_BT_CTLR_LOW_LAT */
#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void ticker_op_job_disable(u32_t status, void *op_context);
#endif
#endif /* CONFIG_BT_CTLR_LOW_LAT */
ISR_DIRECT_DECLARE(radio_nrf5_isr)
{
DEBUG_RADIO_ISR(1);
isr_radio();
ISR_DIRECT_PM();
DEBUG_RADIO_ISR(0);
return 1;
}
static void rtc0_nrf5_isr(void *arg)
{
DEBUG_TICKER_ISR(1);
/* On compare0 run ticker worker instance0 */
if (NRF_RTC0->EVENTS_COMPARE[0]) {
NRF_RTC0->EVENTS_COMPARE[0] = 0;
ticker_trigger(0);
}
mayfly_run(TICKER_USER_ID_ULL_HIGH);
DEBUG_TICKER_ISR(0);
}
static void swi4_nrf5_isr(void *arg)
{
DEBUG_RADIO_ISR(1);
mayfly_run(TICKER_USER_ID_LLL);
DEBUG_RADIO_ISR(0);
}
static void swi5_nrf5_isr(void *arg)
{
DEBUG_TICKER_JOB(1);
mayfly_run(TICKER_USER_ID_ULL_LOW);
DEBUG_TICKER_JOB(0);
}
int lll_init(void)
{
struct device *clk_k32;
int err;
/* Initialise LLL internals */
event.curr.abort_cb = NULL;
/* Initialize LF CLK */
clk_k32 = device_get_binding(CONFIG_CLOCK_CONTROL_NRF_K32SRC_DRV_NAME);
if (!clk_k32) {
return -ENODEV;
}
clock_control_on(clk_k32, (void *)CLOCK_CONTROL_NRF_K32SRC);
/* Initialize HF CLK */
lll.clk_hf =
device_get_binding(CONFIG_CLOCK_CONTROL_NRF_M16SRC_DRV_NAME);
if (!lll.clk_hf) {
return -ENODEV;
}
err = _init_reset();
if (err) {
return err;
}
/* Connect ISRs */
IRQ_DIRECT_CONNECT(NRF5_IRQ_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
radio_nrf5_isr, 0);
IRQ_CONNECT(NRF5_IRQ_SWI4_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
swi4_nrf5_isr, NULL, 0);
IRQ_CONNECT(NRF5_IRQ_RTC0_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
rtc0_nrf5_isr, NULL, 0);
IRQ_CONNECT(NRF5_IRQ_SWI5_IRQn, CONFIG_BT_CTLR_ULL_LOW_PRIO,
swi5_nrf5_isr, NULL, 0);
/* Enable IRQs */
irq_enable(NRF5_IRQ_RADIO_IRQn);
irq_enable(NRF5_IRQ_SWI4_IRQn);
irq_enable(NRF5_IRQ_RTC0_IRQn);
irq_enable(NRF5_IRQ_SWI5_IRQn);
return 0;
}
int lll_reset(void)
{
int err;
err = _init_reset();
if (err) {
return err;
}
return 0;
}
int lll_prepare(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
lll_prepare_cb_t prepare_cb, int prio,
struct lll_prepare_param *prepare_param)
{
return prepare(is_abort_cb, abort_cb, prepare_cb, prio, prepare_param,
0);
}
void lll_resume(void *param)
{
struct lll_event *next = param;
int ret;
if (event.curr.abort_cb) {
ret = prepare(next->is_abort_cb, next->abort_cb,
next->prepare_cb, next->prio,
&next->prepare_param, next->is_resume);
LL_ASSERT(!ret || ret == -EINPROGRESS);
return;
}
event.curr.is_abort_cb = next->is_abort_cb;
event.curr.abort_cb = next->abort_cb;
event.curr.param = next->prepare_param.param;
ret = next->prepare_cb(&next->prepare_param);
LL_ASSERT(!ret);
}
void lll_disable(void *param)
{
if (!param || param == event.curr.param) {
if (event.curr.abort_cb && event.curr.param) {
event.curr.abort_cb(NULL, event.curr.param);
} else {
LL_ASSERT(!param);
}
}
{
struct lll_event *next;
u8_t idx = UINT8_MAX;
next = ull_prepare_dequeue_iter(&idx);
while (next) {
if (!next->is_aborted &&
param == next->prepare_param.param) {
next->is_aborted = 1;
next->abort_cb(&next->prepare_param,
next->prepare_param.param);
}
next = ull_prepare_dequeue_iter(&idx);
}
}
}
int lll_prepare_done(void *param)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
u32_t ret;
/* Ticker Job Silence */
ret = ticker_job_idle_get(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_LLL,
ticker_op_job_disable, NULL);
return ((ret == TICKER_STATUS_SUCCESS) || (ret == TICKER_STATUS_BUSY)) ?
0 : -EFAULT;
#else
return 0;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
}
int lll_done(void *param)
{
struct lll_event *next = ull_prepare_dequeue_get();
struct ull_hdr *ull = NULL;
int ret = 0;
/* Assert if param supplied without a pending prepare to cancel. */
LL_ASSERT(!param || next);
/* check if current LLL event is done */
if (!param) {
/* Reset current event instance */
LL_ASSERT(event.curr.abort_cb);
event.curr.abort_cb = NULL;
param = event.curr.param;
event.curr.param = NULL;
if (param) {
ull = HDR_ULL(((struct lll_hdr *)param)->parent);
}
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
mayfly_enable(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_LOW, 1);
#endif /* CONFIG_BT_CTLR_LOW_LAT */
DEBUG_RADIO_CLOSE(0);
} else {
ull = HDR_ULL(((struct lll_hdr *)param)->parent);
}
/* Let ULL know about LLL event done */
ull_event_done(ull);
return ret;
}
bool lll_is_done(void *param)
{
/* FIXME: use param to check */
return !event.curr.abort_cb;
}
int lll_clk_on(void)
{
int err;
/* turn on radio clock in non-blocking mode. */
err = clock_control_on(lll.clk_hf, NULL);
if (!err || err == -EINPROGRESS) {
DEBUG_RADIO_XTAL(1);
}
return err;
}
int lll_clk_on_wait(void)
{
int err;
/* turn on radio clock in blocking mode. */
err = clock_control_on(lll.clk_hf, (void *)1);
if (!err || err == -EINPROGRESS) {
DEBUG_RADIO_XTAL(1);
}
return err;
}
int lll_clk_off(void)
{
int err;
/* turn off radio clock in non-blocking mode. */
err = clock_control_off(lll.clk_hf, NULL);
if (!err) {
DEBUG_RADIO_XTAL(0);
} else if (err == -EBUSY) {
DEBUG_RADIO_XTAL(1);
}
return err;
}
u32_t lll_evt_offset_get(struct evt_hdr *evt)
{
if (0) {
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
} else if (evt->ticks_xtal_to_start & XON_BITMASK) {
return max(evt->ticks_active_to_start,
evt->ticks_preempt_to_start);
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
} else {
return max(evt->ticks_active_to_start,
evt->ticks_xtal_to_start);
}
}
u32_t lll_preempt_calc(struct evt_hdr *evt, u8_t ticker_id,
u32_t ticks_at_event)
{
/* TODO: */
return 0;
}
static int _init_reset(void)
{
return 0;
}
static int prepare(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
lll_prepare_cb_t prepare_cb, int prio,
struct lll_prepare_param *prepare_param, u8_t is_resume)
{
struct lll_event *p;
u8_t idx = UINT8_MAX;
p = ull_prepare_dequeue_iter(&idx);
while (p && p->is_aborted) {
p = ull_prepare_dequeue_iter(&idx);
}
if (event.curr.abort_cb || p) {
#if !defined(CONFIG_BT_CTLR_LOW_LAT)
u32_t preempt_anchor;
struct evt_hdr *evt;
u32_t preempt_to;
#else /* CONFIG_BT_CTLR_LOW_LAT */
lll_prepare_cb_t resume_cb;
struct lll_event *next;
int resume_prio;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
int ret;
#if defined(CONFIG_BT_CTLR_LOW_LAT)
/* early abort */
if (event.curr.param) {
event.curr.abort_cb(NULL, event.curr.param);
}
#endif /* CONFIG_BT_CTLR_LOW_LAT */
/* Store the next prepare for deferred call */
ret = ull_prepare_enqueue(is_abort_cb, abort_cb, prepare_param,
prepare_cb, prio, is_resume);
LL_ASSERT(!ret);
if (is_resume) {
return -EINPROGRESS;
}
#if !defined(CONFIG_BT_CTLR_LOW_LAT)
/* Calc the preempt timeout */
evt = HDR_LLL2EVT(prepare_param->param);
preempt_anchor = prepare_param->ticks_at_expire;
preempt_to = max(evt->ticks_active_to_start,
evt->ticks_xtal_to_start) -
evt->ticks_preempt_to_start;
/* Setup pre empt timeout */
ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_LLL,
TICKER_ID_LLL_PREEMPT,
preempt_anchor,
preempt_to,
TICKER_NULL_PERIOD,
TICKER_NULL_REMAINDER,
TICKER_NULL_LAZY,
TICKER_NULL_SLOT,
_preempt_ticker_cb, NULL,
NULL, NULL);
LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
(ret == TICKER_STATUS_FAILURE) ||
(ret == TICKER_STATUS_BUSY));
#else /* CONFIG_BT_CTLR_LOW_LAT */
next = NULL;
while (p) {
if (!p->is_aborted) {
if (event.curr.param ==
p->prepare_param.param) {
p->is_aborted = 1;
p->abort_cb(&p->prepare_param,
p->prepare_param.param);
} else {
next = p;
}
}
p = ull_prepare_dequeue_iter(&idx);
}
if (next) {
/* check if resume requested by curr */
ret = event.curr.is_abort_cb(NULL, 0, event.curr.param,
&resume_cb, &resume_prio);
LL_ASSERT(ret);
if (ret == -EAGAIN) {
ret = resume_enqueue(resume_cb, resume_prio);
LL_ASSERT(!ret);
} else {
LL_ASSERT(ret == -ECANCELED);
}
}
#endif /* CONFIG_BT_CTLR_LOW_LAT */
return -EINPROGRESS;
}
event.curr.param = prepare_param->param;
event.curr.is_abort_cb = is_abort_cb;
event.curr.abort_cb = abort_cb;
return prepare_cb(prepare_param);
}
static int resume_enqueue(lll_prepare_cb_t resume_cb, int resume_prio)
{
struct lll_prepare_param prepare_param;
prepare_param.param = event.curr.param;
event.curr.param = NULL;
return ull_prepare_enqueue(event.curr.is_abort_cb, event.curr.abort_cb,
&prepare_param, resume_cb, resume_prio, 1);
}
#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static void _preempt_ticker_cb(u32_t ticks_at_expire, u32_t remainder,
u16_t lazy, void *param)
{
static memq_link_t _link;
static struct mayfly _mfy = {0, 0, &_link, NULL, _preempt};
u32_t ret;
ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
0, &_mfy);
LL_ASSERT(!ret);
}
static void _preempt(void *param)
{
struct lll_event *next;
lll_prepare_cb_t resume_cb;
u8_t idx = UINT8_MAX;
int resume_prio;
int ret;
next = ull_prepare_dequeue_iter(&idx);
if (!next || !event.curr.abort_cb || !event.curr.param) {
return;
}
while (next && next->is_resume) {
next = ull_prepare_dequeue_iter(&idx);
}
if (!next) {
return;
}
ret = event.curr.is_abort_cb(next->prepare_param.param, next->prio,
event.curr.param,
&resume_cb, &resume_prio);
if (!ret) {
/* Let LLL know about the cancelled prepare */
next->is_aborted = 1;
next->abort_cb(&next->prepare_param, next->prepare_param.param);
return;
}
event.curr.abort_cb(NULL, event.curr.param);
if (ret == -EAGAIN) {
struct lll_event *iter;
u8_t idx = UINT8_MAX;
iter = ull_prepare_dequeue_iter(&idx);
while (iter) {
if (!iter->is_aborted &&
event.curr.param == iter->prepare_param.param) {
iter->is_aborted = 1;
iter->abort_cb(&iter->prepare_param,
iter->prepare_param.param);
}
iter = ull_prepare_dequeue_iter(&idx);
}
ret = resume_enqueue(resume_cb, resume_prio);
LL_ASSERT(!ret);
} else {
LL_ASSERT(ret == -ECANCELED);
}
}
#else /* CONFIG_BT_CTLR_LOW_LAT */
#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void ticker_op_job_disable(u32_t status, void *op_context)
{
ARG_UNUSED(status);
ARG_UNUSED(op_context);
/* FIXME: */
if (1 /* _radio.state != STATE_NONE */) {
mayfly_enable(TICKER_USER_ID_ULL_LOW,
TICKER_USER_ID_ULL_LOW, 0);
}
}
#endif
#endif /* CONFIG_BT_CTLR_LOW_LAT */

View file

@@ -0,0 +1,846 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include <stddef.h>
#include <zephyr/types.h>
#include <toolchain.h>
#include <bluetooth/hci.h>
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"
#include "util/util.h"
#include "util/memq.h"
#include "ticker/ticker.h"
#include "pdu.h"
#include "lll.h"
#include "lll_vendor.h"
#include "lll_adv.h"
#include "lll_filter.h"
#include "lll_chan.h"
#include "lll_internal.h"
#include "lll_tim_internal.h"
#include "lll_adv_internal.h"
#include "lll_prof_internal.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_adv
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
static int init_reset(void);
static int prepare_cb(struct lll_prepare_param *prepare_param);
static int is_abort_cb(void *next, int prio, void *curr,
lll_prepare_cb_t *resume_cb, int *resume_prio);
static void abort_cb(struct lll_prepare_param *prepare_param, void *param);
static void isr_tx(void *param);
static void isr_rx(void *param);
static void isr_done(void *param);
static void isr_abort(void *param);
static void isr_cleanup(void *param);
static void isr_race(void *param);
static void chan_prepare(struct lll_adv *lll);
static inline int isr_rx_pdu(struct lll_adv *lll,
u8_t devmatch_ok, u8_t devmatch_id,
u8_t irkmatch_ok, u8_t irkmatch_id,
u8_t rssi_ready);
static inline bool isr_rx_sr_check(struct lll_adv *lll, struct pdu_adv *adv,
struct pdu_adv *sr, u8_t devmatch_ok,
u8_t *rl_idx);
static inline bool isr_rx_sr_adva_check(struct pdu_adv *adv,
struct pdu_adv *sr);
#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
static inline int isr_rx_sr_report(struct pdu_adv *pdu_adv_rx,
u8_t rssi_ready);
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
static inline bool isr_rx_ci_check(struct lll_adv *lll, struct pdu_adv *adv,
struct pdu_adv *ci, u8_t devmatch_ok,
u8_t *rl_idx);
static inline bool isr_rx_ci_tgta_check(struct pdu_adv *adv, struct pdu_adv *ci,
u8_t rl_idx);
static inline bool isr_rx_ci_adva_check(struct pdu_adv *adv,
struct pdu_adv *ci);
int lll_adv_init(void)
{
int err;
err = init_reset();
if (err) {
return err;
}
return 0;
}
int lll_adv_reset(void)
{
int err;
err = init_reset();
if (err) {
return err;
}
return 0;
}
void lll_adv_prepare(void *param)
{
struct lll_prepare_param *p = param;
int err;
err = lll_clk_on();
LL_ASSERT(!err || err == -EINPROGRESS);
err = lll_prepare(is_abort_cb, abort_cb, prepare_cb, 0, p);
LL_ASSERT(!err || err == -EINPROGRESS);
}
static int init_reset(void)
{
return 0;
}
static int prepare_cb(struct lll_prepare_param *prepare_param)
{
struct lll_adv *lll = prepare_param->param;
u32_t aa = 0x8e89bed6;
u32_t ticks_at_event;
struct evt_hdr *evt;
u32_t remainder_us;
u32_t remainder;
DEBUG_RADIO_START_A(1);
/* Check if stopped (on connection establishment race between LLL and
* ULL).
*/
if (lll_is_stop(lll)) {
int err;
err = lll_clk_off();
LL_ASSERT(!err || err == -EBUSY);
lll_done(NULL);
DEBUG_RADIO_START_A(0);
return 0;
}
radio_reset();
/* TODO: other Tx Power settings */
radio_tx_power_set(0);
#if defined(CONFIG_BT_CTLR_ADV_EXT)
/* TODO: if coded we use S8? */
radio_phy_set(lll->phy_p, 1);
radio_pkt_configure(8, PDU_AC_PAYLOAD_SIZE_MAX, (lll->phy_p << 1));
#else /* !CONFIG_BT_CTLR_ADV_EXT */
radio_phy_set(0, 0);
radio_pkt_configure(8, PDU_AC_PAYLOAD_SIZE_MAX, 0);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
radio_aa_set((u8_t *)&aa);
radio_crc_configure(((0x5bUL) | ((0x06UL) << 8) | ((0x00UL) << 16)),
0x555555);
lll->chan_map_curr = lll->chan_map;
chan_prepare(lll);
#if defined(CONFIG_BT_HCI_MESH_EXT)
_radio.mesh_adv_end_us = 0;
#endif /* CONFIG_BT_HCI_MESH_EXT */
#if defined(CONFIG_BT_CTLR_PRIVACY)
if (ctrl_rl_enabled()) {
struct ll_filter *filter =
ctrl_filter_get(!!(_radio.advertiser.filter_policy));
radio_filter_configure(filter->enable_bitmask,
filter->addr_type_bitmask,
(u8_t *)filter->bdaddr);
} else
#endif /* CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_CTLR_FILTER)
/* Setup Radio Filter */
if (lll->filter_policy) {
struct ll_filter *wl = ctrl_filter_get(true);
radio_filter_configure(wl->enable_bitmask,
wl->addr_type_bitmask,
(u8_t *)wl->bdaddr);
}
#endif /* CONFIG_BT_CTLR_FILTER */
ticks_at_event = prepare_param->ticks_at_expire;
evt = HDR_LLL2EVT(lll);
ticks_at_event += lll_evt_offset_get(evt);
ticks_at_event += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
remainder = prepare_param->remainder;
remainder_us = radio_tmr_start(1, ticks_at_event, remainder);
/* capture end of Tx-ed PDU, used to calculate HCTO. */
radio_tmr_end_capture();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
radio_gpio_pa_setup();
radio_gpio_pa_lna_enable(remainder_us +
radio_tx_ready_delay_get(0, 0) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
ARG_UNUSED(remainder_us);
#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \
(EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US)
/* check if preempt to start has changed */
if (lll_preempt_calc(evt, TICKER_ID_ADV_BASE, ticks_at_event)) {
radio_isr_set(isr_abort, lll);
radio_disable();
} else
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
{
u32_t ret;
ret = lll_prepare_done(lll);
LL_ASSERT(!ret);
}
DEBUG_RADIO_START_A(1);
return 0;
}
#if defined(CONFIG_BT_PERIPHERAL)
static int resume_prepare_cb(struct lll_prepare_param *p)
{
struct evt_hdr *evt = HDR_LLL2EVT(p->param);
p->ticks_at_expire = ticker_ticks_now_get() - lll_evt_offset_get(evt);
p->remainder = 0;
p->lazy = 0;
return prepare_cb(p);
}
#endif /* CONFIG_BT_PERIPHERAL */
static int is_abort_cb(void *next, int prio, void *curr,
lll_prepare_cb_t *resume_cb, int *resume_prio)
{
#if defined(CONFIG_BT_PERIPHERAL)
struct lll_adv *lll = curr;
struct pdu_adv *pdu;
#endif /* CONFIG_BT_PERIPHERAL */
/* TODO: prio check */
if (next != curr) {
if (0) {
#if defined(CONFIG_BT_PERIPHERAL)
} else if (lll->is_hdcd) {
int err;
/* wrap back after the pre-empter */
*resume_cb = resume_prepare_cb;
*resume_prio = 0; /* TODO: */
/* Retain HF clk */
err = lll_clk_on();
LL_ASSERT(!err || err == -EINPROGRESS);
return -EAGAIN;
#endif /* CONFIG_BT_PERIPHERAL */
} else {
return -ECANCELED;
}
}
#if defined(CONFIG_BT_PERIPHERAL)
pdu = lll_adv_data_curr_get(lll);
if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
return 0;
}
#endif /* CONFIG_BT_PERIPHERAL */
return -ECANCELED;
}
static void abort_cb(struct lll_prepare_param *prepare_param, void *param)
{
int err;
/* NOTE: This is not a prepare being cancelled */
if (!prepare_param) {
/* Perform event abort here.
* After event has been cleanly aborted, clean up resources
* and dispatch event done.
*/
radio_isr_set(isr_abort, param);
radio_disable();
return;
}
/* NOTE: Else clean the top half preparations of the aborted event
* currently in preparation pipeline.
*/
err = lll_clk_off();
LL_ASSERT(!err || err == -EBUSY);
lll_done(param);
}
static void isr_tx(void *param)
{
u32_t hcto;
/* TODO: MOVE to a common interface, isr_lll_radio_status? */
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
lll_prof_latency_capture();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
/* Clear radio status and events */
radio_status_reset();
radio_tmr_status_reset();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \
defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_pa_lna_disable();
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */
/* TODO: MOVE ^^ */
radio_isr_set(isr_rx, param);
radio_tmr_tifs_set(TIFS_US);
radio_switch_complete_and_tx(0, 0, 0, 0);
radio_pkt_rx_set(radio_pkt_scratch_get());
/* assert if radio packet ptr is not set and radio started rx */
LL_ASSERT(!radio_is_ready());
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
lll_prof_cputime_capture();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
#if defined(CONFIG_BT_CTLR_PRIVACY)
if (ctrl_rl_enabled()) {
u8_t count, *irks = ctrl_irks_get(&count);
radio_ar_configure(count, irks);
}
#endif /* CONFIG_BT_CTLR_PRIVACY */
/* +/- 2us active clock jitter, +1 us hcto compensation */
hcto = radio_tmr_tifs_base_get() + TIFS_US + 4 + 1;
hcto += radio_rx_chain_delay_get(0, 0);
hcto += addr_us_get(0);
hcto -= radio_tx_chain_delay_get(0, 0);
radio_tmr_hcto_configure(hcto);
/* capture end of CONNECT_IND PDU, used for calculating first
* slave event.
*/
radio_tmr_end_capture();
#if defined(CONFIG_BT_CTLR_SCAN_REQ_RSSI)
radio_rssi_measure();
#endif /* CONFIG_BT_CTLR_SCAN_REQ_RSSI */
#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
/* PA/LNA enable is overwriting packet end used in ISR profiling,
* hence back it up for later use.
*/
lll_prof_radio_end_backup();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
radio_gpio_lna_setup();
radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - 4 -
radio_tx_chain_delay_get(0, 0) -
CONFIG_BT_CTLR_GPIO_LNA_OFFSET);
#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
/* NOTE: as scratch packet is used to receive, it is safe to
* generate profile event using rx nodes.
*/
lll_prof_send();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
}
static void isr_rx(void *param)
{
u8_t trx_done;
u8_t crc_ok;
u8_t devmatch_ok;
u8_t devmatch_id;
u8_t irkmatch_ok;
u8_t irkmatch_id;
u8_t rssi_ready;
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
lll_prof_latency_capture();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
/* Read radio status and events */
trx_done = radio_is_done();
if (trx_done) {
crc_ok = radio_crc_is_valid();
devmatch_ok = radio_filter_has_match();
devmatch_id = radio_filter_match_get();
irkmatch_ok = radio_ar_has_match();
irkmatch_id = radio_ar_match_get();
rssi_ready = radio_rssi_is_ready();
} else {
crc_ok = devmatch_ok = irkmatch_ok = rssi_ready = 0;
devmatch_id = irkmatch_id = 0xFF;
}
/* Clear radio status and events */
radio_status_reset();
radio_tmr_status_reset();
radio_filter_status_reset();
radio_ar_status_reset();
radio_rssi_status_reset();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \
defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_pa_lna_disable();
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */
if (!trx_done) {
goto isr_rx_do_close;
}
if (crc_ok) {
int err;
err = isr_rx_pdu(param, devmatch_ok, devmatch_id, irkmatch_ok,
irkmatch_id, rssi_ready);
if (!err) {
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
lll_prof_send();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
return;
}
}
isr_rx_do_close:
radio_isr_set(isr_done, param);
radio_disable();
}
static void isr_done(void *param)
{
struct node_rx_hdr *node_rx;
struct lll_adv *lll = param;
/* TODO: MOVE to a common interface, isr_lll_radio_status? */
/* Clear radio status and events */
radio_status_reset();
radio_tmr_status_reset();
radio_filter_status_reset();
radio_ar_status_reset();
radio_rssi_status_reset();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \
defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_pa_lna_disable();
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */
/* TODO: MOVE ^^ */
#if defined(CONFIG_BT_HCI_MESH_EXT)
if (_radio.advertiser.is_mesh &&
!_radio.mesh_adv_end_us) {
_radio.mesh_adv_end_us = radio_tmr_end_get();
}
#endif /* CONFIG_BT_HCI_MESH_EXT */
#if defined(CONFIG_BT_PERIPHERAL)
if (!lll->chan_map_curr && lll->is_hdcd) {
lll->chan_map_curr = lll->chan_map;
}
#endif /* CONFIG_BT_PERIPHERAL */
if (lll->chan_map_curr) {
u32_t start_us;
chan_prepare(lll);
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
start_us = radio_tmr_start_now(1);
radio_gpio_pa_setup();
radio_gpio_pa_lna_enable(start_us +
radio_tx_ready_delay_get(0, 0) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
ARG_UNUSED(start_us);
radio_tx_enable();
#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
/* capture end of Tx-ed PDU, used to calculate HCTO. */
radio_tmr_end_capture();
return;
}
radio_filter_disable();
#if defined(CONFIG_BT_PERIPHERAL)
if (!lll->is_hdcd)
#endif /* CONFIG_BT_PERIPHERAL */
{
#if defined(CONFIG_BT_HCI_MESH_EXT)
if (_radio.advertiser.is_mesh) {
u32_t err;
err = isr_close_adv_mesh();
if (err) {
return;
}
}
#endif /* CONFIG_BT_HCI_MESH_EXT */
}
#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
node_rx = ull_pdu_rx_alloc_peek(3);
if (node_rx) {
ull_pdu_rx_alloc();
/* TODO: add other info by defining a payload struct */
node_rx->type = NODE_RX_TYPE_ADV_INDICATION;
ull_rx_put(node_rx->link, node_rx);
ull_rx_sched();
}
#else /* !CONFIG_BT_CTLR_ADV_INDICATION */
ARG_UNUSED(node_rx);
#endif /* !CONFIG_BT_CTLR_ADV_INDICATION */
isr_cleanup(param);
}
static void isr_abort(void *param)
{
radio_filter_disable();
isr_cleanup(param);
}
static void isr_cleanup(void *param)
{
int err;
radio_isr_set(isr_race, param);
radio_tmr_stop();
err = lll_clk_off();
LL_ASSERT(!err || err == -EBUSY);
lll_done(NULL);
}
static void isr_race(void *param)
{
/* NOTE: lll_disable could have a race with ... */
radio_status_reset();
}
static void chan_prepare(struct lll_adv *lll)
{
struct pdu_adv *pdu;
struct pdu_adv *scan_pdu;
u8_t chan;
u8_t upd = 0;
pdu = lll_adv_data_latest_get(lll, &upd);
scan_pdu = lll_adv_scan_rsp_latest_get(lll, &upd);
#if defined(CONFIG_BT_CTLR_PRIVACY)
if (upd) {
/* Copy the address from the adv packet we will send into the
* scan response.
*/
memcpy(&scan_pdu->scan_rsp.addr[0],
&pdu->adv_ind.addr[0], BDADDR_SIZE);
}
#else
ARG_UNUSED(scan_pdu);
ARG_UNUSED(upd);
#endif /* !CONFIG_BT_CTLR_PRIVACY */
radio_pkt_tx_set(pdu);
if ((pdu->type != PDU_ADV_TYPE_NONCONN_IND) &&
(!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
(pdu->type != PDU_ADV_TYPE_EXT_IND))) {
radio_isr_set(isr_tx, lll);
radio_tmr_tifs_set(TIFS_US);
radio_switch_complete_and_rx(0);
} else {
radio_isr_set(isr_done, lll);
radio_switch_complete_and_disable();
}
chan = find_lsb_set(lll->chan_map_curr);
LL_ASSERT(chan);
lll->chan_map_curr &= (lll->chan_map_curr - 1);
lll_chan_set(36 + chan);
}
static inline int isr_rx_pdu(struct lll_adv *lll,
u8_t devmatch_ok, u8_t devmatch_id,
u8_t irkmatch_ok, u8_t irkmatch_id,
u8_t rssi_ready)
{
struct pdu_adv *pdu_rx, *pdu_adv;
#if defined(CONFIG_BT_CTLR_PRIVACY)
/* An IRK match implies address resolution enabled */
u8_t rl_idx = irkmatch_ok ? ctrl_rl_irk_idx(irkmatch_id) :
FILTER_IDX_NONE;
#else
u8_t rl_idx = FILTER_IDX_NONE;
#endif /* CONFIG_BT_CTLR_PRIVACY */
pdu_rx = (void *)radio_pkt_scratch_get();
pdu_adv = lll_adv_data_curr_get(lll);
if ((pdu_rx->type == PDU_ADV_TYPE_SCAN_REQ) &&
(pdu_rx->len == sizeof(struct pdu_adv_scan_req)) &&
isr_rx_sr_check(lll, pdu_adv, pdu_rx, devmatch_ok, &rl_idx)) {
radio_isr_set(isr_done, lll);
radio_switch_complete_and_disable();
radio_pkt_tx_set(lll_adv_scan_rsp_curr_get(lll));
/* assert if radio packet ptr is not set and radio started tx */
LL_ASSERT(!radio_is_ready());
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
lll_prof_cputime_capture();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
0 /* TODO: extended adv. scan req notification enabled */) {
u32_t err;
/* Generate the scan request event */
err = isr_rx_sr_report(pdu_rx, rssi_ready);
if (err) {
/* Scan Response will not be transmitted */
return err;
}
}
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
/* PA/LNA enable is overwriting packet end used in ISR
* profiling, hence back it up for later use.
*/
lll_prof_radio_end_backup();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
radio_gpio_pa_setup();
radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US -
radio_rx_chain_delay_get(0, 0) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */
return 0;
#if defined(CONFIG_BT_PERIPHERAL)
} else if ((pdu_rx->type == PDU_ADV_TYPE_CONNECT_IND) &&
(pdu_rx->len == sizeof(struct pdu_adv_connect_ind)) &&
isr_rx_ci_check(lll, pdu_adv, pdu_rx, devmatch_ok,
&rl_idx) &&
lll->conn) {
struct node_rx_ftr *ftr;
struct node_rx_pdu *rx;
int ret;
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
rx = ull_pdu_rx_alloc_peek(4);
} else {
rx = ull_pdu_rx_alloc_peek(3);
}
if (!rx) {
return -ENOBUFS;
}
radio_isr_set(isr_abort, lll);
radio_disable();
/* assert if radio started tx */
LL_ASSERT(!radio_is_ready());
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
lll_prof_cputime_capture();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
/* Stop further LLL radio events */
ret = lll_stop(lll);
LL_ASSERT(!ret);
rx = ull_pdu_rx_alloc();
rx->hdr.type = NODE_RX_TYPE_CONNECTION;
rx->hdr.handle = 0xffff;
memcpy(rx->pdu, pdu_rx, (offsetof(struct pdu_adv, connect_ind) +
sizeof(struct pdu_adv_connect_ind)));
ftr = (void *)((u8_t *)rx->pdu +
(offsetof(struct pdu_adv, connect_ind) +
sizeof(struct pdu_adv_connect_ind)));
ftr->param = lll;
ftr->ticks_anchor = radio_tmr_start_get();
ftr->us_radio_end = radio_tmr_end_get() -
radio_tx_chain_delay_get(0, 0);
ftr->us_radio_rdy = radio_rx_ready_delay_get(0, 0);
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
ftr->extra = ull_pdu_rx_alloc();
}
ull_rx_put(rx->hdr.link, rx);
ull_rx_sched();
return 0;
#endif /* CONFIG_BT_PERIPHERAL */
}
return -EINVAL;
}
static inline bool isr_rx_sr_check(struct lll_adv *lll, struct pdu_adv *adv,
struct pdu_adv *sr, u8_t devmatch_ok,
u8_t *rl_idx)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
return ((((_radio.advertiser.filter_policy & 0x01) == 0) &&
ctrl_rl_addr_allowed(sr->tx_addr, sr->scan_req.scan_addr,
rl_idx)) ||
(((_radio.advertiser.filter_policy & 0x01) != 0) &&
(devmatch_ok || ctrl_irk_whitelisted(*rl_idx)))) &&
isr_rx_sr_adva_check(adv, sr);
#else
return (((lll->filter_policy & 0x01) == 0) || devmatch_ok) &&
isr_rx_sr_adva_check(adv, sr);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}
static inline bool isr_rx_sr_adva_check(struct pdu_adv *adv,
struct pdu_adv *sr)
{
return (adv->tx_addr == sr->rx_addr) &&
!memcmp(adv->adv_ind.addr, sr->scan_req.adv_addr, BDADDR_SIZE);
}
#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
static inline int isr_rx_sr_report(struct pdu_adv *pdu_adv_rx,
u8_t rssi_ready)
{
struct node_rx_pdu *node_rx;
struct pdu_adv *pdu_adv;
u8_t pdu_len;
node_rx = ull_pdu_rx_alloc_peek(3);
if (!node_rx) {
return -ENOBUFS;
}
ull_pdu_rx_alloc();
/* Prepare the report (scan req) */
node_rx->hdr.type = NODE_RX_TYPE_SCAN_REQ;
node_rx->hdr.handle = 0xffff;
/* Make a copy of PDU into Rx node (as the received PDU is in the
* scratch buffer), and save the RSSI value.
*/
pdu_adv = (void *)node_rx->pdu;
pdu_len = offsetof(struct pdu_adv, payload) + pdu_adv_rx->len;
memcpy(pdu_adv, pdu_adv_rx, pdu_len);
((u8_t *)pdu_adv)[pdu_len] = (rssi_ready) ? (radio_rssi_get() & 0x7f) :
0x7f;
ull_rx_put(node_rx->hdr.link, node_rx);
ull_rx_sched();
return 0;
}
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */
static inline bool isr_rx_ci_check(struct lll_adv *lll, struct pdu_adv *adv,
struct pdu_adv *ci, u8_t devmatch_ok,
u8_t *rl_idx)
{
/* LL 4.3.2: filter policy shall be ignored for directed adv */
if (adv->type == PDU_ADV_TYPE_DIRECT_IND) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
return ctrl_rl_addr_allowed(ci->tx_addr,
ci->connect_ind.init_addr,
rl_idx) &&
#else
return (1) &&
#endif
isr_rx_ci_adva_check(adv, ci) &&
isr_rx_ci_tgta_check(adv, ci, *rl_idx);
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
return ((((_radio.advertiser.filter_policy & 0x02) == 0) &&
ctrl_rl_addr_allowed(ci->tx_addr, ci->connect_ind.init_addr,
rl_idx)) ||
(((_radio.advertiser.filter_policy & 0x02) != 0) &&
(devmatch_ok || ctrl_irk_whitelisted(*rl_idx)))) &&
isr_rx_ci_adva_check(adv, ci);
#else
return (((lll->filter_policy & 0x02) == 0) ||
(devmatch_ok)) &&
isr_rx_ci_adva_check(adv, ci);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}
static inline bool isr_rx_ci_tgta_check(struct pdu_adv *adv, struct pdu_adv *ci,
u8_t rl_idx)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
if (rl_idx != FILTER_IDX_NONE) {
return rl_idx == _radio.advertiser.rl_idx;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */
return (adv->rx_addr == ci->tx_addr) &&
!memcmp(adv->direct_ind.tgt_addr, ci->connect_ind.init_addr,
BDADDR_SIZE);
}
static inline bool isr_rx_ci_adva_check(struct pdu_adv *adv,
struct pdu_adv *ci)
{
return (adv->tx_addr == ci->rx_addr) &&
(((adv->type == PDU_ADV_TYPE_DIRECT_IND) &&
!memcmp(adv->direct_ind.adv_addr, ci->connect_ind.adv_addr,
BDADDR_SIZE)) ||
(!memcmp(adv->adv_ind.addr, ci->connect_ind.adv_addr,
BDADDR_SIZE)));
}

View file

@@ -0,0 +1,99 @@
/*
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
struct lll_adv_pdu {
u8_t first;
u8_t last;
/* TODO: use,
* struct pdu_adv *pdu[DOUBLE_BUFFER_SIZE];
*/
u8_t pdu[DOUBLE_BUFFER_SIZE][PDU_AC_SIZE_MAX];
};
struct lll_adv {
struct lll_hdr hdr;
#if defined(CONFIG_BT_PERIPHERAL)
/* NOTE: conn context has to be after lll_hdr */
struct lll_conn *conn;
u8_t is_hdcd:1;
#endif /* CONFIG_BT_PERIPHERAL */
u8_t chan_map:3;
u8_t chan_map_curr:3;
u8_t filter_policy:2;
#if defined(CONFIG_BT_CTLR_ADV_EXT)
u8_t phy_p:3;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#if defined(CONFIG_BT_HCI_MESH_EXT)
u8_t is_mesh:1;
#endif /* CONFIG_BT_HCI_MESH_EXT */
struct lll_adv_pdu adv_data;
struct lll_adv_pdu scan_rsp;
};
int lll_adv_init(void);
int lll_adv_reset(void);
void lll_adv_prepare(void *param);
static inline struct pdu_adv *lll_adv_pdu_alloc(struct lll_adv_pdu *pdu,
u8_t *idx)
{
u8_t last;
if (pdu->first == pdu->last) {
last = pdu->last + 1;
if (last == DOUBLE_BUFFER_SIZE) {
last = 0;
}
} else {
last = pdu->last;
}
*idx = last;
return (void *)pdu->pdu[last];
}
static inline void lll_adv_pdu_enqueue(struct lll_adv_pdu *pdu, u8_t idx)
{
pdu->last = idx;
}
static inline struct pdu_adv *lll_adv_data_alloc(struct lll_adv *lll, u8_t *idx)
{
return lll_adv_pdu_alloc(&lll->adv_data, idx);
}
static inline void lll_adv_data_enqueue(struct lll_adv *lll, u8_t idx)
{
lll_adv_pdu_enqueue(&lll->adv_data, idx);
}
static inline struct pdu_adv *lll_adv_data_peek(struct lll_adv *lll)
{
return (void *)lll->adv_data.pdu[lll->adv_data.last];
}
static inline struct pdu_adv *lll_adv_scan_rsp_alloc(struct lll_adv *lll,
u8_t *idx)
{
return lll_adv_pdu_alloc(&lll->scan_rsp, idx);
}
static inline void lll_adv_scan_rsp_enqueue(struct lll_adv *lll, u8_t idx)
{
lll_adv_pdu_enqueue(&lll->scan_rsp, idx);
}
static inline struct pdu_adv *lll_adv_scan_rsp_peek(struct lll_adv *lll)
{
return (void *)lll->scan_rsp.pdu[lll->scan_rsp.last];
}
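
The first/last indices above implement a single-producer,
single-consumer double buffer between the ULL (thread) and the LLL
(ISR): the producer fills the side buffer returned by the alloc call
and publishes it with a single index store in the enqueue call. A
minimal producer-side sketch (lll is an advertising set context and
fill_new_adv_data a hypothetical helper):

struct lll_adv *lll; /* advertising set context, assumed initialized */
struct pdu_adv *pdu;
u8_t idx;

pdu = lll_adv_data_alloc(lll, &idx); /* side buffer, never the live one */
fill_new_adv_data(pdu);              /* hypothetical: write new AD data */
lll_adv_data_enqueue(lll, idx);      /* publish; the LLL switches to it
                                      * at the next safe point via the
                                      * *_latest_get() helpers below
                                      */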

View file

@@ -0,0 +1,45 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
static inline struct pdu_adv *lll_adv_pdu_latest_get(struct lll_adv_pdu *pdu,
u8_t *is_modified)
{
u8_t first;
first = pdu->first;
if (first != pdu->last) {
first += 1;
if (first == DOUBLE_BUFFER_SIZE) {
first = 0;
}
pdu->first = first;
*is_modified = 1;
}
return (void *)pdu->pdu[first];
}
static inline struct pdu_adv *lll_adv_data_latest_get(struct lll_adv *lll,
u8_t *is_modified)
{
return lll_adv_pdu_latest_get(&lll->adv_data, is_modified);
}
static inline struct pdu_adv *lll_adv_scan_rsp_latest_get(struct lll_adv *lll,
u8_t *is_modified)
{
return lll_adv_pdu_latest_get(&lll->scan_rsp, is_modified);
}
static inline struct pdu_adv *lll_adv_data_curr_get(struct lll_adv *lll)
{
return (void *)lll->adv_data.pdu[lll->adv_data.first];
}
static inline struct pdu_adv *lll_adv_scan_rsp_curr_get(struct lll_adv *lll)
{
return (void *)lll->scan_rsp.pdu[lll->scan_rsp.first];
}

View file

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/types.h>
#include <soc.h>
#include <device.h>
#include <clock_control.h>
#include <drivers/clock_control/nrf_clock_control.h>
#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_clock
#include "common/log.h"
#include "hal/debug.h"
#define DRV_NAME CONFIG_CLOCK_CONTROL_NRF_K32SRC_DRV_NAME
#define K32SRC CLOCK_CONTROL_NRF_K32SRC
static u8_t is_k32src_stable;
void lll_clock_wait(void)
{
if (!is_k32src_stable) {
struct device *clk_k32;
is_k32src_stable = 1;
clk_k32 = device_get_binding(DRV_NAME);
LL_ASSERT(clk_k32);
while (clock_control_on(clk_k32, (void *)K32SRC)) {
DEBUG_CPU_SLEEP(1);
k_cpu_idle();
DEBUG_CPU_SLEEP(0);
}
}
}

View file

@@ -0,0 +1,7 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
void lll_clock_wait(void);

View file

@@ -0,0 +1,913 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include <stddef.h>
#include <toolchain.h>
#include <zephyr/types.h>
#include <misc/util.h>
#include <drivers/clock_control/nrf_clock_control.h>
#include "util/memq.h"
#include "util/mfifo.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "pdu.h"
#include "lll.h"
#include "lll_conn.h"
#include "lll_internal.h"
#include "lll_tim_internal.h"
#include "lll_prof_internal.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_conn
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
static int init_reset(void);
static void isr_done(void *param);
static void isr_cleanup(void *param);
static void isr_race(void *param);
static int isr_rx_pdu(struct lll_conn *lll, struct pdu_data *pdu_data_rx,
struct node_tx **tx_release, u8_t *is_rx_enqueue);
static struct pdu_data *empty_tx_enqueue(struct lll_conn *lll);
static u16_t const sca_ppm_lut[] = {500, 250, 150, 100, 75, 50, 30, 20};
static u8_t crc_expire;
static u8_t crc_valid;
static u16_t trx_cnt;
#if defined(CONFIG_BT_CTLR_LE_ENC)
static u8_t mic_state;
#endif /* CONFIG_BT_CTLR_LE_ENC */
static MFIFO_DEFINE(conn_ack, sizeof(struct lll_tx),
CONFIG_BT_CTLR_TX_BUFFERS);
int lll_conn_init(void)
{
int err;
err = init_reset();
if (err) {
return err;
}
return 0;
}
int lll_conn_reset(void)
{
int err;
MFIFO_INIT(conn_ack);
err = init_reset();
if (err) {
return err;
}
return 0;
}
u8_t lll_conn_sca_local_get(void)
{
return CLOCK_CONTROL_NRF_K32SRC_ACCURACY;
}
u32_t lll_conn_ppm_local_get(void)
{
return sca_ppm_lut[CLOCK_CONTROL_NRF_K32SRC_ACCURACY];
}
u32_t lll_conn_ppm_get(u8_t sca)
{
return sca_ppm_lut[sca];
}
void lll_conn_prepare_reset(void)
{
trx_cnt = 0;
crc_expire = 0;
crc_valid = 0;
#if defined(CONFIG_BT_CTLR_LE_ENC)
mic_state = LLL_CONN_MIC_NONE;
#endif /* CONFIG_BT_CTLR_LE_ENC */
}
int lll_conn_is_abort_cb(void *next, int prio, void *curr,
lll_prepare_cb_t *resume_cb, int *resume_prio)
{
return -ECANCELED;
}
void lll_conn_abort_cb(struct lll_prepare_param *prepare_param, void *param)
{
int err;
/* NOTE: This is not a prepare being cancelled */
if (!prepare_param) {
/* Perform event abort here.
* After event has been cleanly aborted, clean up resources
* and dispatch event done.
*/
radio_isr_set(isr_done, param);
radio_disable();
return;
}
/* NOTE: Else, clean up the top-half preparations of the aborted event
* currently in the preparation pipeline.
*/
err = lll_clk_off();
LL_ASSERT(!err || err == -EBUSY);
lll_done(param);
}
void lll_conn_isr_rx(void *param)
{
struct node_tx *tx_release = NULL;
struct lll_conn *lll = param;
struct pdu_data *pdu_data_rx;
struct pdu_data *pdu_data_tx;
struct node_rx_pdu *node_rx;
u8_t is_empty_pdu_tx_retry;
u8_t is_crc_backoff = 0;
u8_t is_rx_enqueue = 0;
u8_t is_ull_rx = 0;
u8_t rssi_ready;
u8_t trx_done;
u8_t is_done;
u8_t crc_ok;
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
lll_prof_latency_capture();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
/* Read radio status and events */
trx_done = radio_is_done();
if (trx_done) {
crc_ok = radio_crc_is_valid();
rssi_ready = radio_rssi_is_ready();
} else {
crc_ok = rssi_ready = 0;
}
/* Clear radio status and events */
radio_status_reset();
radio_tmr_status_reset();
radio_rssi_status_reset();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \
defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_pa_lna_disable();
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */
if (!trx_done) {
radio_isr_set(isr_done, param);
radio_disable();
return;
}
trx_cnt++;
node_rx = ull_pdu_rx_alloc_peek(1);
LL_ASSERT(node_rx);
pdu_data_rx = (void *)node_rx->pdu;
if (crc_ok) {
u32_t err;
err = isr_rx_pdu(lll, pdu_data_rx, &tx_release, &is_rx_enqueue);
if (err) {
goto lll_conn_isr_rx_exit;
}
/* Reset CRC expiry counter */
crc_expire = 0;
/* CRC valid flag used to detect supervision timeout */
crc_valid = 1;
} else {
/* Start CRC error countdown, if not already started */
if (crc_expire == 0) {
crc_expire = 2;
}
/* CRC error countdown */
crc_expire--;
is_crc_backoff = (crc_expire == 0);
}
/* prepare tx packet */
is_empty_pdu_tx_retry = lll->empty;
lll_conn_pdu_tx_prep(lll, &pdu_data_tx);
/* Decide on event continuation and hence Radio Shorts to use */
is_done = is_crc_backoff || ((crc_ok) && (pdu_data_rx->md == 0) &&
(pdu_data_tx->len == 0));
if (is_done) {
radio_isr_set(isr_done, param);
if (0) {
#if defined(CONFIG_BT_CENTRAL)
/* Event done for master */
} else if (!lll->role) {
radio_disable();
/* assert if radio packet ptr is not set and radio
* started tx.
*/
LL_ASSERT(!radio_is_ready());
/* Restore state if last transmitted was empty PDU */
lll->empty = is_empty_pdu_tx_retry;
goto lll_conn_isr_rx_exit;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
/* Event done for slave */
} else {
radio_switch_complete_and_disable();
#endif /* CONFIG_BT_PERIPHERAL */
}
} else {
radio_isr_set(lll_conn_isr_tx, param);
radio_tmr_tifs_set(TIFS_US);
#if defined(CONFIG_BT_CTLR_PHY)
radio_switch_complete_and_rx(lll->phy_rx);
#else /* !CONFIG_BT_CTLR_PHY */
radio_switch_complete_and_rx(0);
#endif /* !CONFIG_BT_CTLR_PHY */
/* capture end of Tx-ed PDU, used to calculate HCTO. */
radio_tmr_end_capture();
}
/* Fill sn and nesn */
pdu_data_tx->sn = lll->sn;
pdu_data_tx->nesn = lll->nesn;
/* setup the radio tx packet buffer */
lll_conn_tx_pkt_set(lll, pdu_data_tx);
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
/* PA enable is overwriting packet end used in ISR profiling, hence
* back it up for later use.
*/
lll_prof_radio_end_backup();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
radio_gpio_pa_setup();
#if defined(CONFIG_BT_CTLR_PHY)
radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US -
radio_rx_chain_delay_get(lll->phy_rx, 1) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#else /* !CONFIG_BT_CTLR_PHY */
radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US -
radio_rx_chain_delay_get(0, 0) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#endif /* !CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */
/* assert if radio packet ptr is not set and radio started tx */
LL_ASSERT(!radio_is_ready());
lll_conn_isr_rx_exit:
/* Save the AA captured for the first Rx in connection event */
if (!radio_tmr_aa_restore()) {
radio_tmr_aa_save(radio_tmr_aa_get());
}
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
lll_prof_cputime_capture();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
if (tx_release) {
struct lll_tx *tx;
u8_t idx;
LL_ASSERT(lll->handle != 0xFFFF);
idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&tx);
LL_ASSERT(tx);
tx->handle = lll->handle;
tx->node = tx_release;
MFIFO_ENQUEUE(conn_ack, idx);
is_ull_rx = 1;
}
if (is_rx_enqueue) {
LL_ASSERT(lll->handle != 0xFFFF);
ull_pdu_rx_alloc();
node_rx->hdr.type = NODE_RX_TYPE_DC_PDU;
node_rx->hdr.handle = lll->handle;
ull_rx_put(node_rx->hdr.link, node_rx);
is_ull_rx = 1;
}
if (is_ull_rx) {
ull_rx_sched();
}
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
/* Collect RSSI for connection */
if (rssi_ready) {
u8_t rssi = radio_rssi_get();
lll->rssi_latest = rssi;
if (((lll->rssi_reported - rssi) & 0xFF) >
LLL_CONN_RSSI_THRESHOLD) {
if (lll->rssi_sample_count) {
lll->rssi_sample_count--;
}
} else {
lll->rssi_sample_count = LLL_CONN_RSSI_SAMPLE_COUNT;
}
}
#else /* !CONFIG_BT_CTLR_CONN_RSSI */
ARG_UNUSED(rssi_ready);
#endif /* !CONFIG_BT_CTLR_CONN_RSSI */
#if defined(CONFIG_BT_CTLR_PROFILE_ISR)
lll_prof_send();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR */
}
void lll_conn_isr_tx(void *param)
{
struct lll_conn *lll = (void *)param;
u32_t hcto;
/* TODO: MOVE to a common interface, isr_lll_radio_status? */
/* Clear radio status and events */
radio_status_reset();
radio_tmr_status_reset();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \
defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_pa_lna_disable();
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */
/* TODO: MOVE ^^ */
radio_isr_set(lll_conn_isr_rx, param);
radio_tmr_tifs_set(TIFS_US);
#if defined(CONFIG_BT_CTLR_PHY)
radio_switch_complete_and_tx(lll->phy_rx, 0,
lll->phy_tx,
lll->phy_flags);
#else /* !CONFIG_BT_CTLR_PHY */
radio_switch_complete_and_tx(0, 0, 0, 0);
#endif /* !CONFIG_BT_CTLR_PHY */
lll_conn_rx_pkt_set(lll);
/* assert if radio packet ptr is not set and radio started rx */
LL_ASSERT(!radio_is_ready());
/* +/- 2us active clock jitter, +1 us hcto compensation */
hcto = radio_tmr_tifs_base_get() + TIFS_US + 4 + 1;
#if defined(CONFIG_BT_CTLR_PHY)
hcto += radio_rx_chain_delay_get(lll->phy_rx, 1);
hcto += addr_us_get(lll->phy_rx);
hcto -= radio_tx_chain_delay_get(lll->phy_tx, lll->phy_flags);
#else /* !CONFIG_BT_CTLR_PHY */
hcto += radio_rx_chain_delay_get(0, 0);
hcto += addr_us_get(0);
hcto -= radio_tx_chain_delay_get(0, 0);
#endif /* !CONFIG_BT_CTLR_PHY */
radio_tmr_hcto_configure(hcto);
#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_CONN_RSSI)
if (!lll->role) {
radio_rssi_measure();
}
#endif /* CONFIG_BT_CENTRAL && CONFIG_BT_CTLR_CONN_RSSI */
#if defined(CONFIG_BT_CTLR_PROFILE_ISR) || \
defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
radio_tmr_end_capture();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR || CONFIG_BT_CTLR_GPIO_PA_PIN */
#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_lna_setup();
#if defined(CONFIG_BT_CTLR_PHY)
radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - 4 -
radio_tx_chain_delay_get(lll->phy_tx,
lll->phy_flags) -
CONFIG_BT_CTLR_GPIO_LNA_OFFSET);
#else /* !CONFIG_BT_CTLR_PHY */
radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + TIFS_US - 4 -
radio_tx_chain_delay_get(0, 0) -
CONFIG_BT_CTLR_GPIO_LNA_OFFSET);
#endif /* !CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */
}
void lll_conn_isr_abort(void *param)
{
isr_cleanup(param);
}
void lll_conn_rx_pkt_set(struct lll_conn *lll)
{
struct node_rx_pdu *node_rx;
u16_t max_rx_octets;
u8_t phy;
node_rx = ull_pdu_rx_alloc_peek(1);
LL_ASSERT(node_rx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
max_rx_octets = lll->max_rx_octets;
#else /* !CONFIG_BT_CTLR_DATA_LENGTH */
max_rx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
phy = lll->phy_rx;
#else /* !CONFIG_BT_CTLR_PHY */
phy = 0;
#endif /* !CONFIG_BT_CTLR_PHY */
radio_phy_set(phy, 0);
if (0) {
#if defined(CONFIG_BT_CTLR_LE_ENC)
} else if (lll->enc_rx) {
radio_pkt_configure(8, (max_rx_octets + 4), (phy << 1) | 0x01);
radio_pkt_rx_set(radio_ccm_rx_pkt_set(&lll->ccm_rx, phy,
node_rx->pdu));
#endif /* CONFIG_BT_CTLR_LE_ENC */
} else {
radio_pkt_configure(8, max_rx_octets, (phy << 1) | 0x01);
radio_pkt_rx_set(node_rx->pdu);
}
}
void lll_conn_tx_pkt_set(struct lll_conn *lll, struct pdu_data *pdu_data_tx)
{
u16_t max_tx_octets;
u8_t phy, flags;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
max_tx_octets = lll->max_tx_octets;
#else /* !CONFIG_BT_CTLR_DATA_LENGTH */
max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
phy = lll->phy_tx;
flags = lll->phy_flags;
#else /* !CONFIG_BT_CTLR_PHY */
phy = 0;
flags = 0;
#endif /* !CONFIG_BT_CTLR_PHY */
radio_phy_set(phy, flags);
if (0) {
#if defined(CONFIG_BT_CTLR_LE_ENC)
} else if (lll->enc_tx) {
radio_pkt_configure(8, (max_tx_octets + 4), (phy << 1) | 0x01);
radio_pkt_tx_set(radio_ccm_tx_pkt_set(&lll->ccm_tx,
pdu_data_tx));
#endif /* CONFIG_BT_CTLR_LE_ENC */
} else {
radio_pkt_configure(8, max_tx_octets, (phy << 1) | 0x01);
radio_pkt_tx_set(pdu_data_tx);
}
}
void lll_conn_pdu_tx_prep(struct lll_conn *lll, struct pdu_data **pdu_data_tx)
{
struct node_tx *tx;
struct pdu_data *p;
memq_link_t *link;
if (lll->empty) {
*pdu_data_tx = empty_tx_enqueue(lll);
return;
}
link = memq_peek(lll->memq_tx.head, lll->memq_tx.tail, (void **)&tx);
if (!link) {
p = empty_tx_enqueue(lll);
} else {
u16_t max_tx_octets;
p = (void *)(tx->pdu + lll->packet_tx_head_offset);
if (!lll->packet_tx_head_len) {
lll->packet_tx_head_len = p->len;
}
if (lll->packet_tx_head_offset) {
p->ll_id = PDU_DATA_LLID_DATA_CONTINUE;
}
p->len = lll->packet_tx_head_len - lll->packet_tx_head_offset;
p->md = 0;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
#if defined(CONFIG_BT_CTLR_PHY)
switch (lll->phy_tx_time) {
default:
case BIT(0):
/* 1M PHY, 1us = 1 bit, hence divide by 8.
* Deduct 10 bytes for preamble (1), access address (4),
* header (2), and CRC (3).
*/
max_tx_octets = (lll->max_tx_time >> 3) - 10;
break;
case BIT(1):
/* 2M PHY, 1us = 2 bits, hence divide by 4.
* Deduct 11 bytes for preamble (2), access address (4),
* header (2), and CRC (3).
*/
max_tx_octets = (lll->max_tx_time >> 2) - 11;
break;
#if defined(CONFIG_BT_CTLR_PHY_CODED)
case BIT(2):
if (lll->phy_flags & 0x01) {
/* S8 Coded PHY, 8us = 1 bit, hence divide by
* 64.
* Subtract time for preamble (80), AA (256),
* CI (16), TERM1 (24), CRC (192) and
* TERM2 (24), total 592 us.
* Subtract 2 bytes for header.
*/
max_tx_octets = ((lll->max_tx_time - 592) >>
6) - 2;
} else {
/* S2 Coded PHY, 2us = 1 bit, hence divide by
* 16.
* Subtract time for preamble (80), AA (256),
* CI (16), TERM1 (24), CRC (48) and
* TERM2 (6), total 430 us.
* Subtract 2 bytes for header.
*/
max_tx_octets = ((lll->max_tx_time - 430) >>
4) - 2;
}
break;
#endif /* CONFIG_BT_CTLR_PHY_CODED */
}
#if defined(CONFIG_BT_CTLR_LE_ENC)
if (lll->enc_tx) {
/* deduct the MIC */
max_tx_octets -= 4;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
if (max_tx_octets > lll->max_tx_octets) {
max_tx_octets = lll->max_tx_octets;
}
#else /* !CONFIG_BT_CTLR_PHY */
max_tx_octets = lll->max_tx_octets;
#endif /* !CONFIG_BT_CTLR_PHY */
#else /* !CONFIG_BT_CTLR_DATA_LENGTH */
max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */
if (p->len > max_tx_octets) {
p->len = max_tx_octets;
p->md = 1;
}
if (link->next) {
p->md = 1;
}
}
*pdu_data_tx = p;
}
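The PHY branches above turn a time budget (max_tx_time) into a payload budget: subtract the fixed per-packet overhead, then divide by the air time per octet. A standalone check with the specification's maximum-length values; both 2120 us on 1M and 1064 us on 2M map back to 255 octets, i.e. 251 bytes of payload plus a 4-byte MIC.

#include <stdint.h>
#include <stdio.h>

/* 1M PHY: 8 us per octet; 10 octets of fixed overhead
 * (preamble 1 + access address 4 + header 2 + CRC 3).
 */
static uint16_t max_octets_1m(uint16_t max_tx_time_us)
{
	return (max_tx_time_us >> 3) - 10;
}

/* 2M PHY: 4 us per octet; 11 octets of fixed overhead
 * (preamble 2 + access address 4 + header 2 + CRC 3).
 */
static uint16_t max_octets_2m(uint16_t max_tx_time_us)
{
	return (max_tx_time_us >> 2) - 11;
}

int main(void)
{
	printf("1M, 2120 us -> %u octets\n", max_octets_1m(2120));
	printf("2M, 1064 us -> %u octets\n", max_octets_2m(1064));
	return 0;
}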
u8_t lll_conn_ack_last_idx_get(void)
{
return mfifo_conn_ack.l;
}
memq_link_t *lll_conn_ack_peek(u8_t *ack_last, u16_t *handle,
struct node_tx **node_tx)
{
struct lll_tx *tx;
tx = MFIFO_DEQUEUE_GET(conn_ack);
if (!tx) {
return NULL;
}
*ack_last = mfifo_conn_ack.l;
*handle = tx->handle;
*node_tx = tx->node;
return (*node_tx)->link;
}
memq_link_t *lll_conn_ack_by_last_peek(u8_t last, u16_t *handle,
struct node_tx **node_tx)
{
struct lll_tx *tx;
tx = mfifo_dequeue_get(mfifo_conn_ack.m, mfifo_conn_ack.s,
mfifo_conn_ack.f, last);
if (!tx) {
return NULL;
}
*handle = tx->handle;
*node_tx = tx->node;
return (*node_tx)->link;
}
void *lll_conn_ack_dequeue(void)
{
return MFIFO_DEQUEUE(conn_ack);
}
void lll_conn_tx_flush(void *param)
{
struct lll_conn *lll = param;
struct node_tx *node_tx;
memq_link_t *link;
link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
(void **)&node_tx);
while (link) {
struct pdu_data *p;
struct lll_tx *tx;
u8_t idx;
idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&tx);
LL_ASSERT(tx);
tx->handle = 0xFFFF;
tx->node = node_tx;
link->next = node_tx->next;
node_tx->link = link;
p = (void *)node_tx->pdu;
p->ll_id = PDU_DATA_LLID_RESV;
MFIFO_ENQUEUE(conn_ack, idx);
link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
(void **)&node_tx);
}
}
static int init_reset(void)
{
return 0;
}
static void isr_done(void *param)
{
struct event_done_extra *e;
/* TODO: MOVE to a common interface, isr_lll_radio_status? */
/* Clear radio status and events */
radio_status_reset();
radio_tmr_status_reset();
radio_filter_status_reset();
radio_ar_status_reset();
radio_rssi_status_reset();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN) || \
defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_pa_lna_disable();
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN || CONFIG_BT_CTLR_GPIO_LNA_PIN */
/* TODO: MOVE ^^ */
e = ull_event_done_extra_get();
e->type = EVENT_DONE_EXTRA_TYPE_CONN;
e->trx_cnt = trx_cnt;
e->crc_valid = crc_valid;
#if defined(CONFIG_BT_CTLR_LE_ENC)
e->mic_state = mic_state;
#endif /* CONFIG_BT_CTLR_LE_ENC */
if (trx_cnt) {
struct lll_conn *lll = param;
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && lll->role) {
u32_t preamble_to_addr_us;
#if defined(CONFIG_BT_CTLR_PHY)
preamble_to_addr_us =
addr_us_get(lll->phy_rx);
#else /* !CONFIG_BT_CTLR_PHY */
preamble_to_addr_us =
addr_us_get(0);
#endif /* !CONFIG_BT_CTLR_PHY */
e->slave.start_to_address_actual_us =
radio_tmr_aa_restore() - radio_tmr_ready_get();
e->slave.window_widening_event_us =
lll->slave.window_widening_event_us;
e->slave.preamble_to_addr_us = preamble_to_addr_us;
/* Reset window widening, as anchor point sync-ed */
lll->slave.window_widening_event_us = 0;
lll->slave.window_size_event_us = 0;
}
}
isr_cleanup(param);
}
static void isr_cleanup(void *param)
{
int err;
radio_isr_set(isr_race, param);
radio_tmr_stop();
err = lll_clk_off();
LL_ASSERT(!err || err == -EBUSY);
lll_done(NULL);
}
static void isr_race(void *param)
{
/* NOTE: lll_disable could have a race with ... */
radio_status_reset();
}
static int isr_rx_pdu(struct lll_conn *lll, struct pdu_data *pdu_data_rx,
struct node_tx **tx_release, u8_t *is_rx_enqueue)
{
/* Ack for tx-ed data */
if (pdu_data_rx->nesn != lll->sn) {
/* Increment serial number */
lll->sn++;
/* First ack (and redundantly any other ack) enables use of
* slave latency.
*/
if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && lll->role) {
lll->slave.latency_enabled = 1;
}
if (!lll->empty) {
struct pdu_data *pdu_data_tx;
u8_t pdu_data_tx_len;
struct node_tx *tx;
memq_link_t *link;
link = memq_peek(lll->memq_tx.head, lll->memq_tx.tail,
(void **)&tx);
LL_ASSERT(link);
pdu_data_tx = (void *)(tx->pdu +
lll->packet_tx_head_offset);
pdu_data_tx_len = pdu_data_tx->len;
#if defined(CONFIG_BT_CTLR_LE_ENC)
if (pdu_data_tx_len != 0) {
/* if encrypted increment tx counter */
if (lll->enc_tx) {
lll->ccm_tx.counter++;
}
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
lll->packet_tx_head_offset += pdu_data_tx_len;
if (lll->packet_tx_head_offset ==
lll->packet_tx_head_len) {
lll->packet_tx_head_len = 0;
lll->packet_tx_head_offset = 0;
memq_dequeue(lll->memq_tx.tail,
&lll->memq_tx.head, NULL);
link->next = tx->next;
tx->next = link;
*tx_release = tx;
}
} else {
lll->empty = 0;
}
}
/* process received data */
if ((pdu_data_rx->sn == lll->nesn) &&
/* ensure that we will NEVER use the rx buffer reserved for empty
* packet and internal control enqueue
*/
(ull_pdu_rx_alloc_peek(3) != 0)) {
/* Increment next expected serial number */
lll->nesn++;
if (pdu_data_rx->len != 0) {
#if defined(CONFIG_BT_CTLR_LE_ENC)
/* If required, wait for CCM to finish */
if (lll->enc_rx) {
u32_t done;
done = radio_ccm_is_done();
LL_ASSERT(done);
if (!radio_ccm_mic_is_valid()) {
/* Record MIC invalid */
mic_state = LLL_CONN_MIC_FAIL;
return -EINVAL;
}
/* Increment counter */
lll->ccm_rx.counter++;
/* Record MIC valid */
mic_state = LLL_CONN_MIC_PASS;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
/* Enqueue non-empty PDU */
*is_rx_enqueue = 1;
#if 0
/* MIC Failure Check or data rx during pause */
if ((_radio.conn_curr->enc_rx &&
!radio_ccm_mic_is_valid()) ||
(_radio.conn_curr->pause_rx &&
isr_rx_conn_enc_unexpected(_radio.conn_curr,
pdu_data_rx))) {
_radio.state = STATE_CLOSE;
radio_disable();
/* assert if radio packet ptr is not set and
* radio started tx
*/
LL_ASSERT(!radio_is_ready());
terminate_ind_rx_enqueue(_radio.conn_curr,
0x3d);
connection_release(_radio.conn_curr);
_radio.conn_curr = NULL;
return 1; /* terminated */
}
#endif
}
}
return 0;
}
static struct pdu_data *empty_tx_enqueue(struct lll_conn *lll)
{
struct pdu_data *p;
lll->empty = 1;
p = (void *)radio_pkt_empty_get();
p->ll_id = PDU_DATA_LLID_DATA_CONTINUE;
p->len = 0;
if (memq_peek(lll->memq_tx.head, lll->memq_tx.tail, NULL)) {
p->md = 1;
} else {
p->md = 0;
}
return p;
}
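
The acknowledgement half of isr_rx_pdu() above is the standard one-bit Link Layer scheme: a received NESN that differs from our SN acknowledges our last PDU, and a received SN that equals our NESN marks the PDU as new data. A condensed sketch of just that bookkeeping; the real code additionally walks the Tx memq and handles empty PDUs and encryption counters.

#include <stdio.h>

struct ll_bits { unsigned int sn:1, nesn:1; };
struct pdu_bits { unsigned int sn:1, nesn:1; };

static void on_rx(struct ll_bits *ll, const struct pdu_bits *rx,
		  int *acked, int *is_new)
{
	/* peer's NESN moved past our SN: our last PDU was received */
	*acked = (rx->nesn != ll->sn);
	if (*acked) {
		ll->sn++;    /* 1-bit field: wraps modulo 2 */
	}

	/* peer's SN matches what we expect next: fresh data */
	*is_new = (rx->sn == ll->nesn);
	if (*is_new) {
		ll->nesn++;
	}
}

int main(void)
{
	struct ll_bits ll = { 0, 0 };
	struct pdu_bits rx = { 0, 1 };  /* acks us and carries new data */
	int acked, is_new;

	on_rx(&ll, &rx, &acked, &is_new);
	printf("acked=%d new=%d sn=%d nesn=%d\n",
	       acked, is_new, (int)ll.sn, (int)ll.nesn);
	return 0;
}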


@ -0,0 +1,981 @@
/*
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include <zephyr.h>
#include <misc/byteorder.h>
#include <bluetooth/hci.h>
#include "hal/ccm.h"
#include "util/util.h"
#include "util/memq.h"
#include "pdu.h"
#include "ll.h"
#include "lll.h"
#include "lll_adv.h"
#include "lll_scan.h"
#include "lll_conn.h"
#include "lll_filter.h"
#include "ull_adv_types.h"
#include "ull_scan_types.h"
#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_scan_internal.h"
#define ADDR_TYPE_ANON 0xFF
#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_filter
#include "common/log.h"
#include "hal/debug.h"
/* Hardware whitelist */
static struct ll_filter wl_filter;
u8_t wl_anon;
#if defined(CONFIG_BT_CTLR_PRIVACY)
#include "common/rpa.h"
/* Whitelist peer list */
static struct {
u8_t taken:1;
u8_t id_addr_type:1;
u8_t rl_idx;
bt_addr_t id_addr;
} wl[WL_SIZE];
static u8_t rl_enable;
static struct rl_dev {
u8_t taken:1;
u8_t rpas_ready:1;
u8_t pirk:1;
u8_t lirk:1;
u8_t dev:1;
u8_t wl:1;
u8_t id_addr_type:1;
bt_addr_t id_addr;
u8_t local_irk[16];
u8_t pirk_idx;
bt_addr_t curr_rpa;
bt_addr_t peer_rpa;
bt_addr_t *local_rpa;
} rl[CONFIG_BT_CTLR_RL_SIZE];
static u8_t peer_irks[CONFIG_BT_CTLR_RL_SIZE][16];
static u8_t peer_irk_rl_ids[CONFIG_BT_CTLR_RL_SIZE];
static u8_t peer_irk_count;
static bt_addr_t local_rpas[CONFIG_BT_CTLR_RL_SIZE];
BUILD_ASSERT(ARRAY_SIZE(wl) < FILTER_IDX_NONE);
BUILD_ASSERT(ARRAY_SIZE(rl) < FILTER_IDX_NONE);
/* Hardware filter for the resolving list */
static struct ll_filter rl_filter;
#define DEFAULT_RPA_TIMEOUT_MS (900 * 1000)
u32_t rpa_timeout_ms;
s64_t rpa_last_ms;
struct k_delayed_work rpa_work;
#define LIST_MATCH(list, i, type, addr) (list[i].taken && \
(list[i].id_addr_type == (type & 0x1)) && \
!memcmp(list[i].id_addr.val, addr, BDADDR_SIZE))
#else /* CONFIG_BT_CTLR_PRIVACY */
static void filter_clear(struct ll_filter *filter)
{
filter->enable_bitmask = 0;
filter->addr_type_bitmask = 0;
}
static void filter_insert(struct ll_filter *filter, int index, u8_t addr_type,
u8_t *bdaddr)
{
filter->enable_bitmask |= BIT(index);
filter->addr_type_bitmask |= ((addr_type & 0x01) << index);
memcpy(&filter->bdaddr[index][0], bdaddr, BDADDR_SIZE);
}
static u32_t filter_add(struct ll_filter *filter, u8_t addr_type, u8_t *bdaddr)
{
int index;
if (filter->enable_bitmask == 0xFF) {
return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
}
for (index = 0;
(filter->enable_bitmask & BIT(index));
index++) {
}
filter_insert(filter, index, addr_type, bdaddr);
return 0;
}
static u32_t filter_remove(struct ll_filter *filter, u8_t addr_type,
u8_t *bdaddr)
{
int index;
if (!filter->enable_bitmask) {
return BT_HCI_ERR_INVALID_PARAM;
}
index = 8;
while (index--) {
if ((filter->enable_bitmask & BIT(index)) &&
(((filter->addr_type_bitmask >> index) & 0x01) ==
(addr_type & 0x01)) &&
!memcmp(filter->bdaddr[index], bdaddr, BDADDR_SIZE)) {
filter->enable_bitmask &= ~BIT(index);
filter->addr_type_bitmask &= ~BIT(index);
return 0;
}
}
return BT_HCI_ERR_INVALID_PARAM;
}
#endif /* !CONFIG_BT_CTLR_PRIVACY */
#if defined(CONFIG_BT_CTLR_PRIVACY)
static void wl_clear(void)
{
for (int i = 0; i < WL_SIZE; i++) {
wl[i].taken = 0;
}
}
static u8_t wl_find(u8_t addr_type, u8_t *addr, u8_t *free)
{
int i;
if (free) {
*free = FILTER_IDX_NONE;
}
for (i = 0; i < WL_SIZE; i++) {
if (LIST_MATCH(wl, i, addr_type, addr)) {
return i;
} else if (free && !wl[i].taken && (*free == FILTER_IDX_NONE)) {
*free = i;
}
}
return FILTER_IDX_NONE;
}
static u32_t wl_add(bt_addr_le_t *id_addr)
{
u8_t i, j;
i = wl_find(id_addr->type, id_addr->a.val, &j);
/* Duplicate check */
if (i < ARRAY_SIZE(wl)) {
return BT_HCI_ERR_INVALID_PARAM;
} else if (j >= ARRAY_SIZE(wl)) {
return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
}
i = j;
wl[i].id_addr_type = id_addr->type & 0x1;
bt_addr_copy(&wl[i].id_addr, &id_addr->a);
/* Get index to Resolving List if applicable */
j = ll_rl_find(id_addr->type, id_addr->a.val, NULL);
if (j < ARRAY_SIZE(rl)) {
wl[i].rl_idx = j;
rl[j].wl = 1;
} else {
wl[i].rl_idx = FILTER_IDX_NONE;
}
wl[i].taken = 1;
return 0;
}
static u32_t wl_remove(bt_addr_le_t *id_addr)
{
/* find the device and mark it as empty */
u8_t i = wl_find(id_addr->type, id_addr->a.val, NULL);
if (i < ARRAY_SIZE(wl)) {
u8_t j = wl[i].rl_idx;
if (j < ARRAY_SIZE(rl)) {
rl[j].wl = 0;
}
wl[i].taken = 0;
return 0;
}
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
bt_addr_t *ctrl_lrpa_get(u8_t rl_idx)
{
if ((rl_idx >= ARRAY_SIZE(rl)) || !rl[rl_idx].lirk ||
!rl[rl_idx].rpas_ready) {
return NULL;
}
return rl[rl_idx].local_rpa;
}
u8_t *ctrl_irks_get(u8_t *count)
{
*count = peer_irk_count;
return (u8_t *)peer_irks;
}
u8_t ctrl_rl_idx(bool whitelist, u8_t devmatch_id)
{
u8_t i;
if (whitelist) {
LL_ASSERT(devmatch_id < ARRAY_SIZE(wl));
LL_ASSERT(wl[devmatch_id].taken);
i = wl[devmatch_id].rl_idx;
} else {
LL_ASSERT(devmatch_id < ARRAY_SIZE(rl));
i = devmatch_id;
LL_ASSERT(rl[i].taken);
}
return i;
}
u8_t ctrl_rl_irk_idx(u8_t irkmatch_id)
{
u8_t i;
LL_ASSERT(irkmatch_id < peer_irk_count);
i = peer_irk_rl_ids[irkmatch_id];
LL_ASSERT(i < CONFIG_BT_CTLR_RL_SIZE);
LL_ASSERT(rl[i].taken);
return i;
}
bool ctrl_irk_whitelisted(u8_t rl_idx)
{
if (rl_idx >= ARRAY_SIZE(rl)) {
return false;
}
LL_ASSERT(rl[rl_idx].taken);
return rl[rl_idx].wl;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */
struct ll_filter *ctrl_filter_get(bool whitelist)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
if (whitelist) {
return &wl_filter;
}
return &rl_filter;
#else
LL_ASSERT(whitelist);
return &wl_filter;
#endif
}
u8_t ll_wl_size_get(void)
{
return WL_SIZE;
}
u8_t ll_wl_clear(void)
{
#if defined(CONFIG_BT_BROADCASTER)
if (ull_adv_filter_pol_get(0)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_BROADCASTER */
#if defined(CONFIG_BT_OBSERVER)
if (ull_scan_filter_pol_get(0) & 0x1) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_OBSERVER */
#if defined(CONFIG_BT_CTLR_PRIVACY)
wl_clear();
#else
filter_clear(&wl_filter);
#endif /* CONFIG_BT_CTLR_PRIVACY */
wl_anon = 0;
return 0;
}
u8_t ll_wl_add(bt_addr_le_t *addr)
{
#if defined(CONFIG_BT_BROADCASTER)
if (ull_adv_filter_pol_get(0)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_BROADCASTER */
#if defined(CONFIG_BT_OBSERVER)
if (ull_scan_filter_pol_get(0) & 0x1) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_OBSERVER */
if (addr->type == ADDR_TYPE_ANON) {
wl_anon = 1;
return 0;
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
return wl_add(addr);
#else
return filter_add(&wl_filter, addr->type, addr->a.val);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}
u8_t ll_wl_remove(bt_addr_le_t *addr)
{
#if defined(CONFIG_BT_BROADCASTER)
if (ull_adv_filter_pol_get(0)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_BROADCASTER */
#if defined(CONFIG_BT_OBSERVER)
if (ull_scan_filter_pol_get(0) & 0x1) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_OBSERVER */
if (addr->type == ADDR_TYPE_ANON) {
wl_anon = 0;
return 0;
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
return wl_remove(addr);
#else
return filter_remove(&wl_filter, addr->type, addr->a.val);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}
#if defined(CONFIG_BT_CTLR_PRIVACY)
static void filter_wl_update(void)
{
u8_t i;
/* Populate filter from wl peers */
for (i = 0; i < WL_SIZE; i++) {
u8_t j;
if (!wl[i].taken) {
continue;
}
j = wl[i].rl_idx;
if (!rl_enable || j >= ARRAY_SIZE(rl) || !rl[j].pirk ||
rl[j].dev) {
filter_insert(&wl_filter, i, wl[i].id_addr_type,
wl[i].id_addr.val);
}
}
}
static void filter_rl_update(void)
{
u8_t i;
/* Populate filter from rl peers */
for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
if (rl[i].taken) {
filter_insert(&rl_filter, i, rl[i].id_addr_type,
rl[i].id_addr.val);
}
}
}
void ll_filters_adv_update(u8_t adv_fp)
{
/* Clear before populating filter */
filter_clear(&wl_filter);
/* enabling advertising */
if (adv_fp && !(ull_scan_filter_pol_get(0) & 0x1)) {
/* whitelist not in use, update whitelist */
filter_wl_update();
}
/* Clear before populating rl filter */
filter_clear(&rl_filter);
if (rl_enable && !ll_scan_is_enabled()) {
/* rl not in use, update resolving list LUT */
filter_rl_update();
}
}
void ll_filters_scan_update(u8_t scan_fp)
{
/* Clear before populating filter */
filter_clear(&wl_filter);
/* enabling scanning */
if ((scan_fp & 0x1) && !ull_adv_filter_pol_get(0)) {
/* whitelist not in use, update whitelist */
filter_wl_update();
}
/* Clear before populating rl filter */
filter_clear(&rl_filter);
if (rl_enable && !ll_adv_is_enabled(LL_ADV_SET_MAX)) {
/* rl not in use, update resolving list LUT */
filter_rl_update();
}
}
u8_t ll_rl_find(u8_t id_addr_type, u8_t *id_addr, u8_t *free)
{
u8_t i;
if (free) {
*free = FILTER_IDX_NONE;
}
for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
if (LIST_MATCH(rl, i, id_addr_type, id_addr)) {
return i;
} else if (free && !rl[i].taken && (*free == FILTER_IDX_NONE)) {
*free = i;
}
}
return FILTER_IDX_NONE;
}
bool ctrl_rl_idx_allowed(u8_t irkmatch_ok, u8_t rl_idx)
{
/* If AR is disabled or we don't know the device or we matched an IRK
* then we're all set.
*/
if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || irkmatch_ok) {
return true;
}
LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
LL_ASSERT(rl[rl_idx].taken);
return !rl[rl_idx].pirk || rl[rl_idx].dev;
}
void ll_rl_id_addr_get(u8_t rl_idx, u8_t *id_addr_type, u8_t *id_addr)
{
LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
LL_ASSERT(rl[rl_idx].taken);
*id_addr_type = rl[rl_idx].id_addr_type;
memcpy(id_addr, rl[rl_idx].id_addr.val, BDADDR_SIZE);
}
bool ctrl_rl_addr_allowed(u8_t id_addr_type, u8_t *id_addr, u8_t *rl_idx)
{
u8_t i, j;
/* If AR is disabled or we matched an IRK then we're all set. No hw
* filters are used in this case.
*/
if (!rl_enable || *rl_idx != FILTER_IDX_NONE) {
return true;
}
for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
if (rl[i].taken && (rl[i].id_addr_type == id_addr_type)) {
u8_t *addr = rl[i].id_addr.val;
for (j = 0; j < BDADDR_SIZE; j++) {
if (addr[j] != id_addr[j]) {
break;
}
}
if (j == BDADDR_SIZE) {
*rl_idx = i;
return !rl[i].pirk || rl[i].dev;
}
}
}
return true;
}
bool ctrl_rl_addr_resolve(u8_t id_addr_type, u8_t *id_addr, u8_t rl_idx)
{
/* Unable to resolve if AR is disabled, no RL entry or no local IRK */
if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || !rl[rl_idx].lirk) {
return false;
}
if ((id_addr_type != 0) && ((id_addr[5] & 0xc0) == 0x40)) {
return bt_rpa_irk_matches(rl[rl_idx].local_irk,
(bt_addr_t *)id_addr);
}
return false;
}
bool ctrl_rl_enabled(void)
{
return rl_enable;
}
#if defined(CONFIG_BT_BROADCASTER)
void ll_rl_pdu_adv_update(struct ll_adv_set *adv, u8_t idx,
struct pdu_adv *pdu)
{
u8_t *adva = pdu->type == PDU_ADV_TYPE_SCAN_RSP ?
&pdu->scan_rsp.addr[0] :
&pdu->adv_ind.addr[0];
/* AdvA */
if (idx < ARRAY_SIZE(rl) && rl[idx].lirk) {
LL_ASSERT(rl[idx].rpas_ready);
pdu->tx_addr = 1;
memcpy(adva, rl[idx].local_rpa->val, BDADDR_SIZE);
} else {
pdu->tx_addr = adv->own_addr_type & 0x1;
ll_addr_get(adv->own_addr_type & 0x1, adva);
}
/* TargetA */
if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
if (idx < ARRAY_SIZE(rl) && rl[idx].pirk) {
pdu->rx_addr = 1;
memcpy(&pdu->direct_ind.tgt_addr[0],
rl[idx].peer_rpa.val, BDADDR_SIZE);
} else {
pdu->rx_addr = adv->id_addr_type;
memcpy(&pdu->direct_ind.tgt_addr[0],
adv->id_addr, BDADDR_SIZE);
}
}
}
static void rpa_adv_refresh(struct ll_adv_set *adv)
{
struct radio_adv_data *radio_adv_data;
struct pdu_adv *prev;
struct pdu_adv *pdu;
u8_t last;
u8_t idx;
if (adv->own_addr_type != BT_ADDR_LE_PUBLIC_ID &&
adv->own_addr_type != BT_ADDR_LE_RANDOM_ID) {
return;
}
radio_adv_data = radio_adv_data_get();
prev = (struct pdu_adv *)&radio_adv_data->data[radio_adv_data->last][0];
/* use the last index in the double buffer */
if (radio_adv_data->first == radio_adv_data->last) {
last = radio_adv_data->last + 1;
if (last == DOUBLE_BUFFER_SIZE) {
last = 0;
}
} else {
last = radio_adv_data->last;
}
/* update adv pdu fields. */
pdu = (struct pdu_adv *)&radio_adv_data->data[last][0];
pdu->type = prev->type;
pdu->rfu = 0;
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
pdu->chan_sel = prev->chan_sel;
} else {
pdu->chan_sel = 0;
}
idx = ll_rl_find(adv->id_addr_type, adv->id_addr, NULL);
LL_ASSERT(idx < ARRAY_SIZE(rl));
ll_rl_pdu_adv_update(adv, idx, pdu);
memcpy(&pdu->adv_ind.data[0], &prev->adv_ind.data[0],
prev->len - BDADDR_SIZE);
pdu->len = prev->len;
/* commit the update so controller picks it. */
radio_adv_data->last = last;
}
#endif
static void rl_clear(void)
{
for (u8_t i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
rl[i].taken = 0;
}
peer_irk_count = 0;
}
static int rl_access_check(bool check_ar)
{
if (check_ar) {
/* If address resolution is disabled, allow immediately */
if (!rl_enable) {
return -1;
}
}
return (ll_adv_is_enabled(LL_ADV_SET_MAX) ||
ll_scan_is_enabled()) ? 0 : 1;
}
void ll_rl_rpa_update(bool timeout)
{
u8_t i;
int err;
s64_t now = k_uptime_get();
bool all = timeout || (rpa_last_ms == -1) ||
(now - rpa_last_ms >= rpa_timeout_ms);
BT_DBG("");
for (i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
if ((rl[i].taken) && (all || !rl[i].rpas_ready)) {
if (rl[i].pirk) {
u8_t irk[16];
/* TODO: move this swap to the driver level */
sys_memcpy_swap(irk, peer_irks[rl[i].pirk_idx],
16);
err = bt_rpa_create(irk, &rl[i].peer_rpa);
LL_ASSERT(!err);
}
if (rl[i].lirk) {
bt_addr_t rpa;
err = bt_rpa_create(rl[i].local_irk, &rpa);
LL_ASSERT(!err);
/* pointer read/write assumed to be atomic
* so that if ISR fires the local_rpa pointer
* will always point to a valid full RPA
*/
rl[i].local_rpa = &rpa;
bt_addr_copy(&local_rpas[i], &rpa);
rl[i].local_rpa = &local_rpas[i];
}
rl[i].rpas_ready = 1;
}
}
if (all) {
rpa_last_ms = now;
}
if (timeout) {
#if defined(CONFIG_BT_BROADCASTER)
struct ll_adv_set *adv;
/* TODO: foreach adv set */
adv = ll_adv_is_enabled_get(0);
if (adv) {
rpa_adv_refresh(adv);
}
#endif
}
}
static void rpa_timeout(struct k_work *work)
{
ll_rl_rpa_update(true);
k_delayed_work_submit(&rpa_work, rpa_timeout_ms);
}
static void rpa_refresh_start(void)
{
if (!rl_enable) {
return;
}
BT_DBG("");
k_delayed_work_submit(&rpa_work, rpa_timeout_ms);
}
static void rpa_refresh_stop(void)
{
if (!rl_enable) {
return;
}
k_delayed_work_cancel(&rpa_work);
}
void ll_adv_scan_state_cb(u8_t bm)
{
if (bm) {
rpa_refresh_start();
} else {
rpa_refresh_stop();
}
}
u32_t ll_rl_size_get(void)
{
return CONFIG_BT_CTLR_RL_SIZE;
}
u32_t ll_rl_clear(void)
{
if (!rl_access_check(false)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
rl_clear();
return 0;
}
u32_t ll_rl_add(bt_addr_le_t *id_addr, const u8_t pirk[16],
const u8_t lirk[16])
{
u8_t i, j;
if (!rl_access_check(false)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
i = ll_rl_find(id_addr->type, id_addr->a.val, &j);
/* Duplicate check */
if (i < ARRAY_SIZE(rl)) {
return BT_HCI_ERR_INVALID_PARAM;
} else if (j >= ARRAY_SIZE(rl)) {
return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
}
/* Device not found but empty slot found */
i = j;
bt_addr_copy(&rl[i].id_addr, &id_addr->a);
rl[i].id_addr_type = id_addr->type & 0x1;
rl[i].pirk = mem_nz((u8_t *)pirk, 16);
rl[i].lirk = mem_nz((u8_t *)lirk, 16);
if (rl[i].pirk) {
/* cross-reference */
rl[i].pirk_idx = peer_irk_count;
peer_irk_rl_ids[peer_irk_count] = i;
/* AAR requires big-endian IRKs */
sys_memcpy_swap(peer_irks[peer_irk_count++], pirk, 16);
}
if (rl[i].lirk) {
memcpy(rl[i].local_irk, lirk, 16);
rl[i].local_rpa = NULL;
}
memset(rl[i].curr_rpa.val, 0x00, sizeof(rl[i].curr_rpa));
rl[i].rpas_ready = 0;
/* Default to Network Privacy */
rl[i].dev = 0;
/* Add reference to a whitelist entry */
j = wl_find(id_addr->type, id_addr->a.val, NULL);
if (j < ARRAY_SIZE(wl)) {
wl[j].rl_idx = i;
rl[i].wl = 1;
} else {
rl[i].wl = 0;
}
rl[i].taken = 1;
return 0;
}
u32_t ll_rl_remove(bt_addr_le_t *id_addr)
{
u8_t i;
if (!rl_access_check(false)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
/* find the device and mark it as empty */
i = ll_rl_find(id_addr->type, id_addr->a.val, NULL);
if (i < ARRAY_SIZE(rl)) {
u8_t j, k;
if (rl[i].pirk) {
/* Swap with last item */
u8_t pi = rl[i].pirk_idx, pj = peer_irk_count - 1;
if (pj && pi != pj) {
memcpy(peer_irks[pi], peer_irks[pj], 16);
for (k = 0;
k < CONFIG_BT_CTLR_RL_SIZE;
k++) {
if (rl[k].taken && rl[k].pirk &&
rl[k].pirk_idx == pj) {
rl[k].pirk_idx = pi;
peer_irk_rl_ids[pi] = k;
break;
}
}
}
peer_irk_count--;
}
/* Check if referenced by a whitelist entry */
j = wl_find(id_addr->type, id_addr->a.val, NULL);
if (j < ARRAY_SIZE(wl)) {
wl[j].rl_idx = FILTER_IDX_NONE;
}
rl[i].taken = 0;
return 0;
}
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
void ll_rl_crpa_set(u8_t id_addr_type, u8_t *id_addr, u8_t rl_idx, u8_t *crpa)
{
if ((crpa[5] & 0xc0) == 0x40) {
if (id_addr) {
/* find the device and return its RPA */
rl_idx = ll_rl_find(id_addr_type, id_addr, NULL);
}
if (rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].taken) {
memcpy(rl[rl_idx].curr_rpa.val, crpa,
sizeof(bt_addr_t));
}
}
}
u32_t ll_rl_crpa_get(bt_addr_le_t *id_addr, bt_addr_t *crpa)
{
u8_t i;
/* find the device and return its RPA */
i = ll_rl_find(id_addr->type, id_addr->a.val, NULL);
if (i < ARRAY_SIZE(rl) &&
mem_nz(rl[i].curr_rpa.val, sizeof(rl[i].curr_rpa.val))) {
bt_addr_copy(crpa, &rl[i].curr_rpa);
return 0;
}
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
u32_t ll_rl_lrpa_get(bt_addr_le_t *id_addr, bt_addr_t *lrpa)
{
u8_t i;
/* find the device and return the local RPA */
i = ll_rl_find(id_addr->type, id_addr->a.val, NULL);
if (i < ARRAY_SIZE(rl)) {
bt_addr_copy(lrpa, rl[i].local_rpa);
return 0;
}
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
u32_t ll_rl_enable(u8_t enable)
{
if (!rl_access_check(false)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
switch (enable) {
case BT_HCI_ADDR_RES_DISABLE:
rl_enable = 0;
break;
case BT_HCI_ADDR_RES_ENABLE:
rl_enable = 1;
break;
default:
return BT_HCI_ERR_INVALID_PARAM;
}
return 0;
}
void ll_rl_timeout_set(u16_t timeout)
{
rpa_timeout_ms = timeout * 1000;
}
u32_t ll_priv_mode_set(bt_addr_le_t *id_addr, u8_t mode)
{
u8_t i;
if (!rl_access_check(false)) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
/* find the device and mark it as empty */
i = ll_rl_find(id_addr->type, id_addr->a.val, NULL);
if (i < ARRAY_SIZE(rl)) {
switch (mode) {
case BT_HCI_LE_PRIVACY_MODE_NETWORK:
rl[i].dev = 0;
break;
case BT_HCI_LE_PRIVACY_MODE_DEVICE:
rl[i].dev = 1;
break;
default:
return BT_HCI_ERR_INVALID_PARAM;
}
} else {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
return 0;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */
void ll_filter_reset(bool init)
{
wl_anon = 0;
#if defined(CONFIG_BT_CTLR_PRIVACY)
wl_clear();
rl_enable = 0;
rpa_timeout_ms = DEFAULT_RPA_TIMEOUT_MS;
rpa_last_ms = -1;
rl_clear();
if (init) {
k_delayed_work_init(&rpa_work, rpa_timeout);
} else {
k_delayed_work_cancel(&rpa_work);
}
#else
filter_clear(&wl_filter);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}
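
Without privacy support, the whitelist above is a plain eight-slot table driven by two bitmasks: one bit per enabled slot and one bit per slot's address type. A self-contained sketch of the insertion path; the struct mirrors `ll_filter` and error handling is reduced to a return code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BDADDR_SIZE 6
#define BIT(n) (1UL << (n))

struct filter {
	uint8_t enable_bitmask;     /* one bit per used slot */
	uint8_t addr_type_bitmask;  /* public/random flag per slot */
	uint8_t bdaddr[8][BDADDR_SIZE];
};

static int filter_add_sketch(struct filter *f, uint8_t addr_type,
			     const uint8_t *bdaddr)
{
	int index;

	if (f->enable_bitmask == 0xFF) {
		return -1;  /* all eight hardware slots taken */
	}

	/* first free slot == lowest clear bit */
	for (index = 0; f->enable_bitmask & BIT(index); index++) {
	}

	f->enable_bitmask |= BIT(index);
	f->addr_type_bitmask |= (addr_type & 0x01) << index;
	memcpy(f->bdaddr[index], bdaddr, BDADDR_SIZE);
	return index;
}

int main(void)
{
	struct filter f = { 0 };
	const uint8_t addr[BDADDR_SIZE] = { 1, 2, 3, 4, 5, 6 };

	printf("slot %d\n", filter_add_sketch(&f, 1, addr));
	printf("enable 0x%02x types 0x%02x\n",
	       (unsigned)f.enable_bitmask, (unsigned)f.addr_type_bitmask);
	return 0;
}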


@ -0,0 +1,15 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
int lll_prepare_done(void *param);
int lll_done(void *param);
bool lll_is_done(void *param);
int lll_clk_on(void);
int lll_clk_on_wait(void);
int lll_clk_off(void);
u32_t lll_evt_offset_get(struct evt_hdr *evt);
u32_t lll_preempt_calc(struct evt_hdr *evt, u8_t ticker_id,
u32_t ticks_at_event);


@ -0,0 +1,208 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include <toolchain.h>
#include <zephyr/types.h>
#include <misc/util.h>
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"
#include "util/memq.h"
#include "pdu.h"
#include "lll.h"
#include "lll_vendor.h"
#include "lll_conn.h"
#include "lll_master.h"
#include "lll_chan.h"
#include "lll_internal.h"
#include "lll_tim_internal.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_master
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
static int init_reset(void);
static int prepare_cb(struct lll_prepare_param *prepare_param);
int lll_master_init(void)
{
int err;
err = init_reset();
if (err) {
return err;
}
return 0;
}
int lll_master_reset(void)
{
int err;
err = init_reset();
if (err) {
return err;
}
return 0;
}
void lll_master_prepare(void *param)
{
struct lll_prepare_param *p = param;
int err;
err = lll_clk_on();
LL_ASSERT(!err || err == -EINPROGRESS);
err = lll_prepare(lll_conn_is_abort_cb, lll_conn_abort_cb, prepare_cb,
0, p);
LL_ASSERT(!err || err == -EINPROGRESS);
}
static int init_reset(void)
{
return 0;
}
static int prepare_cb(struct lll_prepare_param *prepare_param)
{
struct lll_conn *lll = prepare_param->param;
struct pdu_data *pdu_data_tx;
u32_t ticks_at_event;
struct evt_hdr *evt;
u16_t event_counter;
u32_t remainder_us;
u8_t data_chan_use;
u32_t remainder;
u16_t lazy;
DEBUG_RADIO_START_M(1);
/* TODO: Do the below in ULL ? */
lazy = prepare_param->lazy;
/* save the latency for use in event */
lll->latency_prepare += lazy;
/* calc current event counter value */
event_counter = lll->event_counter + lll->latency_prepare;
/* store the next event counter value */
lll->event_counter = event_counter + 1;
/* TODO: Do the above in ULL ? */
/* Reset connection event global variables */
lll_conn_prepare_reset();
/* TODO: can we do something in ULL? */
lll->latency_event = lll->latency_prepare;
lll->latency_prepare = 0;
if (lll->data_chan_sel) {
#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
data_chan_use = lll_chan_sel_2(lll->event_counter - 1,
lll->data_chan_id,
&lll->data_chan_map[0],
lll->data_chan_count);
#else /* !CONFIG_BT_CTLR_CHAN_SEL_2 */
LL_ASSERT(0);
#endif /* !CONFIG_BT_CTLR_CHAN_SEL_2 */
} else {
data_chan_use = lll_chan_sel_1(&lll->data_chan_use,
lll->data_chan_hop,
lll->latency_event,
&lll->data_chan_map[0],
lll->data_chan_count);
}
/* Prepare the Tx PDU */
lll_conn_pdu_tx_prep(lll, &pdu_data_tx);
pdu_data_tx->sn = lll->sn;
pdu_data_tx->nesn = lll->nesn;
/* Start setting up of Radio h/w */
radio_reset();
/* TODO: other Tx Power settings */
radio_tx_power_set(RADIO_TXP_DEFAULT);
radio_aa_set(lll->access_addr);
radio_crc_configure(((0x5bUL) | ((0x06UL) << 8) | ((0x00UL) << 16)),
(((u32_t)lll->crc_init[2] << 16) |
((u32_t)lll->crc_init[1] << 8) |
((u32_t)lll->crc_init[0])));
lll_chan_set(data_chan_use);
/* setup the radio tx packet buffer */
lll_conn_tx_pkt_set(lll, pdu_data_tx);
radio_isr_set(lll_conn_isr_tx, lll);
radio_tmr_tifs_set(TIFS_US);
#if defined(CONFIG_BT_CTLR_PHY)
radio_switch_complete_and_rx(lll->phy_rx);
#else /* !CONFIG_BT_CTLR_PHY */
radio_switch_complete_and_rx(0);
#endif /* !CONFIG_BT_CTLR_PHY */
ticks_at_event = prepare_param->ticks_at_expire;
evt = HDR_LLL2EVT(lll);
ticks_at_event += lll_evt_offset_get(evt);
ticks_at_event += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
remainder = prepare_param->remainder;
remainder_us = radio_tmr_start(1, ticks_at_event, remainder);
/* capture end of Tx-ed PDU, used to calculate HCTO. */
radio_tmr_end_capture();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
radio_gpio_pa_setup();
#if defined(CONFIG_BT_CTLR_PHY)
radio_gpio_pa_lna_enable(remainder_us +
radio_tx_ready_delay_get(lll->phy_tx,
lll->phy_flags) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#else /* !CONFIG_BT_CTLR_PHY */
radio_gpio_pa_lna_enable(remainder_us +
radio_tx_ready_delay_get(0, 0) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#endif /* !CONFIG_BT_CTLR_PHY */
#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
ARG_UNUSED(remainder_us);
#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \
(EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US)
/* check if preempt to start has changed */
if (lll_preempt_calc(evt, TICKER_ID_CONN_BASE, ticks_at_event)) {
radio_isr_set(lll_conn_isr_abort, lll);
radio_disable();
} else
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
{
u32_t ret;
ret = lll_prepare_done(lll);
LL_ASSERT(!ret);
}
DEBUG_RADIO_START_M(1);
return 0;
}
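
The counter bookkeeping at the top of prepare_cb() folds skipped ticker expiries (lazy) into the connection event count, so the event counter stays in step with the peer even across latency gaps. A minimal sketch of just that arithmetic, with made-up lazy values.

#include <stdint.h>
#include <stdio.h>

struct conn_ctr {
	uint16_t event_counter;    /* next connection event */
	uint16_t latency_prepare;  /* expiries skipped since last run */
	uint16_t latency_event;    /* latency consumed by this event */
};

static uint16_t prepare_event(struct conn_ctr *c, uint16_t lazy)
{
	uint16_t event_counter;

	c->latency_prepare += lazy;                       /* save latency */
	event_counter = c->event_counter + c->latency_prepare;
	c->event_counter = event_counter + 1;             /* store next */

	c->latency_event = c->latency_prepare;
	c->latency_prepare = 0;

	return event_counter;
}

int main(void)
{
	struct conn_ctr c = { 0, 0, 0 };

	printf("event %u\n", prepare_event(&c, 0));  /* 0 */
	printf("event %u\n", prepare_event(&c, 3));  /* 4: three skipped */
	printf("event %u\n", prepare_event(&c, 0));  /* 5 */
	return 0;
}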


@ -0,0 +1,9 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
int lll_master_init(void);
int lll_master_reset(void);
void lll_master_prepare(void *param);


@ -0,0 +1,138 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <toolchain.h>
#include <zephyr/types.h>
#include "hal/ccm.h"
#include "hal/radio.h"
#include "util/memq.h"
#include "pdu.h"
#include "lll.h"
static u8_t latency_min = (u8_t) -1;
static u8_t latency_max;
static u8_t latency_prev;
static u8_t cputime_min = (u8_t) -1;
static u8_t cputime_max;
static u8_t cputime_prev;
static u32_t timestamp_latency;
void lll_prof_latency_capture(void)
{
/* sample the packet timer, use it to calculate ISR latency
* and generate the profiling event at the end of the ISR.
*/
radio_tmr_sample();
}
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
static u32_t timestamp_radio_end;
u32_t lll_prof_radio_end_backup(void)
{
/* PA enable is overwriting packet end used in ISR profiling, hence
* back it up for later use.
*/
timestamp_radio_end = radio_tmr_end_get();
return timestamp_radio_end;
}
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */
void lll_prof_cputime_capture(void)
{
/* get the ISR latency sample */
timestamp_latency = radio_tmr_sample_get();
/* sample the packet timer again, use it to calculate ISR execution time
* and use it in profiling event
*/
radio_tmr_sample();
}
void lll_prof_send(void)
{
u8_t latency, cputime, prev;
u8_t chg = 0;
/* calculate the elapsed time in us since on-air radio packet end
* to ISR entry
*/
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
latency = timestamp_latency - timestamp_radio_end;
#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
latency = timestamp_latency - radio_tmr_end_get();
#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
/* check changes in min, avg and max of latency */
if (latency > latency_max) {
latency_max = latency;
chg = 1;
}
if (latency < latency_min) {
latency_min = latency;
chg = 1;
}
/* check for +/- 1us change */
prev = ((u16_t)latency_prev + latency) >> 1;
if (prev != latency_prev) {
latency_prev = latency;
chg = 1;
}
/* calculate the elapsed time in us since ISR entry */
cputime = radio_tmr_sample_get() - timestamp_latency;
/* check changes in min, avg and max */
if (cputime > cputime_max) {
cputime_max = cputime;
chg = 1;
}
if (cputime < cputime_min) {
cputime_min = cputime;
chg = 1;
}
/* check for +/- 1us change */
prev = ((u16_t)cputime_prev + cputime) >> 1;
if (prev != cputime_prev) {
cputime_prev = cputime;
chg = 1;
}
/* generate event if any change */
if (chg) {
struct node_rx_pdu *rx;
/* NOTE: enqueue only if rx buffer available, else ignore */
rx = ull_pdu_rx_alloc_peek(3);
if (rx) {
struct profile *p;
ull_pdu_rx_alloc();
rx->hdr.type = NODE_RX_TYPE_PROFILE;
rx->hdr.handle = 0xFFFF;
p = &((struct pdu_data *)rx->pdu)->profile;
p->lcur = latency;
p->lmin = latency_min;
p->lmax = latency_max;
p->cur = cputime;
p->min = cputime_min;
p->max = cputime_max;
ull_rx_put(rx->hdr.link, rx);
ull_rx_sched();
}
}
}
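
The half-sum comparison used above for both latency and CPU time is a cheap jitter filter: ((prev + cur) >> 1) equals prev only when cur is prev or prev + 1, so a single-microsecond increase is ignored while anything larger, or any decrease, refreshes the baseline and triggers a profile event. In isolation:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 and updates the baseline when `cur` moved beyond
 * +1 us of `*prev`; a +1 us wobble is treated as no change.
 */
static int changed(uint8_t *prev, uint8_t cur)
{
	uint8_t mid = ((uint16_t)*prev + cur) >> 1;

	if (mid != *prev) {
		*prev = cur;
		return 1;
	}
	return 0;
}

int main(void)
{
	uint8_t prev = 10;

	printf("%d\n", changed(&prev, 10));  /* 0: identical */
	printf("%d\n", changed(&prev, 11));  /* 0: +1 us jitter filtered */
	printf("%d\n", changed(&prev, 12));  /* 1: real change, prev = 12 */
	return 0;
}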


@ -0,0 +1,10 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
void lll_prof_latency_capture(void);
u32_t lll_prof_radio_end_backup(void);
void lll_prof_cputime_capture(void);
void lll_prof_send(void);

File diff suppressed because it is too large


@ -0,0 +1,45 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
struct lll_scan {
struct lll_hdr hdr;
#if defined(CONFIG_BT_CENTRAL)
/* NOTE: conn context has to be after lll_hdr */
struct lll_conn *conn;
u32_t conn_ticks_slot;
u32_t conn_win_offset_us;
u16_t conn_timeout;
#endif /* CONFIG_BT_CENTRAL */
u8_t state:1;
u8_t chan:2;
u8_t filter_policy:2;
u8_t adv_addr_type:1;
u8_t init_addr_type:1;
u8_t type:1;
#if defined(CONFIG_BT_CTLR_ADV_EXT)
u8_t phy:3;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#if defined(CONFIG_BT_CTLR_PRIVACY)
u8_t rpa_gen:1;
/* initiator only */
u8_t rl_idx;
#endif /* CONFIG_BT_CTLR_PRIVACY */
u8_t init_addr[BDADDR_SIZE];
u8_t adv_addr[BDADDR_SIZE];
u16_t interval;
u32_t ticks_window;
};
int lll_scan_init(void);
int lll_scan_reset(void);
void lll_scan_prepare(void *param);


@ -0,0 +1,253 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include <toolchain.h>
#include <zephyr/types.h>
#include <misc/util.h>
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"
#include "util/memq.h"
#include "pdu.h"
#include "lll.h"
#include "lll_vendor.h"
#include "lll_conn.h"
#include "lll_slave.h"
#include "lll_chan.h"
#include "lll_internal.h"
#include "lll_tim_internal.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_nordic_lll_slave
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
static int init_reset(void);
static int prepare_cb(struct lll_prepare_param *prepare_param);
int lll_slave_init(void)
{
int err;
err = init_reset();
if (err) {
return err;
}
return 0;
}
int lll_slave_reset(void)
{
int err;
err = init_reset();
if (err) {
return err;
}
return 0;
}
void lll_slave_prepare(void *param)
{
struct lll_prepare_param *p = param;
int err;
err = lll_clk_on();
LL_ASSERT(!err || err == -EINPROGRESS);
err = lll_prepare(lll_conn_is_abort_cb, lll_conn_abort_cb, prepare_cb,
0, p);
LL_ASSERT(!err || err == -EINPROGRESS);
}
static int init_reset(void)
{
return 0;
}
static int prepare_cb(struct lll_prepare_param *prepare_param)
{
struct lll_conn *lll = prepare_param->param;
u32_t ticks_at_event;
struct evt_hdr *evt;
u16_t event_counter;
u32_t remainder_us;
u8_t data_chan_use;
u32_t remainder;
u32_t hcto;
u16_t lazy;
DEBUG_RADIO_START_S(1);
/* TODO: Do the below in ULL ? */
lazy = prepare_param->lazy;
/* Calc window widening */
if (lll->role) {
lll->slave.window_widening_prepare_us +=
lll->slave.window_widening_periodic_us * (lazy + 1);
if (lll->slave.window_widening_prepare_us >
lll->slave.window_widening_max_us) {
lll->slave.window_widening_prepare_us =
lll->slave.window_widening_max_us;
}
}
/* save the latency for use in event */
lll->latency_prepare += lazy;
/* calc current event counter value */
event_counter = lll->event_counter + lll->latency_prepare;
/* store the next event counter value */
lll->event_counter = event_counter + 1;
/* TODO: Do the above in ULL ? */
/* Reset connection event global variables */
lll_conn_prepare_reset();
/* TODO: can we do something in ULL? */
lll->latency_event = lll->latency_prepare;
lll->latency_prepare = 0;
if (lll->data_chan_sel) {
#if defined(CONFIG_BT_CTLR_CHAN_SEL_2)
data_chan_use = lll_chan_sel_2(lll->event_counter - 1,
lll->data_chan_id,
&lll->data_chan_map[0],
lll->data_chan_count);
#else /* !CONFIG_BT_CTLR_CHAN_SEL_2 */
LL_ASSERT(0);
#endif /* !CONFIG_BT_CTLR_CHAN_SEL_2 */
} else {
data_chan_use = lll_chan_sel_1(&lll->data_chan_use,
lll->data_chan_hop,
lll->latency_event,
&lll->data_chan_map[0],
lll->data_chan_count);
}
/* current window widening */
lll->slave.window_widening_event_us +=
lll->slave.window_widening_prepare_us;
lll->slave.window_widening_prepare_us = 0;
if (lll->slave.window_widening_event_us >
lll->slave.window_widening_max_us) {
lll->slave.window_widening_event_us =
lll->slave.window_widening_max_us;
}
/* current window size */
lll->slave.window_size_event_us +=
lll->slave.window_size_prepare_us;
lll->slave.window_size_prepare_us = 0;
/* Start setting up Radio h/w */
radio_reset();
/* TODO: other Tx Power settings */
radio_tx_power_set(RADIO_TXP_DEFAULT);
lll_conn_rx_pkt_set(lll);
radio_aa_set(lll->access_addr);
radio_crc_configure(((0x5bUL) | ((0x06UL) << 8) | ((0x00UL) << 16)),
(((u32_t)lll->crc_init[2] << 16) |
((u32_t)lll->crc_init[1] << 8) |
((u32_t)lll->crc_init[0])));
lll_chan_set(data_chan_use);
radio_isr_set(lll_conn_isr_rx, lll);
radio_tmr_tifs_set(TIFS_US);
#if defined(CONFIG_BT_CTLR_PHY)
radio_switch_complete_and_tx(lll->phy_rx, 0, lll->phy_tx,
lll->phy_flags);
#else /* !CONFIG_BT_CTLR_PHY */
radio_switch_complete_and_tx(0, 0, 0, 0);
#endif /* !CONFIG_BT_CTLR_PHY */
ticks_at_event = prepare_param->ticks_at_expire;
evt = HDR_LLL2EVT(lll);
ticks_at_event += lll_evt_offset_get(evt);
ticks_at_event += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
remainder = prepare_param->remainder;
remainder_us = radio_tmr_start(0, ticks_at_event, remainder);
radio_tmr_aa_capture();
radio_tmr_aa_save(0);
hcto = remainder_us + EVENT_JITTER_US + (EVENT_JITTER_US << 2) +
(lll->slave.window_widening_event_us << 1) +
lll->slave.window_size_event_us;
#if defined(CONFIG_BT_CTLR_PHY)
hcto += radio_rx_ready_delay_get(lll->phy_rx, 1);
hcto += addr_us_get(lll->phy_rx);
hcto += radio_rx_chain_delay_get(lll->phy_rx, 1);
#else /* !CONFIG_BT_CTLR_PHY */
hcto += radio_rx_ready_delay_get(0, 0);
hcto += addr_us_get(0);
hcto += radio_rx_chain_delay_get(0, 0);
#endif /* !CONFIG_BT_CTLR_PHY */
radio_tmr_hcto_configure(hcto);
#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_lna_setup();
#if defined(CONFIG_BT_CTLR_PHY)
radio_gpio_pa_lna_enable(remainder_us +
radio_rx_ready_delay_get(lll->phy_rx, 1) -
CONFIG_BT_CTLR_GPIO_LNA_OFFSET);
#else /* !CONFIG_BT_CTLR_PHY */
radio_gpio_pa_lna_enable(remainder_us +
radio_rx_ready_delay_get(0, 0) -
CONFIG_BT_CTLR_GPIO_LNA_OFFSET);
#endif /* !CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */
#if defined(CONFIG_BT_CTLR_PROFILE_ISR) || \
defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
radio_tmr_end_capture();
#endif /* CONFIG_BT_CTLR_PROFILE_ISR || CONFIG_BT_CTLR_GPIO_PA_PIN */
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
radio_rssi_measure();
#endif /* CONFIG_BT_CTLR_CONN_RSSI */
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \
(EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US)
/* check if preempt to start has changed */
if (lll_preempt_calc(evt, TICKER_ID_CONN_BASE, ticks_at_event)) {
radio_isr_set(lll_conn_isr_abort, lll);
radio_disable();
} else
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
{
u32_t ret;
ret = lll_prepare_done(lll);
LL_ASSERT(!ret);
}
DEBUG_RADIO_START_S(1);
return 0;
}
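The header-complete timeout (hcto) computed above opens the slave's first-Rx window wide enough to absorb event jitter, the accumulated window widening on both sides of the anchor, any transmit-window resize, and the radio's own ready/chain delays. A sketch with placeholder numbers; all four constants below are illustrative stand-ins, not the HAL's real values.

#include <stdint.h>
#include <stdio.h>

#define EVENT_JITTER_US      16   /* placeholder */
#define RX_READY_DELAY_US   140   /* placeholder: radio_rx_ready_delay_get() */
#define ADDR_US_1M           40   /* preamble + access address on 1M PHY */
#define RX_CHAIN_DELAY_US    10   /* placeholder: radio_rx_chain_delay_get() */

static uint32_t hcto_us(uint32_t remainder_us, uint32_t widening_us,
			uint32_t window_size_us)
{
	uint32_t hcto = remainder_us + EVENT_JITTER_US +
			(EVENT_JITTER_US << 2) +  /* jitter x5 in total */
			(widening_us << 1) +      /* widen on both sides */
			window_size_us;

	hcto += RX_READY_DELAY_US;
	hcto += ADDR_US_1M;
	hcto += RX_CHAIN_DELAY_US;
	return hcto;
}

int main(void)
{
	/* 50 us of accumulated widening, no transmit window resize */
	printf("header complete timeout at t0 + %u us\n",
	       (unsigned)hcto_us(1000, 50, 0));
	return 0;
}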


@ -0,0 +1,9 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
int lll_slave_init(void);
int lll_slave_reset(void);
void lll_slave_prepare(void *param);


@ -0,0 +1,343 @@
/*
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stddef.h>
#include <string.h>
#include <toolchain.h>
#include <zephyr/types.h>
#include <soc.h>
#include <clock_control.h>
#include "hal/cpu.h"
#include "hal/cntr.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "util/memq.h"
#include "lll.h"
#include "lll_internal.h"
#include "ll_test.h"
#define CNTR_MIN_DELTA 3
static const u32_t test_sync_word = 0x71764129;
static u8_t test_phy;
static u8_t test_phy_flags;
static u16_t test_num_rx;
static bool started;
/* NOTE: The PRBS9 sequence used as packet payload.
* The bytes in the sequence are in the right order, but the bits of each byte
* in the array are reversed from those produced by the PRBS9 algorithm. This
* is done so that the MSbit is transmitted first on air.
*/
static const u8_t prbs9[] = {
0xFF, 0xC1, 0xFB, 0xE8, 0x4C, 0x90, 0x72, 0x8B,
0xE7, 0xB3, 0x51, 0x89, 0x63, 0xAB, 0x23, 0x23,
0x02, 0x84, 0x18, 0x72, 0xAA, 0x61, 0x2F, 0x3B,
0x51, 0xA8, 0xE5, 0x37, 0x49, 0xFB, 0xC9, 0xCA,
0x0C, 0x18, 0x53, 0x2C, 0xFD, 0x45, 0xE3, 0x9A,
0xE6, 0xF1, 0x5D, 0xB0, 0xB6, 0x1B, 0xB4, 0xBE,
0x2A, 0x50, 0xEA, 0xE9, 0x0E, 0x9C, 0x4B, 0x5E,
0x57, 0x24, 0xCC, 0xA1, 0xB7, 0x59, 0xB8, 0x87,
0xFF, 0xE0, 0x7D, 0x74, 0x26, 0x48, 0xB9, 0xC5,
0xF3, 0xD9, 0xA8, 0xC4, 0xB1, 0xD5, 0x91, 0x11,
0x01, 0x42, 0x0C, 0x39, 0xD5, 0xB0, 0x97, 0x9D,
0x28, 0xD4, 0xF2, 0x9B, 0xA4, 0xFD, 0x64, 0x65,
0x06, 0x8C, 0x29, 0x96, 0xFE, 0xA2, 0x71, 0x4D,
0xF3, 0xF8, 0x2E, 0x58, 0xDB, 0x0D, 0x5A, 0x5F,
0x15, 0x28, 0xF5, 0x74, 0x07, 0xCE, 0x25, 0xAF,
0x2B, 0x12, 0xE6, 0xD0, 0xDB, 0x2C, 0xDC, 0xC3,
0x7F, 0xF0, 0x3E, 0x3A, 0x13, 0xA4, 0xDC, 0xE2,
0xF9, 0x6C, 0x54, 0xE2, 0xD8, 0xEA, 0xC8, 0x88,
0x00, 0x21, 0x86, 0x9C, 0x6A, 0xD8, 0xCB, 0x4E,
0x14, 0x6A, 0xF9, 0x4D, 0xD2, 0x7E, 0xB2, 0x32,
0x03, 0xC6, 0x14, 0x4B, 0x7F, 0xD1, 0xB8, 0xA6,
0x79, 0x7C, 0x17, 0xAC, 0xED, 0x06, 0xAD, 0xAF,
0x0A, 0x94, 0x7A, 0xBA, 0x03, 0xE7, 0x92, 0xD7,
0x15, 0x09, 0x73, 0xE8, 0x6D, 0x16, 0xEE, 0xE1,
0x3F, 0x78, 0x1F, 0x9D, 0x09, 0x52, 0x6E, 0xF1,
0x7C, 0x36, 0x2A, 0x71, 0x6C, 0x75, 0x64, 0x44,
0x80, 0x10, 0x43, 0x4E, 0x35, 0xEC, 0x65, 0x27,
0x0A, 0xB5, 0xFC, 0x26, 0x69, 0x3F, 0x59, 0x99,
0x01, 0x63, 0x8A, 0xA5, 0xBF, 0x68, 0x5C, 0xD3,
0x3C, 0xBE, 0x0B, 0xD6, 0x76, 0x83, 0xD6, 0x57,
0x05, 0x4A, 0x3D, 0xDD, 0x81, 0x73, 0xC9, 0xEB,
0x8A, 0x84, 0x39, 0xF4, 0x36, 0x0B, 0xF7};
/* TODO: fill correct prbs15 */
static const u8_t prbs15[255] = { 0x00, };
static u8_t tx_req;
static u8_t volatile tx_ack;
static void isr_tx(void *param)
{
u32_t l, i, s, t;
/* Clear radio status and events */
radio_status_reset();
radio_tmr_status_reset();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
radio_gpio_pa_lna_disable();
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */
/* Exit if radio disabled */
if (((tx_req - tx_ack) & 0x01) == 0) {
tx_ack = tx_req;
return;
}
/* LE Test Packet Interval */
l = radio_tmr_end_get() - radio_tmr_ready_get();
i = ((l + 249 + 624) / 625) * 625;
t = radio_tmr_end_get() - l + i;
t -= radio_tx_ready_delay_get(test_phy, test_phy_flags);
/* Set timer capture in the future. */
radio_tmr_sample();
s = radio_tmr_sample_get();
while (t < s) {
t += 625;
}
/* Setup next Tx */
radio_switch_complete_and_disable();
radio_tmr_start_us(1, t);
radio_tmr_aa_capture();
radio_tmr_end_capture();
/* TODO: check for probable stale timer capture being set */
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
radio_gpio_pa_setup();
radio_gpio_pa_lna_enable(t + radio_tx_ready_delay_get(test_phy,
test_phy_flags) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#endif /* CONFIG_BT_CTLR_GPIO_PA_PIN */
}
static void isr_rx(void *param)
{
u8_t crc_ok = 0;
u8_t trx_done;
/* Read radio status and events */
trx_done = radio_is_done();
if (trx_done) {
crc_ok = radio_crc_is_valid();
}
/* Clear radio status and events */
radio_status_reset();
radio_tmr_status_reset();
/* Exit if radio disabled */
if (!trx_done) {
return;
}
/* Setup next Rx */
radio_switch_complete_and_rx(test_phy);
/* Count Rx-ed packets */
if (crc_ok) {
test_num_rx++;
}
}
static u32_t init(u8_t chan, u8_t phy, void (*isr)(void *))
{
int err;
if (started) {
return 1;
}
/* start coarse timer */
cntr_start();
/* Setup resources required by Radio */
err = lll_clk_on_wait();
/* Reset Radio h/w */
radio_reset();
radio_isr_set(isr, NULL);
/* Store value needed in Tx/Rx ISR */
if (phy < 0x04) {
test_phy = BIT(phy - 1);
test_phy_flags = 1;
} else {
test_phy = BIT(2);
test_phy_flags = 0;
}
/* Setup Radio in Tx/Rx */
/* NOTE: No whitening in test mode. */
radio_phy_set(test_phy, test_phy_flags);
radio_tmr_tifs_set(150);
radio_tx_power_max_set();
radio_freq_chan_set((chan << 1) + 2);
radio_aa_set((u8_t *)&test_sync_word);
radio_crc_configure(0x65b, 0x555555);
radio_pkt_configure(8, 255, (test_phy << 1));
return 0;
}
u32_t ll_test_tx(u8_t chan, u8_t len, u8_t type, u8_t phy)
{
u32_t start_us;
u8_t *payload;
u8_t *pdu;
u32_t err;
if ((type > 0x07) || !phy || (phy > 0x04)) {
return 1;
}
err = init(chan, phy, isr_tx);
if (err) {
return err;
}
tx_req++;
pdu = radio_pkt_scratch_get();
payload = &pdu[2];
switch (type) {
case 0x00:
memcpy(payload, prbs9, len);
break;
case 0x01:
memset(payload, 0x0f, len);
break;
case 0x02:
memset(payload, 0x55, len);
break;
case 0x03:
memcpy(payload, prbs15, len);
break;
case 0x04:
memset(payload, 0xff, len);
break;
case 0x05:
memset(payload, 0x00, len);
break;
case 0x06:
memset(payload, 0xf0, len);
break;
case 0x07:
memset(payload, 0xaa, len);
break;
}
pdu[0] = type;
pdu[1] = len;
radio_pkt_tx_set(pdu);
radio_switch_complete_and_disable();
start_us = radio_tmr_start(1, cntr_cnt_get() + CNTR_MIN_DELTA, 0);
radio_tmr_aa_capture();
radio_tmr_end_capture();
#if defined(CONFIG_BT_CTLR_GPIO_PA_PIN)
radio_gpio_pa_setup();
radio_gpio_pa_lna_enable(start_us +
radio_tx_ready_delay_get(test_phy,
test_phy_flags) -
CONFIG_BT_CTLR_GPIO_PA_OFFSET);
#else /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
ARG_UNUSED(start_us);
#endif /* !CONFIG_BT_CTLR_GPIO_PA_PIN */
started = true;
return 0;
}
u32_t ll_test_rx(u8_t chan, u8_t phy, u8_t mod_idx)
{
u32_t err;
if (!phy || (phy > 0x03)) {
return 1;
}
err = init(chan, phy, isr_rx);
if (err) {
return err;
}
radio_pkt_rx_set(radio_pkt_scratch_get());
radio_switch_complete_and_rx(test_phy);
radio_tmr_start(0, cntr_cnt_get() + CNTR_MIN_DELTA, 0);
#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_lna_on();
#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */
started = true;
return 0;
}
u32_t ll_test_end(u16_t *num_rx)
{
u8_t ack;
if (!started) {
return 1;
}
/* Return packets Rx-ed/Completed */
*num_rx = test_num_rx;
test_num_rx = 0;
/* Disable Radio, if in Rx test */
ack = tx_ack;
if (tx_req == ack) {
radio_disable();
} else {
/* Wait for Tx to complete */
tx_req = ack + 2;
while (tx_req != tx_ack) {
cpu_sleep();
}
}
/* Stop packet timer */
radio_tmr_stop();
/* Release resources acquired for Radio */
lll_clk_off();
/* Stop coarse timer */
cntr_stop();
#if defined(CONFIG_BT_CTLR_GPIO_LNA_PIN)
radio_gpio_lna_off();
#endif /* CONFIG_BT_CTLR_GPIO_LNA_PIN */
started = false;
return 0;
}
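/* Illustrative usage from the HCI glue (hypothetical call site, not part
 * of this file):
 *
 *	u16_t num_rx;
 *
 *	(void)ll_test_rx(19, 0x01, 0); // Rx test: chan 19 (2440 MHz), 1M PHY
 *	k_sleep(1000);                 // let test packets accumulate
 *	(void)ll_test_end(&num_rx);    // num_rx = packets with valid CRC
 */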


@ -0,0 +1,32 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#define TIFS_US 150
/* Macro to return PDU time */
#if defined(CONFIG_BT_CTLR_PHY_CODED)
#define PKT_US(octets, phy) \
(((phy) & BIT(2)) ? \
(80 + 256 + 16 + 24 + ((((2 + (octets) + 4) * 8) + 24 + 3) * 8)) : \
(((octets) + 14) * 8 / BIT(((phy) & 0x03) >> 1)))
#else /* !CONFIG_BT_CTLR_PHY_CODED */
#define PKT_US(octets, phy) \
(((octets) + 14) * 8 / BIT(((phy) & 0x03) >> 1))
#endif /* !CONFIG_BT_CTLR_PHY_CODED */
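/* Worked example (illustrative): a maximum length PDU of 255 octets on
 * the 1M PHY (BIT(0)) gives PKT_US(255, BIT(0)) = (255 + 14) * 8 =
 * 2152 us; the constant 14 covers the preamble (1), access address (4),
 * header (2), MIC (4) and CRC (3) octets, at 8 us per octet on 1M.
 */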
static inline u32_t addr_us_get(u8_t phy)
{
switch (phy) {
default:
case BIT(0):
return 40;
case BIT(1):
return 24;
case BIT(2):
return 376;
}
}
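/* The values above are the preamble plus access address air time:
 * 1M: (1 + 4) octets * 8 us = 40 us; 2M: (2 + 4) octets * 4 us = 24 us;
 * Coded: 80 us preamble + 256 us access address + 16 us CI + 24 us TERM1
 * = 376 us.
 */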


@ -0,0 +1,246 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include <stddef.h>
#include <toolchain.h>
#include <zephyr/types.h>
#if defined(CONFIG_BT_CTLR_DEBUG_PINS)
#if defined(CONFIG_PRINTK)
#undef CONFIG_PRINTK
#endif
#endif
#include "hal/ccm.h"
#include "util/mfifo.h"
#include "util/memq.h"
#include "ticker/ticker.h"
#include "pdu.h"
#include "lll.h"
#include "lll_conn.h"
#include "lll_tmp.h"
#include "lll_internal.h"
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
static MFIFO_DEFINE(tmp_ack, sizeof(struct lll_tx),
CONFIG_BT_TMP_TX_COUNT_MAX);
static int _init_reset(void);
static int _prepare_cb(struct lll_prepare_param *prepare_param);
static int _is_abort_cb(void *next, int prio, void *curr,
lll_prepare_cb_t *resume_cb, int *resume_prio);
static void _abort_cb(struct lll_prepare_param *prepare_param, void *param);
static int _emulate_tx_rx(void *param);
int lll_tmp_init(void)
{
int err;
err = _init_reset();
if (err) {
return err;
}
return 0;
}
int lll_tmp_reset(void)
{
int err;
MFIFO_INIT(tmp_ack);
err = _init_reset();
if (err) {
return err;
}
return 0;
}
void lll_tmp_prepare(void *param)
{
struct lll_prepare_param *p = param;
int err;
printk("\t\tlll_tmp_prepare (%p) enter.\n", p->param);
err = lll_clk_on();
printk("\t\tlll_clk_on: %d.\n", err);
err = lll_prepare(_is_abort_cb, _abort_cb, _prepare_cb, 0, p);
printk("\t\tlll_tmp_prepare (%p) exit (%d).\n", p->param, err);
}
u8_t lll_tmp_ack_last_idx_get(void)
{
return mfifo_tmp_ack.l;
}
memq_link_t *lll_tmp_ack_peek(u16_t *handle, struct node_tx **node_tx)
{
struct lll_tx *tx;
tx = MFIFO_DEQUEUE_GET(tmp_ack);
if (!tx) {
return NULL;
}
*handle = tx->handle;
*node_tx = tx->node;
return (*node_tx)->link;
}
memq_link_t *lll_tmp_ack_by_last_peek(u8_t last, u16_t *handle,
struct node_tx **node_tx)
{
struct lll_tx *tx;
tx = mfifo_dequeue_get(mfifo_tmp_ack.m, mfifo_tmp_ack.s,
mfifo_tmp_ack.f, last);
if (!tx) {
return NULL;
}
*handle = tx->handle;
*node_tx = tx->node;
return (*node_tx)->link;
}
void *lll_tmp_ack_dequeue(void)
{
return MFIFO_DEQUEUE(tmp_ack);
}
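/* Illustrative consumer flow, assumed to run in ULL context (the
 * surrounding scheduling is hypothetical):
 *
 *	u16_t handle;
 *	struct node_tx *tx;
 *	memq_link_t *link;
 *
 *	link = lll_tmp_ack_peek(&handle, &tx);
 *	if (link) {
 *		// process the acknowledged node, then pop it
 *		(void)lll_tmp_ack_dequeue();
 *	}
 */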
static int _init_reset(void)
{
return 0;
}
static int _prepare_cb(struct lll_prepare_param *prepare_param)
{
int err;
printk("\t\t_prepare (%p) enter: expected %u, actual %u.\n",
prepare_param->param, prepare_param->ticks_at_expire,
ticker_ticks_now_get());
DEBUG_RADIO_PREPARE_A(1);
err = _emulate_tx_rx(prepare_param);
DEBUG_RADIO_PREPARE_A(1);
printk("\t\t_prepare (%p) exit (%d).\n", prepare_param->param, err);
return err;
}
static int _is_abort_cb(void *next, int prio, void *curr,
lll_prepare_cb_t *resume_cb, int *resume_prio)
{
static u8_t toggle;
toggle++;
return toggle & 0x01;
}
static void _abort_cb(struct lll_prepare_param *prepare_param, void *param)
{
int err;
printk("\t\t_abort (%p) enter.\n", param);
/* NOTE: This is not a prepare being cancelled */
if (!prepare_param) {
/* Perform event abort here.
* After event has been cleanly aborted, clean up resources
* and dispatch event done.
*/
/* Current event is done, pass NULL to lll_done(). */
param = NULL;
}
/* NOTE: Else clean the top half preparations of the aborted event
* currently in preparation pipeline.
*/
err = lll_clk_off();
printk("\t\tlll_clk_off: %d.\n", err);
lll_done(param);
printk("\t\tlll_done (%p).\n", param);
printk("\t\t_abort (%p) exit.\n", param);
}
static int _emulate_tx_rx(void *param)
{
struct lll_prepare_param *prepare_param = param;
struct lll_tmp *tmp = prepare_param->param;
struct node_tx *node_tx;
bool is_ull_rx = false;
memq_link_t *link;
void *free;
/* Tx */
link = memq_dequeue(tmp->memq_tx.tail, &tmp->memq_tx.head,
(void **)&node_tx);
while (link) {
struct lll_tx *tx;
u8_t idx;
idx = MFIFO_ENQUEUE_GET(tmp_ack, (void **)&tx);
LL_ASSERT(tx);
tx->handle = ull_tmp_handle_get(tmp);
tx->node = node_tx;
node_tx->link = link;
printk("\t\t_emulate_tx_rx: h= %u.\n", tx->handle);
MFIFO_ENQUEUE(tmp_ack, idx);
link = memq_dequeue(tmp->memq_tx.tail, &tmp->memq_tx.head,
(void **)&node_tx);
}
/* Rx */
free = ull_pdu_rx_alloc_peek(2);
if (free) {
struct node_rx_hdr *hdr = free;
void *_free;
_free = ull_pdu_rx_alloc();
LL_ASSERT(free == _free);
hdr->type = NODE_RX_TYPE_DC_PDU;
ull_rx_put(hdr->link, hdr);
is_ull_rx = true;
} else {
printk("\t\tOUT OF PDU RX MEMORY.\n");
}
if (is_ull_rx) {
ull_rx_sched();
}
return 0;
}


@ -0,0 +1,24 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
struct lll_tmp {
struct lll_hdr hdr;
MEMQ_DECLARE(tx);
memq_link_t _link; /* Dedicated thread allocatable */
memq_link_t *link_free; /* Thread allocatable reference */
};
int lll_tmp_init(void);
void lll_tmp_prepare(void *param);
u8_t lll_tmp_ack_last_idx_get(void);
memq_link_t *lll_tmp_ack_peek(u16_t *handle, struct node_tx **node_tx);
memq_link_t *lll_tmp_ack_by_last_peek(u8_t last, u16_t *handle,
struct node_tx **node_tx);
void *lll_tmp_ack_dequeue(void);
extern u16_t ull_tmp_handle_get(struct lll_tmp *tmp);


@ -0,0 +1,5 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/


@ -0,0 +1,12 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#define EVENT_OVERHEAD_XTAL_US 1500
#define EVENT_OVERHEAD_PREEMPT_US 0 /* if <= min, then dynamic preempt */
#define EVENT_OVERHEAD_PREEMPT_MIN_US 0
#define EVENT_OVERHEAD_PREEMPT_MAX_US EVENT_OVERHEAD_XTAL_US
#define EVENT_OVERHEAD_START_US 200
#define EVENT_JITTER_US 16
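/* These overheads are subtracted from radio timestamps when deriving
 * ticker anchor points; e.g. the slave role setup later in this commit
 * backs off EVENT_OVERHEAD_START_US plus three times EVENT_JITTER_US
 * (and the radio ready delay) from the on-air anchor.
 */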

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@ -0,0 +1,106 @@
/*
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include <zephyr.h>
#include <bluetooth/hci.h>
#include "hal/ccm.h"
#include "util/util.h"
#include "util/memq.h"
#include "pdu.h"
#include "lll.h"
#include "lll_adv.h"
#include "lll_conn.h"
#include "ull_internal.h"
#include "ull_adv_types.h"
#include "ull_adv_internal.h"
u8_t ll_adv_aux_random_addr_set(u8_t handle, u8_t *addr)
{
/* TODO: store in adv set instance */
return 0;
}
u8_t *ll_adv_aux_random_addr_get(u8_t handle, u8_t *addr)
{
/* TODO: copy adv set instance addr into addr and/or return reference */
return NULL;
}
u8_t ll_adv_aux_ad_data_set(u8_t handle, u8_t op, u8_t frag_pref, u8_t len,
u8_t *data)
{
struct pdu_adv_com_ext_adv *p;
struct ll_adv_set *adv;
struct ext_adv_hdr *h;
struct pdu_adv *prev;
struct pdu_adv *pdu;
u8_t idx;
/* TODO: */
adv = ull_adv_set_get(handle);
if (!adv) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
/* Don't update data if not extended advertising. */
prev = lll_adv_data_peek(&adv->lll);
if (prev->type != PDU_ADV_TYPE_EXT_IND) {
return 0;
}
pdu = lll_adv_data_alloc(&adv->lll, &idx);
p = (void *)&pdu->adv_ext_ind;
h = (void *)p->ext_hdr_adi_adv_data;
if (!h->aux_ptr) {
if (!len) {
return 0;
}
}
lll_adv_data_enqueue(&adv->lll, idx);
return 0;
}
u8_t ll_adv_aux_sr_data_set(u8_t handle, u8_t op, u8_t frag_pref, u8_t len,
u8_t *data)
{
/* TODO: */
return 0;
}
u16_t ll_adv_aux_max_data_length_get(void)
{
/* TODO: return a Kconfig value */
return 0;
}
u8_t ll_adv_aux_set_count_get(void)
{
/* TODO: return a Kconfig value */
return 0;
}
u8_t ll_adv_aux_set_remove(u8_t handle)
{
/* TODO: reset/release primary channel and Aux channel PDUs */
return 0;
}
u8_t ll_adv_aux_set_clear(void)
{
/* TODO: reset/release all adv set primary channel and Aux channel
* PDUs
*/
return 0;
}


@ -0,0 +1,16 @@
/*
* Copyright (c) 2017-2018 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
u8_t ll_adv_aux_random_addr_set(u8_t handle, u8_t *addr);
u8_t *ll_adv_aux_random_addr_get(u8_t handle, u8_t *addr);
u8_t ll_adv_aux_ad_data_set(u8_t handle, u8_t op, u8_t frag_pref, u8_t len,
u8_t *data);
u8_t ll_adv_aux_sr_data_set(u8_t handle, u8_t op, u8_t frag_pref, u8_t len,
u8_t *data);
u16_t ll_adv_aux_max_data_length_get(void);
u8_t ll_adv_aux_set_count_get(void);
u8_t ll_adv_aux_set_remove(u8_t handle);
u8_t ll_adv_aux_set_clear(void);


@ -0,0 +1,23 @@
/*
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
int ull_adv_init(void);
int ull_adv_reset(void);
/* Return ll_adv_set context (unconditional) */
struct ll_adv_set *ull_adv_set_get(u16_t handle);
/* Return the adv set handle given the adv set instance */
u16_t ull_adv_handle_get(struct ll_adv_set *adv);
/* Return ll_adv_set context if enabled */
struct ll_adv_set *ull_adv_is_enabled_get(u16_t handle);
/* Return flags, for now just: enabled */
u32_t ull_adv_is_enabled(u16_t handle);
/* Return filter policy used */
u32_t ull_adv_filter_pol_get(u16_t handle);


@ -0,0 +1,31 @@
/*
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
struct ll_adv_set {
struct evt_hdr evt;
struct ull_hdr ull;
struct lll_adv lll;
u8_t is_enabled:1;
#if defined(CONFIG_BT_PERIPHERAL)
memq_link_t *link_cc_free;
struct node_rx_pdu *node_rx_cc_free;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
u32_t interval;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
u16_t interval;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
#if defined(CONFIG_BT_CTLR_PRIVACY)
u8_t own_addr_type:2;
u8_t id_addr_type:1;
u8_t rl_idx;
u8_t id_addr[BDADDR_SIZE];
#endif /* CONFIG_BT_CTLR_PRIVACY */
};

File diff suppressed because it is too large.

@ -0,0 +1,29 @@
/*
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
/* Macro to convert time in us to connection interval units */
#define RADIO_CONN_EVENTS(x, y) ((u16_t)(((x) + (y) - 1) / (y)))
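/* e.g. RADIO_CONN_EVENTS(4000000, 50000) = 80: a 4 s supervision timeout
 * spans 80 events at a 50 ms connection interval (illustrative values).
 */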
struct ll_conn *ll_conn_acquire(void);
void ll_conn_release(struct ll_conn *conn);
u16_t ll_conn_handle_get(struct ll_conn *conn);
struct ll_conn *ll_conn_get(u16_t handle);
struct ll_conn *ll_connected_get(u16_t handle);
int ull_conn_init(void);
int ull_conn_reset(void);
u8_t ull_conn_chan_map_cpy(u8_t *chan_map);
void ull_conn_chan_map_set(u8_t *chan_map);
u8_t ull_conn_default_phy_tx_get(void);
u8_t ull_conn_default_phy_rx_get(void);
void ull_conn_setup(memq_link_t *link, struct node_rx_hdr *rx);
int ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx);
int ull_conn_llcp(struct ll_conn *conn, u32_t ticks_at_expire, u16_t lazy);
void ull_conn_done(struct node_rx_event_done *done);
void ull_conn_tx_demux(u8_t count);
void ull_conn_tx_lll_enqueue(struct ll_conn *conn, u8_t count);
void ull_conn_link_tx_release(void *link);
void ull_conn_tx_ack(struct ll_conn *conn, memq_link_t *link,
struct node_tx *tx);


@ -1,9 +1,202 @@
/*
 * Copyright (c) 2018-2019 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
struct ll_conn {
struct evt_hdr evt;
struct ull_hdr ull;
struct lll_conn lll;
u16_t connect_expire;
u16_t supervision_reload;
u16_t supervision_expire;
u16_t procedure_reload;
u16_t procedure_expire;
#if defined(CONFIG_BT_CTLR_LE_PING)
u16_t appto_reload;
u16_t appto_expire;
u16_t apto_reload;
u16_t apto_expire;
#endif /* CONFIG_BT_CTLR_LE_PING */
union {
struct {
u8_t fex_valid:1;
} common;
struct {
u8_t fex_valid:1;
u32_t ticks_to_offset;
} slave;
struct {
u8_t fex_valid:1;
} master;
};
u8_t llcp_req;
u8_t llcp_ack;
u8_t llcp_type;
union {
struct {
enum {
LLCP_CUI_STATE_INPROG,
LLCP_CUI_STATE_USE,
LLCP_CUI_STATE_SELECT
} state:2 __packed;
u8_t is_internal:1;
u16_t interval;
u16_t latency;
u16_t timeout;
u16_t instant;
u32_t win_offset_us;
u8_t win_size;
u16_t *pdu_win_offset;
u32_t ticks_anchor;
} conn_upd;
struct {
u8_t initiate;
u8_t chm[5];
u16_t instant;
} chan_map;
#if defined(CONFIG_BT_CTLR_PHY)
struct {
u8_t initiate:1;
u8_t cmd:1;
u8_t tx:3;
u8_t rx:3;
u16_t instant;
} phy_upd_ind;
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_LE_ENC)
struct {
u8_t initiate;
u8_t error_code;
u8_t rand[8];
u8_t ediv[2];
u8_t ltk[16];
u8_t skd[16];
} encryption;
#endif /* CONFIG_BT_CTLR_LE_ENC */
} llcp;
struct node_rx_pdu *llcp_rx;
u32_t llcp_features;
struct {
u8_t tx:1;
u8_t rx:1;
u8_t version_number;
u16_t company_id;
u16_t sub_version_number;
} llcp_version;
struct {
u8_t req;
u8_t ack;
u8_t reason_own;
u8_t reason_peer;
struct {
struct node_rx_hdr hdr;
u8_t reason;
} node_rx;
} llcp_terminate;
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
struct {
u8_t req;
u8_t ack;
enum {
LLCP_CPR_STATE_REQ,
LLCP_CPR_STATE_RSP,
LLCP_CPR_STATE_APP_REQ,
LLCP_CPR_STATE_APP_WAIT,
LLCP_CPR_STATE_RSP_WAIT,
LLCP_CPR_STATE_UPD
} state:3 __packed;
u8_t cmd:1;
u8_t disabled:1;
u8_t status;
u16_t interval_min;
u16_t interval_max;
u16_t latency;
u16_t timeout;
u8_t preferred_periodicity;
u16_t reference_conn_event_count;
u16_t offset0;
u16_t offset1;
u16_t offset2;
u16_t offset3;
u16_t offset4;
u16_t offset5;
u16_t *pdu_win_offset0;
u32_t ticks_ref;
u32_t ticks_to_offset_next;
} llcp_conn_param;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
struct {
u8_t req;
u8_t ack;
u8_t state:2;
#define LLCP_LENGTH_STATE_REQ 0
#define LLCP_LENGTH_STATE_ACK_WAIT 1
#define LLCP_LENGTH_STATE_RSP_WAIT 2
#define LLCP_LENGTH_STATE_RESIZE 3
u8_t pause_tx:1;
u16_t rx_octets;
u16_t tx_octets;
#if defined(CONFIG_BT_CTLR_PHY)
u16_t rx_time;
u16_t tx_time;
#endif /* CONFIG_BT_CTLR_PHY */
} llcp_length;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_PHY)
struct {
u8_t req;
u8_t ack;
u8_t state:2;
#define LLCP_PHY_STATE_REQ 0
#define LLCP_PHY_STATE_ACK_WAIT 1
#define LLCP_PHY_STATE_RSP_WAIT 2
#define LLCP_PHY_STATE_UPD 3
u8_t tx:3;
u8_t rx:3;
u8_t flags:1;
u8_t cmd:1;
} llcp_phy;
u8_t phy_pref_tx:3;
u8_t phy_pref_flags:1;
u8_t phy_pref_rx:3;
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_LE_ENC)
u8_t pause_rx:1;
u8_t pause_tx:1;
u8_t refresh:1;
#endif /* CONFIG_BT_CTLR_LE_ENC */
struct node_tx *tx_head;
struct node_tx *tx_ctrl;
struct node_tx *tx_ctrl_last;
struct node_tx *tx_data;
struct node_tx *tx_data_last;
u8_t chm_updated;
};
struct node_rx_cc {
	u8_t status;
	u8_t role;


@ -0,0 +1,32 @@
/*
* Copyright (c) 2017-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
static inline u8_t ull_ref_inc(struct ull_hdr *hdr)
{
return ++hdr->ref;
}
static inline void ull_hdr_init(struct ull_hdr *hdr)
{
hdr->disabled_cb = hdr->disabled_param = NULL;
}
void *ll_rx_link_alloc(void);
void ll_rx_link_release(void *link);
void *ll_rx_alloc(void);
void ll_rx_release(void *node_rx);
void *ll_pdu_rx_alloc_peek(u8_t count);
void *ll_pdu_rx_alloc(void);
void ll_rx_put(memq_link_t *link, void *rx);
void ll_rx_sched(void);
void ull_tx_ack_put(u16_t handle, struct node_tx *node_tx);
void ull_ticker_status_give(u32_t status, void *param);
u32_t ull_ticker_status_take(u32_t ret, u32_t volatile *ret_cb);
void *ull_disable_mark(void *param);
void *ull_disable_unmark(void *param);
void *ull_disable_mark_get(void);
int ull_disable(void *param);
u8_t ull_entropy_get(u8_t len, u8_t *rand);


@ -0,0 +1,894 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <bluetooth/hci.h>
#include "util/util.h"
#include "util/memq.h"
#include "hal/ticker.h"
#include "hal/ccm.h"
#include "util/mayfly.h"
#include "ticker/ticker.h"
#include "pdu.h"
#include "ll.h"
#include "ll_feat.h"
#include "lll.h"
#include "lll_vendor.h"
#include "lll_clock.h"
#include "lll_scan.h"
#include "lll_conn.h"
#include "lll_master.h"
#include "lll_tim_internal.h"
#include "ull_scan_types.h"
#include "ull_conn_types.h"
#include "ull_internal.h"
#include "ull_scan_internal.h"
#include "ull_conn_internal.h"
#include "ull_master_internal.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_ull_master
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
static void ticker_op_stop_scan_cb(u32_t status, void *params);
static void ticker_op_cb(u32_t status, void *params);
static u32_t access_addr_get(void);
u8_t ll_create_connection(u16_t scan_interval, u16_t scan_window,
u8_t filter_policy, u8_t peer_addr_type,
u8_t *p_peer_addr, u8_t own_addr_type,
u16_t interval, u16_t latency, u16_t timeout)
{
struct lll_conn *conn_lll;
struct ll_scan_set *scan;
u32_t conn_interval_us;
struct lll_scan *lll;
struct ll_conn *conn;
memq_link_t *link;
u32_t access_addr;
u32_t err;
u8_t hop;
scan = ull_scan_is_disabled_get(0);
if (!scan) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
lll = &scan->lll;
if (lll->conn) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
link = ll_rx_link_alloc();
if (!link) {
return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
}
conn = ll_conn_acquire();
if (!conn) {
ll_rx_link_release(link);
return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
}
err = ull_scan_params_set(scan, 0, scan_interval, scan_window,
own_addr_type, filter_policy);
if (err) {
ll_conn_release(conn);
ll_rx_link_release(link);
return err;
}
lll->adv_addr_type = peer_addr_type;
memcpy(lll->adv_addr, p_peer_addr, BDADDR_SIZE);
lll->conn_timeout = timeout;
lll->conn_ticks_slot = 0; /* TODO: */
conn_lll = &conn->lll;
access_addr = access_addr_get();
memcpy(conn_lll->access_addr, &access_addr,
sizeof(conn_lll->access_addr));
bt_rand(&conn_lll->crc_init[0], 3);
conn_lll->handle = 0xFFFF;
conn_lll->interval = interval;
conn_lll->latency = latency;
if (!conn_lll->link_tx_free) {
conn_lll->link_tx_free = &conn_lll->link_tx;
}
memq_init(conn_lll->link_tx_free, &conn_lll->memq_tx.head,
&conn_lll->memq_tx.tail);
conn_lll->link_tx_free = NULL;
conn_lll->packet_tx_head_len = 0;
conn_lll->packet_tx_head_offset = 0;
conn_lll->sn = 0;
conn_lll->nesn = 0;
conn_lll->empty = 0;
#if defined(CONFIG_BT_CTLR_LE_ENC)
conn_lll->enc_rx = 0;
conn_lll->enc_tx = 0;
#endif /* CONFIG_BT_CTLR_LE_ENC */
#if defined(CONFIG_BT_CTLR_PHY)
conn_lll->phy_tx = BIT(0);
conn_lll->phy_flags = 0;
conn_lll->phy_tx_time = BIT(0);
conn_lll->phy_rx = BIT(0);
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_CONN_RSSI)
conn_lll->rssi_latest = 0x7F;
conn_lll->rssi_reported = 0x7F;
conn_lll->rssi_sample_count = 0;
#endif /* CONFIG_BT_CTLR_CONN_RSSI */
/* FIXME: BEGIN: Move to ULL? */
conn_lll->latency_prepare = 0;
conn_lll->latency_event = 0;
conn_lll->event_counter = 0;
conn_lll->data_chan_count =
ull_conn_chan_map_cpy(conn_lll->data_chan_map);
bt_rand(&hop, sizeof(u8_t));
conn_lll->data_chan_hop = 5 + (hop % 12);
conn_lll->data_chan_sel = 0;
conn_lll->data_chan_use = 0;
conn_lll->role = 0;
/* FIXME: END: Move to ULL? */
conn->connect_expire = 6;
conn->supervision_expire = 0;
conn_interval_us = (u32_t)interval * 1250;
conn->supervision_reload = RADIO_CONN_EVENTS(timeout * 10000,
conn_interval_us);
conn->procedure_expire = 0;
conn->procedure_reload = RADIO_CONN_EVENTS(40000000,
conn_interval_us);
#if defined(CONFIG_BT_CTLR_LE_PING)
conn->apto_expire = 0;
/* APTO in no. of connection events */
conn->apto_reload = RADIO_CONN_EVENTS((30000000), conn_interval_us);
conn->appto_expire = 0;
/* Dispatch the LE Ping PDU 6 connection events (that the peer would
 * listen to) before the 30 s timeout.
 * TODO: the "peer listens to" window is greater than 30 s due to slave
 * latency.
 */
conn->appto_reload = (conn->apto_reload > (conn_lll->latency + 6)) ?
(conn->apto_reload - (conn_lll->latency + 6)) :
conn->apto_reload;
#endif /* CONFIG_BT_CTLR_LE_PING */
conn->common.fex_valid = 0;
conn->llcp_req = conn->llcp_ack = conn->llcp_type = 0;
conn->llcp_rx = NULL;
conn->llcp_features = LL_FEAT;
conn->llcp_version.tx = conn->llcp_version.rx = 0;
conn->llcp_terminate.reason_peer = 0;
/* NOTE: use allocated link for generating dedicated
* terminate ind rx node
*/
conn->llcp_terminate.node_rx.hdr.link = link;
#if defined(CONFIG_BT_CTLR_LE_ENC)
conn->pause_tx = conn->pause_rx = conn->refresh = 0;
#endif /* CONFIG_BT_CTLR_LE_ENC */
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
conn->llcp_conn_param.req = 0;
conn->llcp_conn_param.ack = 0;
conn->llcp_conn_param.disabled = 0;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
#if defined(CONFIG_BT_CTLR_PHY)
conn->llcp_phy.req = conn->llcp_phy.ack = 0;
conn->phy_pref_tx = ull_conn_default_phy_tx_get();
conn->phy_pref_rx = ull_conn_default_phy_rx_get();
conn->phy_pref_flags = 0;
#endif /* CONFIG_BT_CTLR_PHY */
conn->tx_head = conn->tx_ctrl = conn->tx_ctrl_last =
conn->tx_data = conn->tx_data_last = 0;
lll->conn = conn_lll;
ull_hdr_init(&conn->ull);
lll_hdr_init(&conn->lll, conn);
#if defined(CONFIG_BT_CTLR_PRIVACY)
ll_filters_scan_update(filter_policy);
if (!filter_policy && ctrl_rl_enabled()) {
/* Look up the resolving list */
rl_idx = ll_rl_find(peer_addr_type, peer_addr, NULL);
}
if (own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
own_addr_type == BT_ADDR_LE_RANDOM_ID) {
/* Generate RPAs if required */
ll_rl_rpa_update(false);
own_addr_type &= 0x1;
rpa_gen = 1;
}
#endif
/* wait for stable clocks */
lll_clock_wait();
return ull_scan_enable(scan);
}
u8_t ll_connect_disable(void **rx)
{
struct lll_conn *conn_lll;
struct ll_scan_set *scan;
u8_t status;
scan = ull_scan_is_enabled_get(0);
if (!scan) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn_lll = scan->lll.conn;
if (!conn_lll) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
status = ull_scan_disable(0, scan);
if (!status) {
struct ll_conn *conn = (void *)HDR_LLL2EVT(conn_lll);
struct node_rx_pdu *cc;
memq_link_t *link;
cc = (void *)&conn->llcp_terminate.node_rx;
link = cc->hdr.link;
LL_ASSERT(link);
/* free the memq link early, as caller could overwrite it */
ll_rx_link_release(link);
cc->hdr.type = NODE_RX_TYPE_CONNECTION;
cc->hdr.handle = 0xffff;
*((u8_t *)cc->pdu) = BT_HCI_ERR_UNKNOWN_CONN_ID;
*rx = cc;
}
return status;
}
u8_t ll_chm_update(u8_t *chm)
{
u16_t handle;
ull_conn_chan_map_set(chm);
handle = CONFIG_BT_MAX_CONN;
while (handle--) {
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn || conn->lll.role) {
continue;
}
if (conn->llcp_req != conn->llcp_ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
memcpy(conn->llcp.chan_map.chm, chm,
sizeof(conn->llcp.chan_map.chm));
/* conn->llcp.chan_map.instant = 0; */
conn->llcp.chan_map.initiate = 1;
conn->llcp_type = LLCP_CHAN_MAP;
conn->llcp_req++;
}
return 0;
}
#if defined(CONFIG_BT_CTLR_LE_ENC)
u8_t ll_enc_req_send(u16_t handle, u8_t *rand, u8_t *ediv, u8_t *ltk)
{
struct ll_conn *conn;
struct node_tx *tx;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
if (conn->llcp_req != conn->llcp_ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
tx = ll_tx_mem_acquire();
if (tx) {
struct pdu_data *pdu_data_tx;
pdu_data_tx = (void *)tx->pdu;
memcpy(&conn->llcp.encryption.ltk[0], ltk,
sizeof(conn->llcp.encryption.ltk));
if ((conn->lll.enc_rx == 0) && (conn->lll.enc_tx == 0)) {
struct pdu_data_llctrl_enc_req *enc_req;
pdu_data_tx->ll_id = PDU_DATA_LLID_CTRL;
pdu_data_tx->len =
offsetof(struct pdu_data_llctrl, enc_rsp) +
sizeof(struct pdu_data_llctrl_enc_req);
pdu_data_tx->llctrl.opcode =
PDU_DATA_LLCTRL_TYPE_ENC_REQ;
enc_req = (void *)
&pdu_data_tx->llctrl.enc_req;
memcpy(enc_req->rand, rand, sizeof(enc_req->rand));
enc_req->ediv[0] = ediv[0];
enc_req->ediv[1] = ediv[1];
bt_rand(enc_req->skdm, sizeof(enc_req->skdm));
bt_rand(enc_req->ivm, sizeof(enc_req->ivm));
} else if ((conn->lll.enc_rx != 0) && (conn->lll.enc_tx != 0)) {
memcpy(&conn->llcp.encryption.rand[0], rand,
sizeof(conn->llcp.encryption.rand));
conn->llcp.encryption.ediv[0] = ediv[0];
conn->llcp.encryption.ediv[1] = ediv[1];
pdu_data_tx->ll_id = PDU_DATA_LLID_CTRL;
pdu_data_tx->len = offsetof(struct pdu_data_llctrl,
enc_req);
pdu_data_tx->llctrl.opcode =
PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ;
} else {
ll_tx_mem_release(tx);
return BT_HCI_ERR_CMD_DISALLOWED;
}
if (ll_tx_mem_enqueue(handle, tx)) {
ll_tx_mem_release(tx);
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp.encryption.initiate = 1;
conn->llcp_type = LLCP_ENCRYPTION;
conn->llcp_req++;
return 0;
}
return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
void ull_master_setup(memq_link_t *link, struct node_rx_hdr *rx,
struct node_rx_ftr *ftr, struct lll_conn *lll)
{
u32_t conn_offset_us, conn_interval_us;
u8_t ticker_id_scan, ticker_id_conn;
u32_t ticks_slot_overhead;
u32_t mayfly_was_enabled;
u32_t ticks_slot_offset;
struct ll_scan_set *scan;
struct node_rx_cc *cc;
struct ll_conn *conn;
struct pdu_adv *pdu;
u32_t ticker_status;
u8_t chan_sel;
((struct lll_scan *)ftr->param)->conn = NULL;
scan = ((struct lll_scan *)ftr->param)->hdr.parent;
conn = lll->hdr.parent;
pdu = (void *)((struct node_rx_pdu *)rx)->pdu;
chan_sel = pdu->chan_sel;
cc = (void *)pdu;
cc->status = 0;
cc->role = 0;
cc->peer_addr_type = scan->lll.adv_addr_type;
memcpy(cc->peer_addr, scan->lll.adv_addr, BDADDR_SIZE);
cc->interval = lll->interval;
cc->latency = lll->latency;
cc->timeout = scan->lll.conn_timeout;
cc->sca = lll_conn_sca_local_get();
lll->handle = ll_conn_handle_get(conn);
rx->handle = lll->handle;
/* Use Channel Selection Algorithm #2 if the peer supports it too */
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
struct node_rx_pdu *rx_csa;
struct node_rx_cs *cs;
/* pick the rx node instance stored within the connection
* rx node.
*/
rx_csa = (void *)ftr->extra;
/* Enqueue the connection event */
ll_rx_put(link, rx);
/* use the rx node for CSA event */
rx = (void *)rx_csa;
link = rx->link;
rx->handle = lll->handle;
rx->type = NODE_RX_TYPE_CHAN_SEL_ALGO;
cs = (void *)rx_csa->pdu;
if (chan_sel) {
u16_t aa_ls = ((u16_t)lll->access_addr[1] << 8) |
lll->access_addr[0];
u16_t aa_ms = ((u16_t)lll->access_addr[3] << 8) |
lll->access_addr[2];
lll->data_chan_sel = 1;
lll->data_chan_id = aa_ms ^ aa_ls;
cs->csa = 0x01;
} else {
cs->csa = 0x00;
}
}
ll_rx_put(link, rx);
ll_rx_sched();
/* TODO: active_to_start feature port */
conn->evt.ticks_active_to_start = 0;
conn->evt.ticks_xtal_to_start =
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
conn->evt.ticks_preempt_to_start =
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
conn->evt.ticks_slot =
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US +
ftr->us_radio_rdy + 328 + TIFS_US +
328);
ticks_slot_offset = max(conn->evt.ticks_active_to_start,
conn->evt.ticks_xtal_to_start);
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
ticks_slot_overhead = ticks_slot_offset;
} else {
ticks_slot_overhead = 0;
}
conn_interval_us = lll->interval * 1250;
conn_offset_us = ftr->us_radio_end;
conn_offset_us += HAL_TICKER_TICKS_TO_US(1);
conn_offset_us -= EVENT_OVERHEAD_START_US;
conn_offset_us -= ftr->us_radio_rdy;
/* disable ticker job, in order to chain stop and start to avoid RTC
* being stopped if no tickers active.
*/
#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
mayfly_was_enabled = mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH,
TICKER_USER_ID_ULL_LOW);
mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif
/* Stop Scanner */
ticker_id_scan = TICKER_ID_SCAN_BASE + ull_scan_handle_get(scan);
ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_ULL_HIGH,
ticker_id_scan, ticker_op_stop_scan_cb,
(void *)(u32_t)ticker_id_scan);
ticker_op_stop_scan_cb(ticker_status, (void *)(u32_t)ticker_id_scan);
/* Scanner stop can expire while here in this ISR.
* Deferred attempt to stop can fail as it would have
* expired, hence ignore failure.
*/
ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
TICKER_ID_SCAN_STOP, NULL, NULL);
/* Start master */
ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_ULL_HIGH,
ticker_id_conn,
ftr->ticks_anchor - ticks_slot_offset,
HAL_TICKER_US_TO_TICKS(conn_offset_us),
HAL_TICKER_US_TO_TICKS(conn_interval_us),
HAL_TICKER_REMAINDER(conn_interval_us),
TICKER_NULL_LAZY,
(conn->evt.ticks_slot +
ticks_slot_overhead),
ull_master_ticker_cb, conn, ticker_op_cb,
(void *)__LINE__);
LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
(ticker_status == TICKER_STATUS_BUSY));
#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
/* enable ticker job, if disabled in this function */
if (mayfly_was_enabled) {
mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW,
1);
}
#else
ARG_UNUSED(mayfly_was_enabled);
#endif
#if 0
/* Populate the master context */
conn->handle = mem_index_get(conn, _radio.conn_pool,
CONNECTION_T_SIZE);
/* Prepare the rx packet structure */
node_rx->hdr.handle = conn->handle;
node_rx->hdr.type = NODE_RX_TYPE_CONNECTION;
/* prepare connection complete structure */
pdu_data = (void *)node_rx->pdu;
cc = (void *)pdu_data->lldata;
cc->status = 0x00;
cc->role = 0x00;
#if defined(CONFIG_BT_CTLR_PRIVACY)
cc->own_addr_type = pdu_adv_tx->tx_addr;
memcpy(&cc->own_addr[0], &pdu_adv_tx->connect_ind.init_addr[0],
BDADDR_SIZE);
if (irkmatch_ok && rl_idx != FILTER_IDX_NONE) {
/* TODO: store rl_idx instead if safe */
/* Store identity address */
ll_rl_id_addr_get(rl_idx, &cc->peer_addr_type,
&cc->peer_addr[0]);
/* Mark it as identity address from RPA (0x02, 0x03) */
cc->peer_addr_type += 2;
/* Store peer RPA */
memcpy(&cc->peer_rpa[0],
&pdu_adv_tx->connect_ind.adv_addr[0],
BDADDR_SIZE);
} else {
memset(&cc->peer_rpa[0], 0x0, BDADDR_SIZE);
#else
if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */
cc->peer_addr_type = pdu_adv_tx->rx_addr;
memcpy(&cc->peer_addr[0],
&pdu_adv_tx->connect_ind.adv_addr[0],
BDADDR_SIZE);
}
cc->interval = _radio.scanner.conn_interval;
cc->latency = _radio.scanner.conn_latency;
cc->timeout = _radio.scanner.conn_timeout;
cc->mca = pdu_adv_tx->connect_ind.sca;
/* enqueue connection complete structure into queue */
rx_fc_lock(conn->handle);
packet_rx_enqueue();
/* Use Channel Selection Algorithm #2 if the peer supports it too */
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
struct node_rx_cs *cs;
/* Generate LE Channel Selection Algorithm event */
node_rx = packet_rx_reserve_get(3);
LL_ASSERT(node_rx);
node_rx->hdr.handle = conn->handle;
node_rx->hdr.type = NODE_RX_TYPE_CHAN_SEL_ALGO;
pdu_data = (void *)node_rx->pdu;
cs = (void *)pdu_data->lldata;
if (pdu_adv_rx->chan_sel) {
u16_t aa_ls =
((u16_t)conn->access_addr[1] << 8) |
conn->access_addr[0];
u16_t aa_ms =
((u16_t)conn->access_addr[3] << 8) |
conn->access_addr[2];
conn->data_chan_sel = 1;
conn->data_chan_id = aa_ms ^ aa_ls;
cs->csa = 0x01;
} else {
cs->csa = 0x00;
}
packet_rx_enqueue();
}
/* Calculate master slot */
conn->hdr.ticks_active_to_start = _radio.ticks_active_to_start;
conn->hdr.ticks_xtal_to_start = HAL_TICKER_US_TO_TICKS(
EVENT_OVERHEAD_XTAL_US);
conn->hdr.ticks_preempt_to_start = HAL_TICKER_US_TO_TICKS(
EVENT_OVERHEAD_PREEMPT_MIN_US);
conn->hdr.ticks_slot = _radio.scanner.ticks_conn_slot;
ticks_slot_offset = max(conn->hdr.ticks_active_to_start,
conn->hdr.ticks_xtal_to_start);
/* Stop Scanner */
ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_LLL,
TICKER_ID_SCAN_BASE,
ticker_stop_scan_assert,
(void *)__LINE__);
ticker_stop_scan_assert(ticker_status, (void *)__LINE__);
/* Scanner stop can expire while here in this ISR.
* Deferred attempt to stop can fail as it would have
* expired, hence ignore failure.
*/
ticker_stop(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_LLL,
TICKER_ID_SCAN_STOP, NULL, NULL);
/* Start master */
ticker_status =
ticker_start(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_LLL,
TICKER_ID_CONN_BASE +
conn->handle,
(_radio.ticks_anchor - ticks_slot_offset),
HAL_TICKER_US_TO_TICKS(conn_space_us),
HAL_TICKER_US_TO_TICKS(conn_interval_us),
HAL_TICKER_REMAINDER(conn_interval_us),
TICKER_NULL_LAZY,
(ticks_slot_offset + conn->hdr.ticks_slot),
event_master_prepare, conn,
ticker_success_assert, (void *)__LINE__);
LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
(ticker_status == TICKER_STATUS_BUSY));
#endif
}
void ull_master_ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy,
void *param)
{
static memq_link_t _link;
static struct mayfly _mfy = {0, 0, &_link, NULL, lll_master_prepare};
static struct lll_prepare_param p;
struct ll_conn *conn = param;
u32_t err;
u8_t ref;
int ret;
DEBUG_RADIO_PREPARE_M(1);
/* Handle any LL Control Procedures */
ret = ull_conn_llcp(conn, ticks_at_expire, lazy);
if (ret) {
return;
}
/* Increment prepare reference count */
ref = ull_ref_inc(&conn->ull);
LL_ASSERT(ref);
/* De-mux 1 tx node from FIFO */
ull_conn_tx_demux(1);
/* Enqueue towards LLL */
ull_conn_tx_lll_enqueue(conn, 1);
/* Append timing parameters */
p.ticks_at_expire = ticks_at_expire;
p.remainder = remainder;
p.lazy = lazy;
p.param = &conn->lll;
_mfy.param = &p;
/* Kick LLL prepare */
err = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
0, &_mfy);
LL_ASSERT(!err);
/* De-mux remaining tx nodes from FIFO */
ull_conn_tx_demux(UINT8_MAX);
/* Enqueue towards LLL */
ull_conn_tx_lll_enqueue(conn, UINT8_MAX);
DEBUG_RADIO_PREPARE_M(1);
}
static void ticker_op_stop_scan_cb(u32_t status, void *params)
{
/* TODO: */
}
static void ticker_op_cb(u32_t status, void *params)
{
ARG_UNUSED(params);
LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}
/** @brief Prepare access address as per BT Spec.
*
* - It shall have no more than six consecutive zeros or ones.
* - It shall not be the advertising channel packets' Access Address.
* - It shall not be a sequence that differs from the advertising channel
* packets' Access Address by only one bit.
* - It shall not have all four octets equal.
* - It shall have no more than 24 transitions.
* - It shall have a minimum of two transitions in the most significant six
* bits.
*
* LE Coded PHY requirements:
* - It shall have at least three ones in the least significant 8 bits.
* - It shall have no more than eleven transitions in the least significant 16
* bits.
*/
static u32_t access_addr_get(void)
{
#if defined(CONFIG_BT_CTLR_PHY_CODED)
u8_t transitions_lsb16;
u8_t ones_count_lsb8;
#endif /* CONFIG_BT_CTLR_PHY_CODED */
u8_t consecutive_cnt;
u8_t consecutive_bit;
u32_t adv_aa_check;
u32_t access_addr;
u8_t transitions;
u8_t bit_idx;
u8_t retry;
retry = 3;
again:
LL_ASSERT(retry);
retry--;
bt_rand(&access_addr, sizeof(u32_t));
bit_idx = 31;
transitions = 0;
consecutive_cnt = 1;
#if defined(CONFIG_BT_CTLR_PHY_CODED)
ones_count_lsb8 = 0;
transitions_lsb16 = 0;
#endif /* CONFIG_BT_CTLR_PHY_CODED */
consecutive_bit = (access_addr >> bit_idx) & 0x01;
while (bit_idx--) {
#if defined(CONFIG_BT_CTLR_PHY_CODED)
u8_t transitions_lsb16_prev = transitions_lsb16;
#endif /* CONFIG_BT_CTLR_PHY_CODED */
u8_t consecutive_cnt_prev = consecutive_cnt;
u8_t transitions_prev = transitions;
u8_t bit;
bit = (access_addr >> bit_idx) & 0x01;
if (bit == consecutive_bit) {
consecutive_cnt++;
} else {
consecutive_cnt = 1;
consecutive_bit = bit;
transitions++;
#if defined(CONFIG_BT_CTLR_PHY_CODED)
if (bit_idx < 15) {
transitions_lsb16++;
}
#endif /* CONFIG_BT_CTLR_PHY_CODED */
}
#if defined(CONFIG_BT_CTLR_PHY_CODED)
if ((bit_idx < 8) && consecutive_bit) {
ones_count_lsb8++;
}
#endif /* CONFIG_BT_CTLR_PHY_CODED */
/* It shall have no more than six consecutive zeros or ones. */
/* It shall have a minimum of two transitions in the most
* significant six bits.
*/
if ((consecutive_cnt > 6) ||
#if defined(CONFIG_BT_CTLR_PHY_CODED)
(!consecutive_bit && (((bit_idx < 6) &&
(ones_count_lsb8 < 1)) ||
((bit_idx < 5) &&
(ones_count_lsb8 < 2)) ||
((bit_idx < 4) &&
(ones_count_lsb8 < 3)))) ||
#endif /* CONFIG_BT_CTLR_PHY_CODED */
((consecutive_cnt < 6) &&
(((bit_idx < 29) && (transitions < 1)) ||
((bit_idx < 28) && (transitions < 2))))) {
if (consecutive_bit) {
consecutive_bit = 0;
access_addr &= ~BIT(bit_idx);
#if defined(CONFIG_BT_CTLR_PHY_CODED)
if (bit_idx < 8) {
ones_count_lsb8--;
}
#endif /* CONFIG_BT_CTLR_PHY_CODED */
} else {
consecutive_bit = 1;
access_addr |= BIT(bit_idx);
#if defined(CONFIG_BT_CTLR_PHY_CODED)
if (bit_idx < 8) {
ones_count_lsb8++;
}
#endif /* CONFIG_BT_CTLR_PHY_CODED */
}
if (transitions != transitions_prev) {
consecutive_cnt = consecutive_cnt_prev;
transitions = transitions_prev;
} else {
consecutive_cnt = 1;
transitions++;
}
#if defined(CONFIG_BT_CTLR_PHY_CODED)
if (bit_idx < 15) {
if (transitions_lsb16 !=
transitions_lsb16_prev) {
transitions_lsb16 =
transitions_lsb16_prev;
} else {
transitions_lsb16++;
}
}
#endif /* CONFIG_BT_CTLR_PHY_CODED */
}
/* It shall have no more than 24 transitions
* It shall have no more than eleven transitions in the least
* significant 16 bits.
*/
if ((transitions > 24) ||
#if defined(CONFIG_BT_CTLR_PHY_CODED)
(transitions_lsb16 > 11) ||
#endif /* CONFIG_BT_CTLR_PHY_CODED */
0) {
if (consecutive_bit) {
access_addr &= ~(BIT(bit_idx + 1) - 1);
} else {
access_addr |= (BIT(bit_idx + 1) - 1);
}
break;
}
}
/* It shall not be the advertising channel packets' Access Address.
 * It shall not be a sequence that differs from the advertising channel
 * packets' Access Address by only one bit.
*/
adv_aa_check = access_addr ^ 0x8e89bed6;
if (util_ones_count_get((u8_t *)&adv_aa_check,
sizeof(adv_aa_check)) <= 1) {
goto again;
}
/* It shall not have all four octets equal. */
if (!((access_addr & 0xFFFF) ^ (access_addr >> 16)) &&
!((access_addr & 0xFF) ^ (access_addr >> 24))) {
goto again;
}
return access_addr;
}


@ -0,0 +1,10 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
void ull_master_setup(memq_link_t *link, struct node_rx_hdr *rx,
struct node_rx_ftr *ftr, struct lll_conn *lll);
void ull_master_ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy,
void *param);


@ -0,0 +1,437 @@
/*
* Copyright (c) 2016-2019 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <bluetooth/hci.h>
#include "hal/ccm.h"
#include "hal/ticker.h"
#include "util/util.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "ticker/ticker.h"
#include "pdu.h"
#include "ll.h"
#include "lll.h"
#include "lll_vendor.h"
#include "lll_adv.h"
#include "lll_scan.h"
#include "lll_conn.h"
#include "lll_filter.h"
#include "ull_adv_types.h"
#include "ull_scan_types.h"
#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_scan_internal.h"
#include "ull_sched_internal.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_ull_scan
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
static int _init_reset(void);
static void ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy,
void *param);
static u8_t disable(u16_t handle);
#define CONFIG_BT_SCAN_MAX 1
static struct ll_scan_set ll_scan[CONFIG_BT_SCAN_MAX];
u8_t ll_scan_params_set(u8_t type, u16_t interval, u16_t window,
u8_t own_addr_type, u8_t filter_policy)
{
struct ll_scan_set *scan;
scan = ull_scan_is_disabled_get(0);
if (!scan) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
return ull_scan_params_set(scan, type, interval, window, own_addr_type,
filter_policy);
}
u8_t ll_scan_enable(u8_t enable)
{
struct ll_scan_set *scan;
if (!enable) {
return disable(0);
}
scan = ull_scan_is_disabled_get(0);
if (!scan) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
return ull_scan_enable(scan);
}
int ull_scan_init(void)
{
int err;
err = _init_reset();
if (err) {
return err;
}
return 0;
}
int ull_scan_reset(void)
{
u16_t handle;
int err;
for (handle = 0; handle < CONFIG_BT_SCAN_MAX; handle++) {
(void)disable(handle);
}
err = _init_reset();
if (err) {
return err;
}
return 0;
}
u8_t ull_scan_params_set(struct ll_scan_set *scan, u8_t type,
u16_t interval, u16_t window,
u8_t own_addr_type, u8_t filter_policy)
{
struct lll_scan *lll = &scan->lll;
/* type value:
* 0000b - legacy 1M passive
* 0001b - legacy 1M active
* 0010b - Ext. 1M passive
* 0011b - Ext. 1M active
* 0100b - invalid
* 0101b - invalid
* 0110b - invalid
* 0111b - invalid
* 1000b - Ext. Coded passive
* 1001b - Ext. Coded active
*/
lll->type = type;
#if defined(CONFIG_BT_CTLR_ADV_EXT)
lll->phy = type >> 1;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
lll->filter_policy = filter_policy;
lll->interval = interval;
lll->ticks_window = HAL_TICKER_US_TO_TICKS((u64_t)window * 625);
scan->own_addr_type = own_addr_type;
return 0;
}
u8_t ull_scan_enable(struct ll_scan_set *scan)
{
volatile u32_t ret_cb = TICKER_STATUS_BUSY;
struct lll_scan *lll = &scan->lll;
u32_t ticks_slot_overhead;
u32_t ticks_slot_offset;
u32_t ticks_interval;
u32_t ticks_anchor;
u32_t ret;
#if defined(CONFIG_BT_CTLR_PRIVACY)
ll_filters_scan_update(scan->filter_policy);
if ((scan->type & 0x1) &&
(scan->own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
scan->own_addr_type == BT_ADDR_LE_RANDOM_ID)) {
/* Generate RPAs if required */
ll_rl_rpa_update(false);
lll->rpa_gen = 1;
lll->rl_idx = FILTER_IDX_NONE;
}
#endif
lll->init_addr_type = scan->own_addr_type;
ll_addr_get(lll->init_addr_type, lll->init_addr);
ull_hdr_init(&scan->ull);
lll_hdr_init(lll, scan);
ticks_interval = HAL_TICKER_US_TO_TICKS((u64_t)lll->interval * 625);
/* TODO: active_to_start feature port */
scan->evt.ticks_active_to_start = 0;
scan->evt.ticks_xtal_to_start =
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
scan->evt.ticks_preempt_to_start =
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
if ((lll->ticks_window +
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) <
(ticks_interval -
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US))) {
scan->evt.ticks_slot =
(lll->ticks_window +
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US));
} else {
scan->evt.ticks_slot =
(ticks_interval -
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US));
lll->ticks_window = 0;
}
ticks_slot_offset = max(scan->evt.ticks_active_to_start,
scan->evt.ticks_xtal_to_start);
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
ticks_slot_overhead = ticks_slot_offset;
} else {
ticks_slot_overhead = 0;
}
ticks_anchor = ticker_ticks_now_get();
#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
if (!lll->conn) {
u32_t ticks_ref = 0;
u32_t offset_us = 0;
ull_sched_after_mstr_slot_get(TICKER_USER_ID_THREAD,
(ticks_slot_offset +
scan->evt.ticks_slot),
&ticks_ref, &offset_us);
/* Use the ticks_ref as scanner's anchor if a free time space
* after any master role is available (indicated by a non-zero
* offset_us value).
*/
if (offset_us) {
ticks_anchor = ticks_ref +
HAL_TICKER_US_TO_TICKS(offset_us);
}
}
#endif /* CONFIG_BT_CENTRAL && CONFIG_BT_CTLR_SCHED_ADVANCED */
ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_THREAD, TICKER_ID_SCAN_BASE,
ticks_anchor, 0, ticks_interval,
HAL_TICKER_REMAINDER((u64_t)lll->interval * 625),
TICKER_NULL_LAZY,
(scan->evt.ticks_slot + ticks_slot_overhead),
ticker_cb, scan,
ull_ticker_status_give, (void *)&ret_cb);
ret = ull_ticker_status_take(ret, &ret_cb);
if (ret != TICKER_STATUS_SUCCESS) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
scan->is_enabled = 1;
#if defined(CONFIG_BT_CTLR_PRIVACY)
#if defined(CONFIG_BT_BROADCASTER)
if (!ull_adv_is_enabled_get(0))
#endif
{
ll_adv_scan_state_cb(BIT(1));
}
#endif
return 0;
}
u8_t ull_scan_disable(u16_t handle, struct ll_scan_set *scan)
{
volatile u32_t ret_cb = TICKER_STATUS_BUSY;
void *mark;
u32_t ret;
mark = ull_disable_mark(scan);
LL_ASSERT(mark == scan);
ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
TICKER_ID_SCAN_BASE + handle,
ull_ticker_status_give, (void *)&ret_cb);
ret = ull_ticker_status_take(ret, &ret_cb);
if (ret) {
mark = ull_disable_unmark(scan);
LL_ASSERT(mark == scan);
return BT_HCI_ERR_CMD_DISALLOWED;
}
ret = ull_disable(&scan->lll);
LL_ASSERT(!ret);
mark = ull_disable_unmark(scan);
LL_ASSERT(mark == scan);
return 0;
}
struct ll_scan_set *ull_scan_set_get(u16_t handle)
{
if (handle >= CONFIG_BT_SCAN_MAX) {
return NULL;
}
return &ll_scan[handle];
}
u16_t ull_scan_handle_get(struct ll_scan_set *scan)
{
return ((u8_t *)scan - (u8_t *)ll_scan) / sizeof(*scan);
}
struct ll_scan_set *ull_scan_is_enabled_get(u16_t handle)
{
struct ll_scan_set *scan;
scan = ull_scan_set_get(handle);
if (!scan || !scan->is_enabled) {
return NULL;
}
return scan;
}
struct ll_scan_set *ull_scan_is_disabled_get(u16_t handle)
{
struct ll_scan_set *scan;
scan = ull_scan_set_get(handle);
if (!scan || scan->is_enabled) {
return NULL;
}
return scan;
}
u32_t ull_scan_is_enabled(u16_t handle)
{
struct ll_scan_set *scan;
scan = ull_scan_is_enabled_get(handle);
if (!scan) {
return 0;
}
/* NOTE: BIT(0) - passive scanning enabled
* BIT(1) - active scanning enabled
* BIT(2) - initiator enabled
*/
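/* Example (illustrative): legacy active scanning (type 1) while
 * initiating yields BIT(1) | BIT(2).
 */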
return (((u32_t)scan->is_enabled << scan->lll.type) |
#if defined(CONFIG_BT_CENTRAL)
(scan->lll.conn ? BIT(2) : 0) |
#endif
0);
}
u32_t ull_scan_filter_pol_get(u16_t handle)
{
struct ll_scan_set *scan;
scan = ull_scan_is_enabled_get(handle);
if (!scan) {
return 0;
}
return scan->lll.filter_policy;
}
static int _init_reset(void)
{
return 0;
}
static void ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy,
void *param)
{
static memq_link_t _link;
static struct mayfly _mfy = {0, 0, &_link, NULL, lll_scan_prepare};
static struct lll_prepare_param p;
struct ll_scan_set *scan = param;
u32_t ret;
u8_t ref;
DEBUG_RADIO_PREPARE_O(1);
/* Increment prepare reference count */
ref = ull_ref_inc(&scan->ull);
LL_ASSERT(ref);
/* Append timing parameters */
p.ticks_at_expire = ticks_at_expire;
p.remainder = remainder;
p.lazy = lazy;
p.param = &scan->lll;
_mfy.param = &p;
/* Kick LLL prepare */
ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
0, &_mfy);
LL_ASSERT(!ret);
#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
/* Calculate, in us, the offset from the anchor after the master role
 * slots where the first connection event is to be placed.
 */
if (scan->lll.conn) {
static memq_link_t s_link;
static struct mayfly s_mfy_sched_after_mstr_offset_get = {
0, 0, &s_link, NULL,
ull_sched_mfy_after_mstr_offset_get};
u32_t retval;
s_mfy_sched_after_mstr_offset_get.param = (void *)scan;
retval = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
TICKER_USER_ID_ULL_LOW, 1,
&s_mfy_sched_after_mstr_offset_get);
LL_ASSERT(!retval);
}
#endif /* CONFIG_BT_CENTRAL && CONFIG_BT_CTLR_SCHED_ADVANCED */
DEBUG_RADIO_PREPARE_O(1);
}
static u8_t disable(u16_t handle)
{
struct ll_scan_set *scan;
u8_t ret;
scan = ull_scan_is_enabled_get(handle);
if (!scan || scan->lll.conn) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
ret = ull_scan_disable(handle, scan);
if (ret) {
return ret;
}
scan->is_enabled = 0;
#if defined(CONFIG_BT_CTLR_PRIVACY)
#if defined(CONFIG_BT_BROADCASTER)
if (!ull_adv_is_enabled_get(0))
#endif
{
ll_adv_scan_state_cb(0);
}
#endif
return 0;
}


@ -0,0 +1,39 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
/* NOTE: Definitions used internal to ULL implementations */
int ull_scan_init(void);
int ull_scan_reset(void);
/* Set scan parameters */
u8_t ull_scan_params_set(struct ll_scan_set *scan, u8_t type,
u16_t interval, u16_t window,
u8_t own_addr_type, u8_t filter_policy);
/* Enable and start scanning/initiating role */
u8_t ull_scan_enable(struct ll_scan_set *scan);
/* Disable scanning/initiating role */
u8_t ull_scan_disable(u16_t handle, struct ll_scan_set *scan);
/* Return ll_scan_set context (unconditional) */
struct ll_scan_set *ull_scan_set_get(u16_t handle);
/* Return the scan set handle given the scan set instance */
u16_t ull_scan_handle_get(struct ll_scan_set *scan);
/* Return ll_scan_set context if enabled */
struct ll_scan_set *ull_scan_is_enabled_get(u16_t handle);
/* Return ll_scan_set context if disabled */
struct ll_scan_set *ull_scan_is_disabled_get(u16_t handle);
/* Return flags if enabled */
u32_t ull_scan_is_enabled(u16_t handle);
/* Return filter policy used */
u32_t ull_scan_filter_pol_get(u16_t handle);


@ -0,0 +1,14 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
struct ll_scan_set {
struct evt_hdr evt;
struct ull_hdr ull;
struct lll_scan lll;
u8_t is_enabled:1;
u8_t own_addr_type:2;
};


@ -0,0 +1,46 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/types.h>
#include <toolchain.h>
#include "util/memq.h"
#include "pdu.h"
#include "lll.h"
#include "lll_scan.h"
#include "ull_scan_types.h"
void ull_sched_after_mstr_slot_get(u8_t user_id, u32_t ticks_slot_abs,
u32_t *ticks_anchor, u32_t *us_offset)
{
/* TODO: */
}
void ull_sched_mfy_after_mstr_offset_get(void *param)
{
struct ll_scan_set *scan = param;
/* TODO: */
scan->lll.conn_win_offset_us = 0;
}
void ull_sched_mfy_free_win_offset_calc(void *param)
{
/* TODO: */
}
void ull_sched_mfy_win_offset_use(void *param)
{
/* TODO: */
}
void ull_sched_mfy_win_offset_select(void *param)
{
/* TODO: */
}


@ -0,0 +1,12 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
void ull_sched_after_mstr_slot_get(u8_t user_id, u32_t ticks_slot_abs,
u32_t *ticks_anchor, u32_t *us_offset);
void ull_sched_mfy_after_mstr_offset_get(void *param);
void ull_sched_mfy_free_win_offset_calc(void *param);
void ull_sched_mfy_win_offset_use(void *param);
void ull_sched_mfy_win_offset_select(void *param);


@ -0,0 +1,488 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stddef.h>
#include <stdbool.h>
#include <toolchain.h>
#include <zephyr/types.h>
#include <misc/util.h>
#include "hal/ticker.h"
#include "hal/ccm.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "ticker/ticker.h"
#include "util/util.h"
#include "pdu.h"
#include "lll.h"
#include "lll_vendor.h"
#include "lll_adv.h"
#include "lll_conn.h"
#include "lll_slave.h"
#include "lll_tim_internal.h"
#include "ull_adv_types.h"
#include "ull_conn_types.h"
#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_conn_internal.h"
#include "ull_slave_internal.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_ull_slave
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
static void ticker_op_stop_adv_cb(u32_t status, void *param);
static void ticker_op_cb(u32_t status, void *param);
void ull_slave_setup(memq_link_t *link, struct node_rx_hdr *rx,
struct node_rx_ftr *ftr, struct lll_conn *lll)
{
u32_t conn_offset_us, conn_interval_us;
u8_t ticker_id_adv, ticker_id_conn;
u8_t peer_addr[BDADDR_SIZE];
u32_t ticks_slot_overhead;
u32_t mayfly_was_enabled;
u32_t ticks_slot_offset;
struct pdu_adv *pdu_adv;
struct ll_adv_set *adv;
struct node_rx_cc *cc;
struct ll_conn *conn;
u32_t ticker_status;
u8_t peer_addr_type;
u16_t win_offset;
u16_t timeout;
u8_t chan_sel;
((struct lll_adv *)ftr->param)->conn = NULL;
adv = ((struct lll_adv *)ftr->param)->hdr.parent;
conn = lll->hdr.parent;
/* Populate the slave context */
pdu_adv = (void *)((struct node_rx_pdu *)rx)->pdu;
memcpy(&lll->crc_init[0], &pdu_adv->connect_ind.crc_init[0], 3);
memcpy(&lll->access_addr[0], &pdu_adv->connect_ind.access_addr[0], 4);
memcpy(&lll->data_chan_map[0], &pdu_adv->connect_ind.chan_map[0],
sizeof(lll->data_chan_map));
lll->data_chan_count = util_ones_count_get(&lll->data_chan_map[0],
sizeof(lll->data_chan_map));
lll->data_chan_hop = pdu_adv->connect_ind.hop;
lll->interval = pdu_adv->connect_ind.interval;
lll->latency = pdu_adv->connect_ind.latency;
win_offset = pdu_adv->connect_ind.win_offset;
conn_interval_us = pdu_adv->connect_ind.interval * 1250;
/* calculate the window widening */
lll->slave.sca = pdu_adv->connect_ind.sca;
lll->slave.window_widening_periodic_us =
(((lll_conn_ppm_local_get() +
lll_conn_ppm_get(lll->slave.sca)) *
conn_interval_us) + (1000000 - 1)) / 1000000;
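/* e.g. a combined 550 ppm SCA at a 50 ms interval widens the Rx window
 * by ceil(550 * 50000 / 1000000) = 28 us per connection event
 * (illustrative values).
 */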
lll->slave.window_widening_max_us = (conn_interval_us >> 1) - TIFS_US;
lll->slave.window_size_event_us = pdu_adv->connect_ind.win_size * 1250;
/* procedure timeouts */
conn->supervision_reload =
RADIO_CONN_EVENTS((pdu_adv->connect_ind.timeout * 10 * 1000),
conn_interval_us);
conn->procedure_reload =
RADIO_CONN_EVENTS((40 * 1000 * 1000), conn_interval_us);
#if defined(CONFIG_BT_CTLR_LE_PING)
/* APTO in no. of connection events */
conn->apto_reload = RADIO_CONN_EVENTS((30 * 1000 * 1000),
conn_interval_us);
/* Dispatch the LE Ping PDU 6 connection events (that the peer would
 * listen to) before the 30 s timeout.
 * TODO: the "peer listens to" window is greater than 30 s due to slave
 * latency.
 */
conn->appto_reload = (conn->apto_reload > (lll->latency + 6)) ?
(conn->apto_reload - (lll->latency + 6)) :
conn->apto_reload;
#endif /* CONFIG_BT_CTLR_LE_PING */
/* FIXME: */
#if 0
memcpy((void *)&lll->slave.force, &lll->access_addr[0],
sizeof(lll->slave.force));
#endif
chan_sel = pdu_adv->chan_sel;
peer_addr_type = pdu_adv->tx_addr;
memcpy(peer_addr, pdu_adv->connect_ind.init_addr, BDADDR_SIZE);
timeout = pdu_adv->connect_ind.timeout;
cc = (void *)pdu_adv;
cc->status = 0;
cc->role = 1;
cc->peer_addr_type = peer_addr_type;
memcpy(cc->peer_addr, peer_addr, BDADDR_SIZE);
cc->interval = lll->interval;
cc->latency = lll->latency;
cc->timeout = timeout;
cc->sca = lll->slave.sca;
lll->handle = ll_conn_handle_get(conn);
rx->handle = lll->handle;
/* Use Channel Selection Algorithm #2 if the peer supports it too */
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
struct node_rx_pdu *rx_csa;
struct node_rx_cs *cs;
/* pick the rx node instance stored within the connection
* rx node.
*/
rx_csa = (void *)ftr->extra;
/* Enqueue the connection event */
ll_rx_put(link, rx);
/* use the rx node for CSA event */
rx = (void *)rx_csa;
link = rx->link;
rx->handle = lll->handle;
rx->type = NODE_RX_TYPE_CHAN_SEL_ALGO;
cs = (void *)rx_csa->pdu;
if (chan_sel) {
u16_t aa_ls = ((u16_t)lll->access_addr[1] << 8) |
lll->access_addr[0];
u16_t aa_ms = ((u16_t)lll->access_addr[3] << 8) |
lll->access_addr[2];
lll->data_chan_sel = 1;
lll->data_chan_id = aa_ms ^ aa_ls;
cs->csa = 0x01;
} else {
cs->csa = 0x00;
}
}
ll_rx_put(link, rx);
ll_rx_sched();
#if 0
/* Prepare the rx packet structure */
node_rx->hdr.handle = conn->handle;
node_rx->hdr.type = NODE_RX_TYPE_CONNECTION;
/* prepare connection complete structure */
pdu_data = (void *)node_rx->pdu_data;
radio_le_conn_cmplt = (void *)pdu_data->lldata;
radio_le_conn_cmplt->status = 0x00;
radio_le_conn_cmplt->role = 0x01;
#if defined(CONFIG_BT_CTLR_PRIVACY)
radio_le_conn_cmplt->own_addr_type = pdu_adv->rx_addr;
memcpy(&radio_le_conn_cmplt->own_addr[0],
&pdu_adv->connect_ind.adv_addr[0], BDADDR_SIZE);
if (rl_idx != FILTER_IDX_NONE) {
/* TODO: store rl_idx instead if safe */
/* Store identity address */
ll_rl_id_addr_get(rl_idx,
&radio_le_conn_cmplt->peer_addr_type,
&radio_le_conn_cmplt->peer_addr[0]);
/* Mark it as identity address from RPA (0x02, 0x03) */
radio_le_conn_cmplt->peer_addr_type += 2;
/* Store peer RPA */
memcpy(&radio_le_conn_cmplt->peer_rpa[0],
&pdu_adv->connect_ind.init_addr[0],
BDADDR_SIZE);
} else {
memset(&radio_le_conn_cmplt->peer_rpa[0], 0x0,
BDADDR_SIZE);
#else
if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */
radio_le_conn_cmplt->peer_addr_type = pdu_adv->tx_addr;
memcpy(&radio_le_conn_cmplt->peer_addr[0],
&pdu_adv->connect_ind.init_addr[0],
BDADDR_SIZE);
}
radio_le_conn_cmplt->interval =
pdu_adv->connect_ind.interval;
radio_le_conn_cmplt->latency =
pdu_adv->connect_ind.latency;
radio_le_conn_cmplt->timeout =
pdu_adv->connect_ind.timeout;
radio_le_conn_cmplt->mca =
pdu_adv->connect_ind.sca;
/* enqueue connection complete structure into queue */
rx_fc_lock(conn->handle);
packet_rx_enqueue();
	/* Use Channel Selection Algorithm #2 if the peer also supports it */
if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
struct radio_le_chan_sel_algo *le_chan_sel_algo;
/* Generate LE Channel Selection Algorithm event */
node_rx = packet_rx_reserve_get(3);
LL_ASSERT(node_rx);
node_rx->hdr.handle = conn->handle;
node_rx->hdr.type = NODE_RX_TYPE_CHAN_SEL_ALGO;
pdu_data = (void *)node_rx->pdu_data;
le_chan_sel_algo = (void *)pdu_data->lldata;
if (pdu_adv->chan_sel) {
u16_t aa_ls =
((u16_t)conn->access_addr[1] << 8) |
conn->access_addr[0];
u16_t aa_ms =
((u16_t)conn->access_addr[3] << 8) |
conn->access_addr[2];
conn->data_chan_sel = 1;
conn->data_chan_id = aa_ms ^ aa_ls;
le_chan_sel_algo->chan_sel_algo = 0x01;
} else {
le_chan_sel_algo->chan_sel_algo = 0x00;
}
packet_rx_enqueue();
}
#endif
/* TODO: active_to_start feature port */
conn->evt.ticks_active_to_start = 0;
conn->evt.ticks_xtal_to_start =
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
conn->evt.ticks_preempt_to_start =
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
conn->evt.ticks_slot =
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US +
ftr->us_radio_rdy + 328 + TIFS_US +
328);
ticks_slot_offset = max(conn->evt.ticks_active_to_start,
conn->evt.ticks_xtal_to_start);
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
ticks_slot_overhead = ticks_slot_offset;
} else {
ticks_slot_overhead = 0;
}
conn_interval_us -= lll->slave.window_widening_periodic_us;
conn_offset_us = ftr->us_radio_end;
conn_offset_us += ((u64_t)win_offset + 1) * 1250;
conn_offset_us -= EVENT_OVERHEAD_START_US;
conn_offset_us -= EVENT_JITTER_US << 1;
conn_offset_us -= EVENT_JITTER_US;
conn_offset_us -= ftr->us_radio_rdy;
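	/* Sketch of the arithmetic (numbers assumed for illustration):
	 * with win_offset = 2 the first anchor point lies
	 * (2 + 1) * 1250 = 3750 us after the end of the CONNECT_IND; the
	 * start overhead, three jitter margins and the radio ready delay
	 * are then subtracted so the receive window opens early enough
	 * to catch the first master transmission.
	 */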
	/* Disable the ticker job so that the stop and start below are
	 * chained, avoiding the RTC being stopped if no tickers are
	 * active in between.
	 */
#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
mayfly_was_enabled = mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH,
TICKER_USER_ID_ULL_LOW);
mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif
/* Stop Advertiser */
ticker_id_adv = TICKER_ID_ADV_BASE + ull_adv_handle_get(adv);
ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_ULL_HIGH,
ticker_id_adv, ticker_op_stop_adv_cb, adv);
ticker_op_stop_adv_cb(ticker_status, adv);
	/* Stop the directed advertising stop ticker */
if (adv->lll.is_hdcd) {
		/* The advertiser stop ticker can expire while we are in
		 * this ISR; a deferred stop would then fail because the
		 * ticker has already expired, hence ignore the failure.
		 */
ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
TICKER_ID_ADV_STOP, NULL, NULL);
}
/* Start Slave */
ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
TICKER_USER_ID_ULL_HIGH,
ticker_id_conn,
ftr->ticks_anchor - ticks_slot_offset,
HAL_TICKER_US_TO_TICKS(conn_offset_us),
HAL_TICKER_US_TO_TICKS(conn_interval_us),
HAL_TICKER_REMAINDER(conn_interval_us),
TICKER_NULL_LAZY,
(conn->evt.ticks_slot +
ticks_slot_overhead),
ull_slave_ticker_cb, conn, ticker_op_cb,
(void *)__LINE__);
LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
(ticker_status == TICKER_STATUS_BUSY));
#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
/* enable ticker job, if disabled in this function */
if (mayfly_was_enabled) {
mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW,
1);
}
#else
ARG_UNUSED(mayfly_was_enabled);
#endif
}
void ull_slave_done(struct node_rx_event_done *done, u32_t *ticks_drift_plus,
u32_t *ticks_drift_minus)
{
u32_t start_to_address_expected_us;
u32_t start_to_address_actual_us;
u32_t window_widening_event_us;
u32_t preamble_to_addr_us;
start_to_address_actual_us =
done->extra.slave.start_to_address_actual_us;
window_widening_event_us =
done->extra.slave.window_widening_event_us;
preamble_to_addr_us =
done->extra.slave.preamble_to_addr_us;
start_to_address_expected_us = EVENT_JITTER_US +
(EVENT_JITTER_US << 1) +
window_widening_event_us +
preamble_to_addr_us;
if (start_to_address_actual_us <= start_to_address_expected_us) {
*ticks_drift_plus =
HAL_TICKER_US_TO_TICKS(window_widening_event_us);
*ticks_drift_minus =
HAL_TICKER_US_TO_TICKS((start_to_address_expected_us -
start_to_address_actual_us));
} else {
*ticks_drift_plus =
HAL_TICKER_US_TO_TICKS(start_to_address_actual_us);
*ticks_drift_minus =
HAL_TICKER_US_TO_TICKS(EVENT_JITTER_US +
(EVENT_JITTER_US << 1) +
preamble_to_addr_us);
}
}
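/* Illustrative numbers for ull_slave_done() (assumed, not from this
 * commit): with EVENT_JITTER_US = 16, 25 us of event window widening
 * and a 40 us preamble-to-address time on the 1M PHY, the expected
 * start-to-address is 16 + 32 + 25 + 40 = 113 us; an actual value of
 * 100 us then yields 25 us of ticks_drift_plus and 13 us of
 * ticks_drift_minus.
 */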
void ull_slave_ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy,
void *param)
{
static memq_link_t _link;
static struct mayfly _mfy = {0, 0, &_link, NULL, lll_slave_prepare};
static struct lll_prepare_param p;
struct ll_conn *conn = param;
u32_t err;
u8_t ref;
int ret;
DEBUG_RADIO_PREPARE_S(1);
/* Handle any LL Control Procedures */
ret = ull_conn_llcp(conn, ticks_at_expire, lazy);
if (ret) {
return;
}
/* Increment prepare reference count */
ref = ull_ref_inc(&conn->ull);
LL_ASSERT(ref);
/* Append timing parameters */
p.ticks_at_expire = ticks_at_expire;
p.remainder = remainder;
p.lazy = lazy;
p.param = &conn->lll;
_mfy.param = &p;
/* Kick LLL prepare */
err = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
0, &_mfy);
LL_ASSERT(!err);
/* De-mux remaining tx nodes from FIFO */
ull_conn_tx_demux(UINT8_MAX);
/* Enqueue towards LLL */
ull_conn_tx_lll_enqueue(conn, UINT8_MAX);
DEBUG_RADIO_PREPARE_S(1);
}
#if defined(CONFIG_BT_CTLR_LE_ENC)
u8_t ll_start_enc_req_send(u16_t handle, u8_t error_code,
u8_t const *const ltk)
{
struct ll_conn *conn;
conn = ll_connected_get(handle);
if (!conn) {
return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
if (error_code) {
if (conn->refresh == 0) {
if (conn->llcp_req != conn->llcp_ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp.encryption.error_code = error_code;
conn->llcp.encryption.initiate = 0;
conn->llcp_type = LLCP_ENCRYPTION;
conn->llcp_req++;
} else {
if (conn->llcp_terminate.ack !=
conn->llcp_terminate.req) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp_terminate.reason_own = error_code;
conn->llcp_terminate.req++;
}
} else {
memcpy(&conn->llcp.encryption.ltk[0], ltk,
sizeof(conn->llcp.encryption.ltk));
if (conn->llcp_req != conn->llcp_ack) {
return BT_HCI_ERR_CMD_DISALLOWED;
}
conn->llcp.encryption.error_code = 0;
conn->llcp.encryption.initiate = 0;
conn->llcp_type = LLCP_ENCRYPTION;
conn->llcp_req++;
}
return 0;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */
static void ticker_op_stop_adv_cb(u32_t status, void *param)
{
LL_ASSERT(status != TICKER_STATUS_FAILURE ||
param == ull_disable_mark_get());
}
static void ticker_op_cb(u32_t status, void *param)
{
ARG_UNUSED(param);
LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

View file

@ -0,0 +1,12 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
void ull_slave_setup(memq_link_t *link, struct node_rx_hdr *rx,
struct node_rx_ftr *ftr, struct lll_conn *lll);
void ull_slave_done(struct node_rx_event_done *done, u32_t *ticks_drift_plus,
u32_t *ticks_drift_minus);
void ull_slave_ticker_cb(u32_t ticks_at_expire, u32_t remainder, u16_t lazy,
void *param);

View file

@ -0,0 +1,325 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include <stddef.h>
#include <errno.h>
#include <toolchain.h>
#include <zephyr/types.h>
#if defined(CONFIG_BT_CTLR_DEBUG_PINS)
#if defined(CONFIG_PRINTK)
#undef CONFIG_PRINTK
#endif
#endif
#include "hal/ccm.h"
#include "util/mem.h"
#include "util/mfifo.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "ticker/ticker.h"
#include "pdu.h"
#include "lll.h"
#include "lll_conn.h"
#include "lll_tmp.h"
#include "ull_internal.h"
#include "ull_tmp.h"
#include "ull_tmp_internal.h"
#define LOG_MODULE_NAME bt_ctlr_llsw_ull_tmp
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"
#define TMP_TICKER_TICKS_PERIOD 32768
#define TMP_TICKER_TICKS_SLOT 327
#define TMP_TX_POOL_SIZE ((CONFIG_BT_TMP_TX_SIZE_MAX) * \
(CONFIG_BT_TMP_TX_COUNT_MAX))
/* NOTE: structure accessed by Thread and ULL */
struct ull_tmp {
struct ull_hdr hdr;
u8_t is_enabled:1;
};
struct tmp {
struct ull_tmp ull;
struct lll_tmp lll;
};
static struct tmp tmp_inst[CONFIG_BT_TMP_MAX];
static MFIFO_DEFINE(tmp_tx, sizeof(struct lll_tx),
CONFIG_BT_TMP_TX_COUNT_MAX);
static struct {
void *free;
u8_t pool[TMP_TX_POOL_SIZE];
} mem_tmp_tx;
static struct {
void *free;
u8_t pool[sizeof(memq_link_t) * CONFIG_BT_TMP_TX_COUNT_MAX];
} mem_link_tx;
static int _init_reset(void);
static void _ticker_cb(u32_t ticks_at_expire, u32_t remainder,
u16_t lazy, void *param);
static void _tx_demux(void);
int ull_tmp_init(void)
{
int err;
err = _init_reset();
if (err) {
return err;
}
return 0;
}
int ull_tmp_reset(void)
{
u16_t handle;
int err;
handle = CONFIG_BT_TMP_MAX;
while (handle--) {
ull_tmp_disable(handle);
}
/* Re-initialize the Tx mfifo */
MFIFO_INIT(tmp_tx);
err = _init_reset();
if (err) {
return err;
}
return 0;
}
u16_t ull_tmp_handle_get(struct lll_tmp *tmp)
{
return ((u8_t *)CONTAINER_OF(tmp, struct tmp, lll) -
(u8_t *)&tmp_inst[0]) / sizeof(struct tmp);
}
int ull_tmp_enable(u16_t handle)
{
u32_t tmp_ticker_anchor;
u8_t tmp_ticker_id;
struct tmp *inst;
int ret;
if (handle >= CONFIG_BT_TMP_MAX) {
return -EINVAL;
}
inst = &tmp_inst[handle];
if (inst->ull.is_enabled) {
return -EALREADY;
}
ull_hdr_init(&inst->ull.hdr);
lll_hdr_init(&inst->lll, inst);
if (!inst->lll.link_free) {
inst->lll.link_free = &inst->lll._link;
}
memq_init(inst->lll.link_free, &inst->lll.memq_tx.head,
&inst->lll.memq_tx.tail);
tmp_ticker_id = TICKER_ID_TMP_BASE + handle;
tmp_ticker_anchor = ticker_ticks_now_get();
ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
tmp_ticker_id,
tmp_ticker_anchor,
0,
TMP_TICKER_TICKS_PERIOD,
TICKER_NULL_REMAINDER,
TICKER_NULL_LAZY,
TMP_TICKER_TICKS_SLOT,
_ticker_cb, inst,
NULL, NULL);
if (ret) {
goto enable_cleanup;
}
inst->lll.link_free = NULL;
inst->ull.is_enabled = 1;
enable_cleanup:
return ret;
}
int ull_tmp_disable(u16_t handle)
{
u8_t tmp_ticker_id;
struct tmp *inst;
int ret;
if (handle >= CONFIG_BT_TMP_MAX) {
return -EINVAL;
}
inst = &tmp_inst[handle];
if (!inst->ull.is_enabled) {
return -EALREADY;
}
tmp_ticker_id = TICKER_ID_TMP_BASE + handle;
ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
tmp_ticker_id,
NULL, NULL);
if (ret) {
return ret;
}
ret = ull_disable(&inst->lll);
if (ret) {
return ret;
}
inst->ull.is_enabled = 0;
inst->lll.link_free = memq_deinit(&inst->lll.memq_tx.head,
&inst->lll.memq_tx.tail);
return ret;
}
int ull_tmp_data_send(u16_t handle, u8_t size, u8_t *data)
{
struct lll_tx *tx;
struct node_tx *node_tx;
struct tmp *inst;
u8_t idx;
if (handle >= CONFIG_BT_TMP_MAX) {
return -EINVAL;
}
inst = &tmp_inst[handle];
if (!inst->ull.is_enabled) {
return -EINVAL;
}
if (size > CONFIG_BT_TMP_TX_SIZE_MAX) {
return -EMSGSIZE;
}
idx = MFIFO_ENQUEUE_GET(tmp_tx, (void **) &tx);
if (!tx) {
return -ENOBUFS;
}
tx->node = mem_acquire(&mem_tmp_tx.free);
if (!tx->node) {
return -ENOMEM;
}
tx->handle = handle;
node_tx = tx->node;
memcpy(node_tx->pdu, data, size);
MFIFO_ENQUEUE(tmp_tx, idx);
return 0;
}
void ull_tmp_link_tx_release(memq_link_t *link)
{
mem_release(link, &mem_link_tx.free);
}
static int _init_reset(void)
{
/* Initialize tx pool. */
mem_init(mem_tmp_tx.pool, CONFIG_BT_TMP_TX_SIZE_MAX,
CONFIG_BT_TMP_TX_COUNT_MAX, &mem_tmp_tx.free);
/* Initialize tx link pool. */
mem_init(mem_link_tx.pool, sizeof(memq_link_t),
CONFIG_BT_TMP_TX_COUNT_MAX, &mem_link_tx.free);
return 0;
}
static void _ticker_cb(u32_t ticks_at_expire, u32_t remainder,
u16_t lazy, void *param)
{
static memq_link_t _link;
static struct mayfly _mfy = {0, 0, &_link, NULL, lll_tmp_prepare};
static struct lll_prepare_param p;
struct tmp *inst = param;
u32_t ret;
u8_t ref;
printk("\t_ticker_cb (%p) enter: %u, %u, %u.\n", param,
ticks_at_expire, remainder, lazy);
DEBUG_RADIO_PREPARE_A(1);
/* Increment prepare reference count */
ref = ull_ref_inc(&inst->ull.hdr);
LL_ASSERT(ref);
/* Append timing parameters */
p.ticks_at_expire = ticks_at_expire;
p.remainder = remainder;
p.lazy = lazy;
p.param = &inst->lll;
_mfy.param = &p;
ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
0, &_mfy);
LL_ASSERT(!ret);
/* De-mux tx FIFO */
_tx_demux();
DEBUG_RADIO_PREPARE_A(1);
printk("\t_ticker_cb (%p) exit.\n", param);
}
static void _tx_demux(void)
{
struct lll_tx *tx;
tx = MFIFO_DEQUEUE_GET(tmp_tx);
while (tx) {
memq_link_t *link;
struct tmp *inst;
inst = &tmp_inst[tx->handle];
printk("\t_ticker_cb (%p) tx_demux (%p): h = 0x%x, n=%p.\n",
inst, tx, tx->handle, tx->node);
link = mem_acquire(&mem_link_tx.free);
LL_ASSERT(link);
memq_enqueue(link, tx->node, &inst->lll.memq_tx.tail);
MFIFO_DEQUEUE(tmp_tx);
tx = MFIFO_DEQUEUE_GET(tmp_tx);
}
}

View file

@ -0,0 +1,10 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
int ull_tmp_enable(u16_t handle);
int ull_tmp_disable(u16_t handle);
int ull_tmp_data_send(u16_t handle, u8_t size, u8_t *data);
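
/* Hypothetical usage sketch, not part of this commit: enable tmp
 * instance 0 and queue one payload. Assumes CONFIG_BT_TMP_MAX >= 1 and
 * that ull_tmp_init() has already run during controller init.
 */
static inline int ull_tmp_example(void)
{
	u8_t data[] = { 0x01, 0x02, 0x03 };
	int err;

	/* Starts the periodic ticker that kicks lll_tmp_prepare */
	err = ull_tmp_enable(0);
	if (err) {
		return err;
	}

	/* Copied into a tx node and de-muxed towards LLL on the next
	 * ticker expiry
	 */
	return ull_tmp_data_send(0, sizeof(data), data);
}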

View file

@ -0,0 +1,9 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
int ull_tmp_init(void);
int ull_tmp_reset(void);
void ull_tmp_link_tx_release(memq_link_t *link);

View file

@ -0,0 +1,182 @@
/*
* Copyright (c) 2018-2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#define MFIFO_DEFINE(name, sz, cnt) \
struct { \
		u8_t const s; /* TODO: move const out of RAM to optimise use */ \
		u8_t const n; /* TODO: move const out of RAM to optimise use */ \
u8_t f; \
u8_t l; \
		u8_t m[(sz) * ((cnt) + 1)]; \
} mfifo_##name = { \
.n = ((cnt) + 1), \
.s = (sz), \
.f = 0, \
.l = 0, \
}
#define MFIFO_INIT(name) \
mfifo_##name.f = mfifo_##name.l = 0
static inline bool mfifo_enqueue_idx_get(u8_t count, u8_t first, u8_t last,
u8_t *idx)
{
last = last + 1;
if (last == count) {
last = 0;
}
if (last == first) {
return false;
}
*idx = last;
return true;
}
#define MFIFO_ENQUEUE_IDX_GET(name, i) \
mfifo_enqueue_idx_get(mfifo_##name.n, mfifo_##name.f, \
mfifo_##name.l, (i))
static inline void mfifo_by_idx_enqueue(u8_t *fifo, u8_t size, u8_t idx,
void *mem, u8_t *last)
{
void **p = (void **)(fifo + (*last) * size);
*p = mem;
*last = idx;
}
#define MFIFO_BY_IDX_ENQUEUE(name, i, mem) \
mfifo_by_idx_enqueue(mfifo_##name.m, mfifo_##name.s, (i), \
(mem), &mfifo_##name.l)
static inline u8_t mfifo_enqueue_get(u8_t *fifo, u8_t size, u8_t count,
u8_t first, u8_t last, void **mem)
{
u8_t idx;
if (!mfifo_enqueue_idx_get(count, first, last, &idx)) {
*mem = NULL;
return 0;
}
*mem = (void *)(fifo + last * size);
return idx;
}
#define MFIFO_ENQUEUE_GET(name, mem) \
mfifo_enqueue_get(mfifo_##name.m, mfifo_##name.s, \
mfifo_##name.n, mfifo_##name.f, \
mfifo_##name.l, (mem))
static inline void mfifo_enqueue(u8_t idx, u8_t *last)
{
*last = idx;
}
#define MFIFO_ENQUEUE(name, idx) \
mfifo_enqueue((idx), &mfifo_##name.l)
static inline u8_t mfifo_avail_count_get(u8_t count, u8_t first, u8_t last)
{
if (last >= first) {
return last - first;
} else {
return count - first + last;
}
}
#define MFIFO_AVAIL_COUNT_GET(name) \
mfifo_avail_count_get(mfifo_##name.n, mfifo_##name.f, \
mfifo_##name.l)
static inline void *mfifo_dequeue_get(u8_t *fifo, u8_t size, u8_t first,
u8_t last)
{
if (first == last) {
return NULL;
}
return (void *)(fifo + first * size);
}
#define MFIFO_DEQUEUE_GET(name) \
mfifo_dequeue_get(mfifo_##name.m, mfifo_##name.s, \
mfifo_##name.f, mfifo_##name.l)
static inline void *mfifo_dequeue_peek(u8_t *fifo, u8_t size, u8_t first,
u8_t last)
{
if (first == last) {
return NULL;
}
return *((void **)(fifo + first * size));
}
#define MFIFO_DEQUEUE_PEEK(name) \
mfifo_dequeue_peek(mfifo_##name.m, mfifo_##name.s, \
mfifo_##name.f, mfifo_##name.l)
static inline void *mfifo_dequeue_iter_get(u8_t *fifo, u8_t size, u8_t count,
u8_t first, u8_t last, u8_t *idx)
{
void *p;
u8_t i;
if (*idx >= count) {
*idx = first;
}
if (*idx == last) {
return NULL;
}
i = *idx + 1;
if (i == count) {
i = 0;
}
p = (void *)(fifo + (*idx) * size);
*idx = i;
return p;
}
#define MFIFO_DEQUEUE_ITER_GET(name, idx) \
mfifo_dequeue_iter_get(mfifo_##name.m, mfifo_##name.s, \
mfifo_##name.n, mfifo_##name.f, \
mfifo_##name.l, (idx))
static inline void *mfifo_dequeue(u8_t *fifo, u8_t size, u8_t count,
u8_t last, u8_t *first)
{
u8_t _first = *first;
void *mem;
if (_first == last) {
return NULL;
}
mem = *((void **)(fifo + _first * size));
_first += 1;
if (_first == count) {
_first = 0;
}
*first = _first;
return mem;
}
#define MFIFO_DEQUEUE(name) \
mfifo_dequeue(mfifo_##name.m, mfifo_##name.s, \
mfifo_##name.n, mfifo_##name.l, \
&mfifo_##name.f)
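
/* Illustrative usage sketch (assumed, not in this commit): a FIFO of
 * four void pointers driven in the reserve/commit and peek/release
 * pattern used by ull_tmp.c. The scheme is single-producer,
 * single-consumer: only the producer writes l (last) and only the
 * consumer writes f (first), which is what makes the index handover
 * safe without locks.
 */
static MFIFO_DEFINE(example, sizeof(void *), 4);

static inline void mfifo_example(void *item)
{
	void **slot;
	u8_t idx;

	/* Producer: reserve the next free slot, fill it, then publish */
	idx = MFIFO_ENQUEUE_GET(example, (void **)&slot);
	if (!slot) {
		return; /* FIFO full */
	}
	*slot = item;
	MFIFO_ENQUEUE(example, idx);

	/* Consumer: peek at the oldest slot, then release it */
	if (MFIFO_DEQUEUE_GET(example)) {
		(void)MFIFO_DEQUEUE(example);
	}
}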