Bluetooth: Controller: Add trpa cache for BT_CTLR_SW_DEFERRED_PRIVACY

Adds a target RPA known-unknown cache; it fulfills a similar function
to the existing peer RPA cache and is used when scanning for directed
advertisements.

Signed-off-by: Troels Nilsson <trnn@demant.com>
Troels Nilsson 2023-07-04 15:40:27 +02:00 committed by Carles Cufí
commit 8a2a2bd613
4 changed files with 149 additions and 6 deletions
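
For context, the new cache is a negative cache: once a target RPA has failed resolution against a resolving-list entry's local IRK, it is remembered as a "known unknown" so the costly AES-based resolution is not re-run for every later directed advertisement carrying the same address. A minimal standalone sketch of that look-up-before-resolve pattern (all names hypothetical; the controller's actual flow is in target_resolve() in the ull_filter.c hunks below):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define CACHE_SIZE 4

/* Hypothetical negative cache of 6-byte addresses that failed to resolve. */
static struct {
	bool taken;
	uint8_t addr[6];
} known_unknown[CACHE_SIZE];
static uint8_t newest;

static bool cache_hit(const uint8_t addr[6])
{
	for (uint8_t i = 0; i < CACHE_SIZE; i++) {
		if (known_unknown[i].taken &&
		    memcmp(known_unknown[i].addr, addr, 6) == 0) {
			return true;
		}
	}
	return false;
}

static void cache_add(const uint8_t addr[6])
{
	/* Ring buffer: the newest entry silently evicts the oldest. */
	newest = (newest + 1) % CACHE_SIZE;
	known_unknown[newest].taken = true;
	memcpy(known_unknown[newest].addr, addr, 6);
}

/* try_resolve() stands in for the AES-based bt_rpa_irk_matches(). */
extern bool try_resolve(const uint8_t addr[6]);

bool directed_adv_is_for_us(const uint8_t target_rpa[6])
{
	if (cache_hit(target_rpa)) {
		return false;	/* known unknown - skip the AES work */
	}
	if (try_resolve(target_rpa)) {
		return true;	/* resolved - report to host */
	}
	cache_add(target_rpa);	/* remember the failure for next time */
	return false;
}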


@@ -261,6 +261,15 @@ config BT_CTLR_RPA_CACHE_SIZE
Set the size of the Known Unknown Resolving List for LE
Controller-based Software deferred Privacy.
config BT_CTLR_TRPA_CACHE_SIZE
int "LE Controller-based Software Privacy target RPA cache size"
depends on BT_CTLR_SW_DEFERRED_PRIVACY
default 4
range 1 64
help
Set the size of the Known Unknown Target RPA Resolving List for LE
Controller-based Software deferred Privacy.
config BT_CTLR_DATA_LENGTH_CLEAR
bool "Data Length Support (Cleartext only)"
depends on SOC_SERIES_NRF51X
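
Assuming the usual Kconfig plumbing, an application can size the new cache from its prj.conf; the values here are purely illustrative, and BT_CTLR_SW_DEFERRED_PRIVACY must be enabled for the option to take effect:

CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY=y
CONFIG_BT_CTLR_TRPA_CACHE_SIZE=8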


@@ -86,6 +86,12 @@ struct lll_prpa_cache {
uint8_t taken:1;
bt_addr_t rpa;
};
/* Cache of known unknown target RPAs */
struct lll_trpa_cache {
uint8_t rl_idx;
bt_addr_t rpa;
};
#endif
extern uint8_t ull_filter_lll_fal_match(struct lll_filter const *const filter,
@@ -111,6 +117,7 @@ extern bool ull_filter_lll_rl_addr_resolve(uint8_t id_addr_type,
extern bool ull_filter_lll_rl_enabled(void);
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
extern const struct lll_prpa_cache *ull_filter_lll_prpa_cache_get(void);
extern const struct lll_trpa_cache *ull_filter_lll_trpa_cache_get(void);
typedef void (*resolve_callback_t)(void *param);
extern uint8_t ull_filter_deferred_resolve(bt_addr_t *rpa,
resolve_callback_t cb);
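
The getter above exposes the cache read-only to the LLL and to test code. A hypothetical consumer-side check, assuming only the declarations in this hunk plus CONFIG_BT_CTLR_TRPA_CACHE_SIZE and the FILTER_IDX_NONE free-slot marker used in ull_filter.c below:

/* Hypothetical helper: true if the target RPA is already a "known
 * unknown" for resolving-list entry rl_idx, i.e. resolution has been
 * tried and failed, so it need not be retried.
 */
static bool trpa_known_unknown(const bt_addr_t *rpa, uint8_t rl_idx)
{
	const struct lll_trpa_cache *cache = ull_filter_lll_trpa_cache_get();

	for (uint8_t i = 0; i < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; i++) {
		/* Free slots carry rl_idx == FILTER_IDX_NONE and thus
		 * never match a valid resolving-list index.
		 */
		if (cache[i].rl_idx == rl_idx &&
		    bt_addr_eq(&cache[i].rpa, rpa)) {
			return true;
		}
	}

	return false;
}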


@@ -74,6 +74,10 @@ static uint8_t rl_enable;
static uint8_t newest_prpa;
static struct lll_prpa_cache prpa_cache[CONFIG_BT_CTLR_RPA_CACHE_SIZE];
/* Cache of known unknown target RPAs */
static uint8_t newest_trpa;
static struct lll_trpa_cache trpa_cache[CONFIG_BT_CTLR_TRPA_CACHE_SIZE];
struct prpa_resolve_work {
struct k_work prpa_work;
bt_addr_t rpa;
@@ -99,6 +103,7 @@ static struct prpa_resolve_work resolve_work;
static struct target_resolve_work t_work;
BUILD_ASSERT(ARRAY_SIZE(prpa_cache) < FILTER_IDX_NONE);
BUILD_ASSERT(ARRAY_SIZE(trpa_cache) < FILTER_IDX_NONE);
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
BUILD_ASSERT(ARRAY_SIZE(fal) < FILTER_IDX_NONE);
BUILD_ASSERT(ARRAY_SIZE(rl) < FILTER_IDX_NONE);
@@ -158,6 +163,9 @@ static void prpa_cache_add(bt_addr_t *prpa_cache_addr);
static uint8_t prpa_cache_try_resolve(bt_addr_t *rpa);
static void prpa_cache_resolve(struct k_work *work);
static void target_resolve(struct k_work *work);
static void trpa_cache_clear(void);
static uint8_t trpa_cache_find(bt_addr_t *prpa_cache_addr, uint8_t rl_idx);
static void trpa_cache_add(bt_addr_t *prpa_cache_addr, uint8_t rl_idx);
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
@@ -326,6 +334,7 @@ uint8_t ll_rl_add(bt_addr_le_t *id_addr, const uint8_t pirk[IRK_SIZE],
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
/* a new key was added, invalidate the known/unknown list */
prpa_cache_clear();
trpa_cache_clear();
#endif
}
if (rl[i].lirk) {
@@ -629,6 +638,7 @@ void ull_filter_reset(bool init)
rl_clear();
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
prpa_cache_clear();
trpa_cache_clear();
#endif
if (init) {
k_work_init_delayable(&rpa_work, rpa_timeout);
@@ -748,6 +758,7 @@ void ull_filter_rpa_update(bool timeout)
* invalidate the known/unknown peer RPA cache
*/
prpa_cache_clear();
trpa_cache_clear();
#endif
}
@@ -1496,12 +1507,25 @@ static void target_resolve(struct k_work *work)
if (rl[idx].taken && bt_addr_eq(&(rl[idx].target_rpa), search_rpa)) {
j = idx;
} else {
uint8_t i;
/* No match - so not in list; Need to see if we can resolve */
i = trpa_cache_find(search_rpa, idx);
if (i != FILTER_IDX_NONE) {
/* Found a known unknown - do nothing */
j = FILTER_IDX_NONE;
} else if (bt_rpa_irk_matches(rl[idx].local_irk, search_rpa)) {
/* Could resolve, store RPA */
(void)memcpy(rl[idx].target_rpa.val, search_rpa->val,
sizeof(bt_addr_t));
j = idx;
} else if (rl[idx].taken) {
/* No match - thus cannot resolve, we have an unknown
* so insert in known unknown list
*/
trpa_cache_add(search_rpa, idx);
j = FILTER_IDX_NONE;
} else {
/* Could not resolve, and not in table */
j = FILTER_IDX_NONE;
@@ -1629,4 +1653,45 @@ const struct lll_prpa_cache *ull_filter_lll_prpa_cache_get(void)
{
return prpa_cache;
}
static void trpa_cache_clear(void)
{
/* Note the first element will not be in use before wrap around
* is reached.
* The first element in actual use will be at index 1.
* There is no element wasted with this implementation, as
* element 0 will eventually be allocated.
*/
newest_trpa = 0U;
for (uint8_t i = 0; i < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; i++) {
trpa_cache[i].rl_idx = FILTER_IDX_NONE;
}
}
static void trpa_cache_add(bt_addr_t *rpa, uint8_t rl_idx)
{
newest_trpa = (newest_trpa + 1) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
(void)memcpy(trpa_cache[newest_trpa].rpa.val, rpa->val,
sizeof(bt_addr_t));
trpa_cache[newest_trpa].rl_idx = rl_idx;
}
static uint8_t trpa_cache_find(bt_addr_t *rpa, uint8_t rl_idx)
{
for (uint8_t i = 0; i < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; i++) {
if (trpa_cache[i].rl_idx == rl_idx &&
bt_addr_eq(&(trpa_cache[i].rpa), rpa)) {
return i;
}
}
return FILTER_IDX_NONE;
}
const struct lll_trpa_cache *ull_filter_lll_trpa_cache_get(void)
{
return trpa_cache;
}
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
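
The index arithmetic above means slot 0 is skipped on the first pass and only comes into use at wrap-around, exactly as the trpa_cache_clear() comment states. A standalone sketch of just that arithmetic (plain C; TRPA_CACHE_SIZE is a hypothetical stand-in for CONFIG_BT_CTLR_TRPA_CACHE_SIZE):

#include <stdio.h>

#define TRPA_CACHE_SIZE 4	/* mirrors the Kconfig default */

int main(void)
{
	unsigned int newest = 0U;	/* same initial value as newest_trpa */

	for (int add = 1; add <= 5; add++) {
		newest = (newest + 1) % TRPA_CACHE_SIZE;
		printf("add #%d -> slot %u\n", add, newest);
	}
	/* Prints slots 1, 2, 3, 0, 1: slot 0 is filled only after
	 * wrap-around, and the 5th add evicts the 1st - exactly what
	 * the unit test below asserts.
	 */
	return 0;
}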


@@ -16,6 +16,7 @@
#define CONFIG_BT_CTLR_RL_SIZE 8
#define CONFIG_BT_CTLR_FAL_SIZE 8
#define CONFIG_BT_CTLR_RPA_CACHE_SIZE 4
#define CONFIG_BT_CTLR_TRPA_CACHE_SIZE 4
#define CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY 1
#define CONFIG_BT_LOG_LEVEL 1
@@ -23,7 +24,8 @@
/*
* Unit test of SW deferred privacy data structure and related methods
* Tests the prpa and trpa cache functions (prpa_cache_add, prpa_cache_clear, prpa_cache_find
* and trpa_cache_add, trpa_cache_clear, trpa_cache_find)
*/
#define BT_ADDR_INIT(P0, P1, P2, P3, P4, P5) \
@@ -35,9 +37,13 @@ void helper_privacy_clear(void)
for (uint8_t i = 0; i < CONFIG_BT_CTLR_RPA_CACHE_SIZE; i++) {
zassert_equal(prpa_cache[i].taken, 0U, "");
}
zassert_equal(newest_trpa, 0, "");
for (uint8_t i = 0; i < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; i++) {
zassert_equal(trpa_cache[i].rl_idx, FILTER_IDX_NONE, "");
}
}
void helper_prpa_add(int skew)
{
bt_addr_t a1, a2, a3, a4, a5;
uint8_t pos, ex_pos;
@@ -80,12 +86,56 @@ void helper_privacy_add(int skew)
zassert_equal(pos, FILTER_IDX_NONE, "");
}
void helper_trpa_add(int skew)
{
bt_addr_t a1, a2, a3, a4, a5;
uint8_t pos, ex_pos;
bt_addr_copy(&a1, BT_ADDR_INIT(0x12, 0x13, 0x14, 0x15, 0x16, 0x17));
bt_addr_copy(&a2, BT_ADDR_INIT(0x22, 0x23, 0x24, 0x25, 0x26, 0x27));
bt_addr_copy(&a3, BT_ADDR_INIT(0x32, 0x33, 0x34, 0x35, 0x36, 0x37));
bt_addr_copy(&a4, BT_ADDR_INIT(0x42, 0x43, 0x44, 0x45, 0x46, 0x47));
bt_addr_copy(&a5, BT_ADDR_INIT(0x52, 0x53, 0x54, 0x55, 0x56, 0x57));
trpa_cache_add(&a1, 0);
pos = trpa_cache_find(&a1, 0);
ex_pos = (1 + skew) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
zassert_equal(pos, ex_pos, "%d == %d", pos, ex_pos);
trpa_cache_add(&a2, 1);
pos = trpa_cache_find(&a2, 1);
ex_pos = (2 + skew) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
zassert_equal(pos, ex_pos, "");
trpa_cache_add(&a3, 2);
pos = trpa_cache_find(&a3, 2);
ex_pos = (3 + skew) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
zassert_equal(pos, ex_pos, "");
/* Adding this should cause wrap around */
trpa_cache_add(&a4, 3);
pos = trpa_cache_find(&a4, 3);
ex_pos = (4 + skew) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
zassert_equal(pos, ex_pos, "");
/* adding this should cause a1 to be dropped */
trpa_cache_add(&a5, 4);
pos = trpa_cache_find(&a5, 4);
ex_pos = (1 + skew) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
zassert_equal(pos, ex_pos, "");
/* check that a1 can no longer be found */
pos = trpa_cache_find(&a1, 0);
zassert_equal(pos, FILTER_IDX_NONE, "");
}
static void before(void *data)
{
ARG_UNUSED(data);
/* Run before each test - clear the cache so we start fresh each time. */
prpa_cache_clear();
trpa_cache_clear();
}
ZTEST_SUITE(test_ctrl_sw_privacy_unit, NULL, NULL, before, NULL, NULL);
@@ -97,7 +147,8 @@ ZTEST(test_ctrl_sw_privacy_unit, test_privacy_clear)
ZTEST(test_ctrl_sw_privacy_unit, test_privacy_add)
{
helper_prpa_add(0);
helper_trpa_add(0);
}
ZTEST(test_ctrl_sw_privacy_unit, test_privacy_add_stress)
@@ -111,7 +162,18 @@ ZTEST(test_ctrl_sw_privacy_unit, test_privacy_add_stress)
0xef, 0xaa, 0xff));
prpa_cache_add(&ar);
}
helper_prpa_add(skew);
prpa_cache_clear();
}
for (uint8_t skew = 0; skew < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; skew++) {
for (uint8_t i = 0; i < skew; i++) {
bt_addr_copy(&ar,
BT_ADDR_INIT(0xde, 0xad, 0xbe,
0xef, 0xaa, 0xff));
trpa_cache_add(&ar, 0);
}
helper_trpa_add(skew);
trpa_cache_clear();
}
}
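
The stress test's skew loop pre-fills the cache with skew dummy entries before running the helper, which shifts every expected slot by skew modulo the cache size. Worked through for one case (assuming the default cache size of 4 and skew == 2):

/* Two dummy adds occupy slots 1 and 2, so the helper's entries land at:
 *   a1 -> (1 + 2) % 4 == 3
 *   a2 -> (2 + 2) % 4 == 0   (wrap-around)
 *   a3 -> (3 + 2) % 4 == 1
 *   a4 -> (4 + 2) % 4 == 2
 *   a5 -> (1 + 2) % 4 == 3   (evicts a1, so trpa_cache_find(&a1, 0)
 *                             returns FILTER_IDX_NONE)
 */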