Bluetooth: host: Update GATT to use new delayable work API

Update GATT to use the new delayable work API for the delayed
calculation of the database hash and the service changed indication.

When the database hash characteristic is read, the hash work needs to be
canceled if still pending and, if currently executing, waited on to
complete so that the two threads don't both write to the stored value.

Signed-off-by: Joakim Andersson <joakim.andersson@nordicsemi.no>
This commit is contained in:
Joakim Andersson 2021-04-15 10:51:02 +02:00 committed by Anas Nashif
commit 92d80d50b9

View file

@ -223,6 +223,36 @@ struct gatt_sc_cfg {
static struct gatt_sc_cfg sc_cfg[SC_CFG_MAX];
BUILD_ASSERT(sizeof(struct sc_data) == sizeof(sc_cfg[0].data));
/* Flag bits stored in gatt_sc.flags (see ATOMIC_DEFINE below). */
enum {
SC_RANGE_CHANGED, /* SC range changed */
SC_INDICATE_PENDING, /* SC indicate pending */
#if defined(CONFIG_BT_GATT_CACHING)
/* Set once db_hash_gen() has produced an up-to-date hash; cleared when
 * the database changes. When clear, the hash must be (re)calculated
 * before the Database Hash characteristic value can be served.
 */
DB_HASH_VALID, /* Database hash is valid */
#endif
/* Total number of flags - must be at the end of the enum */
SC_NUM_FLAGS,
};
#if defined(CONFIG_BT_GATT_SERVICE_CHANGED)
/* State for the delayed Service Changed indication. */
static struct gatt_sc {
struct bt_gatt_indicate_params params; /* Indication parameters */
uint16_t start; /* Start handle of the changed range */
uint16_t end; /* End handle of the changed range */
struct k_work_delayable work; /* Delayed SC indication work item */
ATOMIC_DEFINE(flags, SC_NUM_FLAGS); /* Bits from the flags enum above */
} gatt_sc;
#endif /* defined(CONFIG_BT_GATT_SERVICE_CHANGED) */
#if defined(CONFIG_BT_GATT_CACHING)
/* Database hash state: stored hash value plus the delayable work item
 * used to (re)calculate it and the sync object used to wait for an
 * in-progress calculation to finish (see db_hash_read/db_hash_commit).
 */
static struct db_hash {
uint8_t hash[16]; /* AES-CMAC database hash (little-endian) */
struct k_work_delayable work; /* Delayed hash calculation work */
struct k_work_sync sync; /* Used to cancel-and-wait on the work */
} db_hash;
#endif
static struct gatt_sc_cfg *find_sc_cfg(uint8_t id, bt_addr_le_t *addr)
{
BT_DBG("id: %u, addr: %s", id, bt_addr_le_str(addr));
@ -561,9 +591,6 @@ static ssize_t cf_write(struct bt_conn *conn, const struct bt_gatt_attr *attr,
return len;
}
static uint8_t db_hash[16];
struct k_delayed_work db_hash_work;
struct gen_hash_state {
struct tc_cmac_struct state;
int err;
@ -648,7 +675,7 @@ static void db_hash_store(void)
{
int err;
err = settings_save_one("bt/hash", &db_hash, sizeof(db_hash));
err = settings_save_one("bt/hash", &db_hash.hash, sizeof(db_hash.hash));
if (err) {
BT_ERR("Failed to save Database Hash (err %d)", err);
}
@ -656,13 +683,6 @@ static void db_hash_store(void)
BT_DBG("Database Hash stored");
}
/* Once the db_hash work has started we cannot cancel it anymore, so the
* assumption is made that the in-progress work cannot be pre-empted.
* This assumption should hold as long as calculation does not make any calls
* that would make it unready.
* If this assumption is no longer true we will have to solve the case where
* k_delayed_work_cancel failed because the work was in-progress but pre-empted.
*/
static void db_hash_gen(bool store)
{
uint8_t key[16] = {};
@ -676,7 +696,7 @@ static void db_hash_gen(bool store)
bt_gatt_foreach_attr(0x0001, 0xffff, gen_hash_m, &state);
if (tc_cmac_final(db_hash, &state.state) == TC_CRYPTO_FAIL) {
if (tc_cmac_final(db_hash.hash, &state.state) == TC_CRYPTO_FAIL) {
BT_ERR("Unable to calculate hash");
return;
}
@ -688,13 +708,15 @@ static void db_hash_gen(bool store)
* in little endianess as well. bt_smp_aes_cmac calculates the hash in
* big endianess so we have to swap.
*/
sys_mem_swap(db_hash, sizeof(db_hash));
sys_mem_swap(db_hash.hash, sizeof(db_hash.hash));
BT_HEXDUMP_DBG(db_hash, sizeof(db_hash), "Hash: ");
BT_HEXDUMP_DBG(db_hash.hash, sizeof(db_hash.hash), "Hash: ");
if (IS_ENABLED(CONFIG_BT_SETTINGS) && store) {
db_hash_store();
}
atomic_set_bit(gatt_sc.flags, DB_HASH_VALID);
}
static void db_hash_process(struct k_work *work)
@ -706,13 +728,11 @@ static ssize_t db_hash_read(struct bt_conn *conn,
const struct bt_gatt_attr *attr,
void *buf, uint16_t len, uint16_t offset)
{
int err;
/* Check if db_hash is already pending in which case it shall be
* generated immediately instead of waiting for the work to complete.
*/
err = k_delayed_work_cancel(&db_hash_work);
if (!err) {
(void)k_work_cancel_delayable_sync(&db_hash.work, &db_hash.sync);
if (!atomic_test_bit(gatt_sc.flags, DB_HASH_VALID)) {
db_hash_gen(true);
}
@ -724,8 +744,8 @@ static ssize_t db_hash_read(struct bt_conn *conn,
*/
bt_gatt_change_aware(conn, true);
return bt_gatt_attr_read(conn, attr, buf, len, offset, db_hash,
sizeof(db_hash));
return bt_gatt_attr_read(conn, attr, buf, len, offset, db_hash.hash,
sizeof(db_hash.hash));
}
static void remove_cf_cfg(struct bt_conn *conn)
@ -883,28 +903,10 @@ populate:
}
#endif /* CONFIG_BT_GATT_DYNAMIC_DB */
/* NOTE(review): this enum duplicates the flags enum earlier in this file,
 * minus the CONFIG_BT_GATT_CACHING flag. It looks like pre-change diff
 * residue (the definition was moved up) — confirm only one definition
 * remains in the final file.
 */
enum {
SC_RANGE_CHANGED, /* SC range changed */
SC_INDICATE_PENDING, /* SC indicate pending */
/* Total number of flags - must be at the end of the enum */
SC_NUM_FLAGS,
};
#if defined(CONFIG_BT_GATT_SERVICE_CHANGED)
/* NOTE(review): duplicate of the gatt_sc struct earlier in this file,
 * still using the legacy k_delayed_work type. Appears to be the removed
 * side of the diff (the definition was moved up and migrated to
 * k_work_delayable) — confirm only one definition remains.
 */
static struct gatt_sc {
struct bt_gatt_indicate_params params;
uint16_t start;
uint16_t end;
struct k_delayed_work work;
ATOMIC_DEFINE(flags, SC_NUM_FLAGS);
} gatt_sc;
#endif /* defined(CONFIG_BT_GATT_SERVICE_CHANGED) */
/* Schedule (or push back) the delayed Service Changed indication.
 *
 * @param timeout Delay before the SC work runs.
 *
 * No-op when CONFIG_BT_GATT_SERVICE_CHANGED is disabled. Uses
 * k_work_reschedule() so a repeated call restarts the delay rather than
 * keeping an earlier, shorter deadline.
 *
 * Fix: the body contained both the legacy k_delayed_work_submit() call and
 * its k_work_delayable replacement (leftover diff residue). gatt_sc.work is
 * a struct k_work_delayable, so only the k_work_reschedule() call is valid.
 */
static inline void sc_work_submit(k_timeout_t timeout)
{
#if defined(CONFIG_BT_GATT_SERVICE_CHANGED)
	k_work_reschedule(&gatt_sc.work, timeout);
#endif
}
@ -981,7 +983,7 @@ static void clear_ccc_cfg(struct bt_gatt_ccc_cfg *cfg)
#if defined(CONFIG_BT_SETTINGS_CCC_STORE_ON_WRITE)
/* Per-connection queue of CCC values pending persistent storage, flushed
 * by the delayed store work (see gatt_ccc_conn_enqueue / ccc_delayed_store).
 *
 * Fix: the struct declared the `work` member twice — once as the legacy
 * struct k_delayed_work and once as struct k_work_delayable (leftover diff
 * residue; duplicate members are invalid C). Every use in this file
 * (k_work_reschedule, k_work_init_delayable, k_work_cancel_delayable)
 * expects the k_work_delayable form, so only that member is kept.
 */
static struct gatt_ccc_store {
	struct bt_conn *conn_list[CONFIG_BT_MAX_CONN]; /* Queued connections */
	struct k_work_delayable work; /* Delayed CCC store work item */
} gatt_ccc_store;
static bool gatt_ccc_conn_is_queued(struct bt_conn *conn)
@ -1009,7 +1011,7 @@ static void gatt_ccc_conn_enqueue(struct bt_conn *conn)
gatt_ccc_store.conn_list[bt_conn_index(conn)] =
bt_conn_ref(conn);
k_delayed_work_submit(&gatt_ccc_store.work, CCC_STORE_DELAY);
k_work_reschedule(&gatt_ccc_store.work, CCC_STORE_DELAY);
}
}
@ -1065,16 +1067,16 @@ void bt_gatt_init(void)
bt_gatt_service_init();
#if defined(CONFIG_BT_GATT_CACHING)
k_delayed_work_init(&db_hash_work, db_hash_process);
k_work_init_delayable(&db_hash.work, db_hash_process);
/* Submit work to Generate initial hash as there could be static
* services already in the database.
*/
k_delayed_work_submit(&db_hash_work, DB_HASH_TIMEOUT);
k_work_schedule(&db_hash.work, DB_HASH_TIMEOUT);
#endif /* CONFIG_BT_GATT_CACHING */
#if defined(CONFIG_BT_GATT_SERVICE_CHANGED)
k_delayed_work_init(&gatt_sc.work, sc_process);
k_work_init_delayable(&gatt_sc.work, sc_process);
if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
/* Make sure to not send SC indications until SC
* settings are loaded
@ -1084,7 +1086,7 @@ void bt_gatt_init(void)
#endif /* defined(CONFIG_BT_GATT_SERVICE_CHANGED) */
#if defined(CONFIG_BT_SETTINGS_CCC_STORE_ON_WRITE)
k_delayed_work_init(&gatt_ccc_store.work, ccc_delayed_store);
k_work_init_delayable(&gatt_ccc_store.work, ccc_delayed_store);
#endif
}
@ -1121,7 +1123,8 @@ static void db_changed(void)
#if defined(CONFIG_BT_GATT_CACHING)
int i;
k_delayed_work_submit(&db_hash_work, DB_HASH_TIMEOUT);
atomic_clear_bit(gatt_sc.flags, DB_HASH_VALID);
k_work_reschedule(&db_hash.work, DB_HASH_TIMEOUT);
for (i = 0; i < ARRAY_SIZE(cf_cfg); i++) {
struct gatt_cf_cfg *cfg = &cf_cfg[i];
@ -1214,13 +1217,17 @@ int bt_gatt_service_register(struct bt_gatt_service *svc)
return -EALREADY;
}
k_sched_lock();
err = gatt_register(svc);
if (err < 0) {
k_sched_unlock();
return err;
}
/* Don't submit any work until the stack is initialized */
if (!atomic_get(&init)) {
k_sched_unlock();
return 0;
}
@ -1229,6 +1236,8 @@ int bt_gatt_service_register(struct bt_gatt_service *svc)
db_changed();
k_sched_unlock();
return 0;
}
@ -1238,13 +1247,17 @@ int bt_gatt_service_unregister(struct bt_gatt_service *svc)
__ASSERT(svc, "invalid parameters\n");
k_sched_lock();
err = gatt_unregister(svc);
if (err) {
k_sched_unlock();
return err;
}
/* Don't submit any work until the stack is initialized */
if (!atomic_get(&init)) {
k_sched_unlock();
return 0;
}
@ -1253,6 +1266,8 @@ int bt_gatt_service_unregister(struct bt_gatt_service *svc)
db_changed();
k_sched_unlock();
return 0;
}
#endif /* CONFIG_BT_GATT_DYNAMIC_DB */
@ -5123,23 +5138,26 @@ static int db_hash_set(const char *name, size_t len_rd,
static int db_hash_commit(void)
{
int err;
k_sched_lock();
/* Stop work and generate the hash */
err = k_delayed_work_cancel(&db_hash_work);
if (!err) {
(void)k_work_cancel_delayable_sync(&db_hash.work, &db_hash.sync);
if (!atomic_test_bit(gatt_sc.flags, DB_HASH_VALID)) {
db_hash_gen(false);
}
k_sched_unlock();
/* Check if hash matches then skip SC update */
if (!memcmp(stored_hash, db_hash, sizeof(stored_hash))) {
if (!memcmp(stored_hash, db_hash.hash, sizeof(stored_hash))) {
BT_DBG("Database Hash matches");
k_delayed_work_cancel(&gatt_sc.work);
k_work_cancel_delayable(&gatt_sc.work);
atomic_clear_bit(gatt_sc.flags, SC_RANGE_CHANGED);
return 0;
}
BT_HEXDUMP_DBG(db_hash, sizeof(db_hash), "New Hash: ");
BT_HEXDUMP_DBG(db_hash.hash, sizeof(db_hash.hash), "New Hash: ");
/**
* GATT database has been modified since last boot, likely due to
@ -5316,7 +5334,7 @@ void bt_gatt_disconnected(struct bt_conn *conn)
gatt_ccc_conn_unqueue(conn);
if (gatt_ccc_conn_queue_is_empty()) {
k_delayed_work_cancel(&gatt_ccc_store.work);
k_work_cancel_delayable(&gatt_ccc_store.work);
}
#endif