2017-06-16 12:30:54 +03:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2017 Intel Corporation
|
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
includes: prefer <zephyr/kernel.h> over <zephyr/zephyr.h>
As of today <zephyr/zephyr.h> is 100% equivalent to <zephyr/kernel.h>.
This patch proposes to then include <zephyr/kernel.h> instead of
<zephyr/zephyr.h> since it is more clear that you are including the
Kernel APIs and (probably) nothing else. <zephyr/zephyr.h> sounds like a
catch-all header that may be confusing. Most applications need to
include a bunch of other things to compile, e.g. driver headers or
subsystem headers like BT, logging, etc.
The idea of a catch-all header in Zephyr is probably not feasible
anyway. Reason is that Zephyr is not a library, like it could be for
example `libpython`. Zephyr provides many utilities nowadays: a kernel,
drivers, subsystems, etc and things will likely grow. A catch-all header
would be massive, difficult to keep up-to-date. It is also likely that
an application will only build a small subset. Note that subsystem-level
headers may use a catch-all approach to make things easier, though.
NOTE: This patch is **NOT** removing the header, just removing its usage
in-tree. I'd advocate for its deprecation (add a #warning on it), but I
understand many people will have concerns.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
2022-08-25 09:58:46 +02:00
|
|
|
#include <zephyr/kernel.h>
|
2017-06-16 12:30:54 +03:00
|
|
|
#include <errno.h>
|
|
|
|
#include <string.h>
|
2021-01-06 09:56:56 +01:00
|
|
|
#include <stdlib.h>
|
2017-06-16 12:30:54 +03:00
|
|
|
#include <sys/types.h>
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/sys/util.h>
|
|
|
|
#include <zephyr/sys/byteorder.h>
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/net/buf.h>
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/bluetooth/hci.h>
|
|
|
|
#include <zephyr/bluetooth/mesh.h>
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2017-08-09 09:21:11 +03:00
|
|
|
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_MESH_DEBUG_TRANS)
|
2018-07-17 10:35:52 +03:00
|
|
|
#define LOG_MODULE_NAME bt_mesh_transport
|
2017-06-16 12:30:54 +03:00
|
|
|
#include "common/log.h"
|
2022-10-25 08:48:54 +02:00
|
|
|
#include "common/bt_str.h"
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2019-07-09 10:33:28 +03:00
|
|
|
#include "host/testing.h"
|
2018-01-02 11:07:45 +01:00
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
#include "crypto.h"
|
|
|
|
#include "adv.h"
|
|
|
|
#include "mesh.h"
|
|
|
|
#include "net.h"
|
2020-08-25 11:03:42 +02:00
|
|
|
#include "app_keys.h"
|
2017-06-16 12:30:54 +03:00
|
|
|
#include "lpn.h"
|
2020-09-09 16:59:43 +08:00
|
|
|
#include "rpl.h"
|
2017-06-16 12:30:54 +03:00
|
|
|
#include "friend.h"
|
|
|
|
#include "access.h"
|
|
|
|
#include "foundation.h"
|
2018-05-07 12:23:37 +03:00
|
|
|
#include "settings.h"
|
2020-08-12 17:07:38 +02:00
|
|
|
#include "heartbeat.h"
|
2017-06-16 12:30:54 +03:00
|
|
|
#include "transport.h"
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
#define AID_MASK ((uint8_t)(BIT_MASK(6)))
|
2017-06-16 12:30:54 +03:00
|
|
|
|
|
|
|
#define SEG(data) ((data)[0] >> 7)
|
|
|
|
#define AKF(data) (((data)[0] >> 6) & 0x01)
|
|
|
|
#define AID(data) ((data)[0] & AID_MASK)
|
Bluetooth: Mesh: Fix model publication
Model publication was broken in a couple of ways:
- The Publish Retransmit State was not taken into account at all
- Health Server used a single publish state for all elements
To implement Publish Retransmit properly, one has to use a callback to
track when the message has been sent. The problem with the transport
layer sending APIs was that giving a callback would cause the
transport layer to assume that segmentation (with acks) is desired,
which is not the case for Model Publication (unless the message itself
is too large, of course). Because of this, the message sending context
receives a new send_rel ("Send Reliable") boolean member that an app
can use to force reliable sending.
Another challenge with the Publish Retransmit state is that a buffer
is needed for storing the AppKey-encrypted SDU once it has been sent
out for the first time. To solve this, a new net_buf_simple member
is added to the model publication context. The separate 'msg' input
parameter of the bt_mesh_model_publish() API is removed, since the
application is now expected to pre-fill pub->msg instead.
To help with the publishing API change, the Health Server model gets a
new helper macro for initializing the publishing context with a
right-sized publishing message.
The API for creating Health Server instances is also redesigned since
it was so far using a single model publishing state, which would
result in erratic behavior in case of multiple elements with the
Health Server Model. Now, the application needs to provide a unique
publishing context for each Health Server instance.
The changes are heavily intertwined, so it's not easily possible to
split them into multiple patches, hence the large(ish) patch.
Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
2017-11-18 10:24:18 +02:00
|
|
|
#define ASZMIC(data) (((data)[1] >> 7) & 1)
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
#define APP_MIC_LEN(aszmic) ((aszmic) ? BT_MESH_MIC_LONG : BT_MESH_MIC_SHORT)
|
2017-06-16 12:30:54 +03:00
|
|
|
|
|
|
|
#define UNSEG_HDR(akf, aid) ((akf << 6) | (aid & AID_MASK))
|
|
|
|
#define SEG_HDR(akf, aid) (UNSEG_HDR(akf, aid) | 0x80)
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
#define BLOCK_COMPLETE(seg_n) (uint32_t)(((uint64_t)1 << (seg_n + 1)) - 1)
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
#define SEQ_AUTH(iv_index, seq) (((uint64_t)iv_index) << 24 | (uint64_t)seq)
|
2017-06-16 12:30:54 +03:00
|
|
|
|
|
|
|
/* Number of retransmit attempts (after the initial transmit) per segment */
|
2020-09-04 09:51:39 +08:00
|
|
|
#define SEG_RETRANSMIT_ATTEMPTS CONFIG_BT_MESH_TX_SEG_RETRANS_COUNT
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2018-02-01 14:55:40 -08:00
|
|
|
/* "This timer shall be set to a minimum of 200 + 50 * TTL milliseconds.".
|
|
|
|
* We use 400 since 300 is a common send duration for standard HCI, and we
|
|
|
|
* need to have a timeout that's bigger than that.
|
|
|
|
*/
|
2020-09-04 09:51:39 +08:00
|
|
|
#define SEG_RETRANSMIT_TIMEOUT_UNICAST(tx) \
|
|
|
|
(CONFIG_BT_MESH_TX_SEG_RETRANS_TIMEOUT_UNICAST + 50 * (tx)->ttl)
|
|
|
|
|
2020-02-26 12:54:15 +01:00
|
|
|
/* When sending to a group, the messages are not acknowledged, and there's no
|
|
|
|
 * reason to delay the repetitions significantly. We still delay by more than 0 ms
|
|
|
|
* to avoid flooding the network.
|
|
|
|
*/
|
2020-09-04 09:51:39 +08:00
|
|
|
#define SEG_RETRANSMIT_TIMEOUT_GROUP CONFIG_BT_MESH_TX_SEG_RETRANS_TIMEOUT_GROUP
|
2018-02-01 14:55:40 -08:00
|
|
|
|
2020-02-26 12:54:15 +01:00
|
|
|
#define SEG_RETRANSMIT_TIMEOUT(tx) \
|
|
|
|
(BT_MESH_ADDR_IS_UNICAST(tx->dst) ? \
|
|
|
|
SEG_RETRANSMIT_TIMEOUT_UNICAST(tx) : \
|
|
|
|
SEG_RETRANSMIT_TIMEOUT_GROUP)
|
2017-06-16 12:30:54 +03:00
|
|
|
/* How long to wait for available buffers before giving up */
|
|
|
|
#define BUF_TIMEOUT K_NO_WAIT
|
|
|
|
|
2021-01-06 09:56:56 +01:00
|
|
|
/* Runtime state for one Virtual Address (Label UUID) entry. */
struct virtual_addr {
	uint16_t ref:15,   /* Reference count for this entry */
		 changed:1; /* Set when the entry needs to be (re)stored */
	uint16_t addr;     /* 16-bit virtual address for this Label UUID */
	uint8_t uuid[16];  /* Label UUID */
};
|
|
|
|
|
|
|
|
/* Virtual Address information for persistent storage. */
|
|
|
|
/* Virtual Address information for persistent storage. */
struct va_val {
	uint16_t ref;     /* Reference count */
	uint16_t addr;    /* 16-bit virtual address */
	uint8_t uuid[16]; /* Label UUID */
} __packed;
|
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
/* Context for one outgoing segmented message. A slot is in use while
 * nack_count is non-zero.
 */
static struct seg_tx {
	struct bt_mesh_subnet *sub;      /* Subnet the transfer is sent on */
	void *seg[BT_MESH_TX_SEG_MAX];   /* Per-segment payload buffers (from the segs slab) */
	uint64_t seq_auth;               /* SeqAuth of this transfer */
	uint16_t src;                    /* Source element address */
	uint16_t dst;                    /* Destination address */
	uint16_t ack_src;                /* NOTE(review): presumably the address acks are
					  * expected from; reset to UNASSIGNED on teardown —
					  * confirm against the rx ack path.
					  */
	uint16_t len;                    /* Total SDU length */
	uint8_t hdr;                     /* Transport header octet (SEG/AKF/AID) */
	uint8_t xmit;                    /* Advertising transmit parameters */
	uint8_t seg_n;                   /* Last segment index */
	uint8_t seg_o;                   /* Segment being sent */
	uint8_t nack_count;              /* Number of unacked segs */
	uint8_t attempts;                /* Remaining tx attempts */
	uint8_t ttl;                     /* Transmitted TTL value */
	uint8_t blocked:1,               /* Blocked by ongoing tx */
		ctl:1,                   /* Control packet */
		aszmic:1,                /* MIC size */
		started:1,               /* Start cb called */
		friend_cred:1,           /* Using Friend credentials */
		seg_send_started:1;      /* Used to check if seg_send_start cb is called */
	const struct bt_mesh_send_cb *cb; /* User completion callbacks */
	void *cb_data;                   /* User data for cb */
	struct k_work_delayable retransmit; /* Retransmit timer */
} seg_tx[CONFIG_BT_MESH_TX_SEG_MSG_COUNT];
|
2017-06-16 12:30:54 +03:00
|
|
|
|
|
|
|
/* Context for one incoming segmented message being reassembled. */
static struct seg_rx {
	struct bt_mesh_subnet *sub;      /* Subnet the transfer arrived on */
	void *seg[BT_MESH_RX_SEG_MAX];   /* Per-segment payload buffers */
	uint64_t seq_auth;               /* SeqAuth of this transfer */
	uint16_t src;                    /* Source element address */
	uint16_t dst;                    /* Destination address */
	uint16_t len;                    /* Total SDU length so far */
	uint8_t hdr;                     /* Transport header octet */
	uint8_t seg_n:5,                 /* Last segment index */
		ctl:1,                   /* Control packet */
		in_use:1,                /* Slot currently allocated */
		obo:1;                   /* Acking on behalf of an LPN */
	uint8_t ttl;                     /* TTL to use for acks */
	uint32_t block;                  /* Bitfield of received segments */
	uint32_t last;                   /* NOTE(review): looks like a timestamp/sequence of
					  * the last activity — confirm against the ack logic.
					  */
	struct k_work_delayable ack;     /* Ack timer */
} seg_rx[CONFIG_BT_MESH_RX_SEG_MSG_COUNT];
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
K_MEM_SLAB_DEFINE(segs, BT_MESH_APP_SEG_SDU_MAX, CONFIG_BT_MESH_SEG_BUFS, 4);
|
2018-02-10 10:32:58 +02:00
|
|
|
|
2021-01-06 09:56:56 +01:00
|
|
|
static struct virtual_addr virtual_addrs[CONFIG_BT_MESH_LABEL_COUNT];
|
2020-08-18 10:53:15 +02:00
|
|
|
|
Bluetooth: Mesh: Fix model publication
Model publication was broken in a couple of ways:
- The Publish Retransmit State was not taken into account at all
- Health Server used a single publish state for all elements
To implement Publish Retransmit properly, one has to use a callback to
track when the message has been sent. The problem with the transport
layer sending APIs was that giving a callback would cause the
transport layer to assume that segmentation (with acks) is desired,
which is not the case for Model Publication (unless the message itself
is too large, of course). Because of this, the message sending context
receives a new send_rel ("Send Reliable") boolean member that an app
can use to force reliable sending.
Another challenge with the Publish Retransmit state is that a buffer
is needed for storing the AppKey-encrypted SDU once it has been sent
out for the first time. To solve this, a new net_buf_simple member
is added to the model publication context. The separate 'msg' input
parameter of the bt_mesh_model_publish() API is removed, since the
application is now expected to pre-fill pub->msg instead.
To help with the publishing API change, the Health Server model gets a
new helper macro for initializing the publishing context with a
right-sized publishing message.
The API for creating Health Server instances is also redesigned since
it was so far using a single model publishing state, which would
result in erratic behavior in case of multiple elements with the
Health Server Model. Now, the application needs to provide a unique
publishing context for each Health Server instance.
The changes are heavily intertwined, so it's not easily possible to
split them into multiple patches, hence the large(ish) patch.
Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
2017-11-18 10:24:18 +02:00
|
|
|
/* Send an unsegmented transport PDU.
 *
 * Builds a single advertising buffer holding a one-octet transport header
 * followed by the SDU and hands it to the network layer. When the Friend
 * feature is enabled, the PDU is also offered to the Friend Queue; PDUs
 * destined for a unicast LPN address go out *only* through the Friend
 * Queue.
 *
 * @param tx      Network-level TX context (subnet, msg ctx, xmit params).
 * @param sdu     Encrypted upper-transport SDU to send.
 * @param cb      Optional send callbacks.
 * @param cb_data User data passed to @p cb.
 * @param ctl_op  Control opcode, or NULL for an access message.
 *
 * @return 0 on success, -ENOBUFS when out of buffers or queue space.
 */
static int send_unseg(struct bt_mesh_net_tx *tx, struct net_buf_simple *sdu,
		      const struct bt_mesh_send_cb *cb, void *cb_data,
		      const uint8_t *ctl_op)
{
	struct net_buf *buf;

	buf = bt_mesh_adv_create(BT_MESH_ADV_DATA, BT_MESH_LOCAL_ADV,
				 tx->xmit, BUF_TIMEOUT);
	if (!buf) {
		BT_ERR("Out of network buffers");
		return -ENOBUFS;
	}

	/* Leave headroom for the network layer header. */
	net_buf_reserve(buf, BT_MESH_NET_HDR_LEN);

	if (ctl_op) {
		net_buf_add_u8(buf, TRANS_CTL_HDR(*ctl_op, 0));
	} else if (BT_MESH_IS_DEV_KEY(tx->ctx->app_idx)) {
		/* Device key: AKF = 0, AID = 0 */
		net_buf_add_u8(buf, UNSEG_HDR(0, 0));
	} else {
		/* Application key: AKF = 1 with the key's AID */
		net_buf_add_u8(buf, UNSEG_HDR(1, tx->aid));
	}

	net_buf_add_mem(buf, sdu->data, sdu->len);

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
		if (!bt_mesh_friend_queue_has_space(tx->sub->net_idx,
						    tx->src, tx->ctx->addr,
						    NULL, 1)) {
			if (BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr)) {
				/* A unicast destination may be an LPN that can
				 * only be reached via its Friend Queue, so a
				 * full queue is a hard failure here.
				 */
				BT_ERR("Not enough space in Friend Queue");
				net_buf_unref(buf);
				return -ENOBUFS;
			} else {
				/* Group/virtual destinations can still go out
				 * over the air directly.
				 */
				BT_WARN("No space in Friend Queue");
				goto send;
			}
		}

		if (bt_mesh_friend_enqueue_tx(tx, BT_MESH_FRIEND_PDU_SINGLE,
					      NULL, 1, &buf->b) &&
		    BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr)) {
			/* PDUs for a specific Friend should only go
			 * out through the Friend Queue.
			 */
			net_buf_unref(buf);
			send_cb_finalize(cb, cb_data);
			return 0;
		}
	}

send:
	return bt_mesh_net_send(tx, buf, cb, cb_data);
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Maximum payload carried by a single segment. Control and access
 * messages have different per-segment SDU sizes.
 */
static inline uint8_t seg_len(bool ctl)
{
	return ctl ? BT_MESH_CTL_SEG_SDU_MAX : BT_MESH_APP_SEG_SDU_MAX;
}
|
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
bool bt_mesh_tx_in_progress(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
|
|
|
|
if (seg_tx[i].nack_count) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Release the buffer of segment 'seg_idx' back to the slab and count it
 * as acknowledged.
 */
static void seg_tx_done(struct seg_tx *tx, uint8_t seg_idx)
{
	k_mem_slab_free(&segs, (void **)&tx->seg[seg_idx]);
	tx->seg[seg_idx] = NULL;
	tx->nack_count--;
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Only one segmented transfer per (src, dst) pair may run at a time.
 * Tell whether 'tx' blocks a transfer with the given source and
 * destination addresses.
 */
static bool seg_tx_blocks(struct seg_tx *tx, uint16_t src, uint16_t dst)
{
	if (tx->src != src) {
		return false;
	}

	return tx->dst == dst;
}
|
|
|
|
|
|
|
|
/* After a transfer to a given (src, dst) pair ends, find the oldest
 * transfer (lowest SeqAuth) that was blocked on the same pair and kick
 * its retransmit work so it can start sending immediately.
 */
static void seg_tx_unblock_check(struct seg_tx *tx)
{
	struct seg_tx *blocked = NULL;
	int i;

	/* Unblock the first blocked tx with the same params. */
	for (i = 0; i < ARRAY_SIZE(seg_tx); ++i) {
		if (&seg_tx[i] != tx &&
		    seg_tx[i].blocked &&
		    seg_tx_blocks(tx, seg_tx[i].src, seg_tx[i].dst) &&
		    (!blocked || seg_tx[i].seq_auth < blocked->seq_auth)) {
			blocked = &seg_tx[i];
		}
	}

	if (blocked) {
		BT_DBG("Unblocked 0x%04x",
		       (uint16_t)(blocked->seq_auth & TRANS_SEQ_ZERO_MASK));
		blocked->blocked = false;
		k_work_reschedule(&blocked->retransmit, K_NO_WAIT);
	}
}
|
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
/* Fully reset a segmented TX context: cancel the retransmit work, free
 * all remaining segment buffers and clear the bookkeeping so the slot
 * can be reused. If an IV Update was deferred behind this transfer,
 * proceed with it now.
 */
static void seg_tx_reset(struct seg_tx *tx)
{
	int i;

	/* If this call fails, the handler will exit early, as nack_count is 0. */
	(void)k_work_cancel_delayable(&tx->retransmit);

	tx->cb = NULL;
	tx->cb_data = NULL;
	tx->seq_auth = 0U;
	tx->sub = NULL;
	tx->src = BT_MESH_ADDR_UNASSIGNED;
	tx->dst = BT_MESH_ADDR_UNASSIGNED;
	tx->ack_src = BT_MESH_ADDR_UNASSIGNED;
	tx->blocked = false;

	/* Free every still-allocated segment; stop early once all
	 * outstanding segments are accounted for.
	 */
	for (i = 0; i <= tx->seg_n && tx->nack_count; i++) {
		if (!tx->seg[i]) {
			continue;
		}

		seg_tx_done(tx, i);
	}

	tx->nack_count = 0;
	tx->seg_send_started = 0;

	if (atomic_test_and_clear_bit(bt_mesh.flags, BT_MESH_IVU_PENDING)) {
		BT_DBG("Proceeding with pending IV Update");
		/* bt_mesh_net_iv_update() will re-enable the flag if this
		 * wasn't the only transfer.
		 */
		bt_mesh_net_iv_update(bt_mesh.iv_index, false);
	}
}
|
|
|
|
|
|
|
|
/* Finish a segmented transfer: unblock any transfer waiting on the same
 * (src, dst) pair, reset the context, then invoke the user's end
 * callback with 'err'. The callback is cached up front because
 * seg_tx_reset() clears it.
 */
static inline void seg_tx_complete(struct seg_tx *tx, int err)
{
	const struct bt_mesh_send_cb *cb = tx->cb;
	void *cb_data = tx->cb_data;

	seg_tx_unblock_check(tx);

	seg_tx_reset(tx);

	if (cb && cb->end) {
		cb->end(err, cb_data);
	}
}
|
|
|
|
|
|
|
|
/* Schedule an immediate retransmit pass for a transfer that still has
 * unacknowledged segments. No-op once everything has been acked.
 */
static void schedule_retransmit(struct seg_tx *tx)
{
	if (!tx->nack_count) {
		return;
	}

	BT_DBG("");

	/* If we haven't gone through all the segments for this attempt yet,
	 * (likely because of a buffer allocation failure or because we
	 * called this from inside bt_mesh_net_send), we should continue the
	 * retransmit immediately, as we just freed up a tx buffer.
	 */
	k_work_reschedule(&tx->retransmit, K_NO_WAIT);
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* 'start' callback for an individual segment's advertising buffer.
 *
 * Forwards the first start event of the transfer to the user's start
 * callback (only once, guarded by tx->started) and, if the segment
 * failed to start transmitting, kicks the retransmit work.
 */
static void seg_send_start(uint16_t duration, int err, void *user_data)
{
	struct seg_tx *tx = user_data;

	if (!tx->started && tx->cb && tx->cb->start) {
		tx->cb->start(duration, err, tx->cb_data);
		tx->started = 1U;
	}

	tx->seg_send_started = 1U;

	/* If there's an error in transmitting the 'sent' callback will never
	 * be called. Make sure that we kick the retransmit timer also in this
	 * case since otherwise we risk the transmission of becoming stale.
	 */
	if (err) {
		schedule_retransmit(tx);
	}
}
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2018-02-01 14:55:40 -08:00
|
|
|
/* 'end' callback for an individual segment's advertising buffer: kick
 * the retransmit timer once the segment has gone out. Ignored if the
 * start callback never ran for this segment.
 */
static void seg_sent(int err, void *user_data)
{
	struct seg_tx *tx = user_data;

	if (!tx->seg_send_started) {
		return;
	}

	schedule_retransmit(tx);
}
|
|
|
|
|
2017-11-17 15:19:51 +02:00
|
|
|
/* Per-segment send callbacks driving the retransmit state machine. */
static const struct bt_mesh_send_cb seg_sent_cb = {
	.start = seg_send_start,
	.end = seg_sent,
};
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Build the segmented transport PDU for segment 'seg_o' into 'buf':
 * the transport header octet followed by the 3-octet segmentation
 * header (SZMIC, SeqZero, SegO, SegN) and the segment payload. The
 * last segment may be shorter than the full segment length.
 */
static void seg_tx_buf_build(struct seg_tx *tx, uint8_t seg_o,
			     struct net_buf_simple *buf)
{
	uint16_t seq_zero = tx->seq_auth & TRANS_SEQ_ZERO_MASK;
	uint8_t len = MIN(seg_len(tx->ctl), tx->len - (seg_len(tx->ctl) * seg_o));

	net_buf_simple_add_u8(buf, tx->hdr);
	/* SZMIC (1 bit) | SeqZero bits 12..6 */
	net_buf_simple_add_u8(buf, (tx->aszmic << 7) | seq_zero >> 6);
	/* SeqZero bits 5..0 | SegO bits 4..3 */
	net_buf_simple_add_u8(buf, (((seq_zero & 0x3f) << 2) | (seg_o >> 3)));
	/* SegO bits 2..0 | SegN (5 bits) */
	net_buf_simple_add_u8(buf, ((seg_o & 0x07) << 5) | tx->seg_n);
	net_buf_simple_add_mem(buf, tx->seg[seg_o], len);
}
|
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
/* Transmit (or retransmit) the next unacknowledged segment of a
 * transfer.
 *
 * Sends exactly one segment per invocation and reschedules itself via
 * the per-segment send callbacks; once the last segment of an attempt
 * has gone out, the attempt counter is decremented and the retransmit
 * timer is armed with the full retransmit timeout. When the attempts
 * are exhausted the transfer is completed: with -ETIMEDOUT for unicast
 * destinations (acks expected), with 0 for groups (no acks).
 */
static void seg_tx_send_unacked(struct seg_tx *tx)
{
	if (!tx->nack_count) {
		return;
	}

	struct bt_mesh_msg_ctx ctx = {
		.net_idx = tx->sub->net_idx,
		/* App idx only used by network to detect control messages: */
		.app_idx = (tx->ctl ? BT_MESH_KEY_UNUSED : 0),
		.addr = tx->dst,
		.send_rel = true,
		.send_ttl = tx->ttl,
	};
	struct bt_mesh_net_tx net_tx = {
		.sub = tx->sub,
		.ctx = &ctx,
		.src = tx->src,
		.xmit = tx->xmit,
		.friend_cred = tx->friend_cred,
		.aid = tx->hdr & AID_MASK,
	};

	if (!tx->attempts) {
		if (BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
			BT_ERR("Ran out of retransmit attempts");
			seg_tx_complete(tx, -ETIMEDOUT);
		} else {
			/* Segmented sending to groups doesn't have acks, so
			 * running out of attempts is the expected behavior.
			 */
			seg_tx_complete(tx, 0);
		}

		return;
	}

	BT_DBG("SeqZero: 0x%04x Attempts: %u",
	       (uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK), tx->attempts);

	while (tx->seg_o <= tx->seg_n) {
		struct net_buf *seg;
		int err;

		if (!tx->seg[tx->seg_o]) {
			/* Move on to the next segment */
			tx->seg_o++;
			continue;
		}

		seg = bt_mesh_adv_create(BT_MESH_ADV_DATA, BT_MESH_LOCAL_ADV,
					 tx->xmit, BUF_TIMEOUT);
		if (!seg) {
			BT_DBG("Allocating segment failed");
			goto end;
		}

		net_buf_reserve(seg, BT_MESH_NET_HDR_LEN);
		seg_tx_buf_build(tx, tx->seg_o, &seg->b);

		BT_DBG("Sending %u/%u", tx->seg_o, tx->seg_n);

		err = bt_mesh_net_send(&net_tx, seg, &seg_sent_cb, tx);
		if (err) {
			BT_DBG("Sending segment failed");
			goto end;
		}

		/* Move on to the next segment */
		tx->seg_o++;

		/* Only one segment is sent per call; the send callbacks
		 * schedule the next one.
		 */
		return;
	}

	/* All segments of this attempt have been sent: start over and
	 * consume one attempt.
	 */
	tx->seg_o = 0U;
	tx->attempts--;

end:
	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) &&
	    bt_mesh_lpn_established()) {
		bt_mesh_lpn_poll();
	}

	k_work_reschedule(&tx->retransmit, K_MSEC(SEG_RETRANSMIT_TIMEOUT(tx)));
}
|
|
|
|
|
|
|
|
/* Delayable-work handler for the retransmit timer: resend whatever
 * segments are still unacknowledged.
 */
static void seg_retransmit(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct seg_tx *tx = CONTAINER_OF(dwork, struct seg_tx, retransmit);

	seg_tx_send_unacked(tx);
}
|
|
|
|
|
Bluetooth: Mesh: Fix model publication
Model publication was broken in a couple of ways:
- The Publish Retransmit State was not taken into account at all
- Health Server used a single publish state for all elements
To implement Publish Retransmit properly, one has to use a callback to
track when the message has been sent. The problem with the transport
layer sending APIs was that giving a callback would cause the
transport layer to assume that segmentation (with acks) is desired,
which is not the case for Model Publication (unless the message itself
is too large, of course). Because of this, the message sending context
receives a new send_rel ("Send Reliable") boolean member that an app
can use to force reliable sending.
Another challenge with the Publish Retransmit state is that a buffer
is needed for storing the AppKey-encrypted SDU once it has been sent
out for the first time. To solve this, a new net_buf_simple member
is added to the model publication context. The separate 'msg' input
parameter of the bt_mesh_model_publish() API is removed, since the
application is now expected to pre-fill pub->msg instead.
To help with the publishing API change, the Health Server model gets a
new helper macro for initializing the publishing context with a
right-sized publishing message.
The API for creating Health Server instances is also redesigned since
it was so far using a single model publishing state, which would
result in erratic behavior in case of multiple elements with the
Health Server Model. Now, the application needs to provide a unique
publishing context for each Health Server instance.
The changes are heavily intertwined, so it's not easily possible to
split them into multiple patches, hence the large(ish) patch.
Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
2017-11-18 10:24:18 +02:00
|
|
|
static int send_seg(struct bt_mesh_net_tx *net_tx, struct net_buf_simple *sdu,
|
2020-03-05 16:10:15 +01:00
|
|
|
const struct bt_mesh_send_cb *cb, void *cb_data,
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t *ctl_op)
|
2017-06-16 12:30:54 +03:00
|
|
|
{
|
2020-03-05 16:10:15 +01:00
|
|
|
bool blocked = false;
|
2017-06-16 12:30:54 +03:00
|
|
|
struct seg_tx *tx;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t seg_o;
|
2017-06-16 12:30:54 +03:00
|
|
|
int i;
|
|
|
|
|
Bluetooth: Mesh: Fix model publication
Model publication was broken in a couple of ways:
- The Publish Retransmit State was not taken into account at all
- Health Server used a single publish state for all elements
To implement Publish Retransmit properly, one has to use a callback to
track when the message has been sent. The problem with the transport
layer sending APIs was that giving a callback would cause the
transport layer to assume that segmentation (with acks) is desired,
which is not the case for Model Publication (unless the message itself
is too large, of course). Because of this, the message sending context
receives a new send_rel ("Send Reliable") boolean member that an app
can use to force reliable sending.
Another challenge with the Publish Retransmit state is that a buffer
is needed for storing the AppKey-encrypted SDU once it has been sent
out for the first time.To solve this, a new new net_buf_simple member
is added to the model publication context. The separate 'msg' input
parameter of the bt_mesh_model_publish() API is removed, since the
application is now expected to pre-fill pub->msg instead.
To help with the publishing API change, the Health Server model gets a
new helper macro for initializing the publishing context with a
right-sized publishing message.
The API for creating Health Server instances is also redesigned since
it was so far using a single model publishing state, which would
result in erratic behavior in case of multiple elements with the
Health Server Model. Now, the application needs to provide a unique
publishing context for each Health Server instance.
The changes are heavily intertwined, so it's not easily possible to
split them into multiple patches, hence the large(ish) patch.
Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
2017-11-18 10:24:18 +02:00
|
|
|
BT_DBG("src 0x%04x dst 0x%04x app_idx 0x%04x aszmic %u sdu_len %u",
|
|
|
|
net_tx->src, net_tx->ctx->addr, net_tx->ctx->app_idx,
|
|
|
|
net_tx->aszmic, sdu->len);
|
2017-06-16 12:30:54 +03:00
|
|
|
|
|
|
|
for (tx = NULL, i = 0; i < ARRAY_SIZE(seg_tx); i++) {
|
2020-03-05 16:10:15 +01:00
|
|
|
if (seg_tx[i].nack_count) {
|
|
|
|
blocked |= seg_tx_blocks(&seg_tx[i], net_tx->src,
|
|
|
|
net_tx->ctx->addr);
|
|
|
|
} else if (!tx) {
|
2017-06-16 12:30:54 +03:00
|
|
|
tx = &seg_tx[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!tx) {
|
|
|
|
BT_ERR("No multi-segment message contexts available");
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
if (ctl_op) {
|
|
|
|
tx->hdr = TRANS_CTL_HDR(*ctl_op, 1);
|
|
|
|
} else if (BT_MESH_IS_DEV_KEY(net_tx->ctx->app_idx)) {
|
|
|
|
tx->hdr = SEG_HDR(0, 0);
|
2017-06-16 12:30:54 +03:00
|
|
|
} else {
|
2020-03-05 16:10:15 +01:00
|
|
|
tx->hdr = SEG_HDR(1, net_tx->aid);
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
tx->src = net_tx->src;
|
2017-10-28 21:50:29 +02:00
|
|
|
tx->dst = net_tx->ctx->addr;
|
2020-03-05 16:10:15 +01:00
|
|
|
tx->seg_n = (sdu->len - 1) / seg_len(!!ctl_op);
|
|
|
|
tx->seg_o = 0;
|
|
|
|
tx->len = sdu->len;
|
2017-06-16 12:30:54 +03:00
|
|
|
tx->nack_count = tx->seg_n + 1;
|
|
|
|
tx->seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_TX, bt_mesh.seq);
|
|
|
|
tx->sub = net_tx->sub;
|
2017-11-12 15:31:19 +02:00
|
|
|
tx->cb = cb;
|
|
|
|
tx->cb_data = cb_data;
|
2020-02-26 12:54:15 +01:00
|
|
|
tx->attempts = SEG_RETRANSMIT_ATTEMPTS;
|
2020-03-05 16:10:15 +01:00
|
|
|
tx->xmit = net_tx->xmit;
|
|
|
|
tx->aszmic = net_tx->aszmic;
|
|
|
|
tx->friend_cred = net_tx->friend_cred;
|
|
|
|
tx->blocked = blocked;
|
|
|
|
tx->started = 0;
|
2022-06-30 10:58:41 +02:00
|
|
|
tx->seg_send_started = 0;
|
2020-03-05 16:10:15 +01:00
|
|
|
tx->ctl = !!ctl_op;
|
2020-11-07 15:31:34 +01:00
|
|
|
tx->ttl = net_tx->ctx->send_ttl;
|
2018-01-28 14:04:04 -08:00
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
BT_DBG("SeqZero 0x%04x (segs: %u)",
|
2020-05-27 11:26:57 -05:00
|
|
|
(uint16_t)(tx->seq_auth & TRANS_SEQ_ZERO_MASK), tx->nack_count);
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2019-08-15 14:45:09 +03:00
|
|
|
if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) &&
|
|
|
|
!bt_mesh_friend_queue_has_space(tx->sub->net_idx, net_tx->src,
|
|
|
|
tx->dst, &tx->seq_auth,
|
|
|
|
tx->seg_n + 1) &&
|
|
|
|
BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
|
|
|
|
BT_ERR("Not enough space in Friend Queue for %u segments",
|
|
|
|
tx->seg_n + 1);
|
|
|
|
seg_tx_reset(tx);
|
|
|
|
return -ENOBUFS;
|
|
|
|
}
|
|
|
|
|
2018-11-29 11:23:03 -08:00
|
|
|
for (seg_o = 0U; sdu->len; seg_o++) {
|
2020-03-05 16:10:15 +01:00
|
|
|
void *buf;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint16_t len;
|
2017-06-16 12:30:54 +03:00
|
|
|
int err;
|
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
err = k_mem_slab_alloc(&segs, &buf, BUF_TIMEOUT);
|
|
|
|
if (err) {
|
2017-06-16 12:30:54 +03:00
|
|
|
BT_ERR("Out of segment buffers");
|
|
|
|
seg_tx_reset(tx);
|
|
|
|
return -ENOBUFS;
|
|
|
|
}
|
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
len = MIN(sdu->len, seg_len(!!ctl_op));
|
|
|
|
memcpy(buf, net_buf_simple_pull_mem(sdu, len), len);
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
BT_DBG("seg %u: %s", seg_o, bt_hex(buf, len));
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2020-07-30 11:29:15 +08:00
|
|
|
tx->seg[seg_o] = buf;
|
|
|
|
|
2017-10-31 16:16:28 +02:00
|
|
|
if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
|
|
|
|
enum bt_mesh_friend_pdu_type type;
|
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
NET_BUF_SIMPLE_DEFINE(seg, 16);
|
|
|
|
seg_tx_buf_build(tx, seg_o, &seg);
|
|
|
|
|
2017-10-31 16:16:28 +02:00
|
|
|
if (seg_o == tx->seg_n) {
|
|
|
|
type = BT_MESH_FRIEND_PDU_COMPLETE;
|
|
|
|
} else {
|
|
|
|
type = BT_MESH_FRIEND_PDU_PARTIAL;
|
|
|
|
}
|
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
if (bt_mesh_friend_enqueue_tx(
|
|
|
|
net_tx, type, ctl_op ? NULL : &tx->seq_auth,
|
|
|
|
tx->seg_n + 1, &seg) &&
|
2017-10-31 16:16:28 +02:00
|
|
|
BT_MESH_ADDR_IS_UNICAST(net_tx->ctx->addr)) {
|
|
|
|
/* PDUs for a specific Friend should only go
|
|
|
|
* out through the Friend Queue.
|
|
|
|
*/
|
2020-03-05 16:10:15 +01:00
|
|
|
k_mem_slab_free(&segs, &buf);
|
2020-07-30 11:29:15 +08:00
|
|
|
tx->seg[seg_o] = NULL;
|
2017-10-31 16:16:28 +02:00
|
|
|
}
|
2020-07-30 11:29:15 +08:00
|
|
|
|
2017-10-31 16:16:28 +02:00
|
|
|
}
|
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
|
|
|
|
2019-08-13 16:29:47 +03:00
|
|
|
/* This can happen if segments only went into the Friend Queue */
|
|
|
|
if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !tx->seg[0]) {
|
|
|
|
seg_tx_reset(tx);
|
|
|
|
|
|
|
|
/* If there was a callback notify sending immediately since
|
|
|
|
* there's no other way to track this (at least currently)
|
|
|
|
* with the Friend Queue.
|
|
|
|
*/
|
2019-08-15 08:54:08 +03:00
|
|
|
send_cb_finalize(cb, cb_data);
|
2020-03-05 16:10:15 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (blocked) {
|
|
|
|
/* Move the sequence number, so we don't end up creating
|
|
|
|
* another segmented transmission with the same SeqZero while
|
|
|
|
* this one is blocked.
|
|
|
|
*/
|
|
|
|
bt_mesh_next_seq();
|
|
|
|
BT_DBG("Blocked.");
|
|
|
|
return 0;
|
2019-08-13 16:29:47 +03:00
|
|
|
}
|
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
seg_tx_send_unacked(tx);
|
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-11-03 09:10:33 +01:00
|
|
|
/* Encrypt the access-layer payload in @p msg in place, using the app or
 * device key resolved for this TX context.
 *
 * The crypto context mirrors the TX context: AKF/dev-key flag, MIC size
 * (aszmic), source/destination addresses, the current sequence number and
 * the TX IV Index.
 *
 * Returns 0 on success or a negative error code from bt_mesh_app_encrypt().
 */
static int trans_encrypt(const struct bt_mesh_net_tx *tx, const uint8_t *key,
			 struct net_buf_simple *msg)
{
	struct bt_mesh_app_crypto_ctx crypto = {
		.dev_key = BT_MESH_IS_DEV_KEY(tx->ctx->app_idx),
		.aszmic = tx->aszmic,
		.src = tx->src,
		.dst = tx->ctx->addr,
		.seq_num = bt_mesh.seq,
		.iv_index = BT_MESH_NET_IVI_TX,
	};

	/* Virtual destinations feed the Label UUID into the nonce/AAD. */
	if (BT_MESH_ADDR_IS_VIRTUAL(tx->ctx->addr)) {
		crypto.ad = bt_mesh_va_label_get(tx->ctx->addr);
	}

	return bt_mesh_app_encrypt(key, &crypto, msg);
}
|
|
|
|
|
|
|
|
/* Send an access-layer message through the transport layer.
 *
 * Validates the SDU and TX context, resolves the encryption key, encrypts
 * the payload in place (the transport MIC is appended into the buffer's
 * tailroom), and then sends it either as a single unsegmented PDU or as a
 * segmented transmission, depending on size and the send_rel flag.
 *
 * @param tx      Network TX context (ctx, sub, src, etc.).
 * @param msg     SDU to send; must have tailroom for the transport MIC.
 * @param cb      Optional send callbacks.
 * @param cb_data User data passed to @p cb.
 *
 * @return 0 on success, or a negative error code (-EINVAL, -EMSGSIZE, a key
 *         resolution error, or an error from send_seg()/send_unseg()).
 */
int bt_mesh_trans_send(struct bt_mesh_net_tx *tx, struct net_buf_simple *msg,
		       const struct bt_mesh_send_cb *cb, void *cb_data)
{
	const uint8_t *key;
	uint8_t aid;
	int err;

	if (msg->len < 1) {
		BT_ERR("Zero-length SDU not allowed");
		return -EINVAL;
	}

	/* Even the largest SDU must leave room for at least the short MIC. */
	if (msg->len > BT_MESH_TX_SDU_MAX - BT_MESH_MIC_SHORT) {
		BT_ERR("Message too big: %u", msg->len);
		return -EMSGSIZE;
	}

	/* The transport MIC is appended in place, so the caller's buffer
	 * must have tailroom for it.
	 */
	if (net_buf_simple_tailroom(msg) < BT_MESH_MIC_SHORT) {
		BT_ERR("Insufficient tailroom for Transport MIC");
		return -EINVAL;
	}

	if (tx->ctx->send_ttl == BT_MESH_TTL_DEFAULT) {
		tx->ctx->send_ttl = bt_mesh_default_ttl_get();
	} else if (tx->ctx->send_ttl > BT_MESH_TTL_MAX) {
		BT_ERR("TTL too large (max 127)");
		return -EINVAL;
	}

	/* SDUs that don't fit in one PDU must go out segmented. */
	if (msg->len > BT_MESH_SDU_UNSEG_MAX) {
		tx->ctx->send_rel = true;
	}

	/* Device-key encrypted messages may only target unicast addresses. */
	if (tx->ctx->addr == BT_MESH_ADDR_UNASSIGNED ||
	    (!BT_MESH_ADDR_IS_UNICAST(tx->ctx->addr) &&
	     BT_MESH_IS_DEV_KEY(tx->ctx->app_idx))) {
		BT_ERR("Invalid destination address");
		return -EINVAL;
	}

	err = bt_mesh_keys_resolve(tx->ctx, &tx->sub, &key, &aid);
	if (err) {
		return err;
	}

	BT_DBG("net_idx 0x%04x app_idx 0x%04x dst 0x%04x", tx->sub->net_idx,
	       tx->ctx->app_idx, tx->ctx->addr);
	BT_DBG("len %u: %s", msg->len, bt_hex(msg->data, msg->len));

	tx->xmit = bt_mesh_net_transmit_get();
	tx->aid = aid;

	/* Use the long (64-bit) MIC only for segmented sends with enough
	 * tailroom for it; otherwise fall back to the short (32-bit) MIC.
	 */
	if (!tx->ctx->send_rel || net_buf_simple_tailroom(msg) < 8) {
		tx->aszmic = 0U;
	} else {
		tx->aszmic = 1U;
	}

	err = trans_encrypt(tx, key, msg);
	if (err) {
		return err;
	}

	if (tx->ctx->send_rel) {
		err = send_seg(tx, msg, cb, cb_data, NULL);
	} else {
		err = send_unseg(tx, msg, cb, cb_data, NULL);
	}

	return err;
}
|
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
static void seg_rx_assemble(struct seg_rx *rx, struct net_buf_simple *buf,
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t aszmic)
|
2020-03-05 16:10:15 +01:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
net_buf_simple_reset(buf);
|
|
|
|
|
|
|
|
for (i = 0; i <= rx->seg_n; i++) {
|
|
|
|
net_buf_simple_add_mem(buf, rx->seg[i],
|
|
|
|
MIN(seg_len(rx->ctl),
|
|
|
|
rx->len - (i * seg_len(rx->ctl))));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Adjust the length to not contain the MIC at the end */
|
|
|
|
if (!rx->ctl) {
|
|
|
|
buf->len -= APP_MIC_LEN(aszmic);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-25 11:03:42 +02:00
|
|
|
/* State handed to sdu_try_decrypt() through the app-key iteration callback:
 * everything needed to (re)assemble and attempt decryption with one
 * candidate key.
 */
struct decrypt_ctx {
	struct bt_mesh_app_crypto_ctx crypto; /* Nonce/AAD parameters */
	struct net_buf_simple *buf;           /* Encrypted SDU (+ MIC) */
	struct net_buf_simple *sdu;           /* Output: decrypted SDU */
	struct seg_rx *seg;                   /* Non-NULL for segmented RX */
};
|
2020-03-05 16:10:15 +01:00
|
|
|
|
2020-08-25 11:03:42 +02:00
|
|
|
/* Key-iteration callback: attempt to decrypt the received SDU with one
 * candidate application (or device) key.
 *
 * For segmented messages the ciphertext is reassembled into ctx->buf first.
 * The output buffer is reset on every attempt so a failed try with one key
 * leaves no partial plaintext behind.
 *
 * Returns 0 when decryption (MIC check) succeeds, negative otherwise.
 */
static int sdu_try_decrypt(struct bt_mesh_net_rx *rx, const uint8_t key[16],
			   void *cb_data)
{
	const struct decrypt_ctx *ctx = cb_data;

	if (ctx->seg) {
		seg_rx_assemble(ctx->seg, ctx->buf, ctx->crypto.aszmic);
	}

	net_buf_simple_reset(ctx->sdu);

	return bt_mesh_app_decrypt(key, &ctx->crypto, ctx->buf, ctx->sdu);
}
|
|
|
|
|
2020-08-25 11:03:42 +02:00
|
|
|
/* Decrypt a received access-layer SDU and deliver it to the model layer.
 *
 * Builds the crypto context from the RX metadata (AKF bit selects device
 * vs. application key; for segmented messages the sequence number comes
 * from the SeqAuth rather than the last segment's sequence), then iterates
 * candidate keys via bt_mesh_app_key_find() with sdu_try_decrypt() as the
 * trial function.
 *
 * @param rx     Network RX context; ctx.app_idx is updated on success.
 * @param hdr    Transport header octet (AKF + AID).
 * @param aszmic MIC size flag (0: 32-bit, 1: 64-bit).
 * @param buf    Encrypted SDU (ignored content if @p seg is set).
 * @param sdu    Scratch buffer receiving the decrypted SDU.
 * @param seg    Segmented RX state, or NULL for unsegmented messages.
 *
 * @return 0 (messages that can't be decrypted are silently dropped).
 */
static int sdu_recv(struct bt_mesh_net_rx *rx, uint8_t hdr, uint8_t aszmic,
		    struct net_buf_simple *buf, struct net_buf_simple *sdu,
		    struct seg_rx *seg)
{
	struct decrypt_ctx ctx = {
		.crypto = {
			.dev_key = !AKF(&hdr),
			.aszmic = aszmic,
			.src = rx->ctx.addr,
			.dst = rx->ctx.recv_dst,
			/* Segmented SDUs authenticate against SeqAuth, not
			 * the sequence number of the final segment.
			 */
			.seq_num = seg ? (seg->seq_auth & 0xffffff) : rx->seq,
			.iv_index = BT_MESH_NET_IVI_RX(rx),
		},
		.buf = buf,
		.sdu = sdu,
		.seg = seg,
	};

	BT_DBG("AKF %u AID 0x%02x", !ctx.crypto.dev_key, AID(&hdr));

	/* A Friend only stores PDUs for its LPNs; don't process locally. */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !rx->local_match) {
		BT_DBG("Ignoring PDU for LPN 0x%04x of this Friend",
		       rx->ctx.recv_dst);
		return 0;
	}

	/* Virtual destinations feed the Label UUID into the nonce/AAD. */
	if (BT_MESH_ADDR_IS_VIRTUAL(rx->ctx.recv_dst)) {
		ctx.crypto.ad = bt_mesh_va_label_get(rx->ctx.recv_dst);
	}

	rx->ctx.app_idx = bt_mesh_app_key_find(ctx.crypto.dev_key, AID(&hdr),
					       rx, sdu_try_decrypt, &ctx);
	if (rx->ctx.app_idx == BT_MESH_KEY_UNUSED) {
		BT_DBG("No matching AppKey");
		return 0;
	}

	BT_DBG("Decrypted (AppIdx: 0x%03x)", rx->ctx.app_idx);

	bt_mesh_model_recv(rx, sdu);

	return 0;
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Find the outgoing segmented transmission that a received Segment
 * Acknowledgment refers to, matching on SeqZero and the acknowledging
 * address (with special handling for on-behalf-of acks from a Friend).
 */
static struct seg_tx *seg_tx_lookup(uint16_t seq_zero, uint8_t obo, uint16_t addr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
		struct seg_tx *cur = &seg_tx[i];

		if ((cur->seq_auth & TRANS_SEQ_ZERO_MASK) != seq_zero) {
			continue;
		}

		if (cur->dst == addr) {
			return cur;
		}

		/* If the expected remote address doesn't match,
		 * but the OBO flag is set and this is the first
		 * acknowledgement, assume it's a Friend that's
		 * responding and therefore accept the message.
		 */
		if (obo &&
		    (cur->nack_count == cur->seg_n + 1 ||
		     cur->ack_src == addr)) {
			cur->ack_src = addr;
			return cur;
		}
	}

	return NULL;
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Handle a received Segment Acknowledgment control message.
 *
 * Parses OBO/SeqZero and the 32-bit ack block, finds the matching outgoing
 * segmented transmission, frees each acknowledged segment and either
 * completes the SDU (all segments acked), cancels it (ack == 0), or
 * schedules an immediate retransmission of the remaining segments.
 *
 * @param rx       Network RX context of the ack.
 * @param hdr      Transport header octet (unused here beyond the opcode).
 * @param buf      Ack payload (SeqZero/OBO + block), at least 6 bytes.
 * @param seq_auth Out: SeqAuth of the acknowledged transmission.
 *
 * @return 0 on success, -EINVAL on malformed or unmatched acks.
 */
static int trans_ack(struct bt_mesh_net_rx *rx, uint8_t hdr,
		     struct net_buf_simple *buf, uint64_t *seq_auth)
{
	struct seg_tx *tx;
	unsigned int bit;
	uint32_t ack;
	uint16_t seq_zero;
	uint8_t obo;

	if (buf->len < 6) {
		BT_ERR("Too short ack message");
		return -EINVAL;
	}

	/* First 16 bits: OBO flag (bit 15) and 13-bit SeqZero (bits 2-14). */
	seq_zero = net_buf_simple_pull_be16(buf);
	obo = seq_zero >> 15;
	seq_zero = (seq_zero >> 2) & TRANS_SEQ_ZERO_MASK;

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->friend_match) {
		BT_DBG("Ack for LPN 0x%04x of this Friend", rx->ctx.recv_dst);
		/* Best effort - we don't have enough info for true SeqAuth */
		*seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_RX(rx), seq_zero);
		return 0;
	}

	ack = net_buf_simple_pull_be32(buf);

	BT_DBG("OBO %u seq_zero 0x%04x ack 0x%08x", obo, seq_zero, ack);

	tx = seg_tx_lookup(seq_zero, obo, rx->ctx.addr);
	if (!tx) {
		BT_WARN("No matching TX context for ack");
		return -EINVAL;
	}

	if (!BT_MESH_ADDR_IS_UNICAST(tx->dst)) {
		BT_ERR("Received ack for group seg");
		return -EINVAL;
	}

	*seq_auth = tx->seq_auth;

	/* An all-zero block means the receiver canceled the SDU. */
	if (!ack) {
		BT_WARN("SDU canceled");
		seg_tx_complete(tx, -ECANCELED);
		return 0;
	}

	if (find_msb_set(ack) - 1 > tx->seg_n) {
		BT_ERR("Too large segment number in ack");
		return -EINVAL;
	}

	/* Release every segment whose bit is set in the ack block. */
	while ((bit = find_lsb_set(ack))) {
		if (tx->seg[bit - 1]) {
			BT_DBG("seg %u/%u acked", bit - 1, tx->seg_n);
			seg_tx_done(tx, bit - 1);
		}

		ack &= ~BIT(bit - 1);
	}

	if (tx->nack_count) {
		/* According to the Bluetooth Mesh Profile specification,
		 * section 3.5.3.3, we should reset the retransmit timer and
		 * retransmit immediately when receiving a valid ack message.
		 * Don't reset the retransmit timer if we didn't finish sending
		 * segments.
		 */
		if (tx->seg_o == 0) {
			k_work_reschedule(&tx->retransmit, K_NO_WAIT);
		}
	} else {
		BT_DBG("SDU TX complete");
		seg_tx_complete(tx, 0);
	}

	return 0;
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Dispatch a received Transport Control PDU to the appropriate handler.
 *
 * Acks and heartbeats are handled even for PDUs a Friend holds on behalf
 * of an LPN; everything else requires a local element match. Friendship
 * opcodes are routed to the Friend role handlers (when not operating as an
 * LPN) or, under CONFIG_BT_MESH_LOW_POWER, to the LPN role handlers.
 *
 * @return 0 or a handler's result; -ENOENT for unknown opcodes.
 */
static int ctl_recv(struct bt_mesh_net_rx *rx, uint8_t hdr,
		    struct net_buf_simple *buf, uint64_t *seq_auth)
{
	uint8_t ctl_op = TRANS_CTL_OP(&hdr);

	BT_DBG("OpCode 0x%02x len %u", ctl_op, buf->len);

	switch (ctl_op) {
	case TRANS_CTL_OP_ACK:
		return trans_ack(rx, hdr, buf, seq_auth);
	case TRANS_CTL_OP_HEARTBEAT:
		return bt_mesh_hb_recv(rx, buf);
	}

	/* Only acks and heartbeats may need processing without local_match */
	if (!rx->local_match) {
		return 0;
	}

	/* Friend role opcodes; not valid while we're acting as an LPN. */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && !bt_mesh_lpn_established()) {
		switch (ctl_op) {
		case TRANS_CTL_OP_FRIEND_POLL:
			return bt_mesh_friend_poll(rx, buf);
		case TRANS_CTL_OP_FRIEND_REQ:
			return bt_mesh_friend_req(rx, buf);
		case TRANS_CTL_OP_FRIEND_CLEAR:
			return bt_mesh_friend_clear(rx, buf);
		case TRANS_CTL_OP_FRIEND_CLEAR_CFM:
			return bt_mesh_friend_clear_cfm(rx, buf);
		case TRANS_CTL_OP_FRIEND_SUB_ADD:
			return bt_mesh_friend_sub_add(rx, buf);
		case TRANS_CTL_OP_FRIEND_SUB_REM:
			return bt_mesh_friend_sub_rem(rx, buf);
		}
	}

#if defined(CONFIG_BT_MESH_LOW_POWER)
	if (ctl_op == TRANS_CTL_OP_FRIEND_OFFER) {
		return bt_mesh_lpn_friend_offer(rx, buf);
	}

	if (rx->ctx.addr == bt_mesh.lpn.frnd) {
		/* Clear Confirm may arrive on master credentials. */
		if (ctl_op == TRANS_CTL_OP_FRIEND_CLEAR_CFM) {
			return bt_mesh_lpn_friend_clear_cfm(rx, buf);
		}

		if (!rx->friend_cred) {
			BT_WARN("Message from friend with wrong credentials");
			return -EINVAL;
		}

		switch (ctl_op) {
		case TRANS_CTL_OP_FRIEND_UPDATE:
			return bt_mesh_lpn_friend_update(rx, buf);
		case TRANS_CTL_OP_FRIEND_SUB_CFM:
			return bt_mesh_lpn_friend_sub_cfm(rx, buf);
		}
	}
#endif /* CONFIG_BT_MESH_LOW_POWER */

	BT_WARN("Unhandled TransOpCode 0x%02x", ctl_op);

	return -ENOENT;
}
|
|
|
|
|
2017-10-31 16:16:28 +02:00
|
|
|
static int trans_unseg(struct net_buf_simple *buf, struct bt_mesh_net_rx *rx,
|
2020-05-27 11:26:57 -05:00
|
|
|
uint64_t *seq_auth)
|
2017-06-16 12:30:54 +03:00
|
|
|
{
|
2020-08-25 11:03:42 +02:00
|
|
|
NET_BUF_SIMPLE_DEFINE_STATIC(sdu, BT_MESH_SDU_UNSEG_MAX);
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t hdr;
|
2017-06-16 12:30:54 +03:00
|
|
|
|
|
|
|
BT_DBG("AFK %u AID 0x%02x", AKF(buf->data), AID(buf->data));
|
|
|
|
|
|
|
|
if (buf->len < 1) {
|
|
|
|
BT_ERR("Too small unsegmented PDU");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-09-14 17:00:11 +08:00
|
|
|
if (bt_mesh_rpl_check(rx, NULL)) {
|
2017-11-04 22:50:56 +02:00
|
|
|
BT_WARN("Replay: src 0x%04x dst 0x%04x seq 0x%06x",
|
2018-05-10 16:44:10 +03:00
|
|
|
rx->ctx.addr, rx->ctx.recv_dst, rx->seq);
|
2017-11-04 22:50:56 +02:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
hdr = net_buf_simple_pull_u8(buf);
|
|
|
|
|
|
|
|
if (rx->ctl) {
|
2017-10-31 16:16:28 +02:00
|
|
|
return ctl_recv(rx, hdr, buf, seq_auth);
|
2020-08-25 11:03:42 +02:00
|
|
|
}
|
2017-11-02 13:30:46 +02:00
|
|
|
|
2020-08-25 11:03:42 +02:00
|
|
|
if (buf->len < 1 + APP_MIC_LEN(0)) {
|
|
|
|
BT_ERR("Too short SDU + MIC");
|
|
|
|
return -EINVAL;
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
2020-08-25 11:03:42 +02:00
|
|
|
|
|
|
|
/* Adjust the length to not contain the MIC at the end */
|
|
|
|
buf->len -= APP_MIC_LEN(0);
|
|
|
|
|
|
|
|
return sdu_recv(rx, hdr, 0, buf, &sdu, NULL);
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Compute the Segment Acknowledgment timer duration (in milliseconds) for
 * an incoming segmented message: a TTL-scaled base, plus a per-segment
 * component for every segment still missing, clamped to a 400 ms minimum.
 */
static inline int32_t ack_timeout(struct seg_rx *rx)
{
	int32_t to;
	uint8_t ttl;

	if (rx->ttl == BT_MESH_TTL_DEFAULT) {
		ttl = bt_mesh_default_ttl_get();
	} else {
		ttl = rx->ttl;
	}

	/* The acknowledgment timer shall be set to a minimum of
	 * 150 + 50 * TTL milliseconds.
	 */
	to = CONFIG_BT_MESH_SEG_ACK_BASE_TIMEOUT +
	     (ttl * (int32_t)CONFIG_BT_MESH_SEG_ACK_PER_HOP_TIMEOUT);

	/* Add timeout for every not yet received segment. */
	to += ((rx->seg_n + 1) - popcount(rx->block)) *
	      (int32_t)CONFIG_BT_MESH_SEG_ACK_PER_SEGMENT_TIMEOUT;

	/* Make sure we don't send more frequently than the duration for
	 * each packet (default is 400ms).
	 */
	return MAX(to, 400);
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Send a Transport Control PDU with the given opcode and payload.
 *
 * Control PDUs are sent unencrypted at the application layer (app_idx is
 * forced to BT_MESH_KEY_UNUSED) and are segmented only if the payload
 * exceeds BT_MESH_SDU_UNSEG_MAX or the caller requested reliable sending.
 *
 * @param tx       Network TX context.
 * @param ctl_op   Transport control opcode.
 * @param data     Payload; wrapped in a net_buf_simple without copying.
 * @param data_len Payload length.
 * @param cb       Optional send callbacks.
 * @param cb_data  User data passed to @p cb.
 *
 * @return 0 on success or a negative error code.
 */
int bt_mesh_ctl_send(struct bt_mesh_net_tx *tx, uint8_t ctl_op, void *data,
		     size_t data_len,
		     const struct bt_mesh_send_cb *cb, void *cb_data)
{
	struct net_buf_simple buf;

	if (tx->ctx->send_ttl == BT_MESH_TTL_DEFAULT) {
		tx->ctx->send_ttl = bt_mesh_default_ttl_get();
	} else if (tx->ctx->send_ttl > BT_MESH_TTL_MAX) {
		BT_ERR("TTL too large (max 127)");
		return -EINVAL;
	}

	net_buf_simple_init_with_data(&buf, data, data_len);

	if (data_len > BT_MESH_SDU_UNSEG_MAX) {
		tx->ctx->send_rel = true;
	}

	/* Control messages are never app-key encrypted. */
	tx->ctx->app_idx = BT_MESH_KEY_UNUSED;

	if (tx->ctx->addr == BT_MESH_ADDR_UNASSIGNED ||
	    BT_MESH_ADDR_IS_VIRTUAL(tx->ctx->addr)) {
		BT_ERR("Invalid destination address");
		return -EINVAL;
	}

	BT_DBG("src 0x%04x dst 0x%04x ttl 0x%02x ctl 0x%02x", tx->src,
	       tx->ctx->addr, tx->ctx->send_ttl, ctl_op);
	BT_DBG("len %zu: %s", data_len, bt_hex(data, data_len));

	if (tx->ctx->send_rel) {
		return send_seg(tx, &buf, cb, cb_data, &ctl_op);
	} else {
		return send_unseg(tx, &buf, cb, cb_data, &ctl_op);
	}
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
static int send_ack(struct bt_mesh_subnet *sub, uint16_t src, uint16_t dst,
|
|
|
|
uint8_t ttl, uint64_t *seq_auth, uint32_t block, uint8_t obo)
|
2017-06-16 12:30:54 +03:00
|
|
|
{
|
|
|
|
struct bt_mesh_msg_ctx ctx = {
|
|
|
|
.net_idx = sub->net_idx,
|
|
|
|
.app_idx = BT_MESH_KEY_UNUSED,
|
|
|
|
.addr = dst,
|
|
|
|
.send_ttl = ttl,
|
|
|
|
};
|
|
|
|
struct bt_mesh_net_tx tx = {
|
|
|
|
.sub = sub,
|
|
|
|
.ctx = &ctx,
|
2017-10-31 16:16:28 +02:00
|
|
|
.src = obo ? bt_mesh_primary_addr() : src,
|
2017-11-09 19:58:25 +02:00
|
|
|
.xmit = bt_mesh_net_transmit_get(),
|
2017-06-16 12:30:54 +03:00
|
|
|
};
|
2020-05-27 11:26:57 -05:00
|
|
|
uint16_t seq_zero = *seq_auth & TRANS_SEQ_ZERO_MASK;
|
|
|
|
uint8_t buf[6];
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2017-11-28 09:48:48 +02:00
|
|
|
BT_DBG("SeqZero 0x%04x Block 0x%08x OBO %u", seq_zero, block, obo);
|
2017-06-16 12:30:54 +03:00
|
|
|
|
|
|
|
if (bt_mesh_lpn_established()) {
|
|
|
|
BT_WARN("Not sending ack when LPN is enabled");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This can happen if the segmented message was destined for a group
|
|
|
|
* or virtual address.
|
|
|
|
*/
|
|
|
|
if (!BT_MESH_ADDR_IS_UNICAST(src)) {
|
2020-02-26 12:54:15 +01:00
|
|
|
BT_DBG("Not sending ack for non-unicast address");
|
2017-06-16 12:30:54 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-10-31 16:16:28 +02:00
|
|
|
sys_put_be16(((seq_zero << 2) & 0x7ffc) | (obo << 15), buf);
|
2017-06-16 12:30:54 +03:00
|
|
|
sys_put_be32(block, &buf[2]);
|
|
|
|
|
2017-10-31 16:16:28 +02:00
|
|
|
return bt_mesh_ctl_send(&tx, TRANS_CTL_OP_ACK, buf, sizeof(buf),
|
2020-03-06 13:07:22 +01:00
|
|
|
NULL, NULL);
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
|
|
|
|
2018-01-28 12:20:16 -08:00
|
|
|
static void seg_rx_reset(struct seg_rx *rx, bool full_reset)
|
2017-06-16 12:30:54 +03:00
|
|
|
{
|
2020-03-05 16:10:15 +01:00
|
|
|
int i;
|
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
BT_DBG("rx %p", rx);
|
|
|
|
|
2021-03-29 09:03:02 -05:00
|
|
|
/* If this fails, the handler will exit early on the next execution, as
|
|
|
|
* it checks rx->in_use.
|
|
|
|
*/
|
|
|
|
(void)k_work_cancel_delayable(&rx->ack);
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2017-10-31 16:16:28 +02:00
|
|
|
if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->obo &&
|
|
|
|
rx->block != BLOCK_COMPLETE(rx->seg_n)) {
|
|
|
|
BT_WARN("Clearing incomplete buffers from Friend queue");
|
|
|
|
bt_mesh_friend_clear_incomplete(rx->sub, rx->src, rx->dst,
|
|
|
|
&rx->seq_auth);
|
|
|
|
}
|
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
for (i = 0; i <= rx->seg_n; i++) {
|
2020-04-05 10:10:45 -07:00
|
|
|
if (!rx->seg[i]) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2020-03-05 16:10:15 +01:00
|
|
|
k_mem_slab_free(&segs, &rx->seg[i]);
|
|
|
|
rx->seg[i] = NULL;
|
|
|
|
}
|
|
|
|
|
2018-11-29 11:23:03 -08:00
|
|
|
rx->in_use = 0U;
|
2018-01-28 12:20:16 -08:00
|
|
|
|
|
|
|
/* We don't always reset these values since we need to be able to
|
|
|
|
* send an ack if we receive a segment after we've already received
|
|
|
|
* the full SDU.
|
|
|
|
*/
|
|
|
|
if (full_reset) {
|
2018-11-29 11:23:03 -08:00
|
|
|
rx->seq_auth = 0U;
|
2018-01-28 12:20:16 -08:00
|
|
|
rx->sub = NULL;
|
|
|
|
rx->src = BT_MESH_ADDR_UNASSIGNED;
|
|
|
|
rx->dst = BT_MESH_ADDR_UNASSIGNED;
|
|
|
|
}
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void seg_ack(struct k_work *work)
|
|
|
|
{
|
2022-01-19 12:07:51 +08:00
|
|
|
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
|
|
|
|
struct seg_rx *rx = CONTAINER_OF(dwork, struct seg_rx, ack);
|
2020-12-10 14:24:31 +01:00
|
|
|
int32_t timeout;
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2021-03-29 09:03:02 -05:00
|
|
|
if (!rx->in_use || rx->block == BLOCK_COMPLETE(rx->seg_n)) {
|
|
|
|
/* Cancellation of this timer may have failed. If it fails as
|
|
|
|
* part of seg_reset, in_use will be false.
|
|
|
|
* If it fails as part of the processing of a fully received
|
|
|
|
* SDU, the ack is already being sent from the receive handler,
|
|
|
|
* and the timer based ack sending can be ignored.
|
|
|
|
*/
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
BT_DBG("rx %p", rx);
|
|
|
|
|
2020-04-06 13:56:14 +02:00
|
|
|
if (k_uptime_get_32() - rx->last > (60 * MSEC_PER_SEC)) {
|
2017-06-16 12:30:54 +03:00
|
|
|
BT_WARN("Incomplete timer expired");
|
2018-02-14 15:48:19 +01:00
|
|
|
seg_rx_reset(rx, false);
|
2018-01-02 11:26:08 +01:00
|
|
|
|
|
|
|
if (IS_ENABLED(CONFIG_BT_TESTING)) {
|
|
|
|
bt_test_mesh_trans_incomp_timer_exp();
|
|
|
|
}
|
|
|
|
|
2017-06-16 12:30:54 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-10-31 16:16:28 +02:00
|
|
|
send_ack(rx->sub, rx->dst, rx->src, rx->ttl, &rx->seq_auth,
|
|
|
|
rx->block, rx->obo);
|
2017-06-16 12:30:54 +03:00
|
|
|
|
2020-12-10 14:24:31 +01:00
|
|
|
timeout = ack_timeout(rx);
|
2021-03-29 09:03:02 -05:00
|
|
|
k_work_schedule(&rx->ack, K_MSEC(timeout));
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
/* Check whether a segmented SDU whose highest segment index is 'seg_n' can
 * be received with the configured segment pool (BT_MESH_RX_SEG_MAX).
 *
 * NOTE(review): 'ctl' is not used in this implementation; it appears to be
 * kept for call-site symmetry in the RX path — confirm before removing.
 */
static inline bool sdu_len_is_ok(bool ctl, uint8_t seg_n)
{
	return (seg_n < BT_MESH_RX_SEG_MAX);
}
|
|
|
|
|
|
|
|
static struct seg_rx *seg_rx_find(struct bt_mesh_net_rx *net_rx,
|
2020-05-27 11:26:57 -05:00
|
|
|
const uint64_t *seq_auth)
|
2017-06-16 12:30:54 +03:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
|
|
|
|
struct seg_rx *rx = &seg_rx[i];
|
|
|
|
|
2018-05-10 16:44:10 +03:00
|
|
|
if (rx->src != net_rx->ctx.addr ||
|
|
|
|
rx->dst != net_rx->ctx.recv_dst) {
|
2017-06-16 12:30:54 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-01-27 11:59:10 -08:00
|
|
|
/* Return newer RX context in addition to an exact match, so
|
|
|
|
* the calling function can properly discard an old SeqAuth.
|
|
|
|
*/
|
|
|
|
if (rx->seq_auth >= *seq_auth) {
|
2017-06-16 12:30:54 +03:00
|
|
|
return rx;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rx->in_use) {
|
|
|
|
BT_WARN("Duplicate SDU from src 0x%04x",
|
|
|
|
net_rx->ctx.addr);
|
|
|
|
|
|
|
|
/* Clear out the old context since the sender
|
|
|
|
* has apparently started sending a new SDU.
|
|
|
|
*/
|
2018-01-28 12:20:16 -08:00
|
|
|
seg_rx_reset(rx, true);
|
2017-10-31 16:16:28 +02:00
|
|
|
|
|
|
|
/* Return non-match so caller can re-allocate */
|
|
|
|
return NULL;
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Validate that an incoming segment is consistent with the ongoing RX
 * session it was matched to: same transport header and segment count, same
 * source/destination pair, and the same CTL (control vs. access) type.
 *
 * Returns true when the segment belongs to the session, false (with an
 * error log) otherwise.
 */
static bool seg_rx_is_valid(struct seg_rx *rx, struct bt_mesh_net_rx *net_rx,
			    const uint8_t *hdr, uint8_t seg_n)
{
	if (rx->hdr != *hdr || rx->seg_n != seg_n) {
		BT_ERR("Invalid segment for ongoing session");
		return false;
	}

	if (rx->src != net_rx->ctx.addr || rx->dst != net_rx->ctx.recv_dst) {
		BT_ERR("Invalid source or destination for segment");
		return false;
	}

	if (rx->ctl != net_rx->ctl) {
		BT_ERR("Inconsistent CTL in segment");
		return false;
	}

	return true;
}
|
|
|
|
|
|
|
|
static struct seg_rx *seg_rx_alloc(struct bt_mesh_net_rx *net_rx,
|
2020-05-27 11:26:57 -05:00
|
|
|
const uint8_t *hdr, const uint64_t *seq_auth,
|
|
|
|
uint8_t seg_n)
|
2017-06-16 12:30:54 +03:00
|
|
|
{
|
2020-04-05 10:10:45 -07:00
|
|
|
int i;
|
2020-03-05 16:10:15 +01:00
|
|
|
|
|
|
|
/* No race condition on this check, as this function only executes in
|
|
|
|
* the collaborative Bluetooth rx thread:
|
|
|
|
*/
|
2020-04-05 10:10:45 -07:00
|
|
|
if (k_mem_slab_num_free_get(&segs) < 1) {
|
2020-03-05 16:10:15 +01:00
|
|
|
BT_WARN("Not enough segments for incoming message");
|
|
|
|
return NULL;
|
|
|
|
}
|
2017-06-16 12:30:54 +03:00
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
|
|
|
|
struct seg_rx *rx = &seg_rx[i];
|
|
|
|
|
|
|
|
if (rx->in_use) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-11-29 11:23:03 -08:00
|
|
|
rx->in_use = 1U;
|
2017-06-16 12:30:54 +03:00
|
|
|
rx->sub = net_rx->sub;
|
|
|
|
rx->ctl = net_rx->ctl;
|
|
|
|
rx->seq_auth = *seq_auth;
|
|
|
|
rx->seg_n = seg_n;
|
|
|
|
rx->hdr = *hdr;
|
|
|
|
rx->ttl = net_rx->ctx.send_ttl;
|
|
|
|
rx->src = net_rx->ctx.addr;
|
2018-05-10 16:44:10 +03:00
|
|
|
rx->dst = net_rx->ctx.recv_dst;
|
2018-11-29 11:23:03 -08:00
|
|
|
rx->block = 0U;
|
2017-06-16 12:30:54 +03:00
|
|
|
|
|
|
|
BT_DBG("New RX context. Block Complete 0x%08x",
|
|
|
|
BLOCK_COMPLETE(seg_n));
|
|
|
|
|
|
|
|
return rx;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-10-31 16:16:28 +02:00
|
|
|
/* Process one segment of an incoming segmented Transport PDU.
 *
 * @param buf       Transport payload with network headers already removed;
 *                  consumed down to the raw segment data.
 * @param net_rx    Network-layer receive context.
 * @param pdu_type  Out: PARTIAL while segments are still missing, COMPLETE
 *                  once the full SDU has been reassembled.
 * @param seq_auth  Out: SeqAuth of the SDU (IV index + SEQ of segment 0).
 * @param seg_count Out: total number of segments in the SDU (SegN + 1).
 *
 * @return 0 on success, -EALREADY for duplicate segments/SDUs, or another
 *         negative error code.
 */
static int trans_seg(struct net_buf_simple *buf, struct bt_mesh_net_rx *net_rx,
		     enum bt_mesh_friend_pdu_type *pdu_type, uint64_t *seq_auth,
		     uint8_t *seg_count)
{
	struct bt_mesh_rpl *rpl = NULL;
	struct seg_rx *rx;
	uint8_t *hdr = buf->data;
	uint16_t seq_zero;
	uint32_t auth_seqnum;
	uint8_t seg_n;
	uint8_t seg_o;
	int err;

	/* Minimum: 1 byte header + 3 bytes SeqZero/SegO/SegN + payload. */
	if (buf->len < 5) {
		BT_ERR("Too short segmented message (len %u)", buf->len);
		return -EINVAL;
	}

	if (bt_mesh_rpl_check(net_rx, &rpl)) {
		BT_WARN("Replay: src 0x%04x dst 0x%04x seq 0x%06x",
			net_rx->ctx.addr, net_rx->ctx.recv_dst, net_rx->seq);
		return -EINVAL;
	}

	BT_DBG("ASZMIC %u AKF %u AID 0x%02x", ASZMIC(hdr), AKF(hdr), AID(hdr));

	net_buf_simple_pull(buf, 1);

	/* Unpack the bit-packed SeqZero (13 bits), SegO and SegN (5 bits
	 * each) fields from the segmentation header.
	 */
	seq_zero = net_buf_simple_pull_be16(buf);
	seg_o = (seq_zero & 0x03) << 3;
	seq_zero = (seq_zero >> 2) & TRANS_SEQ_ZERO_MASK;
	seg_n = net_buf_simple_pull_u8(buf);
	seg_o |= seg_n >> 5;
	seg_n &= 0x1f;

	BT_DBG("SeqZero 0x%04x SegO %u SegN %u", seq_zero, seg_o, seg_n);

	if (seg_o > seg_n) {
		BT_ERR("SegO greater than SegN (%u > %u)", seg_o, seg_n);
		return -EINVAL;
	}

	/* According to Mesh 1.0 specification:
	 * "The SeqAuth is composed of the IV Index and the sequence number
	 * (SEQ) of the first segment"
	 *
	 * Therefore we need to calculate very first SEQ in order to find
	 * seqAuth. We can calculate as below:
	 *
	 * SEQ(0) = SEQ(n) - (delta between seqZero and SEQ(n) by looking into
	 * 14 least significant bits of SEQ(n))
	 *
	 * Mentioned delta shall be >= 0, if it is not then seq_auth will
	 * be broken and it will be verified by the code below.
	 */
	*seq_auth = SEQ_AUTH(BT_MESH_NET_IVI_RX(net_rx),
			     (net_rx->seq -
			      ((((net_rx->seq & BIT_MASK(14)) - seq_zero)) &
			       BIT_MASK(13))));
	auth_seqnum = *seq_auth & BIT_MASK(24);
	*seg_count = seg_n + 1;

	/* Look for old RX sessions */
	rx = seg_rx_find(net_rx, seq_auth);
	if (rx) {
		/* Discard old SeqAuth packet */
		if (rx->seq_auth > *seq_auth) {
			BT_WARN("Ignoring old SeqAuth");
			return -EINVAL;
		}

		if (!seg_rx_is_valid(rx, net_rx, hdr, seg_n)) {
			return -EINVAL;
		}

		if (rx->in_use) {
			BT_DBG("Existing RX context. Block 0x%08x", rx->block);
			goto found_rx;
		}

		if (rx->block == BLOCK_COMPLETE(rx->seg_n)) {
			BT_DBG("Got segment for already complete SDU");

			/* Re-ack so the sender stops retransmitting. */
			send_ack(net_rx->sub, net_rx->ctx.recv_dst,
				 net_rx->ctx.addr, net_rx->ctx.send_ttl,
				 seq_auth, rx->block, rx->obo);

			if (rpl) {
				bt_mesh_rpl_update(rpl, net_rx);
			}

			return -EALREADY;
		}

		/* We ignore instead of sending block ack 0 since the
		 * ack timer is always smaller than the incomplete
		 * timer, i.e. the sender is misbehaving.
		 */
		BT_WARN("Got segment for canceled SDU");
		return -EINVAL;
	}

	/* Bail out early if we're not ready to receive such a large SDU */
	if (!sdu_len_is_ok(net_rx->ctl, seg_n)) {
		BT_ERR("Too big incoming SDU length");
		send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
			 net_rx->ctx.send_ttl, seq_auth, 0,
			 net_rx->friend_match);
		return -EMSGSIZE;
	}

	/* Verify early that there will be space in the Friend Queue(s) in
	 * case this message is destined to an LPN of ours.
	 */
	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) &&
	    net_rx->friend_match && !net_rx->local_match &&
	    !bt_mesh_friend_queue_has_space(net_rx->sub->net_idx,
					    net_rx->ctx.addr,
					    net_rx->ctx.recv_dst, seq_auth,
					    *seg_count)) {
		BT_ERR("No space in Friend Queue for %u segments", *seg_count);
		send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
			 net_rx->ctx.send_ttl, seq_auth, 0,
			 net_rx->friend_match);
		return -ENOBUFS;
	}

	/* Keep track of the received SeqAuth values received from this address
	 * and discard segmented messages that are not newer, as described in
	 * the Bluetooth Mesh specification section 3.5.3.4.
	 *
	 * The logic on the first segmented receive is a bit special, since the
	 * initial value of rpl->seg is 0, which would normally fail the
	 * comparison check with auth_seqnum:
	 * - If this is the first time we receive from this source, rpl->src
	 *   will be 0, and we can skip this check.
	 * - If this is the first time we receive from this source on the new IV
	 *   index, rpl->old_iv will be set, and the check is also skipped.
	 * - If this is the first segmented message on the new IV index, but we
	 *   have received an unsegmented message already, the unsegmented
	 *   message will have reset rpl->seg to 0, and this message's SeqAuth
	 *   cannot be zero.
	 */
	if (rpl && rpl->src && auth_seqnum <= rpl->seg &&
	    (!rpl->old_iv || net_rx->old_iv)) {
		BT_WARN("Ignoring old SeqAuth 0x%06x", auth_seqnum);
		return -EALREADY;
	}

	/* Look for free slot for a new RX session */
	rx = seg_rx_alloc(net_rx, hdr, seq_auth, seg_n);
	if (!rx) {
		/* Warn but don't cancel since the existing slots will
		 * eventually be freed up and we'll be able to process
		 * this one.
		 */
		BT_WARN("No free slots for new incoming segmented messages");
		return -ENOMEM;
	}

	rx->obo = net_rx->friend_match;

found_rx:
	if (BIT(seg_o) & rx->block) {
		BT_DBG("Received already received fragment");
		return -EALREADY;
	}

	/* All segments, except the last one, must either have 8 bytes of
	 * payload (for 64bit Net MIC) or 12 bytes of payload (for 32bit
	 * Net MIC).
	 */
	if (seg_o == seg_n) {
		/* Set the expected final buffer length */
		rx->len = seg_n * seg_len(rx->ctl) + buf->len;
		BT_DBG("Target len %u * %u + %u = %u", seg_n, seg_len(rx->ctl),
		       buf->len, rx->len);

		if (rx->len > BT_MESH_RX_SDU_MAX) {
			BT_ERR("Too large SDU len");
			send_ack(net_rx->sub, net_rx->ctx.recv_dst,
				 net_rx->ctx.addr, net_rx->ctx.send_ttl,
				 seq_auth, 0, rx->obo);
			seg_rx_reset(rx, true);
			return -EMSGSIZE;
		}
	} else {
		if (buf->len != seg_len(rx->ctl)) {
			BT_ERR("Incorrect segment size for message type");
			return -EINVAL;
		}
	}

	/* Reset the Incomplete Timer */
	rx->last = k_uptime_get_32();

	if (!bt_mesh_lpn_established()) {
		int32_t timeout = ack_timeout(rx);
		/* Should only start ack timer if it isn't running already: */
		k_work_schedule(&rx->ack, K_MSEC(timeout));
	}

	/* Allocated segment here */
	err = k_mem_slab_alloc(&segs, &rx->seg[seg_o], K_NO_WAIT);
	if (err) {
		BT_WARN("Unable allocate buffer for Seg %u", seg_o);
		return -ENOBUFS;
	}

	memcpy(rx->seg[seg_o], buf->data, buf->len);

	BT_DBG("Received %u/%u", seg_o, seg_n);

	/* Mark segment as received */
	rx->block |= BIT(seg_o);

	if (rx->block != BLOCK_COMPLETE(seg_n)) {
		*pdu_type = BT_MESH_FRIEND_PDU_PARTIAL;
		return 0;
	}

	BT_DBG("Complete SDU");

	if (rpl) {
		bt_mesh_rpl_update(rpl, net_rx);
		/* Update the seg, unless it has already been surpassed:
		 * This needs to happen after rpl_update to ensure that the IV
		 * update reset logic inside rpl_update doesn't overwrite the
		 * change.
		 */
		rpl->seg = MAX(rpl->seg, auth_seqnum);
	}

	*pdu_type = BT_MESH_FRIEND_PDU_COMPLETE;

	/* If this fails, the work handler will either exit early because the
	 * block is fully received, or rx->in_use is false.
	 */
	(void)k_work_cancel_delayable(&rx->ack);
	send_ack(net_rx->sub, net_rx->ctx.recv_dst, net_rx->ctx.addr,
		 net_rx->ctx.send_ttl, seq_auth, rx->block, rx->obo);

	if (net_rx->ctl) {
		NET_BUF_SIMPLE_DEFINE(sdu, BT_MESH_RX_CTL_MAX);
		seg_rx_assemble(rx, &sdu, 0U);
		err = ctl_recv(net_rx, *hdr, &sdu, seq_auth);
	} else if (rx->len < 1 + APP_MIC_LEN(ASZMIC(hdr))) {
		BT_ERR("Too short SDU + MIC");
		err = -EINVAL;
	} else {
		NET_BUF_SIMPLE_DEFINE_STATIC(seg_buf, BT_MESH_RX_SDU_MAX);
		struct net_buf_simple sdu;

		/* Decrypting in place to avoid creating two assembly buffers.
		 * We'll reassemble the buffer from the segments before each
		 * decryption attempt.
		 */
		net_buf_simple_init(&seg_buf, 0);
		net_buf_simple_init_with_data(
			&sdu, seg_buf.data, rx->len - APP_MIC_LEN(ASZMIC(hdr)));

		err = sdu_recv(net_rx, *hdr, ASZMIC(hdr), &seg_buf, &sdu, rx);
	}

	/* Partial reset: keep identity fields so late duplicates get acked. */
	seg_rx_reset(rx, false);

	return err;
}
|
|
|
|
|
|
|
|
/* Entry point for all incoming transport PDUs from the network layer.
 *
 * Strips the network header, dispatches to the segmented or unsegmented
 * receive path, notifies the LPN state machine, and — when acting as a
 * Friend for the destination — enqueues the PDU for later delivery.
 *
 * @param buf Full network PDU; restored to its saved state before return so
 *            it can be placed in the Friend Queue.
 * @param rx  Network receive context; rx->friend_match is filled in here.
 *
 * @return 0 on success or a negative error code from the receive path.
 */
int bt_mesh_trans_recv(struct net_buf_simple *buf, struct bt_mesh_net_rx *rx)
{
	uint64_t seq_auth = TRANS_SEQ_AUTH_NVAL;
	enum bt_mesh_friend_pdu_type pdu_type = BT_MESH_FRIEND_PDU_SINGLE;
	struct net_buf_simple_state state;
	uint8_t seg_count = 0;
	int err;

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND)) {
		rx->friend_match = bt_mesh_friend_match(rx->sub->net_idx,
							rx->ctx.recv_dst);
	} else {
		rx->friend_match = false;
	}

	BT_DBG("src 0x%04x dst 0x%04x seq 0x%08x friend_match %u",
	       rx->ctx.addr, rx->ctx.recv_dst, rx->seq, rx->friend_match);

	/* Remove network headers */
	net_buf_simple_pull(buf, BT_MESH_NET_HDR_LEN);

	BT_DBG("Payload %s", bt_hex(buf->data, buf->len));

	if (IS_ENABLED(CONFIG_BT_TESTING)) {
		bt_test_mesh_net_recv(rx->ctx.recv_ttl, rx->ctl, rx->ctx.addr,
				      rx->ctx.recv_dst, buf->data, buf->len);
	}

	/* If LPN mode is enabled messages are only accepted when we've
	 * requested the Friend to send them. The messages must also
	 * be encrypted using the Friend Credentials.
	 */
	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER) &&
	    bt_mesh_lpn_established() && rx->net_if == BT_MESH_NET_IF_ADV &&
	    (!bt_mesh_lpn_waiting_update() || !rx->friend_cred)) {
		BT_WARN("Ignoring unexpected message in Low Power mode");
		return -EAGAIN;
	}

	/* Save the app-level state so the buffer can later be placed in
	 * the Friend Queue.
	 */
	net_buf_simple_save(buf, &state);

	if (SEG(buf->data)) {
		/* Segmented messages must match a local element or an
		 * LPN of this Friend.
		 */
		if (!rx->local_match && !rx->friend_match) {
			return 0;
		}

		err = trans_seg(buf, rx, &pdu_type, &seq_auth, &seg_count);
	} else {
		seg_count = 1;
		err = trans_unseg(buf, rx, &seq_auth);
	}

	/* Notify LPN state machine so a Friend Poll will be sent. */
	if (IS_ENABLED(CONFIG_BT_MESH_LOW_POWER)) {
		bt_mesh_lpn_msg_received(rx);
	}

	net_buf_simple_restore(buf, &state);

	if (IS_ENABLED(CONFIG_BT_MESH_FRIEND) && rx->friend_match && !err) {
		/* TRANS_SEQ_AUTH_NVAL marks "no SeqAuth" (unsegmented PDU);
		 * pass NULL in that case.
		 */
		if (seq_auth == TRANS_SEQ_AUTH_NVAL) {
			bt_mesh_friend_enqueue_rx(rx, pdu_type, NULL,
						  seg_count, buf);
		} else {
			bt_mesh_friend_enqueue_rx(rx, pdu_type, &seq_auth,
						  seg_count, buf);
		}
	}

	return err;
}
|
|
|
|
|
|
|
|
void bt_mesh_rx_reset(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
BT_DBG("");
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
|
2018-01-28 12:20:16 -08:00
|
|
|
seg_rx_reset(&seg_rx[i], true);
|
2017-11-20 14:51:32 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-06 09:56:56 +01:00
|
|
|
/* Schedule the virtual address (Label UUID) table to be persisted;
 * the actual write is performed later by the settings work handler.
 */
static void store_va_label(void)
{
	bt_mesh_settings_store_schedule(BT_MESH_SETTINGS_VA_PENDING);
}
|
|
|
|
|
2020-08-18 10:53:15 +02:00
|
|
|
void bt_mesh_trans_reset(void)
|
2017-11-20 14:51:32 +02:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2020-08-18 10:53:15 +02:00
|
|
|
bt_mesh_rx_reset();
|
|
|
|
|
2017-11-20 14:51:32 +02:00
|
|
|
BT_DBG("");
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
|
|
|
|
seg_tx_reset(&seg_tx[i]);
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
2020-08-18 10:53:15 +02:00
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(virtual_addrs); i++) {
|
|
|
|
if (virtual_addrs[i].ref) {
|
|
|
|
virtual_addrs[i].ref = 0U;
|
|
|
|
virtual_addrs[i].changed = 1U;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
bt_mesh_rpl_clear();
|
|
|
|
|
|
|
|
if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
|
2021-01-06 09:56:56 +01:00
|
|
|
store_va_label();
|
2020-08-18 10:53:15 +02:00
|
|
|
}
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void bt_mesh_trans_init(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(seg_tx); i++) {
|
2021-03-29 09:03:02 -05:00
|
|
|
k_work_init_delayable(&seg_tx[i].retransmit, seg_retransmit);
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(seg_rx); i++) {
|
2021-03-29 09:03:02 -05:00
|
|
|
k_work_init_delayable(&seg_rx[i].ack, seg_ack);
|
2017-06-16 12:30:54 +03:00
|
|
|
}
|
|
|
|
}
|
2017-11-29 13:17:27 +02:00
|
|
|
|
2021-01-06 09:56:56 +01:00
|
|
|
static inline void va_store(struct virtual_addr *store)
|
2020-08-18 10:53:15 +02:00
|
|
|
{
|
|
|
|
store->changed = 1U;
|
|
|
|
if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
|
2021-01-06 09:56:56 +01:00
|
|
|
store_va_label();
|
2020-08-18 10:53:15 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-02-17 19:23:18 +01:00
|
|
|
/* Add a reference to a virtual address for the given Label UUID.
 *
 * If an entry with the same Label UUID already exists, its reference
 * count is incremented; otherwise a free slot is claimed and the
 * virtual address is derived from the UUID. On success the 16-bit
 * virtual address is written to @addr.
 *
 * Returns a Mesh status code: STATUS_SUCCESS, STATUS_INSUFF_RESOURCES
 * when no free slot remains, or STATUS_UNSPECIFIED when address
 * derivation fails.
 */
uint8_t bt_mesh_va_add(const uint8_t uuid[16], uint16_t *addr)
{
	struct virtual_addr *free_slot = NULL;
	int err;

	for (int idx = 0; idx < ARRAY_SIZE(virtual_addrs); idx++) {
		struct virtual_addr *cur = &virtual_addrs[idx];

		if (!cur->ref) {
			/* Remember the first unused slot in case no
			 * matching entry is found.
			 */
			if (!free_slot) {
				free_slot = cur;
			}

			continue;
		}

		if (!memcmp(uuid, cur->uuid, ARRAY_SIZE(cur->uuid))) {
			*addr = cur->addr;
			cur->ref++;
			va_store(cur);
			return STATUS_SUCCESS;
		}
	}

	if (!free_slot) {
		return STATUS_INSUFF_RESOURCES;
	}

	memcpy(free_slot->uuid, uuid, ARRAY_SIZE(free_slot->uuid));

	err = bt_mesh_virtual_addr(uuid, &free_slot->addr);
	if (err) {
		/* Slot stays free (ref == 0); just clear the address. */
		free_slot->addr = BT_MESH_ADDR_UNASSIGNED;
		return STATUS_UNSPECIFIED;
	}

	free_slot->ref = 1;
	va_store(free_slot);

	*addr = free_slot->addr;

	return STATUS_SUCCESS;
}
|
|
|
|
|
2021-02-17 19:23:18 +01:00
|
|
|
/* Release one reference to the virtual address entry matching the
 * given Label UUID. If @addr is non-NULL the entry's virtual address
 * is written to it. The change is scheduled for persistent storage.
 *
 * Returns STATUS_SUCCESS, or STATUS_CANNOT_REMOVE when no referenced
 * entry matches the UUID.
 */
uint8_t bt_mesh_va_del(const uint8_t uuid[16], uint16_t *addr)
{
	struct virtual_addr *match = NULL;

	for (int idx = 0; idx < ARRAY_SIZE(virtual_addrs); idx++) {
		struct virtual_addr *cur = &virtual_addrs[idx];

		if (cur->ref &&
		    !memcmp(uuid, cur->uuid, ARRAY_SIZE(cur->uuid))) {
			match = cur;
			break;
		}
	}

	if (!match) {
		return STATUS_CANNOT_REMOVE;
	}

	match->ref--;

	if (addr) {
		*addr = match->addr;
	}

	va_store(match);

	return STATUS_SUCCESS;
}
|
|
|
|
|
|
|
|
/* Look up the Label UUID associated with a virtual address.
 *
 * Returns a pointer to the 16-byte Label UUID of the first referenced
 * entry whose address matches, or NULL when there is no match.
 */
uint8_t *bt_mesh_va_label_get(uint16_t addr)
{
	BT_DBG("addr 0x%04x", addr);

	for (size_t idx = 0U; idx < ARRAY_SIZE(virtual_addrs); idx++) {
		if (!virtual_addrs[idx].ref ||
		    virtual_addrs[idx].addr != addr) {
			continue;
		}

		BT_DBG("Found Label UUID for 0x%04x: %s", addr,
		       bt_hex(virtual_addrs[idx].uuid, 16));

		return virtual_addrs[idx].uuid;
	}

	BT_WARN("No matching Label UUID for 0x%04x", addr);

	return NULL;
}
|
2021-01-06 09:56:56 +01:00
|
|
|
|
2021-03-08 10:09:34 +01:00
|
|
|
#if CONFIG_BT_MESH_LABEL_COUNT > 0
|
2021-01-06 09:56:56 +01:00
|
|
|
/* Return the virtual address entry at @index, or NULL when the index
 * is outside the configured label table.
 */
static struct virtual_addr *bt_mesh_va_get(uint16_t index)
{
	return (index < ARRAY_SIZE(virtual_addrs)) ?
	       &virtual_addrs[index] : NULL;
}
|
|
|
|
|
|
|
|
static int va_set(const char *name, size_t len_rd,
|
|
|
|
settings_read_cb read_cb, void *cb_arg)
|
|
|
|
{
|
|
|
|
struct va_val va;
|
|
|
|
struct virtual_addr *lab;
|
|
|
|
uint16_t index;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!name) {
|
|
|
|
BT_ERR("Insufficient number of arguments");
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
index = strtol(name, NULL, 16);
|
|
|
|
|
|
|
|
if (len_rd == 0) {
|
|
|
|
BT_WARN("Mesh Virtual Address length = 0");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = bt_mesh_settings_set(read_cb, cb_arg, &va, sizeof(va));
|
|
|
|
if (err) {
|
|
|
|
BT_ERR("Failed to set \'virtual address\'");
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (va.ref == 0) {
|
|
|
|
BT_WARN("Ignore Mesh Virtual Address ref = 0");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
lab = bt_mesh_va_get(index);
|
|
|
|
if (lab == NULL) {
|
|
|
|
BT_WARN("Out of labels buffers");
|
|
|
|
return -ENOBUFS;
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(lab->uuid, va.uuid, 16);
|
|
|
|
lab->addr = va.addr;
|
|
|
|
lab->ref = va.ref;
|
|
|
|
|
|
|
|
BT_DBG("Restored Virtual Address, addr 0x%04x ref 0x%04x",
|
|
|
|
lab->addr, lab->ref);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Register the "Va" settings subtree so stored virtual addresses are
 * restored through va_set() on settings load.
 */
BT_MESH_SETTINGS_DEFINE(va, "Va", va_set);
|
|
|
|
|
|
|
|
#define IS_VA_DEL(_label) ((_label)->ref == 0)
|
|
|
|
void bt_mesh_va_pending_store(void)
|
|
|
|
{
|
|
|
|
struct virtual_addr *lab;
|
|
|
|
struct va_val va;
|
|
|
|
char path[18];
|
|
|
|
uint16_t i;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
for (i = 0; (lab = bt_mesh_va_get(i)) != NULL; i++) {
|
|
|
|
if (!lab->changed) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
lab->changed = 0U;
|
|
|
|
|
|
|
|
snprintk(path, sizeof(path), "bt/mesh/Va/%x", i);
|
|
|
|
|
|
|
|
if (IS_VA_DEL(lab)) {
|
|
|
|
err = settings_delete(path);
|
|
|
|
} else {
|
|
|
|
va.ref = lab->ref;
|
|
|
|
va.addr = lab->addr;
|
|
|
|
memcpy(va.uuid, lab->uuid, 16);
|
|
|
|
|
|
|
|
err = settings_save_one(path, &va, sizeof(va));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err) {
|
|
|
|
BT_ERR("Failed to %s %s value (err %d)",
|
|
|
|
IS_VA_DEL(lab) ? "delete" : "store",
|
2022-06-20 07:43:37 +02:00
|
|
|
path, err);
|
2021-01-06 09:56:56 +01:00
|
|
|
} else {
|
|
|
|
BT_DBG("%s %s value",
|
|
|
|
IS_VA_DEL(lab) ? "Deleted" : "Stored",
|
2022-06-20 07:43:37 +02:00
|
|
|
path);
|
2021-01-06 09:56:56 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-03-08 10:09:34 +01:00
|
|
|
#else
|
|
|
|
/* Stub for builds without label storage (CONFIG_BT_MESH_LABEL_COUNT
 * is 0): there are no virtual address entries to persist.
 */
void bt_mesh_va_pending_store(void)
{
	/* Do nothing. */
}
|
|
|
|
#endif /* CONFIG_BT_MESH_LABEL_COUNT > 0 */
|