/* h4.c - H:4 UART based Bluetooth driver */

/*
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stddef.h>

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>

#include <zephyr/init.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/byteorder.h>
#include <string.h>

#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/drivers/bluetooth.h>

#define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_driver);

#include "common/bt_str.h"

#include "../util.h"

#define DT_DRV_COMPAT zephyr_bt_hci_uart

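/* The driver binds to devicetree nodes with the "zephyr,bt-hci-uart"
 * compatible, declared as children of the UART that carries the HCI
 * traffic. An illustrative overlay (node and UART labels are examples only):
 *
 *	&uart1 {
 *		bt_hci_uart: bt_hci_uart {
 *			compatible = "zephyr,bt-hci-uart";
 *			status = "okay";
 *		};
 *	};
 */

/* Per-instance driver state. The rx member holds the receive state machine
 * updated from the UART ISR; complete packets are queued on rx.fifo and the
 * RX thread is woken via rx.ready. The tx member queues outgoing buffers
 * drained by the TX-ready interrupt.
 */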
struct h4_data {
	struct {
		struct net_buf *buf;
		struct k_fifo fifo;

		struct k_sem ready;

		uint16_t remaining;
		uint16_t discard;

		bool have_hdr;
		bool discardable;

		uint8_t hdr_len;

		uint8_t type;
		union {
			struct bt_hci_evt_hdr evt;
			struct bt_hci_acl_hdr acl;
			struct bt_hci_iso_hdr iso;
			uint8_t hdr[4];
		};
	} rx;

	struct {
		uint8_t type;
		struct net_buf *buf;
		struct k_fifo fifo;
	} tx;

	bt_hci_recv_t recv;
};

struct h4_config {
	const struct device *uart;
	k_thread_stack_t *rx_thread_stack;
	size_t rx_thread_stack_size;
	struct k_thread *rx_thread;
};

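/* Read the single H:4 packet type indicator byte from the UART and seed
 * rx.remaining/rx.hdr_len with the size of the matching HCI header.
 */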
static inline void h4_get_type(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;

	/* Get packet type */
	if (uart_fifo_read(cfg->uart, &h4->rx.type, 1) != 1) {
		LOG_WRN("Unable to read H:4 packet type");
		h4->rx.type = BT_HCI_H4_NONE;
		return;
	}

	switch (h4->rx.type) {
	case BT_HCI_H4_EVT:
		h4->rx.remaining = sizeof(h4->rx.evt);
		h4->rx.hdr_len = h4->rx.remaining;
		break;
	case BT_HCI_H4_ACL:
		h4->rx.remaining = sizeof(h4->rx.acl);
		h4->rx.hdr_len = h4->rx.remaining;
		break;
	case BT_HCI_H4_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			h4->rx.remaining = sizeof(h4->rx.iso);
			h4->rx.hdr_len = h4->rx.remaining;
			break;
		}
		__fallthrough;
	default:
		LOG_ERR("Unknown H:4 type 0x%02x", h4->rx.type);
		h4->rx.type = BT_HCI_H4_NONE;
	}
}

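/* Read up to rx.remaining header bytes from the UART FIFO into rx.hdr,
 * resuming where an earlier partial read left off.
 */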
static void h4_read_hdr(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	int bytes_read = h4->rx.hdr_len - h4->rx.remaining;
	int ret;

	ret = uart_fifo_read(cfg->uart, h4->rx.hdr + bytes_read, h4->rx.remaining);
	if (unlikely(ret < 0)) {
		LOG_ERR("Unable to read from UART (ret %d)", ret);
	} else {
		h4->rx.remaining -= ret;
	}
}

static inline void get_acl_hdr(const struct device *dev)
{
	struct h4_data *h4 = dev->data;

	h4_read_hdr(dev);

	if (!h4->rx.remaining) {
		struct bt_hci_acl_hdr *hdr = &h4->rx.acl;

		h4->rx.remaining = sys_le16_to_cpu(hdr->len);
		LOG_DBG("Got ACL header. Payload %u bytes", h4->rx.remaining);
		h4->rx.have_hdr = true;
	}
}

static inline void get_iso_hdr(const struct device *dev)
{
	struct h4_data *h4 = dev->data;

	h4_read_hdr(dev);

	if (!h4->rx.remaining) {
		struct bt_hci_iso_hdr *hdr = &h4->rx.iso;

		h4->rx.remaining = bt_iso_hdr_len(sys_le16_to_cpu(hdr->len));
		LOG_DBG("Got ISO header. Payload %u bytes", h4->rx.remaining);
		h4->rx.have_hdr = true;
	}
}

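/* Event headers need special handling: for LE meta events one extra byte
 * (the subevent code) is pulled into the header so advertising reports can
 * be marked discardable, and some BR/EDR inquiry events are discardable by
 * definition.
 */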
static inline void get_evt_hdr(const struct device *dev)
{
	struct h4_data *h4 = dev->data;

	struct bt_hci_evt_hdr *hdr = &h4->rx.evt;

	h4_read_hdr(dev);

	if (h4->rx.hdr_len == sizeof(*hdr) && h4->rx.remaining < sizeof(*hdr)) {
		switch (h4->rx.evt.evt) {
		case BT_HCI_EVT_LE_META_EVENT:
			h4->rx.remaining++;
			h4->rx.hdr_len++;
			break;
#if defined(CONFIG_BT_CLASSIC)
		case BT_HCI_EVT_INQUIRY_RESULT_WITH_RSSI:
		case BT_HCI_EVT_EXTENDED_INQUIRY_RESULT:
			h4->rx.discardable = true;
			break;
#endif
		}
	}

	if (!h4->rx.remaining) {
		if (h4->rx.evt.evt == BT_HCI_EVT_LE_META_EVENT &&
		    (h4->rx.hdr[sizeof(*hdr)] == BT_HCI_EVT_LE_ADVERTISING_REPORT)) {
			LOG_DBG("Marking adv report as discardable");
			h4->rx.discardable = true;
		}

		h4->rx.remaining = hdr->len - (h4->rx.hdr_len - sizeof(*hdr));

		LOG_DBG("Got event header. Payload %u bytes", hdr->len);
		h4->rx.have_hdr = true;
	}
}

static inline void copy_hdr(struct h4_data *h4)
{
	net_buf_add_mem(h4->rx.buf, h4->rx.hdr, h4->rx.hdr_len);
}

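/* Drop any partially received buffer and return the RX state machine to its
 * initial "waiting for packet type" state.
 */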
static void reset_rx(struct h4_data *h4)
{
	if (h4->rx.buf) {
		net_buf_unref(h4->rx.buf);
		h4->rx.buf = NULL;
	}

	h4->rx.type = BT_HCI_H4_NONE;
	h4->rx.remaining = 0U;
	h4->rx.have_hdr = false;
	h4->rx.hdr_len = 0U;
	h4->rx.discardable = false;
}

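/* Allocate a host buffer for the packet currently being received, using the
 * pool that matches the H:4 packet type. Returns NULL for unknown types or
 * when no buffer becomes available within the given timeout.
 */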
static struct net_buf *get_rx(struct h4_data *h4, k_timeout_t timeout)
{
	LOG_DBG("type 0x%02x, evt 0x%02x", h4->rx.type, h4->rx.evt.evt);

	switch (h4->rx.type) {
	case BT_HCI_H4_EVT:
		return bt_buf_get_evt(h4->rx.evt.evt, h4->rx.discardable, timeout);
	case BT_HCI_H4_ACL:
		return bt_buf_get_rx(BT_BUF_ACL_IN, timeout);
	case BT_HCI_H4_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			return bt_buf_get_rx(BT_BUF_ISO_IN, timeout);
		}
	}

	return NULL;
}

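/* RX thread: performs the blocking buffer allocations the ISR cannot do,
 * then drains rx.fifo and hands each complete packet to the host via the
 * recv callback registered in h4_open().
 */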
static void rx_thread(void *p1, void *p2, void *p3)
{
	const struct device *dev = p1;
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	struct net_buf *buf;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	LOG_DBG("started");

	while (1) {
		LOG_DBG("rx.buf %p", h4->rx.buf);

		/* We can only do the allocation if we know the initial
		 * header, since Command Complete/Status events must use the
		 * original command buffer (if available).
		 */
		if (h4->rx.have_hdr && !h4->rx.buf) {
			h4->rx.buf = get_rx(h4, K_FOREVER);
			LOG_DBG("Got rx.buf %p", h4->rx.buf);
			if (h4->rx.remaining > net_buf_tailroom(h4->rx.buf)) {
				LOG_ERR("Not enough space in buffer");
				h4->rx.discard = h4->rx.remaining;
				reset_rx(h4);
			} else {
				copy_hdr(h4);
			}
		}

		/* Let the ISR continue receiving new packets */
		uart_irq_rx_enable(cfg->uart);

		k_sem_take(&h4->rx.ready, K_FOREVER);

		buf = k_fifo_get(&h4->rx.fifo, K_NO_WAIT);
		while (buf != NULL) {
			uart_irq_rx_enable(cfg->uart);

			LOG_DBG("Calling bt_recv(%p)", buf);
			h4->recv(dev, buf);

			/* Give other threads a chance to run if the ISR
			 * is receiving data so fast that rx.fifo never
			 * or very rarely goes empty.
			 */
			k_yield();

			uart_irq_rx_disable(cfg->uart);
			buf = k_fifo_get(&h4->rx.fifo, K_NO_WAIT);
		}
	}
}

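/* Throw away up to len bytes (bounded by the scratch buffer size) from the
 * UART FIFO and return how many bytes were actually dropped.
 */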
static size_t h4_discard(const struct device *uart, size_t len)
{
	uint8_t buf[33];
	int err;

	err = uart_fifo_read(uart, buf, MIN(len, sizeof(buf)));
	if (unlikely(err < 0)) {
		LOG_ERR("Unable to read from UART (err %d)", err);
		return 0;
	}

	return err;
}

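/* Called from the ISR once a full header has been received: allocate an RX
 * buffer if needed, copy in payload bytes as they arrive and, when the
 * packet is complete, queue it for the RX thread.
 */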
static inline void read_payload(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	struct net_buf *buf;
	int read;

	if (!h4->rx.buf) {
		size_t buf_tailroom;

		h4->rx.buf = get_rx(h4, K_NO_WAIT);
		if (!h4->rx.buf) {
			if (h4->rx.discardable) {
				LOG_WRN("Discarding event 0x%02x", h4->rx.evt.evt);
				h4->rx.discard = h4->rx.remaining;
				reset_rx(h4);
				return;
			}

			LOG_WRN("Failed to allocate, deferring to rx_thread");
			uart_irq_rx_disable(cfg->uart);
			/*
			 * HCI UART RX is now turned off, so no new buffer will
			 * be put into the RX FIFO and the ISR will not give
			 * `rx.ready` again; its count is most likely 0.
			 *
			 * Since it is uncertain whether the RX thread is
			 * currently blocked waiting on `rx.ready`, give the
			 * semaphore here to wake it up, so that it retries the
			 * RX buffer allocation on its side.
			 */
			k_sem_give(&h4->rx.ready);
			return;
		}

		LOG_DBG("Allocated rx.buf %p", h4->rx.buf);

		buf_tailroom = net_buf_tailroom(h4->rx.buf);
		if (buf_tailroom < h4->rx.remaining) {
			LOG_ERR("Not enough space in buffer %u/%zu", h4->rx.remaining,
				buf_tailroom);
			h4->rx.discard = h4->rx.remaining;
			reset_rx(h4);
			return;
		}

		copy_hdr(h4);
	}

	read = uart_fifo_read(cfg->uart, net_buf_tail(h4->rx.buf), h4->rx.remaining);
	if (unlikely(read < 0)) {
		LOG_ERR("Failed to read UART (err %d)", read);
		return;
	}

	net_buf_add(h4->rx.buf, read);
	h4->rx.remaining -= read;

	LOG_DBG("got %d bytes, remaining %u", read, h4->rx.remaining);
	LOG_DBG("Payload (len %u): %s", h4->rx.buf->len,
		bt_hex(h4->rx.buf->data, h4->rx.buf->len));

	if (h4->rx.remaining) {
		return;
	}

	buf = h4->rx.buf;
	h4->rx.buf = NULL;

	reset_rx(h4);

	LOG_DBG("Putting buf %p to rx fifo", buf);
	k_fifo_put(&h4->rx.fifo, buf);
	k_sem_give(&h4->rx.ready);
}

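/* Called from the ISR while the header is still incomplete: dispatch to the
 * per-type header parser and, once the header is known and a buffer already
 * exists, copy the header into that buffer.
 */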
static inline void read_header(const struct device *dev)
{
	struct h4_data *h4 = dev->data;

	switch (h4->rx.type) {
	case BT_HCI_H4_NONE:
		h4_get_type(dev);
		return;
	case BT_HCI_H4_EVT:
		get_evt_hdr(dev);
		break;
	case BT_HCI_H4_ACL:
		get_acl_hdr(dev);
		break;
	case BT_HCI_H4_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO)) {
			get_iso_hdr(dev);
			break;
		}
		__fallthrough;
	default:
		CODE_UNREACHABLE;
		return;
	}

	if (h4->rx.have_hdr && h4->rx.buf) {
		if (h4->rx.remaining > net_buf_tailroom(h4->rx.buf)) {
			LOG_ERR("Not enough space in buffer");
			h4->rx.discard = h4->rx.remaining;
			reset_rx(h4);
		} else {
			copy_hdr(h4);
		}
	}
}

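/* TX-ready interrupt handler: feed the UART FIFO from the current TX buffer
 * and move on to the next queued buffer (or disable the TX interrupt) once
 * the buffer has been fully written.
 */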
static inline void process_tx(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	int bytes;

	if (!h4->tx.buf) {
		h4->tx.buf = k_fifo_get(&h4->tx.fifo, K_NO_WAIT);
		if (!h4->tx.buf) {
			LOG_ERR("TX interrupt but no pending buffer!");
			uart_irq_tx_disable(cfg->uart);
			return;
		}
	}

	bytes = uart_fifo_fill(cfg->uart, h4->tx.buf->data, h4->tx.buf->len);
	if (unlikely(bytes < 0)) {
		LOG_ERR("Unable to write to UART (err %d)", bytes);
	} else {
		net_buf_pull(h4->tx.buf, bytes);
	}

	if (h4->tx.buf->len) {
		return;
	}

	h4->tx.type = BT_HCI_H4_NONE;
	net_buf_unref(h4->tx.buf);
	h4->tx.buf = k_fifo_get(&h4->tx.fifo, K_NO_WAIT);
	if (!h4->tx.buf) {
		uart_irq_tx_disable(cfg->uart);
	}
}

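/* RX-ready interrupt handler: drop bytes scheduled for discard, otherwise
 * continue reading either the header or the payload of the current packet.
 */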
static inline void process_rx(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;

	LOG_DBG("remaining %u discard %u have_hdr %u rx.buf %p len %u",
		h4->rx.remaining, h4->rx.discard, h4->rx.have_hdr, h4->rx.buf,
		h4->rx.buf ? h4->rx.buf->len : 0);

	if (h4->rx.discard) {
		h4->rx.discard -= h4_discard(cfg->uart, h4->rx.discard);
		return;
	}

	if (h4->rx.have_hdr) {
		read_payload(dev);
	} else {
		read_header(dev);
	}
}

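/* UART interrupt service routine shared by the TX and RX directions. */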
static void bt_uart_isr(const struct device *uart, void *user_data)
{
	struct device *dev = user_data;

	while (uart_irq_update(uart) && uart_irq_is_pending(uart)) {
		if (uart_irq_tx_ready(uart)) {
			process_tx(dev);
		}

		if (uart_irq_rx_ready(uart)) {
			process_rx(dev);
		}
	}
}

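/* HCI driver send hook: buffers handed in by the host are queued on tx.fifo
 * and written out from the TX-ready interrupt.
 */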
static int h4_send(const struct device *dev, struct net_buf *buf)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;

	LOG_DBG("buf %p type %u len %u", buf, buf->data[0], buf->len);

	k_fifo_put(&h4->tx.fifo, buf);
	uart_irq_tx_enable(cfg->uart);

	return 0;
}

/** Setup the HCI transport, which usually means to reset the Bluetooth IC
 *
 * @param uart The device structure for the bus connecting to the IC
 *
 * @return 0 on success, negative error value on failure
 */
int __weak bt_hci_transport_setup(const struct device *uart)
{
	h4_discard(uart, 32);
	return 0;
}

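/* HCI driver open hook: reset the controller via bt_hci_transport_setup(),
 * register the UART ISR and start the RX thread.
 */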
static int h4_open(const struct device *dev, bt_hci_recv_t recv)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	int ret;
	k_tid_t tid;

	LOG_DBG("");

	uart_irq_rx_disable(cfg->uart);
	uart_irq_tx_disable(cfg->uart);

	ret = bt_hci_transport_setup(cfg->uart);
	if (ret < 0) {
		return -EIO;
	}

	h4->recv = recv;

	uart_irq_callback_user_data_set(cfg->uart, bt_uart_isr, (void *)dev);

	tid = k_thread_create(cfg->rx_thread, cfg->rx_thread_stack,
			      cfg->rx_thread_stack_size,
			      rx_thread, (void *)dev, NULL, NULL,
			      K_PRIO_COOP(CONFIG_BT_RX_PRIO),
			      0, K_NO_WAIT);
	k_thread_name_set(tid, "bt_rx_thread");

	/* Wake the rx_thread for its first iteration */
	k_sem_give(&h4->rx.ready);

	return 0;
}

int __weak bt_hci_transport_teardown(const struct device *dev)
{
	return 0;
}

static int h4_close(const struct device *dev)
{
	const struct h4_config *cfg = dev->config;
	struct h4_data *h4 = dev->data;
	int err;

	LOG_DBG("");

	uart_irq_rx_disable(cfg->uart);
	uart_irq_tx_disable(cfg->uart);

	err = bt_hci_transport_teardown(cfg->uart);
	if (err < 0) {
		return err;
	}

	/* Abort RX thread */
	k_thread_abort(cfg->rx_thread);

	h4->recv = NULL;

	return 0;
}

#if defined(CONFIG_BT_HCI_SETUP)
static int h4_setup(const struct device *dev, const struct bt_hci_setup_params *params)
{
	const struct h4_config *cfg = dev->config;

	ARG_UNUSED(params);

	/* Extern bt_h4_vnd_setup function.
	 * This function executes a vendor-specific command sequence to
	 * initialize the BT Controller before the BT Host executes its
	 * reset sequence. bt_h4_vnd_setup() must be implemented in a
	 * vendor-specific HCI extension module if CONFIG_BT_HCI_SETUP is
	 * enabled.
	 */
	extern int bt_h4_vnd_setup(const struct device *dev);

	return bt_h4_vnd_setup(cfg->uart);
}
#endif

static DEVICE_API(bt_hci, h4_driver_api) = {
	.open = h4_open,
	.send = h4_send,
	.close = h4_close,
#if defined(CONFIG_BT_HCI_SETUP)
	.setup = h4_setup,
#endif
};

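/* Define one driver instance per enabled zephyr,bt-hci-uart devicetree node:
 * a dedicated RX thread stack and thread object, the constant config pointing
 * at the parent UART, and the runtime state with its FIFOs and semaphore.
 */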
#define BT_UART_DEVICE_INIT(inst) \
	static K_KERNEL_STACK_DEFINE(rx_thread_stack_##inst, CONFIG_BT_DRV_RX_STACK_SIZE); \
	static struct k_thread rx_thread_##inst; \
	static const struct h4_config h4_config_##inst = { \
		.uart = DEVICE_DT_GET(DT_INST_PARENT(inst)), \
		.rx_thread_stack = rx_thread_stack_##inst, \
		.rx_thread_stack_size = K_KERNEL_STACK_SIZEOF(rx_thread_stack_##inst), \
		.rx_thread = &rx_thread_##inst, \
	}; \
	static struct h4_data h4_data_##inst = { \
		.rx = { \
			.fifo = Z_FIFO_INITIALIZER(h4_data_##inst.rx.fifo), \
			.ready = Z_SEM_INITIALIZER(h4_data_##inst.rx.ready, 0, 1), \
		}, \
		.tx = { \
			.fifo = Z_FIFO_INITIALIZER(h4_data_##inst.tx.fifo), \
		}, \
	}; \
	DEVICE_DT_INST_DEFINE(inst, NULL, NULL, &h4_data_##inst, &h4_config_##inst, \
			      POST_KERNEL, CONFIG_BT_HCI_INIT_PRIORITY, &h4_driver_api)

DT_INST_FOREACH_STATUS_OKAY(BT_UART_DEVICE_INIT)