zephyr/drivers/bluetooth/nble/uart.c

245 lines
4.9 KiB
C
Raw Normal View History

/* uart.c - Nordic BLE UART based Bluetooth driver */
/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <errno.h>
#include <zephyr.h>
#include <sections.h>
#include <board.h>
#include <init.h>
#include <uart.h>
#include <string.h>
#include <gpio.h>
#include <net/buf.h>
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BLUETOOTH_DEBUG_HCI_DRIVER)
#include <bluetooth/log.h>
#include "../util.h"
#include "rpc.h"
#if defined(CONFIG_BLUETOOTH_NRF51_PM)
#include "../nrf51_pm.h"
#endif
/**
 * @note this structure must be self-aligned and self-packed
 */
struct ipc_uart_header {
	u16_t len;		/**< Length of IPC message. */
	u8_t channel;		/**< Channel number of IPC message. */
	u8_t src_cpu_id;	/**< CPU id of IPC sender. */
} __packed;
/* TODO: check size */
#define NBLE_TX_BUF_COUNT 2
#define NBLE_RX_BUF_COUNT 10
#define NBLE_BUF_SIZE 384
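
/* Incoming RPC messages from the nRF51 are reassembled into rx_pool
 * buffers; outgoing RPC messages are built in tx_pool buffers before
 * being pushed out over the UART.
 */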
NET_BUF_POOL_DEFINE(rx_pool, NBLE_RX_BUF_COUNT, NBLE_BUF_SIZE, 0, NULL);
NET_BUF_POOL_DEFINE(tx_pool, NBLE_TX_BUF_COUNT, NBLE_BUF_SIZE, 0, NULL);
static BT_STACK_NOINIT(rx_thread_stack, CONFIG_BLUETOOTH_RX_STACK_SIZE);
static struct k_thread rx_thread_data;
static struct device *nble_dev;
static K_FIFO_DEFINE(rx_queue);
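
/* RX thread: takes fully received IPC messages off rx_queue (queued by
 * the UART ISR) and hands them to the RPC layer for deserialization.
 */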
static void rx_thread(void)
{
BT_DBG("Started");
while (true) {
struct net_buf *buf;
		buf = net_buf_get(&rx_queue, K_FOREVER);
		BT_DBG("Got buf %p", buf);

		rpc_deserialize(buf);

		net_buf_unref(buf);

		/* Make sure we don't hog the CPU if the rx_queue never
		 * gets empty.
		 */
		k_yield();
	}
}
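
/* Allocation callback used by the RPC layer: returns a tx_pool buffer
 * with headroom reserved for the IPC UART header that rpc_transmit_cb()
 * prepends later.
 */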
struct net_buf *rpc_alloc_cb(u16_t length)
{
	struct net_buf *buf;

	BT_DBG("length %u", length);

	buf = net_buf_alloc(&tx_pool, K_FOREVER);
	if (!buf) {
		BT_ERR("Unable to get tx buffer");
		return NULL;
	}

	net_buf_reserve(buf, sizeof(struct ipc_uart_header));

	if (length > net_buf_tailroom(buf)) {
		BT_ERR("Too big tx buffer requested");
		net_buf_unref(buf);
		return NULL;
	}

	return buf;
}
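
/* Transmit callback used by the RPC layer: prepends the IPC UART header
 * and sends the whole buffer to the nRF51 using polled UART output.
 */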
void rpc_transmit_cb(struct net_buf *buf)
{
	struct ipc_uart_header *hdr;

	BT_DBG("buf %p length %u", buf, buf->len);

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->len = buf->len - sizeof(*hdr);
	hdr->channel = 0;
	hdr->src_cpu_id = 0;

#if defined(CONFIG_BLUETOOTH_NRF51_PM)
	/* Wake-up nble */
	nrf51_wakeup();
#endif

	while (buf->len) {
		uart_poll_out(nble_dev, net_buf_pull_u8(buf));
	}

	net_buf_unref(buf);

#if defined(CONFIG_BLUETOOTH_NRF51_PM)
	/* TODO check if FIFO is empty */
	/* Allow nble to go to deep sleep */
	nrf51_allow_sleep();
#endif
}
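
/* Drop up to 'len' pending RX bytes when no buffer is available,
 * returning how many bytes were actually discarded.
 */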
static size_t nble_discard(struct device *uart, size_t len)
{
	/* FIXME: correct size for nble */
	u8_t buf[33];

	return uart_fifo_read(uart, buf, min(len, sizeof(buf)));
}
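
/* UART ISR: reads the IPC header first, then fills a net_buf with the
 * payload; once hdr.len bytes have arrived the buffer is queued for the
 * RX thread. TX is done by polling elsewhere, so a TX-ready interrupt is
 * only logged.
 */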
static void bt_uart_isr(struct device *unused)
{
	static struct net_buf *buf;

	ARG_UNUSED(unused);

	while (uart_irq_update(nble_dev) && uart_irq_is_pending(nble_dev)) {
		static struct ipc_uart_header hdr;
		static u8_t hdr_bytes;
		int read;

		if (!uart_irq_rx_ready(nble_dev)) {
			if (uart_irq_tx_ready(nble_dev)) {
				BT_DBG("transmit ready");
				/*
				 * Implementing ISR based transmit requires
				 * extra API for uart such as
				 * uart_line_status(), etc. The support was
				 * removed from the recent code, using polling
				 * for transmit for now.
				 */
			} else {
				BT_DBG("spurious interrupt");
			}
			continue;
		}

		if (hdr_bytes < sizeof(hdr)) {
			/* Read the remainder of the IPC header */
			hdr_bytes += uart_fifo_read(nble_dev,
						    (u8_t *)&hdr + hdr_bytes,
						    sizeof(hdr) - hdr_bytes);
			if (hdr_bytes < sizeof(hdr)) {
				continue;
			}

			if (hdr.len > NBLE_BUF_SIZE) {
				BT_ERR("Too much data to fit buffer");
				buf = NULL;
			} else {
				buf = net_buf_alloc(&rx_pool, K_NO_WAIT);
				if (!buf) {
					BT_ERR("No available IPC buffers");
				}
			}
		}

		if (!buf) {
			hdr.len -= nble_discard(nble_dev, hdr.len);
			if (!hdr.len) {
				hdr_bytes = 0;
			}
			continue;
		}

		read = uart_fifo_read(nble_dev, net_buf_tail(buf), hdr.len);
		buf->len += read;
		hdr.len -= read;

		if (!hdr.len) {
			BT_DBG("full packet received");
			hdr_bytes = 0;

			/* Pass buffer to the stack */
			net_buf_put(&rx_queue, buf);
		}
	}
}
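
/* Open the UART link to the nRF51: start the RX thread, enable RX
 * interrupts and, when configured, the nRF51 power-management handshake.
 */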
int nble_open(void)
{
BT_DBG("");
/* Initialize receive queue and start rx_thread */
k_thread_create(&rx_thread_data, rx_thread_stack,
sizeof(rx_thread_stack), (k_thread_entry_t)rx_thread,
NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);
uart_irq_rx_disable(nble_dev);
uart_irq_tx_disable(nble_dev);
#if defined(CONFIG_BLUETOOTH_NRF51_PM)
if (nrf51_init(nble_dev) < 0) {
return -EIO;
}
#else
bt_uart_drain(nble_dev);
#endif
	uart_irq_callback_set(nble_dev, bt_uart_isr);

	uart_irq_rx_enable(nble_dev);

	return 0;
}
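
/* Driver init: look up the UART device used to talk to the nRF51. */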
static int _bt_nble_init(struct device *unused)
{
	ARG_UNUSED(unused);

	nble_dev = device_get_binding(CONFIG_NBLE_UART_ON_DEV_NAME);
	if (!nble_dev) {
		return -EINVAL;
	}

	return 0;
}
DEVICE_INIT(bt_nble, "", _bt_nble_init, NULL, NULL, POST_KERNEL,
	    CONFIG_KERNEL_INIT_PRIORITY_DEVICE);