drivers: serial: Add async to interrupt driven adaptation

Add an adaptation layer which provides the interrupt driven
API for drivers which expose only the asynchronous API.

Signed-off-by: Krzysztof Chruściński <krzysztof.chruscinski@nordicsemi.no>
This commit is contained in:
Krzysztof Chruściński 2023-11-08 13:33:57 +01:00 committed by Carles Cufí
commit 2854fc18fd
4 changed files with 678 additions and 0 deletions

View file

@ -93,3 +93,4 @@ endif()
zephyr_library_sources_ifdef(CONFIG_SERIAL_TEST serial_test.c)
zephyr_library_sources_ifdef(CONFIG_UART_ASYNC_RX_HELPER uart_async_rx.c)
zephyr_library_sources_ifdef(CONFIG_UART_ASYNC_TO_INT_DRIVEN_API uart_async_to_irq.c)

View file

@ -133,6 +133,21 @@ config UART_ASYNC_RX_HELPER
is delayed. Module implements zero-copy approach with multiple reception
buffers.
config UART_ASYNC_TO_INT_DRIVEN_API
bool
select UART_ASYNC_RX_HELPER
help
Asynchronous to Interrupt driven adaptation layer. When enabled device
which implements only asynchronous API can be used with interrupt driven
API implemented by the generic adaptation layer.
config UART_ASYNC_TO_INT_DRIVEN_RX_TIMEOUT
int "Receiver timeout (in bauds)"
depends on UART_ASYNC_TO_INT_DRIVEN_API
default 100
help
Receiver inactivity timeout. It is used to calculate timeout in microseconds.
comment "Serial Drivers"
source "drivers/serial/Kconfig.b91"

View file

@ -0,0 +1,377 @@
/*
* Copyright (c) 2023 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/drivers/serial/uart_async_to_irq.h>
#include <string.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(UART_ASYNC_TO_IRQ_LOG_NAME, CONFIG_UART_LOG_LEVEL);
/* Internal state flags. */
/* RX interrupt enabled. */
#define A2I_RX_IRQ_ENABLED BIT(0)
/* TX interrupt enabled. */
#define A2I_TX_IRQ_ENABLED BIT(1)
/* Error interrupt enabled. */
#define A2I_ERR_IRQ_ENABLED BIT(2)
/* Receiver to be kept enabled. */
#define A2I_RX_ENABLE BIT(3)
/* TX busy. */
#define A2I_TX_BUSY BIT(4)
/* Fetch the adaptation layer data from a device.
 *
 * A pointer to struct uart_async_to_irq_data must be the first member of
 * the driver's data structure.
 */
static struct uart_async_to_irq_data *get_data(const struct device *dev)
{
	struct uart_async_to_irq_data *const *a2i_data = dev->data;

	return *a2i_data;
}
/* Fetch the adaptation layer configuration from a device.
 *
 * A pointer to struct uart_async_to_irq_config must be the first member of
 * the driver's configuration structure.
 */
static const struct uart_async_to_irq_config *get_config(const struct device *dev)
{
	const struct uart_async_to_irq_config *const *a2i_config = dev->config;

	return *a2i_config;
}
/* Function calculates RX timeout based on baudrate. */
static uint32_t get_rx_timeout(const struct device *dev)
{
struct uart_config cfg;
int err;
uint32_t baudrate;
err = uart_config_get(dev, &cfg);
if (err == 0) {
baudrate = cfg.baudrate;
} else {
baudrate = get_config(dev)->baudrate;
}
uint32_t us = (CONFIG_UART_ASYNC_TO_INT_DRIVEN_RX_TIMEOUT * 1000000) / baudrate;
return us;
}
/* Start the asynchronous receiver on the given buffer.
 *
 * The RX timeout is derived from the current baudrate.
 */
static int rx_enable(const struct device *dev,
		     struct uart_async_to_irq_data *data,
		     uint8_t *buf,
		     size_t len)
{
	const struct uart_async_to_irq_config *config = get_config(dev);

	return config->api->rx_enable(dev, buf, len, get_rx_timeout(dev));
}
/* Claim a free buffer from the RX helper and enable the receiver with it.
 *
 * Returns -EBUSY when no buffer is currently available, otherwise the
 * result of enabling the receiver.
 */
static int try_rx_enable(const struct device *dev, struct uart_async_to_irq_data *data)
{
	struct uart_async_rx *async_rx = &data->rx.async_rx;
	uint8_t *buf = uart_async_rx_buf_req(async_rx);

	if (buf == NULL) {
		return -EBUSY;
	}

	return rx_enable(dev, data, buf, uart_async_rx_get_buf_len(async_rx));
}
static void on_rx_buf_req(const struct device *dev,
const struct uart_async_to_irq_config *config,
struct uart_async_to_irq_data *data)
{
struct uart_async_rx *async_rx = &data->rx.async_rx;
uint8_t *buf = uart_async_rx_buf_req(async_rx);
size_t len = uart_async_rx_get_buf_len(async_rx);
if (buf) {
int err = config->api->rx_buf_rsp(dev, buf, len);
if (err < 0) {
uart_async_rx_on_buf_rel(async_rx, buf);
}
} else {
atomic_inc(&data->rx.pending_buf_req);
}
}
/* Handler of the UART_RX_DISABLED event.
 *
 * If the receiver shall be kept enabled (A2I_RX_ENABLE set) it is immediately
 * re-enabled with a fresh buffer. Otherwise the disable was requested by
 * uart_async_to_irq_rx_disable() and the semaphore unblocks that waiter.
 */
static void on_rx_dis(const struct device *dev, struct uart_async_to_irq_data *data)
{
	if (data->flags & A2I_RX_ENABLE) {
		/* Receiver is being restarted; any buffer requests which were
		 * pending for the previous session are obsolete now.
		 */
		data->rx.pending_buf_req = 0;

		int err = try_rx_enable(dev, data);

		LOG_INST_DBG(get_config(dev)->log, "Reenabling RX from RX_DISABLED (err:%d)", err);
		__ASSERT_NO_MSG(err >= 0);
		return;
	}

	/* Intentional disable; unblock uart_async_to_irq_rx_disable(). */
	k_sem_give(&data->rx.sem);
}
/* UART asynchronous API event handler.
 *
 * Translates asynchronous events into interrupt driven API state. When an
 * event is relevant for the user's interrupt handler (and the matching
 * interrupt is enabled), an interrupt request is counted and the trampoline
 * is triggered so the handler runs in the emulated interrupt context.
 */
static void uart_async_to_irq_callback(const struct device *dev,
				       struct uart_event *evt,
				       void *user_data)
{
	struct uart_async_to_irq_data *data = (struct uart_async_to_irq_data *)user_data;
	const struct uart_async_to_irq_config *config = get_config(dev);
	bool call_handler = false;

	switch (evt->type) {
	case UART_TX_DONE:
		/* Transfer finished; clear busy so fifo_fill can start a new one. */
		atomic_and(&data->flags, ~A2I_TX_BUSY);
		call_handler = data->flags & A2I_TX_IRQ_ENABLED;
		break;
	case UART_RX_RDY:
		/* Account the newly received data in the RX helper. */
		uart_async_rx_on_rdy(&data->rx.async_rx, evt->data.rx.buf, evt->data.rx.len);
		call_handler = data->flags & A2I_RX_IRQ_ENABLED;
		break;
	case UART_RX_BUF_REQUEST:
		on_rx_buf_req(dev, config, data);
		break;
	case UART_RX_BUF_RELEASED:
		/* Return the released buffer to the RX helper pool. */
		uart_async_rx_on_buf_rel(&data->rx.async_rx, evt->data.rx_buf.buf);
		break;
	case UART_RX_STOPPED:
		call_handler = data->flags & A2I_ERR_IRQ_ENABLED;
		break;
	case UART_RX_DISABLED:
		on_rx_dis(dev, data);
		break;
	default:
		break;
	}

	if (data->callback && call_handler) {
		/* Count the request; the trampoline drains irq_req in a loop. */
		atomic_inc(&data->irq_req);
		config->trampoline(dev);
	}
}
/** Interrupt driven FIFO fill function.
 *
 * Copies up to the internal TX buffer size of data and starts an
 * asynchronous transfer. Returns the number of bytes accepted (0 when a
 * transfer is already ongoing, when @p len is not positive, or when the
 * driver rejects the transfer).
 */
int z_uart_async_to_irq_fifo_fill(const struct device *dev, const uint8_t *buf, int len)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	/* Reject non-positive lengths. Without this guard a negative len would
	 * be converted to a huge unsigned value inside MIN() (int vs size_t),
	 * making memcpy read tx.len bytes past the caller's buffer.
	 */
	if (len <= 0) {
		return 0;
	}

	len = MIN(len, data->tx.len);

	/* Only one transfer at a time; report 0 while TX is busy. */
	if (atomic_or(&data->flags, A2I_TX_BUSY) & A2I_TX_BUSY) {
		return 0;
	}

	/* Copy to the internal buffer so the caller's data need not persist
	 * for the duration of the asynchronous transfer.
	 */
	memcpy(data->tx.buf, buf, len);

	err = config->api->tx(dev, data->tx.buf, len, SYS_FOREVER_US);
	if (err < 0) {
		atomic_and(&data->flags, ~A2I_TX_BUSY);
		return 0;
	}

	return len;
}
/** Interrupt driven FIFO read function.
 *
 * Claims received data from the RX helper, copies it to the caller's buffer
 * and, if buffer requests from the driver are pending, tries to serve one
 * with a freshly freed buffer. Returns the number of bytes copied or a
 * negative error code from the buffer handover.
 */
int z_uart_async_to_irq_fifo_read(const struct device *dev,
				  uint8_t *buf,
				  const int len)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	struct uart_async_rx *async_rx = &data->rx.async_rx;
	size_t claim_len;
	uint8_t *claim_buf;

	/* Zero-copy claim of up to len bytes of received data. */
	claim_len = uart_async_rx_data_claim(async_rx, &claim_buf, len);
	if (claim_len == 0) {
		return 0;
	}

	memcpy(buf, claim_buf, claim_len);
	/* Mark the claimed data as consumed; this may free a buffer. */
	uart_async_rx_data_consume(async_rx, claim_len);

	if (data->rx.pending_buf_req) {
		/* Driver asked for a buffer earlier when none was free; try to
		 * serve that request now that consuming data may have freed one.
		 */
		buf = uart_async_rx_buf_req(async_rx);
		if (buf) {
			int err;
			size_t rx_len = uart_async_rx_get_buf_len(async_rx);

			atomic_dec(&data->rx.pending_buf_req);
			err = config->api->rx_buf_rsp(dev, buf, rx_len);
			if (err < 0) {
				if (err == -EACCES) {
					/* Receiver already disabled; restart it
					 * with the buffer instead.
					 */
					data->rx.pending_buf_req = 0;
					err = rx_enable(dev, data, buf, rx_len);
				}
				if (err < 0) {
					return err;
				}
			}
		}
	}

	return (int)claim_len;
}
static void dir_disable(const struct device *dev, uint32_t flag)
{
struct uart_async_to_irq_data *data = get_data(dev);
atomic_and(&data->flags, ~flag);
}
static void dir_enable(const struct device *dev, uint32_t flag)
{
struct uart_async_to_irq_data *data = get_data(dev);
atomic_or(&data->flags, flag);
atomic_inc(&data->irq_req);
get_config(dev)->trampoline(dev);
}
/** Enable the TX interrupt. */
void z_uart_async_to_irq_irq_tx_enable(const struct device *dev)
{
	dir_enable(dev, A2I_TX_IRQ_ENABLED);
}
/** Disable the TX interrupt. */
void z_uart_async_to_irq_irq_tx_disable(const struct device *dev)
{
	dir_disable(dev, A2I_TX_IRQ_ENABLED);
}
/** Interrupt driven transfer ready function */
int z_uart_async_to_irq_irq_tx_ready(const struct device *dev)
{
struct uart_async_to_irq_data *data = get_data(dev);
return (data->flags & A2I_TX_IRQ_ENABLED) && !(data->flags & A2I_TX_BUSY);
}
/** Enable the RX interrupt. */
void z_uart_async_to_irq_irq_rx_enable(const struct device *dev)
{
	dir_enable(dev, A2I_RX_IRQ_ENABLED);
}
/** Disable the RX interrupt. */
void z_uart_async_to_irq_irq_rx_disable(const struct device *dev)
{
	dir_disable(dev, A2I_RX_IRQ_ENABLED);
}
/** Check whether the TX transfer is complete.
 *
 * Equivalent to TX ready: complete exactly when a new transfer may start.
 */
int z_uart_async_to_irq_irq_tx_complete(const struct device *dev)
{
	return z_uart_async_to_irq_irq_tx_ready(dev);
}
/** Interrupt driven receiver ready function */
int z_uart_async_to_irq_irq_rx_ready(const struct device *dev)
{
struct uart_async_to_irq_data *data = get_data(dev);
return (data->flags & A2I_RX_IRQ_ENABLED) && (data->rx.async_rx.pending_bytes > 0);
}
/** Enable the error interrupt. */
void z_uart_async_to_irq_irq_err_enable(const struct device *dev)
{
	dir_enable(dev, A2I_ERR_IRQ_ENABLED);
}
/** Disable the error interrupt. */
void z_uart_async_to_irq_irq_err_disable(const struct device *dev)
{
	dir_disable(dev, A2I_ERR_IRQ_ENABLED);
}
/** Check whether any interrupt (TX or RX) is pending. */
int z_uart_async_to_irq_irq_is_pending(const struct device *dev)
{
	if (z_uart_async_to_irq_irq_tx_ready(dev)) {
		return 1;
	}

	return z_uart_async_to_irq_irq_rx_ready(dev);
}
/** Interrupt update function.
 *
 * Nothing needs to be latched in this adaptation layer; always reports 1.
 */
int z_uart_async_to_irq_irq_update(const struct device *dev)
{
	return 1;
}
/** Store the user's interrupt handler and its context. */
void z_uart_async_to_irq_irq_callback_set(const struct device *dev,
					  uart_irq_callback_user_data_t cb,
					  void *user_data)
{
	struct uart_async_to_irq_data *a2i_data = get_data(dev);

	a2i_data->user_data = user_data;
	a2i_data->callback = cb;
}
/* Enable RX for the interrupt driven API.
 *
 * Installs the adaptation layer event handler, resets the RX helper state
 * and enables the receiver. On success the A2I_RX_ENABLE flag keeps the
 * receiver enabled across UART_RX_DISABLED events (see on_rx_dis()).
 */
int uart_async_to_irq_rx_enable(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	/* Route all asynchronous events to the adaptation layer. */
	err = config->api->callback_set(dev, uart_async_to_irq_callback, data);
	if (err < 0) {
		return err;
	}

	/* Start from a clean RX helper state before enabling the receiver. */
	uart_async_rx_reset(&data->rx.async_rx);

	err = try_rx_enable(dev, data);
	if (err == 0) {
		/* Mark that the receiver shall be kept enabled. */
		atomic_or(&data->flags, A2I_RX_ENABLE);
	}

	return err;
}
/* Disable RX for the interrupt driven API.
 *
 * Clears the keep-enabled flag and, if the receiver was enabled, requests
 * the driver to disable it and blocks until the UART_RX_DISABLED event is
 * reported (on_rx_dis() gives the semaphore).
 */
int uart_async_to_irq_rx_disable(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);
	const struct uart_async_to_irq_config *config = get_config(dev);
	int err;

	/* Atomically clear the flag; act only if RX was actually enabled. */
	if (atomic_and(&data->flags, ~A2I_RX_ENABLE) & A2I_RX_ENABLE) {
		err = config->api->rx_disable(dev);
		if (err < 0) {
			return err;
		}
		/* Wait until the receiver reports RX_DISABLED. */
		k_sem_take(&data->rx.sem, K_FOREVER);
	}

	return 0;
}
/* Called from the trampoline (emulated interrupt) context.
 *
 * Calls the user handler at least once and keeps calling it until all
 * interrupt requests - including those counted while the handler was
 * running - have been drained from irq_req.
 */
void uart_async_to_irq_trampoline_cb(const struct device *dev)
{
	struct uart_async_to_irq_data *data = get_data(dev);

	do {
		data->callback(dev, data->user_data);
	} while (atomic_dec(&data->irq_req) > 1);
}
/* Initialize the adaptation layer instance.
 *
 * Binds the TX buffer from the configuration, prepares the RX-disable
 * semaphore and initializes the RX helper.
 */
int uart_async_to_irq_init(struct uart_async_to_irq_data *data,
			   const struct uart_async_to_irq_config *config)
{
	k_sem_init(&data->rx.sem, 0, 1);
	data->tx.buf = config->tx_buf;
	data->tx.len = config->tx_len;

	return uart_async_rx_init(&data->rx.async_rx, &config->async_rx);
}

View file

@ -0,0 +1,285 @@
/*
* Copyright (c) 2023 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_DRIVERS_SERIAL_UART_ASYNC_TO_IRQ_H_
#define ZEPHYR_DRIVERS_SERIAL_UART_ASYNC_TO_IRQ_H_
#include <zephyr/drivers/uart.h>
#include <zephyr/logging/log.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/serial/uart_async_rx.h>
/**
* @brief UART Asynchronous to Interrupt driven API adaptation layer
* @ingroup uart_interface
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/* Forward declarations. */
/** @brief Data structure used by the adaptation layer.
*
* Pointer to that data must be the first element of the UART device data structure.
*/
struct uart_async_to_irq_data;
/** @brief Configuration structure used by the adaptation layer.
*
* Pointer to this data must be the first element of the UART device configuration structure.
*/
struct uart_async_to_irq_config;
/** @brief Function that triggers trampoline to the interrupt context.
 *
 * This context is used to call the user UART interrupt handler. It is used to
 * fulfill the requirement that the UART interrupt driven API shall be called
 * from the UART interrupt. The trampoline context shall have the same priority
 * as the UART.
 *
 * One option may be to use a k_timer configured to expire immediately.
 */
typedef void (*uart_async_to_irq_trampoline)(const struct device *dev);
/** @brief Callback to be called from trampoline context.
*
* @param dev UART device.
*/
void uart_async_to_irq_trampoline_cb(const struct device *dev);
/** @brief Interrupt driven API initializer.
*
* It should be used in the initialization of the UART API structure in the
* driver to provide interrupt driven API functions.
*/
#define UART_ASYNC_TO_IRQ_API_INIT() \
.fifo_fill = z_uart_async_to_irq_fifo_fill, \
.fifo_read = z_uart_async_to_irq_fifo_read, \
.irq_tx_enable = z_uart_async_to_irq_irq_tx_enable, \
.irq_tx_disable = z_uart_async_to_irq_irq_tx_disable, \
.irq_tx_ready = z_uart_async_to_irq_irq_tx_ready, \
.irq_rx_enable = z_uart_async_to_irq_irq_rx_enable, \
.irq_rx_disable = z_uart_async_to_irq_irq_rx_disable, \
.irq_tx_complete = z_uart_async_to_irq_irq_tx_complete,\
.irq_rx_ready = z_uart_async_to_irq_irq_rx_ready, \
.irq_err_enable = z_uart_async_to_irq_irq_err_enable, \
.irq_err_disable = z_uart_async_to_irq_irq_err_disable,\
.irq_is_pending = z_uart_async_to_irq_irq_is_pending, \
.irq_update = z_uart_async_to_irq_irq_update, \
.irq_callback_set = z_uart_async_to_irq_irq_callback_set
/** @brief Configuration structure initializer.
*
* @param _api Structure with UART asynchronous API.
* @param _trampoline Function that trampolines to the interrupt context.
* @param _baudrate UART baudrate.
* @param _tx_buf TX buffer.
* @param _tx_len TX buffer length.
* @param _rx_buf RX buffer.
* @param _rx_len RX buffer length.
* @param _rx_cnt Number of chunks into which RX buffer is divided.
* @param _log Logging instance, if not provided (empty) then default is used.
*/
#define UART_ASYNC_TO_IRQ_API_CONFIG_INITIALIZER(_api, _trampoline, _baudrate, _tx_buf, \
_tx_len, _rx_buf, _rx_len, _rx_cnt, _log) \
{ \
.tx_buf = _tx_buf, \
.tx_len = _tx_len, \
.async_rx = { \
.buffer = _rx_buf, \
.length = _rx_len, \
.buf_cnt = _rx_cnt \
}, \
.api = _api, \
.trampoline = _trampoline, \
.baudrate = _baudrate, \
LOG_OBJECT_PTR_INIT(log, \
COND_CODE_1(IS_EMPTY(_log), \
(LOG_OBJECT_PTR(UART_ASYNC_TO_IRQ_LOG_NAME)), \
(_log) \
) \
) \
}
/** @brief Initialize the adaptation layer.
*
* @param data Data associated with the given adaptation layer instance.
* @param config Configuration structure. Must be persistent.
*
* @retval 0 On successful initialization.
*/
int uart_async_to_irq_init(struct uart_async_to_irq_data *data,
const struct uart_async_to_irq_config *config);
/** @brief Enable RX for the interrupt driven API.
 *
 * @param dev UART device. Device must support asynchronous API.
 *
 * @retval 0 on successful operation.
 * @retval -EINVAL if the adaptation layer has wrong configuration.
 * @retval negative value Error reported by the UART API.
 */
int uart_async_to_irq_rx_enable(const struct device *dev);
/** @brief Disable RX for the interrupt driven API.
 *
 * @param dev UART device. Device must support asynchronous API.
 *
 * @retval 0 on successful operation.
 * @retval -EINVAL if the adaptation layer has wrong configuration.
 * @retval negative value Error reported by the UART API.
 */
int uart_async_to_irq_rx_disable(const struct device *dev);
/* Starting from here API is internal only. */
/** @cond INTERNAL_HIDDEN
* @brief Structure used by the adaptation layer.
*/
struct uart_async_to_irq_config {
	/** Pointer to the TX buffer (data is copied here by fifo_fill). */
	uint8_t *tx_buf;

	/** TX buffer length. */
	size_t tx_len;

	/** UART asynchronous RX helper configuration. */
	struct uart_async_rx_config async_rx;

	/** Asynchronous API used by the a2i layer. */
	const struct uart_async_to_irq_async_api *api;

	/** Trampoline callback (triggers the emulated interrupt context). */
	uart_async_to_irq_trampoline trampoline;

	/** Initial baudrate; fallback when uart_config_get() is unavailable. */
	uint32_t baudrate;

	/** Instance logging handler. */
	LOG_INSTANCE_PTR_DECLARE(log);
};
/** @brief Asynchronous API used by the adaptation layer.
 *
 * Subset of the UART asynchronous API that the adaptation layer calls;
 * filled by the driver with its own implementations.
 */
struct uart_async_to_irq_async_api {
	/** Set the asynchronous event callback. */
	int (*callback_set)(const struct device *dev,
			    uart_callback_t callback,
			    void *user_data);

	/** Start a TX transfer; timeout in microseconds. */
	int (*tx)(const struct device *dev, const uint8_t *buf, size_t len,
		  int32_t timeout);

	/** Abort an ongoing TX transfer. */
	int (*tx_abort)(const struct device *dev);

	/** Enable the receiver; inactivity timeout in microseconds. */
	int (*rx_enable)(const struct device *dev, uint8_t *buf, size_t len,
			 int32_t timeout);

	/** Provide the next RX buffer (response to UART_RX_BUF_REQUEST). */
	int (*rx_buf_rsp)(const struct device *dev, uint8_t *buf, size_t len);

	/** Disable the receiver. */
	int (*rx_disable)(const struct device *dev);
};
/** @brief Structure holding receiver data. */
struct uart_async_to_irq_rx_data {
	/** Asynchronous RX helper data. */
	struct uart_async_rx async_rx;

	/** Semaphore used to wait for RX being disabled. */
	struct k_sem sem;

	/** Number of pending buffer requests which could not be served
	 * immediately because of the lack of free buffers.
	 */
	atomic_t pending_buf_req;
};
/** @brief Structure holding transmitter data. */
struct uart_async_to_irq_tx_data {
	/** TX buffer (holds a copy of the data passed to fifo_fill). */
	uint8_t *buf;

	/** Length of the buffer. */
	size_t len;
};
/** @brief Data associated with the asynchronous to interrupt driven API
 * adaptation layer.
 */
struct uart_async_to_irq_data {
	/** User callback for the interrupt driven API. */
	uart_irq_callback_user_data_t callback;

	/** User data passed to the callback. */
	void *user_data;

	/** Interrupt request counter, drained by the trampoline callback. */
	atomic_t irq_req;

	/** RX specific data. */
	struct uart_async_to_irq_rx_data rx;

	/** TX specific data. */
	struct uart_async_to_irq_tx_data tx;

	/** Spinlock. */
	struct k_spinlock lock;

	/** Internally used flags for holding the state of the a2i layer. */
	atomic_t flags;
};
/** Interrupt driven FIFO fill function. */
int z_uart_async_to_irq_fifo_fill(const struct device *dev,
const uint8_t *buf,
int len);
/** Interrupt driven FIFO read function. */
int z_uart_async_to_irq_fifo_read(const struct device *dev,
uint8_t *buf,
const int len);
/** Interrupt driven transfer enabling function. */
void z_uart_async_to_irq_irq_tx_enable(const struct device *dev);
/** Interrupt driven transfer disabling function */
void z_uart_async_to_irq_irq_tx_disable(const struct device *dev);
/** Interrupt driven transfer ready function */
int z_uart_async_to_irq_irq_tx_ready(const struct device *dev);
/** Interrupt driven receiver enabling function */
void z_uart_async_to_irq_irq_rx_enable(const struct device *dev);
/** Interrupt driven receiver disabling function */
void z_uart_async_to_irq_irq_rx_disable(const struct device *dev);
/** Interrupt driven transfer complete function */
int z_uart_async_to_irq_irq_tx_complete(const struct device *dev);
/** Interrupt driven receiver ready function */
int z_uart_async_to_irq_irq_rx_ready(const struct device *dev);
/** Interrupt driven error enabling function */
void z_uart_async_to_irq_irq_err_enable(const struct device *dev);
/** Interrupt driven error disabling function */
void z_uart_async_to_irq_irq_err_disable(const struct device *dev);
/** Interrupt driven pending status function */
int z_uart_async_to_irq_irq_is_pending(const struct device *dev);
/** Interrupt driven interrupt update function */
int z_uart_async_to_irq_irq_update(const struct device *dev);
/** Set the irq callback function */
void z_uart_async_to_irq_irq_callback_set(const struct device *dev,
uart_irq_callback_user_data_t cb,
void *user_data);
/** @endcond */
#ifdef __cplusplus
}
#endif
/** @} */
#endif /* ZEPHYR_DRIVERS_SERIAL_UART_ASYNC_TO_IRQ_H_ */