2018-07-02 12:55:49 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2018 Nordic Semiconductor ASA
|
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Driver for Nordic Semiconductor nRF UARTE
|
|
|
|
*/
|
|
|
|
|
2019-06-25 15:54:01 -04:00
|
|
|
#include <drivers/uart.h>
|
2018-07-02 12:55:49 +02:00
|
|
|
#include <hal/nrf_gpio.h>
|
|
|
|
#include <hal/nrf_uarte.h>
|
2018-12-20 15:35:06 +01:00
|
|
|
#include <nrfx_timer.h>
|
2019-06-26 10:33:55 -04:00
|
|
|
#include <sys/util.h>
|
2018-12-20 15:35:06 +01:00
|
|
|
#include <kernel.h>
|
|
|
|
#include <logging/log.h>
|
2020-10-01 10:43:11 +02:00
|
|
|
#include <helpers/nrfx_gppi.h>
|
2018-12-20 15:35:06 +01:00
|
|
|
LOG_MODULE_REGISTER(uart_nrfx_uarte, LOG_LEVEL_ERR);
|
2018-07-02 12:55:49 +02:00
|
|
|
|
2019-02-26 14:30:13 +01:00
|
|
|
/* Generalize PPI or DPPI channel management.
 *
 * Depending on the SoC, event-to-task connections are made either through
 * the PPI or the DPPIC peripheral. These aliases hide the difference so
 * the rest of the driver can use a single gppi_* API.
 */
#if defined(CONFIG_HAS_HW_NRF_PPI)
#include <nrfx_ppi.h>
#define gppi_channel_t nrf_ppi_channel_t
#define gppi_channel_alloc nrfx_ppi_channel_alloc
#define gppi_channel_enable nrfx_ppi_channel_enable
#elif defined(CONFIG_HAS_HW_NRF_DPPIC)
#include <nrfx_dppi.h>
/* DPPI channels are identified by a plain channel number. */
#define gppi_channel_t uint8_t
#define gppi_channel_alloc nrfx_dppi_channel_alloc
#define gppi_channel_enable nrfx_dppi_channel_enable
#else
#error "No PPI or DPPI"
#endif
|
|
|
|
|
|
|
|
|
2018-07-02 12:55:49 +02:00
|
|
|
/* Defined when at least one enabled UARTE instance uses the
 * interrupt-driven API; gates compilation of the related code paths.
 */
#if (defined(CONFIG_UART_0_NRF_UARTE) && \
     defined(CONFIG_UART_0_INTERRUPT_DRIVEN)) || \
	(defined(CONFIG_UART_1_NRF_UARTE) && \
	 defined(CONFIG_UART_1_INTERRUPT_DRIVEN)) || \
	(defined(CONFIG_UART_2_NRF_UARTE) && \
	 defined(CONFIG_UART_2_INTERRUPT_DRIVEN)) || \
	(defined(CONFIG_UART_3_NRF_UARTE) && \
	 defined(CONFIG_UART_3_INTERRUPT_DRIVEN))
#define UARTE_INTERRUPT_DRIVEN 1
#endif
|
|
|
|
|
2020-10-01 10:43:11 +02:00
|
|
|
/* Defined when at least one enabled UARTE instance does NOT use the
 * asynchronous API (i.e. uses polling or interrupt-driven mode).
 */
#if (defined(CONFIG_UART_0_NRF_UARTE) && !defined(CONFIG_UART_0_ASYNC)) || \
	(defined(CONFIG_UART_1_NRF_UARTE) && !defined(CONFIG_UART_1_ASYNC)) || \
	(defined(CONFIG_UART_2_NRF_UARTE) && !defined(CONFIG_UART_2_ASYNC)) || \
	(defined(CONFIG_UART_3_NRF_UARTE) && !defined(CONFIG_UART_3_ASYNC))
#define UARTE_ANY_NONE_ASYNC 1
#endif

/*
 * RX timeout is divided into time slabs, this define tells how many divisions
 * should be made. More divisions - higher timeout accuracy and processor usage.
 */
#define RX_TIMEOUT_DIV 5
|
|
|
|
|
|
|
|
#ifdef CONFIG_UART_ASYNC_API
|
|
|
|
struct uarte_async_cb {
|
|
|
|
uart_callback_t user_callback;
|
|
|
|
void *user_data;
|
|
|
|
|
2020-10-01 10:43:11 +02:00
|
|
|
const uint8_t *tx_buf;
|
|
|
|
volatile size_t tx_size;
|
|
|
|
uint8_t *pend_tx_buf;
|
|
|
|
|
2018-12-20 15:35:06 +01:00
|
|
|
struct k_timer tx_timeout_timer;
|
|
|
|
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t *rx_buf;
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
size_t rx_buf_len;
|
2018-12-20 15:35:06 +01:00
|
|
|
size_t rx_offset;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t *rx_next_buf;
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
size_t rx_next_buf_len;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint32_t rx_total_byte_cnt; /* Total number of bytes received */
|
|
|
|
uint32_t rx_total_user_byte_cnt; /* Total number of bytes passed to user */
|
|
|
|
int32_t rx_timeout; /* Timeout set by user */
|
|
|
|
int32_t rx_timeout_slab; /* rx_timeout divided by RX_TIMEOUT_DIV */
|
|
|
|
int32_t rx_timeout_left; /* Current time left until user callback */
|
2018-12-20 15:35:06 +01:00
|
|
|
struct k_timer rx_timeout_timer;
|
|
|
|
union {
|
2019-02-26 14:30:13 +01:00
|
|
|
gppi_channel_t ppi;
|
2020-05-27 11:26:57 -05:00
|
|
|
uint32_t cnt;
|
2018-12-20 15:35:06 +01:00
|
|
|
} rx_cnt;
|
2020-10-01 10:43:11 +02:00
|
|
|
volatile int tx_amount;
|
2018-12-20 15:35:06 +01:00
|
|
|
|
|
|
|
bool rx_enabled;
|
|
|
|
bool hw_rx_counting;
|
2019-08-26 11:25:35 +02:00
|
|
|
/* Flag to ensure that RX timeout won't be executed during ENDRX ISR */
|
|
|
|
volatile bool is_in_irq;
|
2018-12-20 15:35:06 +01:00
|
|
|
};
|
|
|
|
#endif
|
2018-07-02 12:55:49 +02:00
|
|
|
|
|
|
|
#ifdef UARTE_INTERRUPT_DRIVEN
|
2018-12-20 15:35:06 +01:00
|
|
|
struct uarte_nrfx_int_driven {
|
2018-07-16 21:12:26 +03:00
|
|
|
uart_irq_callback_user_data_t cb; /**< Callback function pointer */
|
|
|
|
void *cb_data; /**< Callback function arg */
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t *tx_buffer;
|
|
|
|
uint16_t tx_buff_size;
|
2018-11-19 13:43:47 +01:00
|
|
|
volatile bool disable_tx_irq;
|
2020-10-01 10:43:11 +02:00
|
|
|
atomic_t fifo_fill_lock;
|
2018-12-20 15:35:06 +01:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Device data structure (mutable, per-instance runtime state) */
struct uarte_nrfx_data {
	const struct device *dev;          /* Back-pointer to the device */
	struct uart_config uart_config;    /* Last applied UART configuration */
#ifdef UARTE_INTERRUPT_DRIVEN
	struct uarte_nrfx_int_driven *int_driven; /* NULL if instance does not
						   * use the interrupt API
						   */
#endif
#ifdef CONFIG_UART_ASYNC_API
	struct uarte_async_cb *async;      /* NULL if instance does not use
					    * the async API
					    */
#endif
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
	uint32_t pm_state;                 /* Current device power state */
#endif
	uint8_t char_out;                  /* Single-byte poll-out buffer */
	uint8_t rx_data;                   /* Single-byte poll-in buffer */
	gppi_channel_t ppi_ch_endtx;       /* (D)PPI channel for ENDTX->STOPTX */
};
|
|
|
|
|
2020-06-05 15:25:38 +02:00
|
|
|
/* Flags recording which flow-control pins were configured for an instance;
 * presumably tested against uarte_nrfx_config.rts_cts_pins_set — confirm
 * with callers.
 */
#define CTS_PIN_SET_MASK BIT(1)
#define RTS_PIN_SET_MASK BIT(2)

/* The argument is parenthesized: without it, IS_CTS_PIN_SET(a | b) would
 * expand to (a | b & CTS_PIN_SET_MASK), binding '&' before '|'.
 */
#define IS_CTS_PIN_SET(mask) ((mask) & CTS_PIN_SET_MASK)
#define IS_RTS_PIN_SET(mask) ((mask) & RTS_PIN_SET_MASK)
|
|
|
|
|
2018-07-02 12:55:49 +02:00
|
|
|
/**
 * @brief Structure for UARTE configuration.
 *
 * Immutable, ROM-able per-instance configuration.
 */
struct uarte_nrfx_config {
	NRF_UARTE_Type *uarte_regs; /* Instance address */
	uint8_t rts_cts_pins_set;   /* CTS/RTS pin-set flags (see *_PIN_SET_MASK) */
	bool gpio_mgmt;             /* Manage TX/RX pins on power state changes */
	bool ppi_endtx;             /* ENDTX->STOPTX connected via (D)PPI instead
				     * of being handled in the ISR
				     */
#ifdef CONFIG_UART_ASYNC_API
	nrfx_timer_t timer;         /* TIMER used for HW RX byte counting */
#endif
};
|
|
|
|
|
|
|
|
/* Pin selections applied once at driver initialization. */
struct uarte_init_config {
	uint32_t pseltxd; /* PSEL.TXD register value */
	uint32_t pselrxd; /* PSEL.RXD register value */
	uint32_t pselcts; /* PSEL.CTS register value */
	uint32_t pselrts; /* PSEL.RTS register value */
};
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static inline struct uarte_nrfx_data *get_dev_data(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
2020-05-28 21:23:02 +02:00
|
|
|
return dev->data;
|
2018-07-02 12:55:49 +02:00
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static inline const struct uarte_nrfx_config *get_dev_config(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
2020-05-28 20:44:16 +02:00
|
|
|
return dev->config;
|
2018-07-02 12:55:49 +02:00
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static inline NRF_UARTE_Type *get_uarte_instance(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
|
|
|
const struct uarte_nrfx_config *config = get_dev_config(dev);
|
|
|
|
|
|
|
|
return config->uarte_regs;
|
|
|
|
}
|
|
|
|
|
2020-10-01 10:43:11 +02:00
|
|
|
/* Handle the ENDTX event: clear it and trigger STOPTX so the peripheral
 * reaches the TXSTOPPED state. Runs with interrupts locked because the
 * ENDTX check-and-clear must not interleave with other contexts touching
 * the same events.
 */
static void endtx_isr(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	int key = irq_lock();

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
	}

	irq_unlock(key);

}
|
|
|
|
|
|
|
|
#ifdef UARTE_ANY_NONE_ASYNC
|
2018-07-02 12:55:49 +02:00
|
|
|
/**
|
|
|
|
* @brief Interrupt service routine.
|
|
|
|
*
|
|
|
|
* This simply calls the callback function, if one exists.
|
|
|
|
*
|
|
|
|
* @param arg Argument to ISR.
|
|
|
|
*
|
|
|
|
* @return N/A
|
|
|
|
*/
|
2018-12-20 15:35:06 +01:00
|
|
|
static void uarte_nrfx_isr_int(void *arg)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
2020-04-30 20:33:38 +02:00
|
|
|
const struct device *dev = arg;
|
2018-11-19 13:43:47 +01:00
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
2020-10-01 10:43:11 +02:00
|
|
|
/* If interrupt driven and asynchronous APIs are disabled then UART
|
|
|
|
* interrupt is still called to stop TX. Unless it is done using PPI.
|
|
|
|
*/
|
|
|
|
if (nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDTX_MASK) &&
|
|
|
|
nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
|
|
|
|
endtx_isr(dev);
|
|
|
|
}
|
2018-11-19 13:43:47 +01:00
|
|
|
|
2020-10-01 10:43:11 +02:00
|
|
|
#ifdef UARTE_INTERRUPT_DRIVEN
|
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
2018-11-19 13:43:47 +01:00
|
|
|
|
2020-10-01 10:43:11 +02:00
|
|
|
if (!data->int_driven) {
|
2018-11-19 13:43:47 +01:00
|
|
|
return;
|
|
|
|
}
|
2018-07-02 12:55:49 +02:00
|
|
|
|
2020-10-01 10:43:11 +02:00
|
|
|
if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
|
|
|
|
data->int_driven->fifo_fill_lock = 0;
|
|
|
|
if (data->int_driven->disable_tx_irq) {
|
|
|
|
nrf_uarte_int_disable(uarte,
|
|
|
|
NRF_UARTE_INT_TXSTOPPED_MASK);
|
|
|
|
data->int_driven->disable_tx_irq = false;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-11-22 11:13:57 +01:00
|
|
|
if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) {
|
|
|
|
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR);
|
|
|
|
}
|
|
|
|
|
2018-12-20 15:35:06 +01:00
|
|
|
if (data->int_driven->cb) {
|
2020-06-24 15:47:15 +02:00
|
|
|
data->int_driven->cb(dev, data->int_driven->cb_data);
|
2018-07-02 12:55:49 +02:00
|
|
|
}
|
|
|
|
#endif /* UARTE_INTERRUPT_DRIVEN */
|
2020-10-01 10:43:11 +02:00
|
|
|
}
|
|
|
|
#endif /* UARTE_ANY_NONE_ASYNC */
|
2018-07-02 12:55:49 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Set the baud rate
|
|
|
|
*
|
|
|
|
* This routine set the given baud rate for the UARTE.
|
|
|
|
*
|
|
|
|
* @param dev UARTE device struct
|
|
|
|
* @param baudrate Baud rate
|
|
|
|
*
|
|
|
|
* @return 0 on success or error code
|
|
|
|
*/
|
2020-04-30 20:33:38 +02:00
|
|
|
static int baudrate_set(const struct device *dev, uint32_t baudrate)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
|
|
|
nrf_uarte_baudrate_t nrf_baudrate; /* calculated baudrate divisor */
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
|
|
|
switch (baudrate) {
|
|
|
|
case 300:
|
|
|
|
/* value not supported by Nordic HAL */
|
|
|
|
nrf_baudrate = 0x00014000;
|
|
|
|
break;
|
|
|
|
case 600:
|
|
|
|
/* value not supported by Nordic HAL */
|
|
|
|
nrf_baudrate = 0x00027000;
|
|
|
|
break;
|
|
|
|
case 1200:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_1200;
|
|
|
|
break;
|
|
|
|
case 2400:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_2400;
|
|
|
|
break;
|
|
|
|
case 4800:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_4800;
|
|
|
|
break;
|
|
|
|
case 9600:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_9600;
|
|
|
|
break;
|
|
|
|
case 14400:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_14400;
|
|
|
|
break;
|
|
|
|
case 19200:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_19200;
|
|
|
|
break;
|
|
|
|
case 28800:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_28800;
|
|
|
|
break;
|
|
|
|
case 31250:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_31250;
|
|
|
|
break;
|
|
|
|
case 38400:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_38400;
|
|
|
|
break;
|
|
|
|
case 56000:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_56000;
|
|
|
|
break;
|
|
|
|
case 57600:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_57600;
|
|
|
|
break;
|
|
|
|
case 76800:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_76800;
|
|
|
|
break;
|
|
|
|
case 115200:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_115200;
|
|
|
|
break;
|
|
|
|
case 230400:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_230400;
|
|
|
|
break;
|
|
|
|
case 250000:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_250000;
|
|
|
|
break;
|
|
|
|
case 460800:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_460800;
|
|
|
|
break;
|
|
|
|
case 921600:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_921600;
|
|
|
|
break;
|
|
|
|
case 1000000:
|
|
|
|
nrf_baudrate = NRF_UARTE_BAUDRATE_1000000;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
nrf_uarte_baudrate_set(uarte, nrf_baudrate);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Apply a runtime UART configuration (uart_configure API).
 *
 * Validates each field against what the hardware variant supports, fills
 * an nrf_uarte_config_t, programs the baud rate and CONFIG register, and
 * caches the accepted configuration in the device data.
 *
 * @return 0 on success, -ENOTSUP if any field is not supported.
 */
static int uarte_nrfx_configure(const struct device *dev,
				const struct uart_config *cfg)
{
	nrf_uarte_config_t uarte_cfg;

#if defined(UARTE_CONFIG_STOP_Msk)
	/* This variant supports a configurable stop-bit count. */
	switch (cfg->stop_bits) {
	case UART_CFG_STOP_BITS_1:
		uarte_cfg.stop = NRF_UARTE_STOP_ONE;
		break;
	case UART_CFG_STOP_BITS_2:
		uarte_cfg.stop = NRF_UARTE_STOP_TWO;
		break;
	default:
		return -ENOTSUP;
	}
#else
	/* Only one stop bit available on this variant. */
	if (cfg->stop_bits != UART_CFG_STOP_BITS_1) {
		return -ENOTSUP;
	}
#endif

	/* Hardware supports 8 data bits only. */
	if (cfg->data_bits != UART_CFG_DATA_BITS_8) {
		return -ENOTSUP;
	}

	switch (cfg->flow_ctrl) {
	case UART_CFG_FLOW_CTRL_NONE:
		uarte_cfg.hwfc = NRF_UARTE_HWFC_DISABLED;
		break;
	case UART_CFG_FLOW_CTRL_RTS_CTS:
		/* HWFC requires that RTS/CTS pins were configured. */
		if (get_dev_config(dev)->rts_cts_pins_set) {
			uarte_cfg.hwfc = NRF_UARTE_HWFC_ENABLED;
		} else {
			return -ENOTSUP;
		}
		break;
	default:
		return -ENOTSUP;
	}

#if defined(UARTE_CONFIG_PARITYTYPE_Msk)
	/* Default parity type; overridden below for odd parity. */
	uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_EVEN;
#endif
	switch (cfg->parity) {
	case UART_CFG_PARITY_NONE:
		uarte_cfg.parity = NRF_UARTE_PARITY_EXCLUDED;
		break;
	case UART_CFG_PARITY_EVEN:
		uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
		break;
#if defined(UARTE_CONFIG_PARITYTYPE_Msk)
	case UART_CFG_PARITY_ODD:
		uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
		uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_ODD;
		break;
#endif
	default:
		return -ENOTSUP;
	}

	if (baudrate_set(dev, cfg->baudrate) != 0) {
		return -ENOTSUP;
	}

	nrf_uarte_configure(get_uarte_instance(dev), &uarte_cfg);

	/* Remember the accepted configuration for uart_config_get(). */
	get_dev_data(dev)->uart_config = *cfg;

	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uarte_nrfx_config_get(const struct device *dev,
|
|
|
|
struct uart_config *cfg)
|
2018-11-20 12:24:42 +01:00
|
|
|
{
|
|
|
|
*cfg = get_dev_data(dev)->uart_config;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-12-20 15:35:06 +01:00
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Read and clear the accumulated error sources.
 *
 * The ERRORSRC register bitfields map directly to the error defines in
 * uart.h, so the raw value can be returned as-is.
 */
static int uarte_nrfx_err_check(const struct device *dev)
{
	return nrf_uarte_errorsrc_get_and_clear(get_uarte_instance(dev));
}
|
|
|
|
|
2020-10-01 10:43:11 +02:00
|
|
|
/* Function returns true if new transfer can be started. Since TXSTOPPED
|
|
|
|
* (and ENDTX) is cleared before triggering new transfer, TX is ready for new
|
|
|
|
* transfer if any event is set.
|
|
|
|
*/
|
|
|
|
static bool is_tx_ready(const struct device *dev)
|
|
|
|
{
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
bool ppi_endtx = get_dev_config(dev)->ppi_endtx;
|
|
|
|
|
|
|
|
return nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED) ||
|
|
|
|
(!ppi_endtx ?
|
|
|
|
nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX) : 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Kick off a TX transfer of @p len bytes from @p buf.
 *
 * Stale ENDTX/TXSTOPPED events are cleared before STARTTX so that a
 * previous transfer's completion cannot be mistaken for this one's
 * (see is_tx_ready(), which treats either event as "ready").
 */
static void tx_start(NRF_UARTE_Type *uarte, const uint8_t *buf, size_t len)
{
	nrf_uarte_tx_buffer_set(uarte, buf, len);
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED);
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);
}
|
|
|
|
|
2018-12-20 15:35:06 +01:00
|
|
|
#ifdef CONFIG_UART_ASYNC_API

/* True when HW (TIMER + (D)PPI) RX byte counting is compiled in and
 * active for this instance. The IS_ENABLED() guard lets the compiler
 * drop the flag access entirely when the Kconfig option is off.
 */
static inline bool hw_rx_counting_enabled(struct uarte_nrfx_data *data)
{
	return IS_ENABLED(CONFIG_UARTE_NRF_HW_ASYNC) &&
	       data->async->hw_rx_counting;
}
|
|
|
|
|
|
|
|
/* The byte-counting TIMER runs in counter mode; its events are unused. */
static void timer_handler(nrf_timer_event_t event_type, void *p_context) { }
/* Forward declarations for the k_timer expiry handlers defined below. */
static void rx_timeout(struct k_timer *timer);
static void tx_timeout(struct k_timer *timer);
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Set up RX byte counting for the async API.
 *
 * Preferred: a TIMER in counter mode incremented on every RXDRDY event
 * through a (D)PPI channel. If the TIMER or a channel cannot be obtained
 * the driver falls back to software counting via the RXDRDY interrupt.
 * hw_rx_counting_enabled() is re-checked after each setup step because a
 * failure in the previous step may have switched the mode to software.
 *
 * @return 0 on success, -EIO if a (D)PPI operation fails.
 */
static int uarte_nrfx_rx_counting_init(const struct device *dev)
{
	struct uarte_nrfx_data *data = get_dev_data(dev);
	const struct uarte_nrfx_config *cfg = get_dev_config(dev);
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	int ret;

	if (hw_rx_counting_enabled(data)) {
		nrfx_timer_config_t tmr_config = NRFX_TIMER_DEFAULT_CONFIG;

		tmr_config.mode = NRF_TIMER_MODE_COUNTER;
		tmr_config.bit_width = NRF_TIMER_BIT_WIDTH_32;
		ret = nrfx_timer_init(&cfg->timer,
				      &tmr_config,
				      timer_handler);
		if (ret != NRFX_SUCCESS) {
			LOG_ERR("Timer already initialized, "
				"switching to software byte counting.");
			data->async->hw_rx_counting = false;
		} else {
			nrfx_timer_enable(&cfg->timer);
			nrfx_timer_clear(&cfg->timer);
		}
	}

	if (hw_rx_counting_enabled(data)) {
		ret = gppi_channel_alloc(&data->async->rx_cnt.ppi);
		if (ret != NRFX_SUCCESS) {
			LOG_ERR("Failed to allocate PPI Channel, "
				"switching to software byte counting.");
			data->async->hw_rx_counting = false;
			/* Undo the timer setup from the previous step. */
			nrfx_timer_uninit(&cfg->timer);
		}
	}

	if (hw_rx_counting_enabled(data)) {
#if CONFIG_HAS_HW_NRF_PPI
		/* PPI: connect RXDRDY event to the TIMER COUNT task. */
		ret = nrfx_ppi_channel_assign(
			data->async->rx_cnt.ppi,
			nrf_uarte_event_address_get(uarte,
						    NRF_UARTE_EVENT_RXDRDY),
			nrfx_timer_task_address_get(&cfg->timer,
						    NRF_TIMER_TASK_COUNT));

		if (ret != NRFX_SUCCESS) {
			return -EIO;
		}
#else
		/* DPPI: publish RXDRDY and subscribe COUNT on the channel. */
		nrf_uarte_publish_set(uarte,
				      NRF_UARTE_EVENT_RXDRDY,
				      data->async->rx_cnt.ppi);
		nrf_timer_subscribe_set(cfg->timer.p_reg,
					NRF_TIMER_TASK_COUNT,
					data->async->rx_cnt.ppi);

#endif
		ret = gppi_channel_enable(data->async->rx_cnt.ppi);
		if (ret != NRFX_SUCCESS) {
			return -EIO;
		}
	} else {
		/* Software fallback: count bytes in the RXDRDY interrupt. */
		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK);
	}

	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Initialize an instance for the asynchronous API: set up RX byte
 * counting, enable the RX-related interrupts, enable the peripheral,
 * flush any RX operation left over from before boot, and prepare the
 * RX/TX timeout timers.
 *
 * @return 0 on success, negative errno from counting init otherwise.
 */
static int uarte_nrfx_init(const struct device *dev)
{
	struct uarte_nrfx_data *data = get_dev_data(dev);
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	int ret = uarte_nrfx_rx_counting_init(dev);

	if (ret != 0) {
		return ret;
	}
	nrf_uarte_int_enable(uarte,
			     NRF_UARTE_INT_ENDRX_MASK |
			     NRF_UARTE_INT_RXSTARTED_MASK |
			     NRF_UARTE_INT_ERROR_MASK |
			     NRF_UARTE_INT_RXTO_MASK);
	nrf_uarte_enable(uarte);

	/**
	 * Stop any currently running RX operations. This can occur when a
	 * bootloader sets up the UART hardware and does not clean it up
	 * before jumping to the next application.
	 */
	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
		while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO)) {
			/* Busy wait for event to register */
		}
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
	}

	/* Timers deliver the driver data pointer to their expiry handlers. */
	k_timer_init(&data->async->rx_timeout_timer, rx_timeout, NULL);
	k_timer_user_data_set(&data->async->rx_timeout_timer, data);
	k_timer_init(&data->async->tx_timeout_timer, tx_timeout, NULL);
	k_timer_user_data_set(&data->async->tx_timeout_timer, data);

	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Asynchronous TX (uart_tx API).
 *
 * @param dev     UARTE device.
 * @param buf     Data to send; must reside in RAM (checked with
 *                nrfx_is_in_ram() — EasyDMA cannot read it otherwise).
 * @param len     Number of bytes to send.
 * @param timeout Transfer timeout in ms, or SYS_FOREVER_MS for none.
 *
 * @return 0 on success, -ENOTSUP if buf is not in RAM, -EBUSY if a
 *         transfer is already in progress.
 */
static int uarte_nrfx_tx(const struct device *dev, const uint8_t *buf,
			 size_t len,
			 int32_t timeout)
{
	struct uarte_nrfx_data *data = get_dev_data(dev);
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	if (!nrfx_is_in_ram(buf)) {
		return -ENOTSUP;
	}

	/* tx_size acts as the busy flag; claim it atomically. */
	int key = irq_lock();

	if (data->async->tx_size) {
		irq_unlock(key);
		return -EBUSY;
	} else {
		data->async->tx_size = len;
	}

	nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);

	if (!is_tx_ready(dev)) {
		/* Active poll out, postpone until it is completed. */
		data->async->pend_tx_buf = (uint8_t *)buf;
	} else {
		data->async->tx_buf = buf;
		data->async->tx_amount = -1;
		tx_start(uarte, buf, len);
	}

	irq_unlock(key);

	/* The TX timeout is only meaningful with HW flow control, where
	 * the other side can stall the transfer indefinitely.
	 */
	if (data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS
	    && timeout != SYS_FOREVER_MS) {
		k_timer_start(&data->async->tx_timeout_timer, K_MSEC(timeout),
			      K_NO_WAIT);
	}
	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uarte_nrfx_tx_abort(const struct device *dev)
|
2018-12-20 15:35:06 +01:00
|
|
|
{
|
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
|
|
|
if (data->async->tx_buf == NULL) {
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
k_timer_stop(&data->async->tx_timeout_timer);
|
|
|
|
nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Enable asynchronous RX into @p buf (uart_rx_enable API).
 *
 * @param dev     UARTE device.
 * @param buf     Initial receive buffer.
 * @param len     Size of @p buf.
 * @param timeout Inactivity timeout (ms) after which received data is
 *                reported; sliced into RX_TIMEOUT_DIV slabs by rx_timeout().
 *
 * @return 0 on success, -ENOTSUP on a TX-only instance.
 */
static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
				size_t len,
				int32_t timeout)
{
	struct uarte_nrfx_data *data = get_dev_data(dev);
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	/* RX cannot work without an RX pin. */
	if (nrf_uarte_rx_pin_get(uarte) == NRF_UARTE_PSEL_DISCONNECTED) {
		__ASSERT(false, "TX only UARTE instance");
		return -ENOTSUP;
	}

	data->async->rx_timeout = timeout;
	/* A slab is never shorter than one system clock tick. */
	data->async->rx_timeout_slab =
		MAX(timeout / RX_TIMEOUT_DIV,
		    NRFX_CEIL_DIV(1000, CONFIG_SYS_CLOCK_TICKS_PER_SEC));

	data->async->rx_buf = buf;
	data->async->rx_buf_len = len;
	data->async->rx_offset = 0;
	data->async->rx_next_buf = NULL;
	data->async->rx_next_buf_len = 0;
	nrf_uarte_rx_buffer_set(uarte, buf, len);

	/* Clear stale completion events before starting. */
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);

	data->async->rx_enabled = true;
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf,
|
|
|
|
size_t len)
|
2018-12-20 15:35:06 +01:00
|
|
|
{
|
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
2020-07-29 13:05:42 +02:00
|
|
|
int err;
|
2018-12-20 15:35:06 +01:00
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
2020-07-29 13:05:42 +02:00
|
|
|
int key = irq_lock();
|
2018-12-20 15:35:06 +01:00
|
|
|
|
2020-07-29 13:05:42 +02:00
|
|
|
if ((data->async->rx_buf == NULL)) {
|
|
|
|
err = -EACCES;
|
|
|
|
} else if (data->async->rx_next_buf == NULL) {
|
2018-12-20 15:35:06 +01:00
|
|
|
data->async->rx_next_buf = buf;
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
data->async->rx_next_buf_len = len;
|
2018-12-20 15:35:06 +01:00
|
|
|
nrf_uarte_rx_buffer_set(uarte, buf, len);
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
|
2020-07-29 13:05:42 +02:00
|
|
|
err = 0;
|
2018-12-20 15:35:06 +01:00
|
|
|
} else {
|
2020-07-29 13:05:42 +02:00
|
|
|
err = -EBUSY;
|
2018-12-20 15:35:06 +01:00
|
|
|
}
|
2020-07-29 13:05:42 +02:00
|
|
|
|
|
|
|
irq_unlock(key);
|
|
|
|
|
|
|
|
return err;
|
2018-12-20 15:35:06 +01:00
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uarte_nrfx_callback_set(const struct device *dev,
|
|
|
|
uart_callback_t callback,
|
|
|
|
void *user_data)
|
2018-12-20 15:35:06 +01:00
|
|
|
{
|
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
|
|
|
|
|
|
|
data->async->user_callback = callback;
|
|
|
|
data->async->user_data = user_data;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uarte_nrfx_rx_disable(const struct device *dev)
|
2018-12-20 15:35:06 +01:00
|
|
|
{
|
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
|
|
|
if (data->async->rx_buf == NULL) {
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
if (data->async->rx_next_buf != NULL) {
|
|
|
|
nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
|
2020-06-02 13:59:07 +00:00
|
|
|
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
}
|
2018-12-20 15:35:06 +01:00
|
|
|
|
|
|
|
k_timer_stop(&data->async->rx_timeout_timer);
|
|
|
|
data->async->rx_enabled = false;
|
|
|
|
|
|
|
|
nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tx_timeout(struct k_timer *timer)
|
|
|
|
{
|
2020-07-08 13:37:36 +02:00
|
|
|
struct uarte_nrfx_data *data = k_timer_user_data_get(timer);
|
|
|
|
(void) uarte_nrfx_tx_abort(data->dev);
|
2018-12-20 15:35:06 +01:00
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static void user_callback(const struct device *dev, struct uart_event *evt)
|
2018-12-20 15:35:06 +01:00
|
|
|
{
|
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
|
|
|
|
|
|
|
if (data->async->user_callback) {
|
2020-06-24 14:28:05 +02:00
|
|
|
data->async->user_callback(dev, evt, data->async->user_data);
|
2018-12-20 15:35:06 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Periodic RX timeout handler.
 *
 * The whole timeout is divided by RX_TIMEOUT_DIV into smaller units;
 * rx_timeout() runs periodically every rx_timeout_slab ms. If data was
 * received between executions the countdown restarts from the full
 * timeout; otherwise rx_timeout_slab is subtracted from rx_timeout_left.
 * When rx_timeout_left drops below rx_timeout_slab, reception has timed
 * out and any pending data is reported to the user via UART_RX_RDY.
 */
static void rx_timeout(struct k_timer *timer)
{
	struct uarte_nrfx_data *data = k_timer_user_data_get(timer);
	const struct device *dev = data->dev;
	const struct uarte_nrfx_config *cfg = get_dev_config(dev);
	uint32_t read;

	/* ENDRX handling is in progress; let it finish first. */
	if (data->async->is_in_irq) {
		return;
	}

	/* Disable ENDRX ISR; if an ENDRX event is generated meanwhile it
	 * will be handled after this routine completes.
	 */
	nrf_uarte_int_disable(get_uarte_instance(dev),
			      NRF_UARTE_INT_ENDRX_MASK);

	/* Read the current received-byte counter, either from the HW timer
	 * (PPI-driven) or from the SW count of RXDRDY interrupts.
	 */
	if (hw_rx_counting_enabled(data)) {
		read = nrfx_timer_capture(&cfg->timer, 0);
	} else {
		read = data->async->rx_cnt.cnt;
	}

	/* Check if data was received since the last call; if so, restart
	 * the timeout countdown.
	 */
	if (read != data->async->rx_total_byte_cnt) {
		data->async->rx_total_byte_cnt = read;
		data->async->rx_timeout_left = data->async->rx_timeout;
	}

	/* Amount of data not yet reported to the user. Note that 'len' is
	 * a count of bytes received, not necessarily the amount available
	 * in the current buffer.
	 */
	int32_t len = data->async->rx_total_byte_cnt
		    - data->async->rx_total_user_byte_cnt;

	/* Check for the current buffer being full.
	 * If the UART receives characters before ENDRX is handled and the
	 * 'next' buffer is set up, the ENDRX->STARTRX shortcut means data
	 * goes into the 'next' buffer until the ENDRX event is handled, so
	 * 'len' must be clipped to the current buffer's remaining space.
	 */
	bool clipped = false;

	if (len + data->async->rx_offset > data->async->rx_buf_len) {
		len = data->async->rx_buf_len - data->async->rx_offset;
		clipped = true;
	}

	if (len > 0) {
		if (clipped ||
			(data->async->rx_timeout_left
				< data->async->rx_timeout_slab)) {
			/* rx_timeout ms elapsed since last reception:
			 * report the pending bytes to the user.
			 */
			struct uart_event evt = {
				.type = UART_RX_RDY,
				.data.rx.buf = data->async->rx_buf,
				.data.rx.len = len,
				.data.rx.offset = data->async->rx_offset
			};
			data->async->rx_offset += len;
			data->async->rx_total_user_byte_cnt += len;
			user_callback(dev, &evt);
		} else {
			data->async->rx_timeout_left -=
				data->async->rx_timeout_slab;
		}

		/* If there's nothing left to report until the buffers are
		 * switched, the timer can be stopped; it is restarted from
		 * the RXSTARTED handler.
		 */
		if (clipped) {
			k_timer_stop(&data->async->rx_timeout_timer);
		}
	}

	/* Re-enable ENDRX handling deferred at the top of this routine. */
	nrf_uarte_int_enable(get_uarte_instance(dev),
			     NRF_UARTE_INT_ENDRX_MASK);
}
|
|
|
|
|
|
|
|
/* Map the UARTE ERRORSRC register mask to a uart_rx_stop_reason value.
 * Errors are prioritized overrun > parity > framing > break; yields 0
 * when no error bit is set.
 */
#define UARTE_ERROR_FROM_MASK(mask)					\
	((mask) & NRF_UARTE_ERROR_OVERRUN_MASK ? UART_ERROR_OVERRUN	\
	 : (mask) & NRF_UARTE_ERROR_PARITY_MASK ? UART_ERROR_PARITY	\
	 : (mask) & NRF_UARTE_ERROR_FRAMING_MASK ? UART_ERROR_FRAMING	\
	 : (mask) & NRF_UARTE_ERROR_BREAK_MASK ? UART_BREAK		\
	 : 0)
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static void error_isr(const struct device *dev)
|
2018-12-20 15:35:06 +01:00
|
|
|
{
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
2020-05-27 11:26:57 -05:00
|
|
|
uint32_t err = nrf_uarte_errorsrc_get_and_clear(uarte);
|
2018-12-20 15:35:06 +01:00
|
|
|
struct uart_event evt = {
|
|
|
|
.type = UART_RX_STOPPED,
|
|
|
|
.data.rx_stop.reason = UARTE_ERROR_FROM_MASK(err),
|
|
|
|
};
|
|
|
|
user_callback(dev, &evt);
|
|
|
|
(void) uarte_nrfx_rx_disable(dev);
|
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static void rxstarted_isr(const struct device *dev)
|
2018-12-20 15:35:06 +01:00
|
|
|
{
|
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
|
|
|
struct uart_event evt = {
|
|
|
|
.type = UART_RX_BUF_REQUEST,
|
|
|
|
};
|
|
|
|
user_callback(dev, &evt);
|
2020-05-01 11:58:15 +02:00
|
|
|
if (data->async->rx_timeout != SYS_FOREVER_MS) {
|
2018-12-20 15:35:06 +01:00
|
|
|
data->async->rx_timeout_left = data->async->rx_timeout;
|
|
|
|
k_timer_start(&data->async->rx_timeout_timer,
|
2020-05-01 11:58:15 +02:00
|
|
|
K_MSEC(data->async->rx_timeout_slab),
|
|
|
|
K_MSEC(data->async->rx_timeout_slab));
|
2018-12-20 15:35:06 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static void endrx_isr(const struct device *dev)
|
2018-12-20 15:35:06 +01:00
|
|
|
{
|
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
|
|
|
if (!data->async->rx_enabled) {
|
2020-02-21 12:31:51 +01:00
|
|
|
if (data->async->rx_buf == NULL) {
|
|
|
|
/* This condition can occur only after triggering
|
|
|
|
* FLUSHRX task.
|
|
|
|
*/
|
|
|
|
struct uart_event evt = {
|
|
|
|
.type = UART_RX_DISABLED,
|
|
|
|
};
|
|
|
|
user_callback(dev, &evt);
|
2020-05-14 09:24:52 +02:00
|
|
|
return;
|
2020-02-21 12:31:51 +01:00
|
|
|
}
|
2018-12-20 15:35:06 +01:00
|
|
|
}
|
2019-08-26 11:25:35 +02:00
|
|
|
|
|
|
|
data->async->is_in_irq = true;
|
|
|
|
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
/* ensure rx timer is stopped - it will be restarted in RXSTARTED
|
|
|
|
* handler if needed
|
|
|
|
*/
|
2018-12-20 15:35:06 +01:00
|
|
|
k_timer_stop(&data->async->rx_timeout_timer);
|
|
|
|
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
/* this is the amount that the EasyDMA controller has copied into the
|
|
|
|
* buffer
|
|
|
|
*/
|
|
|
|
const int rx_amount = nrf_uarte_rx_amount_get(uarte);
|
|
|
|
|
|
|
|
/* The 'rx_offset' can be bigger than 'rx_amount', so it the length
|
|
|
|
* of data we report back the the user may need to be clipped.
|
|
|
|
* This can happen because the 'rx_offset' count derives from RXRDY
|
|
|
|
* events, which can occur already for the next buffer before we are
|
|
|
|
* here to handle this buffer. (The next buffer is now already active
|
|
|
|
* because of the ENDRX_STARTRX shortcut)
|
|
|
|
*/
|
|
|
|
int rx_len = rx_amount - data->async->rx_offset;
|
|
|
|
|
|
|
|
if (rx_len < 0) {
|
|
|
|
rx_len = 0;
|
|
|
|
}
|
2019-08-09 08:57:51 +02:00
|
|
|
|
|
|
|
data->async->rx_total_user_byte_cnt += rx_len;
|
|
|
|
|
|
|
|
if (!hw_rx_counting_enabled(data)) {
|
|
|
|
/* Prevent too low value of rx_cnt.cnt which may occur due to
|
|
|
|
* latencies in handling of the RXRDY interrupt. Because whole
|
|
|
|
* buffer was filled we can be sure that rx_total_user_byte_cnt
|
|
|
|
* is current total number of received bytes.
|
|
|
|
*/
|
|
|
|
data->async->rx_cnt.cnt = data->async->rx_total_user_byte_cnt;
|
|
|
|
}
|
|
|
|
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
/* Only send the RX_RDY event if there is something to send */
|
|
|
|
if (rx_len > 0) {
|
|
|
|
struct uart_event evt = {
|
|
|
|
.type = UART_RX_RDY,
|
|
|
|
.data.rx.buf = data->async->rx_buf,
|
|
|
|
.data.rx.len = rx_len,
|
|
|
|
.data.rx.offset = data->async->rx_offset,
|
|
|
|
};
|
|
|
|
user_callback(dev, &evt);
|
|
|
|
}
|
|
|
|
|
2020-05-14 09:24:52 +02:00
|
|
|
if (!data->async->rx_enabled) {
|
|
|
|
data->async->is_in_irq = false;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-12-20 15:35:06 +01:00
|
|
|
struct uart_event evt = {
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
.type = UART_RX_BUF_RELEASED,
|
|
|
|
.data.rx_buf.buf = data->async->rx_buf,
|
2018-12-20 15:35:06 +01:00
|
|
|
};
|
|
|
|
user_callback(dev, &evt);
|
|
|
|
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
/* If there is a next buffer, then STARTRX will have already been
|
|
|
|
* invoked by the short (the next buffer will be filling up already)
|
|
|
|
* and here we just do the swap of which buffer the driver is following,
|
|
|
|
* the next rx_timeout() will update the rx_offset.
|
|
|
|
*/
|
2020-07-29 13:05:42 +02:00
|
|
|
int key = irq_lock();
|
|
|
|
|
2018-12-20 15:35:06 +01:00
|
|
|
if (data->async->rx_next_buf) {
|
|
|
|
data->async->rx_buf = data->async->rx_next_buf;
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
data->async->rx_buf_len = data->async->rx_next_buf_len;
|
2018-12-20 15:35:06 +01:00
|
|
|
data->async->rx_next_buf = NULL;
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
data->async->rx_next_buf_len = 0;
|
2018-12-20 15:35:06 +01:00
|
|
|
|
|
|
|
data->async->rx_offset = 0;
|
2020-07-29 13:05:42 +02:00
|
|
|
/* Check is based on assumption that ISR handler handles
|
|
|
|
* ENDRX before RXSTARTED so if short was set on time, RXSTARTED
|
|
|
|
* event will be set.
|
|
|
|
*/
|
|
|
|
if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
|
|
|
|
nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
|
|
|
|
}
|
drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (eg at high baud rate, no HW flow control, and when BLE
radio/ints running) data could be lost between when enough characters
have been RX'd to fill the DMA buffer and when the ENDRX event was
fired, where the the STARTRX task is invoked to start filling the next
buffer (which is set up earlier, but I think will not be filled until
STARTRX).
To fix this, the SHORT is enabled between ENDRX and STARTRX whenever the
'next' buffer is available, so that STARTRX is invoked automatically and
subsequent chars go into the next buffer via EasyDMA.
To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX
event _before_ the STARTRX event.
There was also an issue in rx_timeout() where the received character
count (rx_total_byte_count) could be incremented greater than the actual
buffer size. This arises from rx_total_byte_count value coming from the
counting the RXDRDY events (either by PPI/timer counter or counting the
RXDRDY ints themselves) and so if chars are received in the rx_timeout()
(or before ENDRX is handled) the rx_timeout() could increment rx_offset
past the length of the buffer. This could result the remaining 'len'
being calculated incorrectly (an underflow due to unsigned - signed ,
where signed > unsigned).
To fix this, we now store the lengths of the buffers and don't invoke
the UART_RX_RDY callback when the buffers are full; its handled by
ENDRX.
(Also note that the buffer size should be available via the RXD.MAXCNT
register on the nrf, but this register is not exposed through the nrfx
HAL and is also double buffered, so it seemed clearer to just track the
buffer lengths explicitly here in the driver).
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
for fixup
Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
2020-04-08 22:47:44 +10:00
|
|
|
/* Remove the short until the subsequent next buffer is setup */
|
|
|
|
nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
|
2018-12-20 15:35:06 +01:00
|
|
|
} else {
|
|
|
|
data->async->rx_buf = NULL;
|
2020-07-29 13:05:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
irq_unlock(key);
|
|
|
|
|
|
|
|
if (data->async->rx_buf == NULL) {
|
2018-12-20 15:35:06 +01:00
|
|
|
evt.type = UART_RX_DISABLED;
|
|
|
|
user_callback(dev, &evt);
|
|
|
|
}
|
2019-08-26 11:25:35 +02:00
|
|
|
|
|
|
|
data->async->is_in_irq = false;
|
2018-12-20 15:35:06 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* This handler is called when the reception is interrupted, in contrary to
 * finishing the reception after filling all provided buffers, in which case
 * the events UART_RX_BUF_RELEASED and UART_RX_DISABLED are reported
 * from endrx_isr.
 */
static void rxto_isr(const struct device *dev)
{
	struct uarte_nrfx_data *data = get_dev_data(dev);
	struct uart_event evt = {
		.type = UART_RX_BUF_RELEASED,
		.data.rx_buf.buf = data->async->rx_buf,
	};

	/* Release the buffer that was being filled when reception stopped. */
	user_callback(dev, &evt);

	data->async->rx_buf = NULL;
	/* If the application had already supplied a follow-up buffer, it will
	 * never be filled now - release it as well.
	 */
	if (data->async->rx_next_buf) {
		evt.type = UART_RX_BUF_RELEASED;
		evt.data.rx_buf.buf = data->async->rx_next_buf;
		user_callback(dev, &evt);
		data->async->rx_next_buf = NULL;
	}

	/* Flushing RX fifo requires buffer bigger than 4 bytes to empty fifo */
	static uint8_t flush_buf[5];

	nrf_uarte_rx_buffer_set(get_uarte_instance(dev), flush_buf, 5);
	/* Final part of handling RXTO event is in ENDRX interrupt handler.
	 * ENDRX is generated as a result of FLUSHRX task.
	 */
	nrf_uarte_task_trigger(get_uarte_instance(dev), NRF_UARTE_TASK_FLUSHRX);
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Handle TXSTOPPED: either complete a pending uart_tx() request that was
 * deferred behind an ongoing uart_poll_out(), or report TX_DONE/TX_ABORTED
 * for the transfer that just finished.
 */
static void txstopped_isr(const struct device *dev)
{
	struct uarte_nrfx_data *data = get_dev_data(dev);
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	int key;

	if (!data->async->tx_buf) {
		/* If there is a pending tx request, it means that uart_tx()
		 * was called when there was ongoing uart_poll_out. Handling
		 * TXSTOPPED interrupt means that uart_poll_out has completed.
		 */
		if (data->async->pend_tx_buf) {
			key = irq_lock();

			/* Re-check under lock: start the deferred transfer
			 * only if the transmitter is really stopped.
			 */
			if (nrf_uarte_event_check(uarte,
						  NRF_UARTE_EVENT_TXSTOPPED)) {
				data->async->tx_buf = data->async->pend_tx_buf;
				data->async->pend_tx_buf = NULL;
				/* -1 marks tx_amount as "read from HW later" */
				data->async->tx_amount = -1;
				tx_start(uarte, data->async->tx_buf,
					 data->async->tx_size);
			}

			irq_unlock(key);
		}
		return;
	}

	k_timer_stop(&data->async->tx_timeout_timer);

	/* Prefer a tx_amount captured earlier (e.g. by an abort path); fall
	 * back to the amount reported by the peripheral. Read under lock so
	 * the snapshot is consistent.
	 */
	key = irq_lock();
	size_t amount = (data->async->tx_amount >= 0) ?
			data->async->tx_amount : nrf_uarte_tx_amount_get(uarte);

	irq_unlock(key);

	struct uart_event evt = {
		.data.tx.buf = data->async->tx_buf,
		.data.tx.len = amount,
	};
	/* A short count means the transfer was aborted before completion. */
	if (amount == data->async->tx_size) {
		evt.type = UART_TX_DONE;
	} else {
		evt.type = UART_TX_ABORTED;
	}

	nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
	/* Clear TX state before the callback so the user may start a new
	 * transfer from within it.
	 */
	data->async->tx_buf = NULL;
	data->async->tx_size = 0;

	user_callback(dev, &evt);
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Top-level ISR for the asynchronous API: dispatch each pending UARTE event
 * to its handler. ENDRX is deliberately handled before RXSTARTED so the
 * ENDRX->STARTRX short based double buffering works (see rx path handlers).
 */
static void uarte_nrfx_isr_async(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = get_dev_data(dev);

	/* When no HW (PPI/timer) counter is available, RX bytes are counted
	 * in software by counting RXDRDY interrupts. Return early: byte
	 * counting is the only purpose of this interrupt.
	 */
	if (!hw_rx_counting_enabled(data)
	    && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY);
		data->async->rx_cnt.cnt++;
		return;
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR);
		error_isr(dev);
	}

	/* ENDRX must be processed before RXSTARTED (see comment above). */
	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
		endrx_isr(dev);
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
		rxstarted_isr(dev);
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
		rxto_isr(dev);
	}

	/* ENDTX/TXSTOPPED events can stay latched; only act when the
	 * corresponding interrupt is actually enabled.
	 */
	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)
	    && nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDTX_MASK)) {
		endtx_isr(dev);
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)
	    && nrf_uarte_int_enable_check(uarte,
					  NRF_UARTE_INT_TXSTOPPED_MASK)) {
		txstopped_isr(dev);
	}
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_UART_ASYNC_API */
|
|
|
|
|
2018-07-02 12:55:49 +02:00
|
|
|
/**
|
|
|
|
* @brief Poll the device for input.
|
|
|
|
*
|
|
|
|
* @param dev UARTE device struct
|
|
|
|
* @param c Pointer to character
|
|
|
|
*
|
|
|
|
* @return 0 if a character arrived, -1 if the input buffer is empty.
|
|
|
|
*/
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
2018-12-20 15:35:06 +01:00
|
|
|
|
2018-07-02 12:55:49 +02:00
|
|
|
const struct uarte_nrfx_data *data = get_dev_data(dev);
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
2018-12-20 15:35:06 +01:00
|
|
|
#ifdef CONFIG_UART_ASYNC_API
|
|
|
|
if (data->async) {
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2018-07-02 12:55:49 +02:00
|
|
|
if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
*c = data->rx_data;
|
|
|
|
|
|
|
|
/* clear the interrupt */
|
|
|
|
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
|
|
|
|
nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * @brief Output a character in polled mode.
 *
 * Blocks until the transmitter is free, then sends a single byte. Both wait
 * paths below exit their loop with interrupts locked; the lock is released
 * only after the transfer has been started.
 *
 * @param dev UARTE device struct
 * @param c Character to send
 */
static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c)
{
	struct uarte_nrfx_data *data = get_dev_data(dev);
	bool isr_mode = k_is_in_isr() || k_is_pre_kernel();
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	int key;

	if (isr_mode) {
		/* Cannot sleep here - busy-wait with the lock taken only for
		 * the readiness check.
		 */
		while (1) {
			key = irq_lock();
			if (is_tx_ready(dev)) {
#if CONFIG_UART_ASYNC_API
				/* Capture the HW amount of an in-flight async
				 * transfer before we reuse the transmitter
				 * (tx_amount < 0 means "not captured yet").
				 */
				if (data->async->tx_size &&
					data->async->tx_amount < 0) {
					data->async->tx_amount =
						nrf_uarte_tx_amount_get(uarte);
				}
#endif
				break;
			}

			irq_unlock(key);
		}
	} else {
		/* Thread context: poll briefly, then yield with k_msleep so
		 * other threads can run while TX is busy.
		 */
		do {
			/* wait arbitrary time before back off. */
			bool res;

			NRFX_WAIT_FOR(is_tx_ready(dev), 100, 1, res);

			if (res) {
				key = irq_lock();
				/* Re-check under lock - another context may
				 * have claimed the transmitter meanwhile.
				 */
				if (is_tx_ready(dev)) {
					break;
				}

				irq_unlock(key);
			}
			k_msleep(1);
		} while (1);
	}

	/* At this point we should have irq locked and any previous transfer
	 * completed. Transfer can be started, no need to wait for completion.
	 */
	data->char_out = c;
	tx_start(uarte, &data->char_out, 1);

	irq_unlock(key);
}
|
2020-10-01 10:43:11 +02:00
|
|
|
|
|
|
|
|
2018-07-02 12:55:49 +02:00
|
|
|
#ifdef UARTE_INTERRUPT_DRIVEN
|
|
|
|
/** Interrupt driven FIFO fill function.
 *
 * Returns the number of bytes accepted (0 if a fill is already in progress
 * or the transmitter is busy). fifo_fill_lock serializes concurrent fills;
 * it is released by the TX completion path or on the busy branch below.
 */
static int uarte_nrfx_fifo_fill(const struct device *dev,
				const uint8_t *tx_data,
				int len)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = get_dev_data(dev);

	/* EasyDMA transfers are bounded by the driver's RAM buffer size. */
	len = MIN(len, data->int_driven->tx_buff_size);
	if (!atomic_cas(&data->int_driven->fifo_fill_lock, 0, 1)) {
		return 0;
	}

	/* Copy data to RAM buffer for EasyDMA transfer */
	for (int i = 0; i < len; i++) {
		data->int_driven->tx_buffer[i] = tx_data[i];
	}

	int key = irq_lock();

	if (!is_tx_ready(dev)) {
		/* Transmitter became busy - drop the lock and report that
		 * nothing was queued.
		 */
		data->int_driven->fifo_fill_lock = 0;
		len = 0;
	} else {
		tx_start(uarte, data->int_driven->tx_buffer, len);
	}

	irq_unlock(key);

	return len;
}
|
|
|
|
|
|
|
|
/** Interrupt driven FIFO read function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uarte_nrfx_fifo_read(const struct device *dev,
|
2020-05-27 11:26:57 -05:00
|
|
|
uint8_t *rx_data,
|
2018-07-02 12:55:49 +02:00
|
|
|
const int size)
|
|
|
|
{
|
|
|
|
int num_rx = 0;
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
const struct uarte_nrfx_data *data = get_dev_data(dev);
|
|
|
|
|
|
|
|
if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
|
|
|
|
/* Clear the interrupt */
|
|
|
|
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
|
|
|
|
|
|
|
|
/* Receive a character */
|
2020-05-27 11:26:57 -05:00
|
|
|
rx_data[num_rx++] = (uint8_t)data->rx_data;
|
2018-07-02 12:55:49 +02:00
|
|
|
|
|
|
|
nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
|
|
|
|
}
|
|
|
|
|
|
|
|
return num_rx;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven transfer enabling function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static void uarte_nrfx_irq_tx_enable(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
2018-11-19 13:43:47 +01:00
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
2020-10-01 10:43:11 +02:00
|
|
|
int key = irq_lock();
|
2018-07-02 12:55:49 +02:00
|
|
|
|
2018-12-20 15:35:06 +01:00
|
|
|
data->int_driven->disable_tx_irq = false;
|
2020-10-01 10:43:11 +02:00
|
|
|
nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
|
|
|
|
|
|
|
|
irq_unlock(key);
|
2018-07-02 12:55:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven transfer disabling function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static void uarte_nrfx_irq_tx_disable(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
2018-11-19 13:43:47 +01:00
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
|
|
|
/* TX IRQ will be disabled after current transmission is finished */
|
2018-12-20 15:35:06 +01:00
|
|
|
data->int_driven->disable_tx_irq = true;
|
2018-07-02 12:55:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven transfer ready function */
static int uarte_nrfx_irq_tx_ready_complete(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = get_dev_data(dev);

	/* The TXSTOPPED event stays set after a transfer completes so that
	 * the ISR fires as soon as the TX interrupt is enabled. Because of
	 * that we have to explicitly check that the TXSTOPPED interrupt is
	 * enabled, otherwise this function would always return true no
	 * matter what the source of the interrupt was. A pending deferred
	 * disable (disable_tx_irq) also reports not-ready.
	 */
	return !data->int_driven->disable_tx_irq &&
	       nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED) &&
	       nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uarte_nrfx_irq_rx_ready(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
|
|
|
return nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven receiver enabling function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static void uarte_nrfx_irq_rx_enable(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
|
|
|
nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven receiver disabling function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static void uarte_nrfx_irq_rx_disable(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
|
|
|
nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDRX_MASK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven error enabling function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static void uarte_nrfx_irq_err_enable(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
|
|
|
nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ERROR_MASK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven error disabling function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static void uarte_nrfx_irq_err_disable(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
|
|
|
nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ERROR_MASK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven pending status function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uarte_nrfx_irq_is_pending(const struct device *dev)
|
2018-07-02 12:55:49 +02:00
|
|
|
{
|
|
|
|
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
|
|
|
|
|
|
|
|
return ((nrf_uarte_int_enable_check(uarte,
|
2020-10-01 10:43:11 +02:00
|
|
|
NRF_UARTE_INT_TXSTOPPED_MASK) &&
|
2018-07-02 12:55:49 +02:00
|
|
|
uarte_nrfx_irq_tx_ready_complete(dev))
|
|
|
|
||
|
|
|
|
(nrf_uarte_int_enable_check(uarte,
|
|
|
|
NRF_UARTE_INT_ENDRX_MASK) &&
|
|
|
|
uarte_nrfx_irq_rx_ready(dev)));
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven interrupt update function.
 *
 * Events are cleared directly in the handlers, so there is no cached state
 * to refresh here; always report that processing may proceed.
 */
static int uarte_nrfx_irq_update(const struct device *dev)
{
	return 1;
}
|
|
|
|
|
|
|
|
/** Set the callback function.
 *
 * Stores the application callback and its context for use by the
 * interrupt-driven ISR path.
 */
static void uarte_nrfx_irq_callback_set(const struct device *dev,
					uart_irq_callback_user_data_t cb,
					void *cb_data)
{
	struct uarte_nrfx_data *data = get_dev_data(dev);

	data->int_driven->cb = cb;
	data->int_driven->cb_data = cb_data;
}
|
|
|
|
#endif /* UARTE_INTERRUPT_DRIVEN */
|
|
|
|
|
|
|
|
/* UART driver API vtable. The asynchronous and interrupt-driven entry points
 * are compiled in only when the corresponding Kconfig options enable them.
 */
static const struct uart_driver_api uart_nrfx_uarte_driver_api = {
	/* Always-available polled API and runtime configuration. */
	.poll_in = uarte_nrfx_poll_in,
	.poll_out = uarte_nrfx_poll_out,
	.err_check = uarte_nrfx_err_check,
	.configure = uarte_nrfx_configure,
	.config_get = uarte_nrfx_config_get,
#ifdef CONFIG_UART_ASYNC_API
	/* Asynchronous (callback-based, EasyDMA) API. */
	.callback_set = uarte_nrfx_callback_set,
	.tx = uarte_nrfx_tx,
	.tx_abort = uarte_nrfx_tx_abort,
	.rx_enable = uarte_nrfx_rx_enable,
	.rx_buf_rsp = uarte_nrfx_rx_buf_rsp,
	.rx_disable = uarte_nrfx_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
#ifdef UARTE_INTERRUPT_DRIVEN
	/* Interrupt-driven FIFO-style API. Note: irq_tx_ready and
	 * irq_tx_complete intentionally share one implementation.
	 */
	.fifo_fill = uarte_nrfx_fifo_fill,
	.fifo_read = uarte_nrfx_fifo_read,
	.irq_tx_enable = uarte_nrfx_irq_tx_enable,
	.irq_tx_disable = uarte_nrfx_irq_tx_disable,
	.irq_tx_ready = uarte_nrfx_irq_tx_ready_complete,
	.irq_rx_enable = uarte_nrfx_irq_rx_enable,
	.irq_rx_disable = uarte_nrfx_irq_rx_disable,
	.irq_tx_complete = uarte_nrfx_irq_tx_ready_complete,
	.irq_rx_ready = uarte_nrfx_irq_rx_ready,
	.irq_err_enable = uarte_nrfx_irq_err_enable,
	.irq_err_disable = uarte_nrfx_irq_err_disable,
	.irq_is_pending = uarte_nrfx_irq_is_pending,
	.irq_update = uarte_nrfx_irq_update,
	.irq_callback_set = uarte_nrfx_irq_callback_set,
#endif /* UARTE_INTERRUPT_DRIVEN */
};
|
|
|
|
|
2020-10-01 10:43:11 +02:00
|
|
|
/* Connect the ENDTX event to the STOPTX task through a (D)PPI channel so the
 * transmitter is stopped by hardware as soon as a transfer finishes.
 *
 * Returns 0 on success, -EIO if no (D)PPI channel could be allocated.
 */
static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte,
				 struct uarte_nrfx_data *data)
{
	nrfx_err_t ret;

	ret = gppi_channel_alloc(&data->ppi_ch_endtx);
	if (ret != NRFX_SUCCESS) {
		LOG_ERR("Failed to allocate PPI Channel");
		return -EIO;
	}

	/* Route ENDTX -> STOPTX and enable the channel. */
	nrfx_gppi_channel_endpoints_setup(data->ppi_ch_endtx,
		nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_ENDTX),
		nrf_uarte_task_address_get(uarte, NRF_UARTE_TASK_STOPTX));
	nrfx_gppi_channels_enable(BIT(data->ppi_ch_endtx));

	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* One-time initialization of a UARTE instance: configure pins, apply the
 * initial uart_config, optionally set up the ENDTX->STOPTX (D)PPI shortcut,
 * and start the peripheral in either asynchronous or polling/interrupt mode.
 *
 * @param dev               Device instance being initialized.
 * @param config            Pin selections (TXD mandatory; RXD/CTS/RTS may be
 *                          NRF_UARTE_PSEL_DISCONNECTED).
 * @param interrupts_active Whether interrupt-driven mode is compiled in.
 *                          NOTE(review): not referenced in this visible body —
 *                          presumably consumed elsewhere or vestigial; confirm.
 *
 * @return 0 on success, negative errno on configuration/PPI failure.
 */
static int uarte_instance_init(const struct device *dev,
			       const struct uarte_init_config *config,
			       uint8_t interrupts_active)
{
	int err;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = get_dev_data(dev);

	/* Start from a known-disabled state before touching pin routing. */
	nrf_uarte_disable(uarte);

	data->dev = dev;

	/* TXD idles high, so drive it high before switching to output to
	 * avoid a glitch on the line.
	 */
	nrf_gpio_pin_write(config->pseltxd, 1);
	nrf_gpio_cfg_output(config->pseltxd);

	/* RXD is optional (TX-only instances). */
	if (config->pselrxd != NRF_UARTE_PSEL_DISCONNECTED) {
		nrf_gpio_cfg_input(config->pselrxd, NRF_GPIO_PIN_NOPULL);
	}

	nrf_uarte_txrx_pins_set(uarte, config->pseltxd, config->pselrxd);

	/* Hardware flow-control pins are both optional. */
	if (config->pselcts != NRF_UARTE_PSEL_DISCONNECTED) {
		nrf_gpio_cfg_input(config->pselcts, NRF_GPIO_PIN_NOPULL);
	}

	if (config->pselrts != NRF_UARTE_PSEL_DISCONNECTED) {
		/* RTS also idles high (not ready) before becoming an output. */
		nrf_gpio_pin_write(config->pselrts, 1);
		nrf_gpio_cfg_output(config->pselrts);
	}

	nrf_uarte_hwfc_pins_set(uarte, config->pselrts, config->pselcts);

	/* Apply baudrate/parity/flow-control from the stored uart_config. */
	err = uarte_nrfx_configure(dev, &get_dev_data(dev)->uart_config);
	if (err) {
		return err;
	}

#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
	data->pm_state = DEVICE_PM_ACTIVE_STATE;
#endif

	/* Enhanced poll-out: let hardware stop TX after each ENDTX. */
	if (get_dev_config(dev)->ppi_endtx) {
		err = endtx_stoptx_ppi_init(uarte, data);
		if (err < 0) {
			return err;
		}
	}

#ifdef CONFIG_UART_ASYNC_API
	if (data->async) {
		/* Async mode does its own enable/RX startup. */
		err = uarte_nrfx_init(dev);
		if (err < 0) {
			return err;
		}
	} else
#endif
	{
		/* Enable receiver and transmitter */
		nrf_uarte_enable(uarte);

		if (config->pselrxd != NRF_UARTE_PSEL_DISCONNECTED) {
			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);

			/* Single-byte RX buffer used by the polling path. */
			nrf_uarte_rx_buffer_set(uarte, &data->rx_data, 1);
			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
		}
	}

	/* Without the PPI shortcut, software must observe ENDTX to stop TX. */
	if (!get_dev_config(dev)->ppi_endtx) {
		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK);
	}

	/* Set TXSTOPPED event by requesting fake (zero-length) transfer.
	 * Pointer to RAM variable (data->tx_buffer) is set because otherwise
	 * such operation may result in HardFault or RAM corruption.
	 */
	nrf_uarte_tx_buffer_set(uarte, &data->char_out, 0);
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);

	/* switch off transmitter to save an energy */
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);

	return 0;
}
|
|
|
|
|
2019-03-21 13:41:25 +01:00
|
|
|
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
|
2019-11-26 14:45:40 +01:00
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Reconfigure the UARTE pins when the instance changes power state.
 *
 * With @p enable true the pins are put into their active configuration
 * (TXD/RTS driven high as outputs, RXD/CTS as no-pull inputs); with false
 * they are returned to their reset (default) configuration. A no-op unless
 * GPIO management was enabled for this instance in Kconfig.
 */
static void uarte_nrfx_pins_enable(const struct device *dev, bool enable)
{
	if (!get_dev_config(dev)->gpio_mgmt) {
		return;
	}

	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	uint32_t txd = nrf_uarte_tx_pin_get(uarte);
	uint32_t rxd = nrf_uarte_rx_pin_get(uarte);
	uint32_t cts = nrf_uarte_cts_pin_get(uarte);
	uint32_t rts = nrf_uarte_rts_pin_get(uarte);

	/* TXD: idle level is high, so drive high before output mode. */
	if (enable) {
		nrf_gpio_pin_write(txd, 1);
		nrf_gpio_cfg_output(txd);
	} else {
		nrf_gpio_cfg_default(txd);
	}

	/* RXD may be disconnected on TX-only instances. */
	if (rxd != NRF_UARTE_PSEL_DISCONNECTED) {
		if (enable) {
			nrf_gpio_cfg_input(rxd, NRF_GPIO_PIN_NOPULL);
		} else {
			nrf_gpio_cfg_default(rxd);
		}
	}

	/* Flow-control pins are only touched if set in devicetree. */
	if (IS_RTS_PIN_SET(get_dev_config(dev)->rts_cts_pins_set)) {
		if (enable) {
			nrf_gpio_pin_write(rts, 1);
			nrf_gpio_cfg_output(rts);
		} else {
			nrf_gpio_cfg_default(rts);
		}
	}

	if (IS_CTS_PIN_SET(get_dev_config(dev)->rts_cts_pins_set)) {
		if (enable) {
			nrf_gpio_cfg_input(cts, NRF_GPIO_PIN_NOPULL);
		} else {
			nrf_gpio_cfg_default(cts);
		}
	}
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Transition the UARTE instance between power states.
 *
 * Activation: re-enable pins and the peripheral; restart the RX byte
 * counter timer and, for non-async instances, restart reception.
 *
 * Deactivation (low-power/suspend/off): stop the RX counter timer, then
 * stop RX (busy-waiting for RXTO so the peripheral is quiescent) and
 * disable the peripheral and pins. Async instances skip the RXTO wait
 * because their RX teardown is handled by the async machinery.
 */
static void uarte_nrfx_set_power_state(const struct device *dev,
				       uint32_t new_state)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	struct uarte_nrfx_data *data = get_dev_data(dev);

	if (new_state == DEVICE_PM_ACTIVE_STATE) {
		uarte_nrfx_pins_enable(dev, true);
		nrf_uarte_enable(uarte);
#ifdef CONFIG_UART_ASYNC_API
		/* Hardware RX byte counting uses a TIMER peripheral that was
		 * stopped on suspend; bring it back up first.
		 */
		if (hw_rx_counting_enabled(get_dev_data(dev))) {
			nrfx_timer_enable(&get_dev_config(dev)->timer);
		}
		/* Async mode restarts RX on demand, not here. */
		if (get_dev_data(dev)->async) {
			return;
		}
#endif
		if (nrf_uarte_rx_pin_get(uarte) !=
			NRF_UARTE_PSEL_DISCONNECTED) {
			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
		}
	} else {
		__ASSERT_NO_MSG(new_state == DEVICE_PM_LOW_POWER_STATE ||
				new_state == DEVICE_PM_SUSPEND_STATE ||
				new_state == DEVICE_PM_OFF_STATE);

		/* if pm is already not active, driver will stay indefinitely
		 * in while loop waiting for event NRF_UARTE_EVENT_RXTO
		 */
		if (data->pm_state != DEVICE_PM_ACTIVE_STATE) {
			return;
		}

		/* Disabling UART requires stopping RX, but stop RX event is
		 * only sent after each RX if async UART API is used.
		 */
#ifdef CONFIG_UART_ASYNC_API
		if (hw_rx_counting_enabled(get_dev_data(dev))) {
			nrfx_timer_disable(&get_dev_config(dev)->timer);
			/* Timer/counter value is reset when disabled. */
			data->async->rx_total_byte_cnt = 0;
			data->async->rx_total_user_byte_cnt = 0;
		}
		if (get_dev_data(dev)->async) {
			/* No RXTO wait needed: async teardown handles RX. */
			nrf_uarte_disable(uarte);
			uarte_nrfx_pins_enable(dev, false);
			return;
		}
#endif
		/* RXSTARTED pending means a transfer is active: stop it and
		 * busy-wait until the peripheral confirms with RXTO before
		 * it is safe to disable.
		 */
		if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
			nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
			while (!nrf_uarte_event_check(uarte,
						      NRF_UARTE_EVENT_RXTO)) {
				/* Busy wait for event to register */
			}
			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
			nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
		}
		nrf_uarte_disable(uarte);
		uarte_nrfx_pins_enable(dev, false);
	}
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uarte_nrfx_pm_control(const struct device *dev,
|
|
|
|
uint32_t ctrl_command,
|
2019-03-21 13:41:25 +01:00
|
|
|
void *context, device_pm_cb cb, void *arg)
|
|
|
|
{
|
|
|
|
struct uarte_nrfx_data *data = get_dev_data(dev);
|
|
|
|
|
|
|
|
if (ctrl_command == DEVICE_PM_SET_POWER_STATE) {
|
2020-05-27 11:26:57 -05:00
|
|
|
uint32_t new_state = *((const uint32_t *)context);
|
2019-03-21 13:41:25 +01:00
|
|
|
|
|
|
|
if (new_state != data->pm_state) {
|
|
|
|
uarte_nrfx_set_power_state(dev, new_state);
|
|
|
|
data->pm_state = new_state;
|
|
|
|
}
|
|
|
|
} else {
|
2020-02-17 09:09:22 +01:00
|
|
|
__ASSERT_NO_MSG(ctrl_command == DEVICE_PM_GET_POWER_STATE);
|
2020-05-27 11:26:57 -05:00
|
|
|
*((uint32_t *)context) = data->pm_state;
|
2019-03-21 13:41:25 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (cb) {
|
|
|
|
cb(dev, 0, context, arg);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_DEVICE_POWER_MANAGEMENT */
|
|
|
|
|
2020-04-17 16:58:20 -07:00
|
|
|
/* Devicetree access helpers: node identifier for UARTE instance "idx"
 * (node label "uart<idx>") plus property existence/value shorthands.
 */
#define UARTE(idx) DT_NODELABEL(uart##idx)
#define UARTE_HAS_PROP(idx, prop) DT_NODE_HAS_PROP(UARTE(idx), prop)
#define UARTE_PROP(idx, prop) DT_PROP(UARTE(idx), prop)

/* Pin-select property value, or NRF_UARTE_PSEL_DISCONNECTED when the
 * property is absent from the devicetree node.
 */
#define UARTE_PSEL(idx, pin_prop) \
	COND_CODE_1(UARTE_HAS_PROP(idx, pin_prop), \
		    (UARTE_PROP(idx, pin_prop)), \
		    (NRF_UARTE_PSEL_DISCONNECTED))

/* Hardware flow control is usable if at least one of RTS/CTS is wired. */
#define HWFC_AVAILABLE(idx) \
	(UARTE_HAS_PROP(idx, rts_pin) || UARTE_HAS_PROP(idx, cts_pin))

/* Connect and enable the instance IRQ with the devicetree priority. */
#define UARTE_IRQ_CONFIGURE(idx, isr_handler) \
	do { \
		IRQ_CONNECT(DT_IRQN(UARTE(idx)), DT_IRQ(UARTE(idx), priority), \
			    isr_handler, DEVICE_GET(uart_nrfx_uarte##idx), 0); \
		irq_enable(DT_IRQN(UARTE(idx))); \
	} while (0)

/* Build-time check: hw-flow-control may only be requested in devicetree
 * when at least one flow-control pin is actually defined.
 */
#define HWFC_CONFIG_CHECK(idx) \
	BUILD_ASSERT( \
		(UARTE_PROP(idx, hw_flow_control) && HWFC_AVAILABLE(idx)) \
		|| \
		!UARTE_PROP(idx, hw_flow_control) \
	)
|
|
|
|
|
2018-10-03 14:39:06 +02:00
|
|
|
/* Instantiate one complete UARTE driver instance "idx":
 * - build-time HWFC sanity check;
 * - optional interrupt-driven and async state (Kconfig-gated);
 * - mutable instance data and read-only config (pins, PPI/GPIO options,
 *   optional HW byte-counting timer);
 * - an init function that fills pin selections from devicetree, hooks the
 *   appropriate ISR, and calls uarte_instance_init();
 * - the DEVICE_DEFINE() registration with PM support.
 */
#define UART_NRF_UARTE_DEVICE(idx) \
	HWFC_CONFIG_CHECK(idx); \
	DEVICE_DECLARE(uart_nrfx_uarte##idx); \
	UARTE_INT_DRIVEN(idx); \
	UARTE_ASYNC(idx); \
	static struct uarte_nrfx_data uarte_##idx##_data = { \
		UARTE_CONFIG(idx), \
		IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \
			   (.async = &uarte##idx##_async,)) \
		IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN, \
			   (.int_driven = &uarte##idx##_int_driven,)) \
	}; \
	static const struct uarte_nrfx_config uarte_##idx##z_config = { \
		.uarte_regs = (NRF_UARTE_Type *)DT_REG_ADDR(UARTE(idx)), \
		.rts_cts_pins_set = \
			(UARTE_HAS_PROP(idx, rts_pin) ? RTS_PIN_SET_MASK : 0) |\
			(UARTE_HAS_PROP(idx, cts_pin) ? CTS_PIN_SET_MASK : 0), \
		.gpio_mgmt = IS_ENABLED(CONFIG_UART_##idx##_GPIO_MANAGEMENT), \
		.ppi_endtx = IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT),\
		IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \
			(.timer = NRFX_TIMER_INSTANCE( \
				CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),)) \
	}; \
	static int uarte_##idx##_init(const struct device *dev) \
	{ \
		const struct uarte_init_config init_config = { \
			.pseltxd = UARTE_PROP(idx, tx_pin), /* must be set */ \
			.pselrxd = UARTE_PSEL(idx, rx_pin), /* optional */ \
			.pselcts = UARTE_PSEL(idx, cts_pin), /* optional */ \
			.pselrts = UARTE_PSEL(idx, rts_pin), /* optional */ \
		}; \
		COND_CODE_1(CONFIG_UART_##idx##_ASYNC, \
			    (UARTE_IRQ_CONFIGURE(idx, uarte_nrfx_isr_async);), \
			    (UARTE_IRQ_CONFIGURE(idx, uarte_nrfx_isr_int);)) \
		return uarte_instance_init( \
			dev, \
			&init_config, \
			IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN)); \
	} \
	DEVICE_DEFINE(uart_nrfx_uarte##idx, \
		      DT_LABEL(UARTE(idx)), \
		      uarte_##idx##_init, \
		      uarte_nrfx_pm_control, \
		      &uarte_##idx##_data, \
		      &uarte_##idx##z_config, \
		      PRE_KERNEL_1, \
		      CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
		      &uart_nrfx_uarte_driver_api)
|
2018-07-02 12:55:49 +02:00
|
|
|
|
2018-12-20 15:35:06 +01:00
|
|
|
/* Initializer for the uart_config member of the instance data: baudrate
 * and flow control come from devicetree, parity from Kconfig; data/stop
 * bits are fixed at 8N1 framing defaults.
 */
#define UARTE_CONFIG(idx) \
	.uart_config = { \
		.baudrate = UARTE_PROP(idx, current_speed), \
		.data_bits = UART_CFG_DATA_BITS_8, \
		.stop_bits = UART_CFG_STOP_BITS_1, \
		.parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT) \
			  ? UART_CFG_PARITY_EVEN \
			  : UART_CFG_PARITY_NONE, \
		.flow_ctrl = UARTE_PROP(idx, hw_flow_control) \
			     ? UART_CFG_FLOW_CTRL_RTS_CTS \
			     : UART_CFG_FLOW_CTRL_NONE, \
	}
|
2018-07-02 12:55:49 +02:00
|
|
|
|
2018-12-20 15:35:06 +01:00
|
|
|
/* Define the async-API state object for instance "idx"; expands to
 * nothing when CONFIG_UART_<idx>_ASYNC is disabled. hw_rx_counting
 * selects TIMER-based RX byte counting per Kconfig.
 */
#define UARTE_ASYNC(idx) \
	IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \
		(struct uarte_async_cb uarte##idx##_async = { \
			.hw_rx_counting = \
				IS_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC), \
		}))
|
2018-12-20 15:35:06 +01:00
|
|
|
|
|
|
|
/* Define the interrupt-driven TX buffer and state for instance "idx";
 * expands to nothing when CONFIG_UART_<idx>_INTERRUPT_DRIVEN is disabled.
 * The buffer size is clamped to the EasyDMA MAXCNT limit of the SoC.
 */
#define UARTE_INT_DRIVEN(idx) \
	IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN, \
		(static uint8_t uarte##idx##_tx_buffer[\
			MIN(CONFIG_UART_##idx##_NRF_TX_BUFFER_SIZE, \
			    BIT_MASK(UARTE##idx##_EASYDMA_MAXCNT_SIZE))]; \
		 static struct uarte_nrfx_int_driven \
			uarte##idx##_int_driven = { \
				.tx_buffer = uarte##idx##_tx_buffer, \
				.tx_buff_size = sizeof(uarte##idx##_tx_buffer),\
			};))
|
2018-07-02 12:55:49 +02:00
|
|
|
|
|
|
|
/* Create driver instances for each UARTE peripheral enabled in Kconfig. */
#ifdef CONFIG_UART_0_NRF_UARTE
UART_NRF_UARTE_DEVICE(0);
#endif

#ifdef CONFIG_UART_1_NRF_UARTE
UART_NRF_UARTE_DEVICE(1);
#endif

#ifdef CONFIG_UART_2_NRF_UARTE
UART_NRF_UARTE_DEVICE(2);
#endif

#ifdef CONFIG_UART_3_NRF_UARTE
UART_NRF_UARTE_DEVICE(3);
#endif
|