drivers: nrf: Fix forever timeout handling in UART async API.

The current implementation passes the timeout value directly to the
workqueue/timer, which results in an assertion failure or an instant
timeout when K_FOREVER is passed. This fix ensures that no timer or
delayed work item is started with K_FOREVER.

Signed-off-by: Mieszko Mierunski <mieszko.mierunski@nordicsemi.no>
Commit: 20444869e3
Author: Mieszko Mierunski, 2020-01-24 16:04:16 +01:00
Committed by: Carles Cufí
2 changed files with 34 additions and 17 deletions
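The fix boils down to a single guard, applied at every point where a user-supplied timeout reaches the kernel: skip the timer or delayed work entirely when the value is K_FOREVER. A minimal sketch of that pattern, assuming the legacy Zephyr 2.x k_delayed_work API with s32_t millisecond timeouts (the helper names are illustrative, not part of the driver):

#include <kernel.h>

/* Arm the timeout work item only for finite timeouts: K_FOREVER is a
 * sentinel, not a valid delay, and must never reach the kernel timeout
 * machinery.
 */
static void timeout_work_start(struct k_delayed_work *work, s32_t timeout)
{
        if (timeout != K_FOREVER) {
                k_delayed_work_submit(work, timeout);
        }
}

/* Mirror the guard on the stop path: if nothing was submitted, there is
 * nothing to cancel.
 */
static void timeout_work_stop(struct k_delayed_work *work, s32_t timeout)
{
        if (timeout != K_FOREVER) {
                k_delayed_work_cancel(work);
        }
}

The UART driver applies this guard around its k_delayed_work_submit()/k_delayed_work_cancel() calls; the UARTE driver applies the same test before k_timer_start(), as the hunks below show.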

@@ -49,7 +49,7 @@ static struct {
         size_t rx_secondary_buffer_length;
         volatile size_t rx_counter;
         volatile size_t rx_offset;
-        size_t rx_timeout;
+        s32_t rx_timeout;
         struct k_delayed_work rx_timeout_work;
         bool rx_enabled;
@@ -59,7 +59,7 @@ static struct {
         volatile size_t tx_counter;
 #if defined(DT_NORDIC_NRF_UART_UART_0_RTS_PIN) && \
         defined(DT_NORDIC_NRF_UART_UART_0_CTS_PIN)
-        size_t tx_timeout;
+        s32_t tx_timeout;
         struct k_delayed_work tx_timeout_work;
 #endif
 } uart0_cb;
@@ -353,7 +353,7 @@ static int uart_nrfx_callback_set(struct device *dev, uart_callback_t callback,
 }
 static int uart_nrfx_tx(struct device *dev, const u8_t *buf, size_t len,
-                        u32_t timeout)
+                        s32_t timeout)
 {
         if (uart0_cb.tx_buffer_length != 0) {
                 return -EBUSY;
@@ -384,7 +384,9 @@ static int uart_nrfx_tx_abort(struct device *dev)
         }
 #if defined(DT_NORDIC_NRF_UART_UART_0_RTS_PIN) && \
         defined(DT_NORDIC_NRF_UART_UART_0_CTS_PIN)
+        if (uart0_cb.tx_timeout != K_FOREVER) {
                 k_delayed_work_cancel(&uart0_cb.tx_timeout_work);
+        }
 #endif
         nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);
@@ -403,7 +405,7 @@ static int uart_nrfx_tx_abort(struct device *dev)
 }
 static int uart_nrfx_rx_enable(struct device *dev, u8_t *buf, size_t len,
-                               u32_t timeout)
+                               s32_t timeout)
 {
         if (uart0_cb.rx_buffer_length != 0) {
                 return -EBUSY;
@@ -447,7 +449,9 @@ static int uart_nrfx_rx_disable(struct device *dev)
         }
         uart0_cb.rx_enabled = 0;
+        if (uart0_cb.rx_timeout != K_FOREVER) {
                 k_delayed_work_cancel(&uart0_cb.rx_timeout_work);
+        }
         nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPRX);
         return 0;
@@ -524,7 +528,9 @@ static void rx_isr(struct device *dev)
         }
         if (uart0_cb.rx_buffer_length == uart0_cb.rx_counter) {
+                if (uart0_cb.rx_timeout != K_FOREVER) {
                         k_delayed_work_cancel(&uart0_cb.rx_timeout_work);
+                }
                 rx_rdy_evt();
                 if (uart0_cb.rx_secondary_buffer_length) {
@@ -552,8 +558,10 @@ static void tx_isr(void)
             !uart0_cb.tx_abort) {
 #if defined(DT_NORDIC_NRF_UART_UART_0_RTS_PIN) && \
         defined(DT_NORDIC_NRF_UART_UART_0_CTS_PIN)
+                if (uart0_cb.tx_timeout != K_FOREVER) {
                         k_delayed_work_submit(&uart0_cb.tx_timeout_work,
                                               uart0_cb.tx_timeout);
+                }
 #endif
                 nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);
@@ -563,7 +571,10 @@ static void tx_isr(void)
         } else {
 #if defined(DT_NORDIC_NRF_UART_UART_0_RTS_PIN) && \
         defined(DT_NORDIC_NRF_UART_UART_0_CTS_PIN)
+                if (uart0_cb.tx_timeout != K_FOREVER) {
                         k_delayed_work_cancel(&uart0_cb.tx_timeout_work);
+                }
 #endif
                 nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);
                 uart0_cb.tx_buffer_length = 0;
@@ -586,7 +597,9 @@ static void tx_isr(void)
 static void error_isr(struct device *dev)
 {
+        if (uart0_cb.rx_timeout != K_FOREVER) {
                 k_delayed_work_cancel(&uart0_cb.rx_timeout_work);
+        }
         nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR);
         if (!uart0_cb.rx_enabled) {
@@ -653,13 +666,16 @@ static void rx_timeout(struct k_work *work)
 {
         rx_rdy_evt();
 }
 #if defined(DT_NORDIC_NRF_UART_UART_0_RTS_PIN) && \
         defined(DT_NORDIC_NRF_UART_UART_0_CTS_PIN)
 static void tx_timeout(struct k_work *work)
 {
         struct uart_event evt;
+        if (uart0_cb.tx_timeout != K_FOREVER) {
                 k_delayed_work_cancel(&uart0_cb.tx_timeout_work);
+        }
         nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);
         evt.type = UART_TX_ABORTED;
         evt.data.tx.buf = uart0_cb.tx_buffer;
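The remaining hunks make the matching change in the UARTE driver, which tracks its timeouts with k_timer rather than delayed work. On the caller side, the case these guards exist for is an application that asks for no timeout at all; a minimal usage sketch, assuming the Zephyr 2.x async UART API (the device name and buffer size are placeholders):

#include <kernel.h>
#include <device.h>
#include <drivers/uart.h>

static u8_t rx_buf[64];

void rx_without_timeout(void)
{
        /* "uart_0" is a placeholder device name. */
        struct device *uart_dev = device_get_binding("uart_0");

        if (uart_dev == NULL) {
                return;
        }

        /* K_FOREVER: report UART_RX_RDY only when the buffer fills or
         * reception is disabled, never on inactivity.  With this fix the
         * driver arms no timer for this case instead of asserting.
         */
        uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), K_FOREVER);
}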

@@ -70,7 +70,7 @@ struct uarte_async_cb {
         u8_t *rx_next_buf;
         u32_t rx_total_byte_cnt; /* Total number of bytes received */
         u32_t rx_total_user_byte_cnt; /* Total number of bytes passed to user */
-        u32_t rx_timeout; /* Timeout set by user */
+        s32_t rx_timeout; /* Timeout set by user */
         s32_t rx_timeout_slab; /* rx_timeout divided by RX_TIMEOUT_DIV */
         s32_t rx_timeout_left; /* Current time left until user callback */
         struct k_timer rx_timeout_timer;
@@ -470,7 +470,7 @@ static int uarte_nrfx_init(struct device *dev)
 }
 static int uarte_nrfx_tx(struct device *dev, const u8_t *buf, size_t len,
-                         u32_t timeout)
+                         s32_t timeout)
 {
         struct uarte_nrfx_data *data = get_dev_data(dev);
         NRF_UARTE_Type *uarte = get_uarte_instance(dev);
@@ -486,7 +486,8 @@ static int uarte_nrfx_tx(struct device *dev, const u8_t *buf, size_t len,
         data->async->tx_size = len;
         nrf_uarte_tx_buffer_set(uarte, buf, len);
         nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);
-        if (data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS) {
+        if (data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS
+            && timeout != K_FOREVER) {
                 k_timer_start(&data->async->tx_timeout_timer, timeout,
                               K_NO_WAIT);
         }
@@ -508,7 +509,7 @@ static int uarte_nrfx_tx_abort(struct device *dev)
 }
 static int uarte_nrfx_rx_enable(struct device *dev, u8_t *buf, size_t len,
-                                u32_t timeout)
+                                s32_t timeout)
 {
         struct uarte_nrfx_data *data = get_dev_data(dev);
         const struct uarte_nrfx_config *cfg = get_dev_config(dev);
@@ -690,7 +691,7 @@ static void rxstarted_isr(struct device *dev)
                 .type = UART_RX_BUF_REQUEST,
         };
         user_callback(dev, &evt);
-        if (data->async->rx_timeout) {
+        if (data->async->rx_timeout != K_FOREVER) {
                 data->async->rx_timeout_left = data->async->rx_timeout;
                 k_timer_start(&data->async->rx_timeout_timer,
                               data->async->rx_timeout_slab,