drivers: serial: nrf uarte: avoid dropping RX chars/overruns
In some cases (e.g. at a high baud rate, with no HW flow control, and with the BLE radio/interrupts running) data could be lost between the point where enough characters have been received to fill the DMA buffer and the point where the ENDRX event fires and the STARTRX task is invoked to start filling the next buffer (which is set up earlier, but is not filled until STARTRX). To fix this, the ENDRX_STARTRX short is enabled whenever the 'next' buffer is available, so that STARTRX is triggered automatically and subsequent characters go into the next buffer via EasyDMA. To make this work properly, uarte_nrfx_isr_async() now handles the ENDRX event _before_ the RXSTARTED event.

There was also an issue in rx_timeout() where the received character count (rx_total_byte_cnt) could grow beyond the actual buffer size. rx_total_byte_cnt is derived from counting RXDRDY events (either via a PPI/timer counter or by counting the RXDRDY interrupts themselves), so if characters are received during rx_timeout() (or before ENDRX is handled), rx_timeout() could advance rx_offset past the length of the buffer. The remaining 'len' could then be calculated incorrectly (an unsigned - signed underflow when signed > unsigned). To fix this, the driver now stores the lengths of the buffers and does not invoke the UART_RX_RDY callback when a buffer is full; that case is handled by ENDRX. (Note that the buffer size should be available via the RXD.MAXCNT register on the nRF, but this register is not exposed through the nrfx HAL and is also double buffered, so it seemed clearer to track the buffer lengths explicitly in the driver.)

Signed-off-by: Marc Reilly <marc@cpdesign.com.au>
commit cf7dd4981f
parent 59c21ad3cf
1 changed file with 87 additions and 24 deletions
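To make the race described above easier to picture, here is a condensed, illustrative sketch (not code from the driver; the helper name and buffer arguments are invented) of the double-buffered reception this change relies on. It uses only nrfx HAL calls that appear in the diff below, and the include path assumes the nrfx HAL is available as in a Zephyr build.

    #include <stddef.h>
    #include <stdint.h>
    #include <hal/nrf_uarte.h>

    /* Start reception into buf_a and pre-arm buf_b. RXD.PTR/RXD.MAXCNT are
     * double buffered, so the second buffer can be programmed while the first
     * is still filling. With the ENDRX->STARTRX short enabled, the hardware
     * restarts reception into buf_b the moment buf_a completes, so characters
     * are not dropped while the CPU is busy (e.g. with BLE radio interrupts).
     */
    static void rx_start_double_buffered(NRF_UARTE_Type *uarte,
                                         uint8_t *buf_a, size_t len_a,
                                         uint8_t *buf_b, size_t len_b)
    {
            nrf_uarte_rx_buffer_set(uarte, buf_a, len_a);
            nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
            nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);

            nrf_uarte_rx_buffer_set(uarte, buf_b, len_b);
            nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
    }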
@@ -66,8 +66,10 @@ struct uarte_async_cb {
         struct k_timer tx_timeout_timer;
 
         u8_t *rx_buf;
+        size_t rx_buf_len;
         size_t rx_offset;
         u8_t *rx_next_buf;
+        size_t rx_next_buf_len;
         u32_t rx_total_byte_cnt; /* Total number of bytes received */
         u32_t rx_total_user_byte_cnt; /* Total number of bytes passed to user */
         s32_t rx_timeout; /* Timeout set by user */
@@ -541,7 +543,10 @@ static int uarte_nrfx_rx_enable(struct device *dev, u8_t *buf, size_t len,
                             NRFX_CEIL_DIV(1000, CONFIG_SYS_CLOCK_TICKS_PER_SEC));
 
         data->async->rx_buf = buf;
+        data->async->rx_buf_len = len;
         data->async->rx_offset = 0;
+        data->async->rx_next_buf = NULL;
+        data->async->rx_next_buf_len = 0;
         nrf_uarte_rx_buffer_set(uarte, buf, len);
 
         nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
@@ -559,7 +564,9 @@ static int uarte_nrfx_rx_buf_rsp(struct device *dev, u8_t *buf, size_t len)
 
         if (data->async->rx_next_buf == NULL) {
                 data->async->rx_next_buf = buf;
+                data->async->rx_next_buf_len = len;
                 nrf_uarte_rx_buffer_set(uarte, buf, len);
+                nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
         } else {
                 return -EBUSY;
         }
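For context, the path in this hunk is reached through Zephyr's asynchronous UART API: the driver asks for the 'next' buffer with a UART_RX_BUF_REQUEST event and the application answers with uart_rx_buf_rsp(), which is what now also arms the ENDRX_STARTRX short. A minimal application-side sketch follows; the buffer sizes and helper names are arbitrary, and the callback signature shown is the one from this era of the API (later Zephyr versions also pass the device pointer to the callback), so treat it as an assumption to check against the uart.h in use.

    #include <zephyr.h>
    #include <drivers/uart.h>

    static u8_t rx_bufs[2][64];
    static u8_t next_buf;

    static void uart_cb(struct uart_event *evt, void *user_data)
    {
            struct device *dev = user_data;

            switch (evt->type) {
            case UART_RX_BUF_REQUEST:
                    /* Hand over the 'next' buffer; with this change the
                     * driver also enables the ENDRX->STARTRX short here.
                     */
                    uart_rx_buf_rsp(dev, rx_bufs[next_buf], sizeof(rx_bufs[0]));
                    next_buf ^= 1;
                    break;
            case UART_RX_RDY:
                    /* evt->data.rx.len new bytes are available at
                     * evt->data.rx.buf + evt->data.rx.offset.
                     */
                    break;
            default:
                    break;
            }
    }

    void rx_setup(struct device *uart_dev)
    {
            next_buf = 1;
            uart_callback_set(uart_dev, uart_cb, uart_dev);
            uart_rx_enable(uart_dev, rx_bufs[0], sizeof(rx_bufs[0]),
                           10 /* rx inactivity timeout */);
    }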
@@ -585,6 +592,9 @@ static int uarte_nrfx_rx_disable(struct device *dev)
         if (data->async->rx_buf == NULL) {
                 return -EFAULT;
         }
+        if (data->async->rx_next_buf != NULL) {
+                nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
+        }
 
         k_timer_stop(&data->async->rx_timeout_timer);
         data->async->rx_enabled = false;
@@ -646,13 +656,30 @@ static void rx_timeout(struct k_timer *timer)
                 data->async->rx_timeout_left = data->async->rx_timeout;
         }
 
-        /* Check if there is data that was not sent to user yet */
+        /* Check if there is data that was not sent to user yet.
+         * Note though that 'len' is a count of data bytes received, but not
+         * necessarily the amount available in the current buffer.
+         */
         s32_t len = data->async->rx_total_byte_cnt
                     - data->async->rx_total_user_byte_cnt;
 
+        /* Check for the current buffer being full.
+         * If the UART receives characters before ENDRX is handled
+         * and the 'next' buffer is set up, then the SHORT between ENDRX and
+         * STARTRX will mean that data will be going into the 'next' buffer
+         * until the ENDRX event gets a chance to be handled.
+         */
+        bool clipped = false;
+
+        if (len + data->async->rx_offset > data->async->rx_buf_len) {
+                len = data->async->rx_buf_len - data->async->rx_offset;
+                clipped = true;
+        }
+
         if (len > 0) {
-                if (data->async->rx_timeout_left
-                    < data->async->rx_timeout_slab) {
+                if (clipped ||
+                    (data->async->rx_timeout_left
+                     < data->async->rx_timeout_slab)) {
                         /* rx_timeout ms elapsed since last receiving */
                         struct uart_event evt = {
                                 .type = UART_RX_RDY,
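A stand-alone illustration of the clamp added above, using made-up numbers rather than real driver state: the RXDRDY-derived count can run past the end of the current buffer once the 'next' buffer is already filling via the short.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t rx_total_byte_cnt = 36;      /* RXDRDY events counted so far */
            uint32_t rx_total_user_byte_cnt = 30; /* bytes already reported */
            size_t rx_offset = 30;                /* consumed from current buffer */
            size_t rx_buf_len = 32;               /* current buffer length */

            int32_t len = rx_total_byte_cnt - rx_total_user_byte_cnt; /* 6 */
            bool clipped = false;

            if (len + rx_offset > rx_buf_len) {
                    len = rx_buf_len - rx_offset; /* only 2 bytes left here */
                    clipped = true;
            }

            /* 2 bytes are reported now; the remaining 4 already landed in the
             * 'next' buffer and are reported after ENDRX swaps the buffers.
             */
            assert(len == 2 && clipped);
            return 0;
    }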
@@ -661,13 +688,19 @@ static void rx_timeout(struct k_timer *timer)
                                 .data.rx.offset = data->async->rx_offset
                         };
                         data->async->rx_offset += len;
-                        data->async->rx_total_user_byte_cnt =
-                                data->async->rx_total_byte_cnt;
+                        data->async->rx_total_user_byte_cnt += len;
                         user_callback(dev, &evt);
                 } else {
                         data->async->rx_timeout_left -=
                                 data->async->rx_timeout_slab;
                 }
+
+                /* If there's nothing left to report until the buffers are
+                 * switched then the timer can be stopped.
+                 */
+                if (clipped) {
+                        k_timer_stop(&data->async->rx_timeout_timer);
+                }
         }
 
         nrf_uarte_int_enable(get_uarte_instance(dev),
@@ -728,13 +761,28 @@ static void endrx_isr(struct device *dev)
 
         data->async->is_in_irq = true;
 
-        if (data->async->rx_next_buf) {
-                nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
-        }
+        /* Ensure the rx timer is stopped - it will be restarted in the
+         * RXSTARTED handler if needed.
+         */
         k_timer_stop(&data->async->rx_timeout_timer);
 
-        size_t rx_len = nrf_uarte_rx_amount_get(uarte)
-                        - data->async->rx_offset;
+        /* This is the amount that the EasyDMA controller has copied into the
+         * buffer.
+         */
+        const int rx_amount = nrf_uarte_rx_amount_get(uarte);
+
+        /* The 'rx_offset' can be bigger than 'rx_amount', so the length
+         * of data we report back to the user may need to be clipped.
+         * This can happen because the 'rx_offset' count derives from RXDRDY
+         * events, which can occur already for the next buffer before we are
+         * here to handle this buffer. (The next buffer is now already active
+         * because of the ENDRX_STARTRX shortcut.)
+         */
+        int rx_len = rx_amount - data->async->rx_offset;
+
+        if (rx_len < 0) {
+                rx_len = 0;
+        }
+
         data->async->rx_total_user_byte_cnt += rx_len;
 
@@ -747,23 +795,38 @@ static void endrx_isr(struct device *dev)
                 data->async->rx_cnt.cnt = data->async->rx_total_user_byte_cnt;
         }
 
+        /* Only send the RX_RDY event if there is something to send */
+        if (rx_len > 0) {
+                struct uart_event evt = {
+                        .type = UART_RX_RDY,
+                        .data.rx.buf = data->async->rx_buf,
+                        .data.rx.len = rx_len,
+                        .data.rx.offset = data->async->rx_offset,
+                };
+                user_callback(dev, &evt);
+        }
+
         struct uart_event evt = {
-                .type = UART_RX_RDY,
-                .data.rx.buf = data->async->rx_buf,
-                .data.rx.len = rx_len,
-                .data.rx.offset = data->async->rx_offset,
+                .type = UART_RX_BUF_RELEASED,
+                .data.rx_buf.buf = data->async->rx_buf,
         };
         user_callback(dev, &evt);
 
-        evt.type = UART_RX_BUF_RELEASED;
-        evt.data.rx_buf.buf = data->async->rx_buf;
-        user_callback(dev, &evt);
+        /* If there is a next buffer, then STARTRX will have already been
+         * invoked by the short (the next buffer will be filling up already)
+         * and here we just swap which buffer the driver is following;
+         * the next rx_timeout() will update the rx_offset.
+         */
         if (data->async->rx_next_buf) {
                 data->async->rx_buf = data->async->rx_next_buf;
+                data->async->rx_buf_len = data->async->rx_next_buf_len;
                 data->async->rx_next_buf = NULL;
+                data->async->rx_next_buf_len = 0;
 
                 data->async->rx_offset = 0;
 
+                /* Remove the short until the subsequent next buffer is set up */
+                nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
         } else {
                 data->async->rx_buf = NULL;
                 evt.type = UART_RX_DISABLED;
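To see how the two halves fit together, suppose (hypothetically) that rx_timeout() has already clipped and reported everything in a 32-byte buffer, leaving rx_offset == rx_buf_len == 32. When ENDRX is then handled, nrf_uarte_rx_amount_get() returns 32, rx_len works out to 0, and the 'if (rx_len > 0)' guard above skips a duplicate UART_RX_RDY; the driver goes straight to UART_RX_BUF_RELEASED and swaps to the next buffer, or reports UART_RX_DISABLED if no next buffer was supplied.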
@@ -860,16 +923,16 @@ static void uarte_nrfx_isr_async(struct device *dev)
                 error_isr(dev);
         }
 
-        if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
-                nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
-                rxstarted_isr(dev);
-        }
-
         if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) {
                 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
                 endrx_isr(dev);
         }
 
+        if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
+                nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
+                rxstarted_isr(dev);
+        }
+
         if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO)) {
                 nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
                 rxto_isr(dev);