drivers: serial: uart_async_rx: Add return value to consume function
Return availability of free buffers after data is consumed. This information may be important for the module using uart_async_rx so that it can schedule the next reception when a new buffer is available.

Signed-off-by: Krzysztof Chruściński <krzysztof.chruscinski@nordicsemi.no>
commit d2bd82eb5f (parent 65b42260f7)
5 changed files with 45 additions and 40 deletions
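For orientation before the per-file hunks: the sketch below (not part of the commit) shows the calling pattern this change enables with the uart_async_rx helper: claim and process pending data, consume it, and request a fresh buffer only when the helper reports that one is free. The helper calls are the ones touched in this diff; my_process_bytes() and my_give_buf_to_driver() are hypothetical placeholders for the caller's own processing and buffer-response paths, and the include paths are assumed from the usual Zephyr tree layout.

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <zephyr/drivers/serial/uart_async_rx.h>
#include <zephyr/sys/__assert.h>

/* Hypothetical caller-side hooks; not part of the helper API. */
void my_process_bytes(const uint8_t *data, size_t len);
int my_give_buf_to_driver(uint8_t *buf, size_t len);

static void my_consume_and_refill(struct uart_async_rx *async_rx, bool buf_requested)
{
	uint8_t *data;
	size_t len;

	/* Claim up to 64 pending bytes and hand them to the application. */
	len = uart_async_rx_data_claim(async_rx, &data, 64);
	my_process_bytes(data, len);

	/* After this commit the consume call reports whether the pool holds
	 * a free buffer once the consumed data has been released.
	 */
	bool buf_available = uart_async_rx_data_consume(async_rx, len);

	if (buf_requested && buf_available) {
		/* A free buffer was reported, so the request is expected to
		 * succeed; the callers updated by this commit assert that.
		 */
		uint8_t *buf = uart_async_rx_buf_req(async_rx);

		__ASSERT_NO_MSG(buf != NULL);
		(void)my_give_buf_to_driver(buf, uart_async_rx_get_buf_len(async_rx));
	}
}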
@@ -104,7 +104,7 @@ size_t uart_async_rx_data_claim(struct uart_async_rx *rx_data, uint8_t **data, size_t length)
 	return MIN(length, rem);
 }
 
-void uart_async_rx_data_consume(struct uart_async_rx *rx_data, size_t length)
+bool uart_async_rx_data_consume(struct uart_async_rx *rx_data, size_t length)
 {
 	struct uart_async_rx_buf *buf = get_buf(rx_data, rx_data->rd_buf_idx);
 
@@ -117,6 +117,8 @@ void uart_async_rx_data_consume(struct uart_async_rx *rx_data, size_t length)
 	atomic_sub(&rx_data->pending_bytes, length);
 
 	__ASSERT_NO_MSG(buf->rd_idx <= buf->wr_idx);
+
+	return rx_data->free_buf_cnt > 0;
 }
 
 void uart_async_rx_reset(struct uart_async_rx *rx_data)

@@ -198,24 +198,23 @@ int z_uart_async_to_irq_fifo_read(const struct device *dev,
 	}
 
 	memcpy(buf, claim_buf, claim_len);
-	uart_async_rx_data_consume(async_rx, claim_len);
+	bool buf_available = uart_async_rx_data_consume(async_rx, claim_len);
 
-	if (data->rx.pending_buf_req) {
+	if (data->rx.pending_buf_req && buf_available) {
 		buf = uart_async_rx_buf_req(async_rx);
-		if (buf) {
-			int err;
-			size_t rx_len = uart_async_rx_get_buf_len(async_rx);
-
-			atomic_dec(&data->rx.pending_buf_req);
-			err = config->api->rx_buf_rsp(dev, buf, rx_len);
-			if (err < 0) {
-				if (err == -EACCES) {
-					data->rx.pending_buf_req = 0;
-					err = rx_enable(dev, data, buf, rx_len);
-				}
-				if (err < 0) {
-					return err;
-				}
+		__ASSERT_NO_MSG(buf != NULL);
+		int err;
+		size_t rx_len = uart_async_rx_get_buf_len(async_rx);
+
+		atomic_dec(&data->rx.pending_buf_req);
+		err = config->api->rx_buf_rsp(dev, buf, rx_len);
+		if (err < 0) {
+			if (err == -EACCES) {
+				data->rx.pending_buf_req = 0;
+				err = rx_enable(dev, data, buf, rx_len);
+			}
+			if (err < 0) {
+				return err;
 			}
 		}
 	}

@@ -163,8 +163,11 @@ size_t uart_async_rx_data_claim(struct uart_async_rx *async_rx, uint8_t **data, size_t length)
  *
  * @param async_rx Pointer to the helper instance.
  * @param length Amount of data to consume. It must be less or equal than amount of claimed data.
+ *
+ * @retval true If there are free buffers in the pool after data got consumed.
+ * @retval false If there are no free buffers.
  */
-void uart_async_rx_data_consume(struct uart_async_rx *async_rx, size_t length);
+bool uart_async_rx_data_consume(struct uart_async_rx *async_rx, size_t length);
 
 #ifdef __cplusplus
 }
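The new @retval documentation does not oblige a caller to act on the result: a consumer that never replenishes the buffer pool itself can simply discard it, which is what the test update further down does with an explicit cast. A one-line illustration (not from the commit; async_rx and length stand for the caller's instance and consumed amount):

	/* Free-buffer indication deliberately ignored: this caller does not
	 * hand buffers back to the UART driver.
	 */
	(void)uart_async_rx_data_consume(async_rx, length);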
@@ -454,26 +454,24 @@ static int async_read(struct shell_uart_async *sh_uart,
 
 	memcpy(data, buf, blen);
 #endif
-	uart_async_rx_data_consume(async_rx, sh_cnt);
+	bool buf_available = uart_async_rx_data_consume(async_rx, sh_cnt);
 	*cnt = sh_cnt;
 
-	if (sh_uart->pending_rx_req) {
+	if (sh_uart->pending_rx_req && buf_available) {
 		uint8_t *buf = uart_async_rx_buf_req(async_rx);
+		size_t len = uart_async_rx_get_buf_len(async_rx);
+		int err;
 
-		if (buf) {
-			int err;
-			size_t len = uart_async_rx_get_buf_len(async_rx);
-
-			atomic_dec(&sh_uart->pending_rx_req);
-			err = uart_rx_buf_rsp(sh_uart->common.dev, buf, len);
-			/* If it is too late and RX is disabled then re-enable it. */
-			if (err < 0) {
-				if (err == -EACCES) {
-					sh_uart->pending_rx_req = 0;
-					err = rx_enable(sh_uart->common.dev, buf, len);
-				} else {
-					return err;
-				}
+		__ASSERT_NO_MSG(buf != NULL);
+		atomic_dec(&sh_uart->pending_rx_req);
+		err = uart_rx_buf_rsp(sh_uart->common.dev, buf, len);
+		/* If it is too late and RX is disabled then re-enable it. */
+		if (err < 0) {
+			if (err == -EACCES) {
+				sh_uart->pending_rx_req = 0;
+				err = rx_enable(sh_uart->common.dev, buf, len);
+			} else {
+				return err;
 			}
 		}
 	}

@@ -41,6 +41,7 @@ ZTEST(uart_async_rx, test_rx)
 	uint8_t *claim_buf;
 	uint8_t *aloc_buf;
 	struct uart_async_rx async_rx;
+	bool buf_available;
 	const struct uart_async_rx_config config = {
 		.buffer = buf,
 		.length = sizeof(buf),
@@ -87,7 +88,8 @@ ZTEST(uart_async_rx, test_rx)
 	zassert_true(mem_check(claim_buf, 0, aloc_len - 2));
 
 	/* Consume first 2 bytes. */
-	uart_async_rx_data_consume(&async_rx, 2);
+	buf_available = uart_async_rx_data_consume(&async_rx, 2);
+	zassert_true(buf_available);
 
 	/* Now claim will return buffer taking into account that first 2 bytes are
 	 * consumed.
@@ -98,7 +100,8 @@ ZTEST(uart_async_rx, test_rx)
 	zassert_true(mem_check(claim_buf, 2, aloc_len - 4));
 
 	/* Consume rest of data. Get indication that it was end of the buffer. */
-	uart_async_rx_data_consume(&async_rx, aloc_len - 4);
+	buf_available = uart_async_rx_data_consume(&async_rx, aloc_len - 4);
+	zassert_true(buf_available);
 }
 
 ZTEST(uart_async_rx, test_rx_late_consume)
@@ -134,7 +137,7 @@ ZTEST(uart_async_rx, test_rx_late_consume)
 		zassert_equal(claim_len, 1);
 		zassert_equal(claim_buf[0], (uint8_t)i);
 
-		uart_async_rx_data_consume(&async_rx, 1);
+		(void)uart_async_rx_data_consume(&async_rx, 1);
 	}
 
 	claim_len = uart_async_rx_data_claim(&async_rx, &claim_buf, 100);
@@ -217,13 +220,13 @@ static bool consumer(void *user_data, uint32_t cnt, bool last, int prio)
 		test_data->exp_consume++;
 	}
 
-	uart_async_rx_data_consume(async_rx, len);
+	bool buf_released = uart_async_rx_data_consume(async_rx, len);
 
-	if (test_data->pending_req) {
+	if (buf_released && test_data->pending_req) {
 		buf = uart_async_rx_buf_req(async_rx);
-		if (buf) {
-			atomic_dec(&test_data->pending_req);
-		}
+		zassert_true(buf != NULL);
+
+		atomic_dec(&test_data->pending_req);
 		k_spinlock_key_t key = k_spin_lock(&test_data->lock);
 
 		if (test_data->curr_buf == NULL) {