ring_buffer: make finish methods final
Make it possible to "finish" with fewer bytes than what was "claimed". This was possible before on the get side, but the put side was cumulative with respect to finish. The revamp made it cumulative on both sides. It turns out that existing users rely on the opposite behavior, which is more logical and useful, so make both sides behave that way. Adjust the documentation, test case and users accordingly.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
parent d9a74dda54
commit eee5b8e563
5 changed files with 16 additions and 18 deletions
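For context, here is a minimal sketch of the put-side pattern this change settles on. It uses the real ring_buf byte-mode API from <sys/ring_buffer.h>; the buffer name my_rb, the produce() helper and its arguments are illustrative, not part of this commit:

#include <sys/ring_buffer.h>
#include <string.h>

RING_BUF_DECLARE(my_rb, 64);            /* illustrative 64-byte buffer */

static void produce(const uint8_t *src, uint32_t want)
{
	uint8_t *dst;
	uint32_t granted, written;

	/* Claim up to 'want' contiguous bytes; the buffer may grant less. */
	granted = ring_buf_put_claim(&my_rb, &dst, want);

	/* Suppose only part of the claimed area actually gets filled. */
	written = granted / 2;
	memcpy(dst, src, written);

	/*
	 * With this commit, finishing with fewer bytes than claimed is
	 * valid on the put side too: the surplus (granted - written) is
	 * returned to the free buffer space instead of staying claimed.
	 */
	ring_buf_put_finish(&my_rb, written);
}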
@@ -251,7 +251,7 @@ static inline uint32_t ring_buf_size_get(struct ring_buf *buf)
  *
  * With this routine, memory copying can be reduced since internal ring buffer
  * can be used directly by the user. Once data is written to allocated area
- * number of bytes written can be confirmed (see @ref ring_buf_put_finish).
+ * number of bytes written must be confirmed (see @ref ring_buf_put_finish).
  *
  * @warning
  * Use cases involving multiple writers to the ring buffer must prevent
@@ -277,6 +277,10 @@ uint32_t ring_buf_put_claim(struct ring_buf *buf,
 /**
  * @brief Indicate number of bytes written to allocated buffers.
  *
+ * The number of bytes must be equal to or lower than the sum corresponding
+ * to all preceding @ref ring_buf_put_claim invocations (or even 0). Surplus
+ * bytes will be returned to the available free buffer space.
+ *
  * @warning
  * Use cases involving multiple writers to the ring buffer must prevent
  * concurrent write operations, either by preventing all writers from
@@ -320,7 +324,7 @@ uint32_t ring_buf_put(struct ring_buf *buf, const uint8_t *data, uint32_t size);
  * @brief Get address of a valid data in a ring buffer.
  *
  * With this routine, memory copying can be reduced since internal ring buffer
- * can be used directly by the user. Once data is processed it can be freed
+ * can be used directly by the user. Once data is processed it must be freed
  * using @ref ring_buf_get_finish.
  *
  * @warning
@@ -347,6 +351,10 @@ uint32_t ring_buf_get_claim(struct ring_buf *buf,
 /**
  * @brief Indicate number of bytes read from claimed buffer.
  *
+ * The number of bytes must be equal or lower than the sum corresponding to
+ * all preceding @ref ring_buf_get_claim invocations (or even 0). Surplus
+ * bytes will remain available in the buffer.
+ *
  * @warning
  * Use cases involving multiple reads of the ring buffer must prevent
  * concurrent read operations, either by preventing all readers from
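The get side mirrors this. A short sketch under the same assumptions as above (process() is a hypothetical consumer that may stop before using everything it was handed):

static void consume(void)
{
	uint8_t *data;
	uint32_t avail, used;

	/* Claim whatever contiguous data is currently readable. */
	avail = ring_buf_get_claim(&my_rb, &data, 64);

	used = process(data, avail);    /* hypothetical, may use < avail */

	/*
	 * Finish with only the bytes actually consumed; the surplus
	 * remains in the buffer for the next ring_buf_get_claim().
	 */
	ring_buf_get_finish(&my_rb, used);
}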
@@ -37,18 +37,13 @@ int ring_buf_put_finish(struct ring_buf *buf, uint32_t size)
 {
 	uint32_t finish_space, wrap_size;

-	if (unlikely(size == 0)) {
-		/* claim is cancelled */
-		buf->put_head = buf->put_tail;
-		return 0;
-	}
-
 	finish_space = buf->put_head - buf->put_tail;
 	if (unlikely(size > finish_space)) {
 		return -EINVAL;
 	}

 	buf->put_tail += size;
+	buf->put_head = buf->put_tail;

 	wrap_size = buf->put_tail - buf->put_base;
 	if (unlikely(wrap_size >= buf->size)) {
@@ -108,18 +103,13 @@ int ring_buf_get_finish(struct ring_buf *buf, uint32_t size)
 {
 	uint32_t finish_space, wrap_size;

-	if (unlikely(size == 0)) {
-		/* claim is cancelled */
-		buf->get_head = buf->get_tail;
-		return 0;
-	}
-
 	finish_space = buf->get_head - buf->get_tail;
 	if (unlikely(size > finish_space)) {
 		return -EINVAL;
 	}

 	buf->get_tail += size;
+	buf->get_head = buf->get_tail;

 	wrap_size = buf->get_tail - buf->get_base;
 	if (unlikely(wrap_size >= buf->size)) {
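The added `buf->put_head = buf->put_tail;` (and its get_* counterpart) is what makes a finish terminal: it resyncs the head so the unfinished remainder of the claim is released. A sketch of the resulting behavior, with hypothetical sizes and the my_rb buffer from above:

uint8_t *p;
int err;

(void)ring_buf_put_claim(&my_rb, &p, 8);  /* assume all 8 bytes granted */

/* The first finish commits part of the claim and terminates it... */
err = ring_buf_put_finish(&my_rb, 4);     /* 0: 4 committed, 4 released */

/* ...so a second finish against the same claim has nothing left. */
err = ring_buf_put_finish(&my_rb, 4);     /* -EINVAL: finish_space is 0 */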
@@ -171,9 +171,8 @@ void z_shell_history_put(struct shell_history *history, uint8_t *line,
 		ring_buf_put_claim(history->ring_buf,
 				   (uint8_t **)&h_item, total_len);
 		if (claim2_len == total_len) {
-			ring_buf_put_finish(history->ring_buf,
-					    claim_len);
 			padding += claim_len;
+			total_len += claim_len;
 			claim_len = total_len;
 		}
 	}
@@ -62,6 +62,7 @@ static void rng_pool_work_handler(struct k_work *work)
 		ring_buf_put_finish(&rnd_rbuf, len);
 		return;
 	}
+	ring_buf_put_finish(&rnd_rbuf, 0);
 }

 	k_work_submit(work);
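The hunk above finishes with 0 to drop the claim entirely when the entropy source could not fill it. The same cancel pattern in isolation, under the earlier assumptions (fill_from_source() is hypothetical):

uint8_t *dst;
uint32_t granted = ring_buf_put_claim(&my_rb, &dst, 16);

if (fill_from_source(dst, granted) <= 0) {    /* hypothetical source */
	/* Nothing written: finishing with 0 releases the whole claim. */
	ring_buf_put_finish(&my_rb, 0);
}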
@@ -569,11 +569,11 @@ void test_ringbuffer_alloc_put(void)
 	zassert_true(err == 0, NULL);

 	err = ring_buf_put_finish(&ringbuf_raw, RINGBUFFER_SIZE - 1);
-	zassert_true(err == 0, NULL);
+	zassert_true(err == -EINVAL, NULL);

 	read_size = ring_buf_get(&ringbuf_raw, outputbuf,
 				 RINGBUFFER_SIZE);
-	zassert_true(read_size == RINGBUFFER_SIZE, NULL);
+	zassert_true(read_size == 1, NULL);

 	for (int i = 0; i < 10; i++) {
 		allocated = ring_buf_put_claim(&ringbuf_raw, &data, 2);