drivers: i2s: Make the stm32 i2s driver use a msgq

The driver currently uses a home-grown ring buffer plus a semaphore with IRQ locks.
Replace these with a k_msgq, which provides the same bounded FIFO with locking,
blocking, and timeouts handled by the kernel.

Signed-off-by: Chris Desjardins <chris@arch-embedded.com>
commit 18d30e3535
Author: Chris Desjardins
Date:   2024-10-23 23:43:00 +02:00
Commit: Benjamin Cabé

2 changed files with 37 additions and 129 deletions
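
For context, a minimal standalone sketch of the pattern this commit adopts: one k_msgq collapses the home-grown ring buffer, the counting semaphore, and the irq_lock()/irq_unlock() pairs into a single kernel object that copies fixed-size messages under its own locking. The queue depth of 8 is an illustrative assumption here; the driver sizes its queues from CONFIG_I2S_STM32_RX_BLOCK_COUNT / CONFIG_I2S_STM32_TX_BLOCK_COUNT.

	#include <zephyr/kernel.h>

	/* Message type: one slab-allocated audio block plus its size. */
	struct queue_item {
		void *mem_block;
		size_t size;
	};

	/* Statically define the queue: 8 messages of sizeof(struct queue_item),
	 * buffer aligned to 4 bytes. This replaces ring buffer + semaphore + irq locks.
	 */
	K_MSGQ_DEFINE(tx_queue, sizeof(struct queue_item), 8, 4);

	/* Producer: the item is copied into the queue; blocks for up to 'timeout'
	 * ms if the queue is full (a timeout of 0 makes it safe in an ISR).
	 */
	static int queue_put(struct k_msgq *q, void *mem_block, size_t size, int32_t timeout)
	{
		struct queue_item item = {.mem_block = mem_block, .size = size};

		return k_msgq_put(q, &item, SYS_TIMEOUT_MS(timeout));
	}

	/* Consumer: blocks for up to 'timeout' ms if the queue is empty. */
	static int queue_get(struct k_msgq *q, void **mem_block, size_t *size, int32_t timeout)
	{
		struct queue_item item;
		int result = k_msgq_get(q, &item, SYS_TIMEOUT_MS(timeout));

		if (result == 0) {
			*mem_block = item.mem_block;
			*size = item.size;
		}
		return result;
	}

In the DMA callbacks the driver passes a timeout of 0, so these helpers never block in interrupt context; the i2s_read()/i2s_write() paths pass the configured stream timeout instead of taking a semaphore first.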

@@ -23,79 +23,49 @@
 #include <zephyr/irq.h>
 
 LOG_MODULE_REGISTER(i2s_ll_stm32);
 
-#define MODULO_INC(val, max) { val = (++val < max) ? val : 0; }
-
 static unsigned int div_round_closest(uint32_t dividend, uint32_t divisor)
 {
 	return (dividend + (divisor / 2U)) / divisor;
 }
 
-static bool queue_is_empty(struct ring_buffer *rb)
+static bool queue_is_empty(struct k_msgq *q)
 {
-	unsigned int key;
-
-	key = irq_lock();
-
-	if (rb->tail != rb->head) {
-		/* Ring buffer is not empty */
-		irq_unlock(key);
-		return false;
-	}
-
-	irq_unlock(key);
-
-	return true;
+	return (k_msgq_num_used_get(q) == 0) ? true : false;
 }
 
 /*
  * Get data from the queue
  */
-static int queue_get(struct ring_buffer *rb, void **mem_block, size_t *size)
+static int queue_get(struct k_msgq *q, void **mem_block, size_t *size, int32_t timeout)
 {
-	unsigned int key;
-
-	key = irq_lock();
-
-	if (queue_is_empty(rb) == true) {
-		irq_unlock(key);
-		return -ENOMEM;
+	struct queue_item item;
+	int result = k_msgq_get(q, &item, SYS_TIMEOUT_MS(timeout));
+
+	if (result == 0) {
+		*mem_block = item.mem_block;
+		*size = item.size;
 	}
-
-	*mem_block = rb->buf[rb->tail].mem_block;
-	*size = rb->buf[rb->tail].size;
-	MODULO_INC(rb->tail, rb->len);
-
-	irq_unlock(key);
-
-	return 0;
+	return result;
 }
 
 /*
  * Put data in the queue
  */
-static int queue_put(struct ring_buffer *rb, void *mem_block, size_t size)
+static int queue_put(struct k_msgq *q, void *mem_block, size_t size, int32_t timeout)
 {
-	uint16_t head_next;
-	unsigned int key;
+	struct queue_item item = {.mem_block = mem_block, .size = size};
 
-	key = irq_lock();
+	return k_msgq_put(q, &item, SYS_TIMEOUT_MS(timeout));
+}
 
-	head_next = rb->head;
-	MODULO_INC(head_next, rb->len);
+static void stream_queue_drop(struct stream *s)
+{
+	size_t size;
+	void *mem_block;
 
-	if (head_next == rb->tail) {
-		/* Ring buffer is full */
-		irq_unlock(key);
-		return -ENOMEM;
+	while (queue_get(s->msgq, &mem_block, &size, 0) == 0) {
+		k_mem_slab_free(s->cfg.mem_slab, mem_block);
 	}
-
-	rb->buf[rb->head].mem_block = mem_block;
-	rb->buf[rb->head].size = size;
-	rb->head = head_next;
-
-	irq_unlock(key);
-
-	return 0;
 }
 
 static int i2s_stm32_enable_clock(const struct device *dev)
@@ -224,7 +194,7 @@ static int i2s_stm32_configure(const struct device *dev, enum i2s_dir dir,
 	}
 
 	if (i2s_cfg->frame_clk_freq == 0U) {
-		stream->queue_drop(stream);
+		stream_queue_drop(stream);
 		memset(&stream->cfg, 0, sizeof(struct i2s_config));
 		stream->state = I2S_STATE_NOT_READY;
 		return 0;
@@ -385,7 +355,7 @@ do_trigger_stop:
 		}
 
 		if (dir == I2S_DIR_TX) {
-			if ((queue_is_empty(&stream->mem_block_queue) == false) ||
+			if ((queue_is_empty(stream->msgq) == false) ||
 			    (ll_func_i2s_dma_busy(cfg->i2s))) {
 				stream->state = I2S_STATE_STOPPING;
 				/*
@@ -412,7 +382,7 @@ do_trigger_stop:
 			return -EIO;
 		}
 		stream->stream_disable(stream, dev);
-		stream->queue_drop(stream);
+		stream_queue_drop(stream);
 		stream->state = I2S_STATE_READY;
 		break;
@@ -422,7 +392,7 @@ do_trigger_stop:
 			return -EIO;
 		}
 		stream->state = I2S_STATE_READY;
-		stream->queue_drop(stream);
+		stream_queue_drop(stream);
 		break;
 
 	default:
@@ -444,16 +414,8 @@ static int i2s_stm32_read(const struct device *dev, void **mem_block,
 		return -EIO;
 	}
 
-	if (dev_data->rx.state != I2S_STATE_ERROR) {
-		ret = k_sem_take(&dev_data->rx.sem,
-				 SYS_TIMEOUT_MS(dev_data->rx.cfg.timeout));
-		if (ret < 0) {
-			return ret;
-		}
-	}
-
-	/* Get data from the beginning of RX queue */
-	ret = queue_get(&dev_data->rx.mem_block_queue, mem_block, size);
+	ret = queue_get(dev_data->rx.msgq, mem_block, size, dev_data->rx.cfg.timeout);
 	if (ret < 0) {
 		return -EIO;
 	}
@@ -465,7 +427,6 @@ static int i2s_stm32_write(const struct device *dev, void *mem_block,
 			   size_t size)
 {
 	struct i2s_stm32_data *const dev_data = dev->data;
-	int ret;
 
 	if (dev_data->tx.state != I2S_STATE_RUNNING &&
 	    dev_data->tx.state != I2S_STATE_READY) {
@@ -473,14 +434,8 @@ static int i2s_stm32_write(const struct device *dev, void *mem_block,
 		return -EIO;
 	}
 
-	ret = k_sem_take(&dev_data->tx.sem,
-			 SYS_TIMEOUT_MS(dev_data->tx.cfg.timeout));
-	if (ret < 0) {
-		return ret;
-	}
-
-	/* Add data to the end of the TX queue */
-	return queue_put(&dev_data->tx.mem_block_queue, mem_block, size);
+	return queue_put(dev_data->tx.msgq, mem_block, size, dev_data->tx.cfg.timeout);
 }
 
 static DEVICE_API(i2s, i2s_stm32_driver_api) = {
@@ -604,13 +559,12 @@ static void dma_rx_callback(const struct device *dma_dev, void *arg,
 	sys_cache_data_invd_range(mblk_tmp, stream->cfg.block_size);
 
 	/* All block data received */
-	ret = queue_put(&stream->mem_block_queue, mblk_tmp,
-			stream->cfg.block_size);
+	ret = queue_put(stream->msgq, mblk_tmp,
+			stream->cfg.block_size, 0);
 	if (ret < 0) {
 		stream->state = I2S_STATE_ERROR;
 		goto rx_disable;
 	}
-	k_sem_give(&stream->sem);
 
 	/* Stop reception if we were requested */
 	if (stream->state == I2S_STATE_STOPPING) {
@@ -659,8 +613,8 @@ static void dma_tx_callback(const struct device *dma_dev, void *arg,
 	 * as stated in zephyr i2s specification, in case of DRAIN command
 	 * send all data in the transmit queue and stop the transmission.
 	 */
-	if (queue_is_empty(&stream->mem_block_queue) == true) {
-		stream->queue_drop(stream);
+	if (queue_is_empty(stream->msgq) == true) {
+		stream_queue_drop(stream);
 		stream->state = I2S_STATE_READY;
 		goto tx_disable;
 	} else if (stream->tx_stop_for_drain == false) {
@@ -681,8 +635,8 @@ static void dma_tx_callback(const struct device *dma_dev, void *arg,
 	}
 
 	/* Prepare to send the next data block */
-	ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
-			&mem_block_size);
+	ret = queue_get(stream->msgq, &stream->mem_block,
+			&mem_block_size, 0);
 	if (ret < 0) {
 		if (stream->state == I2S_STATE_STOPPING) {
 			stream->state = I2S_STATE_READY;
@@ -691,7 +645,6 @@ static void dma_tx_callback(const struct device *dma_dev, void *arg,
 		}
 		goto tx_disable;
 	}
-	k_sem_give(&stream->sem);
 
 	/* Assure cache coherency before DMA read operation */
 	sys_cache_data_flush_range(stream->mem_block, mem_block_size);
@@ -765,10 +718,6 @@ static int i2s_stm32_initialize(const struct device *dev)
 	cfg->irq_config(dev);
 
-	k_sem_init(&dev_data->rx.sem, 0, CONFIG_I2S_STM32_RX_BLOCK_COUNT);
-	k_sem_init(&dev_data->tx.sem, CONFIG_I2S_STM32_TX_BLOCK_COUNT,
-		   CONFIG_I2S_STM32_TX_BLOCK_COUNT);
-
 	for (i = 0; i < STM32_DMA_NUM_CHANNELS; i++) {
 		active_dma_rx_channel[i] = NULL;
 		active_dma_tx_channel[i] = NULL;
@@ -847,12 +796,11 @@ static int tx_stream_start(struct stream *stream, const struct device *dev)
 	size_t mem_block_size;
 	int ret;
 
-	ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
-			&mem_block_size);
+	ret = queue_get(stream->msgq, &stream->mem_block,
+			&mem_block_size, 0);
 	if (ret < 0) {
 		return ret;
 	}
-	k_sem_give(&stream->sem);
 
 	/* Assure cache coherency before DMA read operation */
 	sys_cache_data_flush_range(stream->mem_block, mem_block_size);
@@ -948,34 +896,6 @@ static void tx_stream_disable(struct stream *stream, const struct device *dev)
 	active_dma_tx_channel[stream->dma_channel] = NULL;
 }
 
-static void rx_queue_drop(struct stream *stream)
-{
-	size_t size;
-	void *mem_block;
-
-	while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) {
-		k_mem_slab_free(stream->cfg.mem_slab, mem_block);
-	}
-
-	k_sem_reset(&stream->sem);
-}
-
-static void tx_queue_drop(struct stream *stream)
-{
-	size_t size;
-	void *mem_block;
-	unsigned int n = 0U;
-
-	while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) {
-		k_mem_slab_free(stream->cfg.mem_slab, mem_block);
-		n++;
-	}
-
-	for (; n > 0; n--) {
-		k_sem_give(&stream->sem);
-	}
-}
-
 static const struct device *get_dev_from_rx_dma_channel(uint32_t dma_channel)
 {
 	return active_dma_rx_channel[dma_channel];
@@ -1011,9 +931,7 @@ static const struct device *get_dev_from_tx_dma_channel(uint32_t dma_channel)
 			     STM32_DMA_FEATURES(index, dir)), \
 	.stream_start = dir##_stream_start, \
 	.stream_disable = dir##_stream_disable, \
-	.queue_drop = dir##_queue_drop, \
-	.mem_block_queue.buf = dir##_##index##_ring_buf, \
-	.mem_block_queue.len = ARRAY_SIZE(dir##_##index##_ring_buf) \
+	.msgq = &dir##_##index##_queue, \
 }
 
 #define I2S_STM32_INIT(index) \
@@ -1034,8 +952,8 @@ static const struct i2s_stm32_cfg i2s_stm32_config_##index = { \
 	.master_clk_sel = DT_INST_PROP(index, mck_enabled) \
 }; \
 \
-struct queue_item rx_##index##_ring_buf[CONFIG_I2S_STM32_RX_BLOCK_COUNT + 1];\
-struct queue_item tx_##index##_ring_buf[CONFIG_I2S_STM32_TX_BLOCK_COUNT + 1];\
+K_MSGQ_DEFINE(rx_##index##_queue, sizeof(struct queue_item), CONFIG_I2S_STM32_RX_BLOCK_COUNT, 4);\
+K_MSGQ_DEFINE(tx_##index##_queue, sizeof(struct queue_item), CONFIG_I2S_STM32_TX_BLOCK_COUNT, 4);\
 \
 static struct i2s_stm32_data i2s_stm32_data_##index = { \
 	UTIL_AND(DT_INST_DMAS_HAS_NAME(index, rx), \
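
Note on application-visible behavior: the public i2s API is unchanged. An application still allocates a slab block and hands it to i2s_write(); the configured i2s_config.timeout is still honored, but the blocking now happens inside k_msgq_put()/k_msgq_get() rather than on the per-stream semaphore. A hypothetical TX snippet for illustration (device handle, block size, and slab sizing are assumptions, not part of this patch):

	#include <string.h>
	#include <zephyr/kernel.h>
	#include <zephyr/drivers/i2s.h>

	#define BLOCK_SIZE 1024 /* assumed; must equal i2s_config.block_size */

	K_MEM_SLAB_DEFINE(tx_slab, BLOCK_SIZE, 4, 32);

	int send_one_block(const struct device *i2s_dev, const int16_t *samples)
	{
		void *mem_block;
		int ret;

		/* Take a block from the slab the stream was configured with. */
		ret = k_mem_slab_alloc(&tx_slab, &mem_block, K_MSEC(100));
		if (ret < 0) {
			return ret;
		}
		memcpy(mem_block, samples, BLOCK_SIZE);

		/* Ownership passes to the driver on success; with this patch the
		 * call may block in k_msgq_put() for up to i2s_config.timeout ms
		 * if the TX queue is full.
		 */
		ret = i2s_write(i2s_dev, mem_block, BLOCK_SIZE);
		if (ret < 0) {
			k_mem_slab_free(&tx_slab, mem_block);
		}
		return ret;
	}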