drivers: i2s_sam_ssc: use dma_reload() to continue transfer

Use `dma_reload()` instead of `dma_config()` within DMA callbacks. This
significantly shortens the time required to reconfigure the DMA engine to
transmit / receive the next data block and makes it possible to configure
higher I2S bus data rates.
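
As an illustration, here is a minimal sketch of the two call patterns
against Zephyr's generic DMA API. The helper names and variables are
placeholders; only the `dma_config()` / `dma_reload()` / `dma_start()`
calls are real API:

```c
#include <drivers/dma.h>

/* Old pattern: rebuild the complete channel configuration for every
 * block. dma_config() re-validates and re-programs all channel
 * parameters even though only the addresses and size change.
 */
static int next_block_slow(const struct device *dma_dev, uint32_t channel,
			   struct dma_config *cfg, struct dma_block_config *blk,
			   void *src, void *dst, size_t size)
{
	int ret;

	blk->source_address = (uint32_t)src;
	blk->dest_address = (uint32_t)dst;
	blk->block_size = size;
	cfg->head_block = blk;

	ret = dma_config(dma_dev, channel, cfg);
	if (ret < 0) {
		return ret;
	}
	return dma_start(dma_dev, channel);
}

/* New pattern: keep the existing channel configuration and only update
 * the source / destination addresses and the transfer size.
 */
static int next_block_fast(const struct device *dma_dev, uint32_t channel,
			   void *src, void *dst, size_t size)
{
	int ret;

	ret = dma_reload(dma_dev, channel, (uint32_t)src, (uint32_t)dst, size);
	if (ret < 0) {
		return ret;
	}
	return dma_start(dma_dev, channel);
}
```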

The maximum I2S data rate supported by the driver is still lower than
that of the underlying hardware. To fully support the hardware's
capabilities, the I2S driver would have to use scatter-gather /
linked-list DMA transfers; this is currently not supported by the DMA
driver.
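
For reference, such a scatter-gather transfer would chain
`struct dma_block_config` descriptors through their `next_block`
pointers, so the DMA engine steps from one block to the next without
CPU involvement. A minimal sketch, assuming a TX channel and placeholder
buffer names (the SAM XDMAC driver did not implement chained blocks at
the time of this commit):

```c
#include <drivers/dma.h>

/* Submit two memory blocks as one linked-list transfer; the engine
 * moves from blk[0] to blk[1] on its own, with no interrupt in between.
 */
static int start_sg_tx(const struct device *dma_dev, uint32_t channel,
		       struct dma_config *cfg, void *buf0, void *buf1,
		       volatile void *tx_reg, size_t size)
{
	static struct dma_block_config blk[2];
	int ret;

	blk[0].source_address = (uint32_t)buf0;
	blk[0].dest_address = (uint32_t)tx_reg;
	blk[0].block_size = size;
	blk[0].next_block = &blk[1];	/* hardware follows this link */

	blk[1].source_address = (uint32_t)buf1;
	blk[1].dest_address = (uint32_t)tx_reg;
	blk[1].block_size = size;
	blk[1].next_block = NULL;	/* end of the chain */

	cfg->channel_direction = MEMORY_TO_PERIPHERAL;
	cfg->block_count = 2;
	cfg->head_block = &blk[0];

	ret = dma_config(dma_dev, channel, cfg);
	if (ret < 0) {
		return ret;
	}
	return dma_start(dma_dev, channel);
}
```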

Signed-off-by: Piotr Mienkowski <piotr.mienkowski@gmail.com>
Piotr Mienkowski authored on 2021-07-02 02:42:29 +02:00; committed by Christopher Friedt
commit 4566d0ec58


@@ -77,11 +77,12 @@ struct stream {
 	int32_t state;
 	struct k_sem sem;
 	uint32_t dma_channel;
-	struct dma_config dma_cfg;
+	uint8_t dma_perid;
+	uint8_t word_size_bytes;
+	bool last_block;
 	struct i2s_config cfg;
 	struct ring_buf mem_block_queue;
 	void *mem_block;
-	bool last_block;
 	int (*stream_start)(struct stream *, Ssc *const,
 			    const struct device *);
 	void (*stream_disable)(struct stream *, Ssc *const,
@@ -165,6 +166,21 @@ static int queue_put(struct ring_buf *rb, void *mem_block, size_t size)
 	return 0;
 }
 
+static int reload_dma(const struct device *dev_dma, uint32_t channel,
+		      void *src, void *dst, size_t size)
+{
+	int ret;
+
+	ret = dma_reload(dev_dma, channel, (uint32_t)src, (uint32_t)dst, size);
+	if (ret < 0) {
+		return ret;
+	}
+
+	ret = dma_start(dev_dma, channel);
+
+	return ret;
+}
+
 static int start_dma(const struct device *dev_dma, uint32_t channel,
 		     struct dma_config *cfg, void *src, void *dst,
 		     uint32_t blk_size)
@@ -235,11 +251,11 @@ static void dma_rx_callback(const struct device *dma_dev, void *user_data,
 		goto rx_disable;
 	}
 
-	ret = start_dma(dev_cfg->dev_dma, stream->dma_channel, &stream->dma_cfg,
-			(void *)&(ssc->SSC_RHR), stream->mem_block,
-			stream->cfg.block_size);
+	ret = reload_dma(dev_cfg->dev_dma, stream->dma_channel,
+			 (void *)&(ssc->SSC_RHR), stream->mem_block,
+			 stream->cfg.block_size);
 	if (ret < 0) {
-		LOG_DBG("Failed to start RX DMA transfer: %d", ret);
+		LOG_DBG("Failed to reload RX DMA transfer: %d", ret);
 		goto rx_disable;
 	}
@@ -296,11 +312,11 @@ static void dma_tx_callback(const struct device *dma_dev, void *user_data,
 	/* Assure cache coherency before DMA read operation */
 	DCACHE_CLEAN(stream->mem_block, mem_block_size);
 
-	ret = start_dma(dev_cfg->dev_dma, stream->dma_channel, &stream->dma_cfg,
-			stream->mem_block, (void *)&(ssc->SSC_THR),
-			mem_block_size);
+	ret = reload_dma(dev_cfg->dev_dma, stream->dma_channel,
+			 stream->mem_block, (void *)&(ssc->SSC_THR),
+			 mem_block_size);
 	if (ret < 0) {
-		LOG_DBG("Failed to start TX DMA transfer: %d", ret);
+		LOG_DBG("Failed to reload TX DMA transfer: %d", ret);
 		goto tx_disable;
 	}
@@ -542,7 +558,6 @@ static int i2s_sam_configure(const struct device *dev, enum i2s_dir dir,
 	Ssc *const ssc = dev_cfg->regs;
 	uint8_t num_words = i2s_cfg->channels;
 	uint8_t word_size_bits = i2s_cfg->word_size;
-	uint8_t word_size_bytes;
 	uint32_t bit_clk_freq;
 	struct stream *stream;
 	int ret;
@@ -608,11 +623,8 @@ static int i2s_sam_configure(const struct device *dev, enum i2s_dir dir,
 		return ret;
 	}
 
-	word_size_bytes = get_word_size_bytes(word_size_bits);
-
-	/* Set up DMA channel parameters */
-	stream->dma_cfg.source_data_size = word_size_bytes;
-	stream->dma_cfg.dest_data_size = word_size_bytes;
+	stream->word_size_bytes = get_word_size_bytes(word_size_bits);
 
 	if (i2s_cfg->options & I2S_OPT_LOOPBACK) {
 		ssc->SSC_RFMR |= SSC_RFMR_LOOP;
@@ -640,7 +652,18 @@ static int rx_stream_start(struct stream *stream, Ssc *const ssc,
 	 */
 	(void)ssc->SSC_RHR;
 
-	ret = start_dma(dev_dma, stream->dma_channel, &stream->dma_cfg,
+	struct dma_config dma_cfg = {
+		.source_data_size = stream->word_size_bytes,
+		.dest_data_size = stream->word_size_bytes,
+		.block_count = 1,
+		.dma_slot = stream->dma_perid,
+		.channel_direction = PERIPHERAL_TO_MEMORY,
+		.source_burst_length = 1,
+		.dest_burst_length = 1,
+		.dma_callback = dma_rx_callback,
+	};
+
+	ret = start_dma(dev_dma, stream->dma_channel, &dma_cfg,
 			(void *)&(ssc->SSC_RHR), stream->mem_block,
 			stream->cfg.block_size);
 	if (ret < 0) {
@@ -678,10 +701,21 @@ static int tx_stream_start(struct stream *stream, Ssc *const ssc,
 	 */
 	ssc->SSC_THR = 0;
 
+	struct dma_config dma_cfg = {
+		.source_data_size = stream->word_size_bytes,
+		.dest_data_size = stream->word_size_bytes,
+		.block_count = 1,
+		.dma_slot = stream->dma_perid,
+		.channel_direction = MEMORY_TO_PERIPHERAL,
+		.source_burst_length = 1,
+		.dest_burst_length = 1,
+		.dma_callback = dma_tx_callback,
+	};
+
 	/* Assure cache coherency before DMA read operation */
 	DCACHE_CLEAN(stream->mem_block, mem_block_size);
 
-	ret = start_dma(dev_dma, stream->dma_channel, &stream->dma_cfg,
+	ret = start_dma(dev_dma, stream->dma_channel, &dma_cfg,
 			stream->mem_block, (void *)&(ssc->SSC_THR),
 			mem_block_size);
 	if (ret < 0) {
@@ -994,14 +1028,7 @@ struct queue_item tx_0_ring_buf[CONFIG_I2S_SAM_SSC_TX_BLOCK_COUNT + 1];
 static struct i2s_sam_dev_data i2s0_sam_data = {
 	.rx = {
 		.dma_channel = DT_INST_DMAS_CELL_BY_NAME(0, rx, channel),
-		.dma_cfg = {
-			.block_count = 1,
-			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(0, rx, perid),
-			.channel_direction = PERIPHERAL_TO_MEMORY,
-			.source_burst_length = 1,
-			.dest_burst_length = 1,
-			.dma_callback = dma_rx_callback,
-		},
+		.dma_perid = DT_INST_DMAS_CELL_BY_NAME(0, rx, perid),
 		.mem_block_queue.buf = rx_0_ring_buf,
 		.mem_block_queue.len = ARRAY_SIZE(rx_0_ring_buf),
 		.stream_start = rx_stream_start,
@@ -1011,14 +1038,7 @@ static struct i2s_sam_dev_data i2s0_sam_data = {
 	},
 	.tx = {
 		.dma_channel = DT_INST_DMAS_CELL_BY_NAME(0, tx, channel),
-		.dma_cfg = {
-			.block_count = 1,
-			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(0, tx, perid),
-			.channel_direction = MEMORY_TO_PERIPHERAL,
-			.source_burst_length = 1,
-			.dest_burst_length = 1,
-			.dma_callback = dma_tx_callback,
-		},
+		.dma_perid = DT_INST_DMAS_CELL_BY_NAME(0, tx, perid),
 		.mem_block_queue.buf = tx_0_ring_buf,
 		.mem_block_queue.len = ARRAY_SIZE(tx_0_ring_buf),
 		.stream_start = tx_stream_start,