drivers: i2s: mcux_sai: fixed SAI driver

Includes several driver fixes and improvements to leverage the
scatter/gather mode of DMA. Loads multiple DMA blocks into TCDs.

Signed-off-by: Derek Snell <derek.snell@nxp.com>
This commit is contained in:
Derek Snell 2022-02-25 12:48:37 -05:00 committed by Carles Cufí
commit 16a09b026e

View file

@ -33,8 +33,22 @@
LOG_MODULE_REGISTER(LOG_DOMAIN); LOG_MODULE_REGISTER(LOG_DOMAIN);
#define DT_DRV_COMPAT nxp_mcux_i2s #define DT_DRV_COMPAT nxp_mcux_i2s
#define NUM_DMA_BLOCKS_RX_PREP 3
#define MAX_TX_DMA_BLOCKS CONFIG_DMA_TCD_QUEUE_SIZE
#if (NUM_DMA_BLOCKS_RX_PREP >= CONFIG_DMA_TCD_QUEUE_SIZE)
#error NUM_DMA_BLOCKS_RX_PREP must be < CONFIG_DMA_TCD_QUEUE_SIZE
#endif
#if defined(CONFIG_DMA_MCUX_EDMA) && (NUM_DMA_BLOCKS_RX_PREP < 3)
#error eDMA avoids TCD coherency issue if NUM_DMA_BLOCKS_RX_PREP >= 3
#endif
/* /*
* SAI driver uses source_gather_en/dest_scatter_en feature of DMA, and relies
* on DMA driver managing circular list of DMA blocks. Like eDMA driver links
* Transfer Control Descriptors (TCDs) in list, and manages the tcdpool.
* Calling dma_reload() adds new DMA block to DMA channel already configured,
* into the DMA driver's circular list of blocks.
* This indicates the Tx/Rx stream. * This indicates the Tx/Rx stream.
* *
* in_queue and out_queue are used as follows * in_queue and out_queue are used as follows
@ -58,9 +72,10 @@ struct stream {
struct i2s_config cfg; struct i2s_config cfg;
struct dma_config dma_cfg; struct dma_config dma_cfg;
struct dma_block_config dma_block; struct dma_block_config dma_block;
uint8_t free_tx_dma_blocks;
bool last_block;
struct k_msgq in_queue; struct k_msgq in_queue;
struct k_msgq out_queue; struct k_msgq out_queue;
uint32_t stream_starving;
}; };
struct i2s_mcux_config { struct i2s_mcux_config {
@ -97,7 +112,6 @@ struct i2s_dev_data {
void *rx_out_msgs[CONFIG_I2S_RX_BLOCK_COUNT]; void *rx_out_msgs[CONFIG_I2S_RX_BLOCK_COUNT];
}; };
static void i2s_dma_tx_callback(const struct device *, void *, static void i2s_dma_tx_callback(const struct device *, void *,
uint32_t, int); uint32_t, int);
static void i2s_tx_stream_disable(const struct device *, bool drop); static void i2s_tx_stream_disable(const struct device *, bool drop);
@ -148,8 +162,6 @@ static void i2s_tx_stream_disable(const struct device *dev, bool drop)
/* Disable Tx */ /* Disable Tx */
SAI_TxEnable(dev_cfg->base, false); SAI_TxEnable(dev_cfg->base, false);
strm->stream_starving = false;
/* If Tx is disabled, reset the FIFO pointer, clear error flags */ /* If Tx is disabled, reset the FIFO pointer, clear error flags */
if ((dev_cfg->base->TCSR & I2S_TCSR_TE_MASK) == 0UL) { if ((dev_cfg->base->TCSR & I2S_TCSR_TE_MASK) == 0UL) {
dev_cfg->base->TCSR |= dev_cfg->base->TCSR |=
@ -185,35 +197,82 @@ static void i2s_rx_stream_disable(const struct device *dev,
/* Disable Rx */ /* Disable Rx */
SAI_RxEnable(dev_cfg->base, false); SAI_RxEnable(dev_cfg->base, false);
/* wait for Receiver to disable */
while (dev_cfg->base->RCSR & I2S_RCSR_RE_MASK)
;
/* reset the FIFO pointer and clear error flags */
dev_cfg->base->RCSR |= (I2S_RCSR_FR_MASK | I2S_RCSR_SR_MASK);
dev_cfg->base->RCSR &= ~I2S_RCSR_SR_MASK;
/* purge buffers queued in the stream */ /* purge buffers queued in the stream */
if (in_drop || out_drop) { if (in_drop || out_drop) {
i2s_purge_stream_buffers(strm, dev_data->rx.cfg.mem_slab, i2s_purge_stream_buffers(strm, dev_data->rx.cfg.mem_slab,
in_drop, out_drop); in_drop, out_drop);
} }
}
strm->stream_starving = false; static int i2s_tx_reload_multiple_dma_blocks(const struct device *dev,
uint8_t *blocks_queued)
{
struct i2s_dev_data *dev_data = dev->data;
const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base;
struct stream *strm = &dev_data->tx;
void *buffer = NULL;
int ret = 0;
unsigned int key;
/* If Rx is disabled, reset the FIFO pointer *blocks_queued = 0;
* and clear error flags
*/ key = irq_lock();
if ((dev_cfg->base->RCSR & I2S_RCSR_RE_MASK) == 0UL) {
dev_cfg->base->RCSR |= /* queue additional blocks to DMA if in_queue and DMA has free blocks */
(I2S_RCSR_FR_MASK | I2S_RCSR_SR_MASK); while (strm->free_tx_dma_blocks) {
dev_cfg->base->RCSR &= ~I2S_RCSR_SR_MASK; /* get the next buffer from queue */
ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
if (ret) {
/* in_queue is empty, no more blocks to send to DMA */
ret = 0;
break;
}
/* reload the DMA */
ret = dma_reload(dev_data->dev_dma, strm->dma_channel,
(uint32_t)buffer,
(uint32_t)&base->TDR[strm->start_channel],
strm->cfg.block_size);
if (ret != 0) {
LOG_ERR("dma_reload() failed with error 0x%x", ret);
break;
}
(strm->free_tx_dma_blocks)--;
ret = k_msgq_put(&strm->out_queue,
&buffer, K_NO_WAIT);
if (ret != 0) {
LOG_ERR("buffer %p -> out %p err %d",
buffer, &strm->out_queue, ret);
break;
}
(*blocks_queued)++;
} }
irq_unlock(key);
return ret;
} }
/* This function is executed in the interrupt context */ /* This function is executed in the interrupt context */
static void i2s_dma_tx_callback(const struct device *dma_dev, static void i2s_dma_tx_callback(const struct device *dma_dev,
void *arg, uint32_t channel, int status) void *arg, uint32_t channel, int status)
{ {
const struct device *dev = (const struct device *)arg; const struct device *dev = (struct device *)arg;
const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base;
struct i2s_dev_data *dev_data = dev->data; struct i2s_dev_data *dev_data = dev->data;
struct stream *strm = &dev_data->tx; struct stream *strm = &dev_data->tx;
void *buffer = NULL; void *buffer = NULL;
int ret; int ret;
uint8_t blocks_queued;
LOG_DBG("tx cb"); LOG_DBG("tx cb");
@ -221,64 +280,79 @@ static void i2s_dma_tx_callback(const struct device *dma_dev,
if (ret == 0) { if (ret == 0) {
/* transmission complete. free the buffer */ /* transmission complete. free the buffer */
k_mem_slab_free(strm->cfg.mem_slab, &buffer); k_mem_slab_free(strm->cfg.mem_slab, &buffer);
(strm->free_tx_dma_blocks)++;
} else { } else {
LOG_ERR("no buf in out queue for channel %u", channel); LOG_ERR("no buf in out_queue for channel %u", channel);
}
if (strm->free_tx_dma_blocks > MAX_TX_DMA_BLOCKS) {
strm->state = I2S_STATE_ERROR;
LOG_ERR("free_tx_dma_blocks exceeded maximum, now %d",
strm->free_tx_dma_blocks);
goto disabled_exit_no_drop;
}
/* Received a STOP trigger, terminate TX immediately */
if (strm->last_block) {
strm->state = I2S_STATE_READY;
LOG_DBG("TX STOPPED last_block set");
goto disabled_exit_no_drop;
}
if (ret) {
/* k_msgq_get() returned error, and was not last_block */
strm->state = I2S_STATE_ERROR;
goto disabled_exit_no_drop;
} }
switch (strm->state) { switch (strm->state) {
case I2S_STATE_RUNNING: case I2S_STATE_RUNNING:
/* get the next buffer from queue */
if (strm->in_queue.used_msgs == 0) {
LOG_DBG("tx no more in queue data");
i2s_tx_stream_disable(dev, false);
/* stream is starving */
strm->stream_starving = true;
return;
}
ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
if (ret == 0) {
/* config the DMA */
uint32_t data_path = dev_data->tx.start_channel;
strm->dma_block.block_size = strm->cfg.block_size;
strm->dma_block.dest_address =
(uint32_t)&base->TDR[data_path];
strm->dma_block.source_address = (uint32_t)buffer;
dma_config(dev_data->dev_dma, strm->dma_channel,
&strm->dma_cfg);
ret = k_msgq_put(&strm->out_queue,
&buffer, K_NO_WAIT);
if (ret != 0) {
LOG_ERR("buffer %p -> out %p err %d",
buffer, &strm->out_queue, ret);
}
dma_start(dev_data->dev_dma, strm->dma_channel);
}
if (ret || status) {
/*
* DMA encountered an error (status != 0)
* or No buffers in input queue
*/
LOG_ERR("DMA status %08x chan %u get ret %d",
status, channel, ret);
strm->state = I2S_STATE_READY;
i2s_tx_stream_disable(dev, false);
}
break;
case I2S_STATE_STOPPING: case I2S_STATE_STOPPING:
i2s_tx_stream_disable(dev, true); ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued);
strm->state = I2S_STATE_READY;
break; if (ret) {
strm->state = I2S_STATE_ERROR;
goto disabled_exit_no_drop;
}
dma_start(dev_data->dev_dma, strm->dma_channel);
if (blocks_queued ||
(strm->free_tx_dma_blocks < MAX_TX_DMA_BLOCKS)) {
goto enabled_exit;
} else {
/* all DMA blocks are free but no blocks were queued */
if (strm->state == I2S_STATE_STOPPING) {
/* TX queue has drained */
strm->state = I2S_STATE_READY;
LOG_DBG("TX stream has stopped");
} else {
strm->state = I2S_STATE_ERROR;
LOG_ERR("TX Failed to reload DMA");
}
goto disabled_exit_no_drop;
}
case I2S_STATE_ERROR:
default:
goto disabled_exit_drop;
} }
disabled_exit_no_drop:
i2s_tx_stream_disable(dev, false);
return;
disabled_exit_drop:
i2s_tx_stream_disable(dev, true);
return;
enabled_exit:
return;
} }
static void i2s_dma_rx_callback(const struct device *dma_dev, static void i2s_dma_rx_callback(const struct device *dma_dev,
void *arg, uint32_t channel, int status) void *arg, uint32_t channel, int status)
{ {
const struct device *dev = (const struct device *)arg; struct device *dev = (struct device *)arg;
const struct i2s_mcux_config *dev_cfg = dev->config; const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base; I2S_Type *base = (I2S_Type *)dev_cfg->base;
struct i2s_dev_data *dev_data = dev->data; struct i2s_dev_data *dev_data = dev->data;
@ -287,21 +361,23 @@ static void i2s_dma_rx_callback(const struct device *dma_dev,
int ret; int ret;
LOG_DBG("RX cb"); LOG_DBG("RX cb");
switch (strm->state) { switch (strm->state) {
case I2S_STATE_STOPPING: case I2S_STATE_STOPPING:
case I2S_STATE_RUNNING: case I2S_STATE_RUNNING:
/* retrieve buffer from input queue */ /* retrieve buffer from input queue */
ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT); ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
if (ret != 0) { __ASSERT_NO_MSG(ret == 0);
LOG_ERR("get buffer from in_queue %p failed (%d)",
&strm->in_queue, ret);
}
/* put buffer to output queue */ /* put buffer to output queue */
ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT); ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT);
if (ret != 0) { if (ret != 0) {
LOG_ERR("buffer %p -> out_queue %p err %d", LOG_ERR("buffer %p -> out_queue %p err %d",
buffer, buffer,
&strm->out_queue, ret); &strm->out_queue, ret);
i2s_rx_stream_disable(dev, false, false);
strm->state = I2S_STATE_ERROR;
return;
} }
if (strm->state == I2S_STATE_RUNNING) { if (strm->state == I2S_STATE_RUNNING) {
/* allocate new buffer for next audio frame */ /* allocate new buffer for next audio frame */
@ -314,6 +390,21 @@ static void i2s_dma_rx_callback(const struct device *dma_dev,
strm->state = I2S_STATE_ERROR; strm->state = I2S_STATE_ERROR;
} else { } else {
uint32_t data_path = strm->start_channel; uint32_t data_path = strm->start_channel;
ret = dma_reload(dev_data->dev_dma,
strm->dma_channel,
(uint32_t)&base->RDR[data_path],
(uint32_t)buffer,
strm->cfg.block_size);
if (ret != 0) {
LOG_ERR("dma_reload() failed with error 0x%x",
ret);
i2s_rx_stream_disable(dev,
false, false);
strm->state = I2S_STATE_ERROR;
return;
}
/* put buffer in input queue */ /* put buffer in input queue */
ret = k_msgq_put(&strm->in_queue, ret = k_msgq_put(&strm->in_queue,
&buffer, K_NO_WAIT); &buffer, K_NO_WAIT);
@ -323,23 +414,12 @@ static void i2s_dma_rx_callback(const struct device *dma_dev,
ret); ret);
} }
strm->dma_block.block_size =
strm->cfg.block_size;
strm->dma_block.dest_address =
(uint32_t)buffer;
strm->dma_block.source_address = (uint32_t)
&base->RDR[data_path];
dma_config(dev_data->dev_dma,
strm->dma_channel,
&strm->dma_cfg);
/* disable rx interrupt and wait for next read*/
SAI_RxEnableDMA(dev_cfg->base,
kSAI_FIFORequestDMAEnable, false);
dma_start(dev_data->dev_dma, dma_start(dev_data->dev_dma,
strm->dma_channel); strm->dma_channel);
} }
} else { } else {
/* Received a STOP trigger */ i2s_rx_stream_disable(dev, true, false);
/* Received a STOP/DRAIN trigger */
strm->state = I2S_STATE_READY; strm->state = I2S_STATE_READY;
} }
break; break;
@ -354,13 +434,15 @@ static void enable_mclk_direction(const struct device *dev, bool dir)
const struct i2s_mcux_config *dev_cfg = dev->config; const struct i2s_mcux_config *dev_cfg = dev->config;
uint32_t offset = dev_cfg->mclk_pin_offset; uint32_t offset = dev_cfg->mclk_pin_offset;
uint32_t mask = dev_cfg->mclk_pin_mask; uint32_t mask = dev_cfg->mclk_pin_mask;
uint32_t *gpr = (uint32_t *)DT_REG_ADDR(DT_NODELABEL(iomuxcgpr)) + offset; uint32_t *gpr = (uint32_t *)
(DT_REG_ADDR(DT_NODELABEL(iomuxcgpr)) + offset);
if (dir) { if (dir) {
*gpr |= mask; *gpr |= mask;
} else { } else {
*gpr &= ~mask; *gpr &= ~mask;
} }
} }
static void get_mclk_rate(const struct device *dev, uint32_t *mclk) static void get_mclk_rate(const struct device *dev, uint32_t *mclk)
@ -452,9 +534,9 @@ static int i2s_mcux_config(const struct device *dev, enum i2s_dir dir,
memset(&config, 0, sizeof(config)); memset(&config, 0, sizeof(config));
const bool is_mclk_master = i2s_cfg->options & I2S_OPT_BIT_CLK_MASTER; const bool is_mclk_slave = i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE;
enable_mclk_direction(dev, is_mclk_master); enable_mclk_direction(dev, !is_mclk_slave);
get_mclk_rate(dev, &mclk); get_mclk_rate(dev, &mclk);
LOG_DBG("mclk is %d", mclk); LOG_DBG("mclk is %d", mclk);
@ -646,8 +728,9 @@ const struct i2s_config *i2s_mcux_config_get(const struct device *dev,
{ {
struct i2s_dev_data *dev_data = dev->data; struct i2s_dev_data *dev_data = dev->data;
if (dir == I2S_DIR_RX) if (dir == I2S_DIR_RX) {
return &dev_data->rx.cfg; return &dev_data->rx.cfg;
}
return &dev_data->tx.cfg; return &dev_data->tx.cfg;
} }
@ -658,7 +741,6 @@ static int i2s_tx_stream_start(const struct device *dev)
void *buffer; void *buffer;
struct i2s_dev_data *dev_data = dev->data; struct i2s_dev_data *dev_data = dev->data;
struct stream *strm = &dev_data->tx; struct stream *strm = &dev_data->tx;
uint32_t data_path = strm->start_channel;
const struct device *dev_dma = dev_data->dev_dma; const struct device *dev_dma = dev_data->dev_dma;
const struct i2s_mcux_config *dev_cfg = dev->config; const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base; I2S_Type *base = (I2S_Type *)dev_cfg->base;
@ -672,11 +754,28 @@ static int i2s_tx_stream_start(const struct device *dev)
LOG_DBG("tx stream start"); LOG_DBG("tx stream start");
strm->dma_block.block_size = strm->cfg.block_size; /* Driver keeps track of how many DMA blocks can be loaded to the DMA */
strm->dma_block.dest_address = strm->free_tx_dma_blocks = MAX_TX_DMA_BLOCKS;
(uint32_t)&base->RDR[data_path];
strm->dma_block.source_address = (uint32_t)buffer;
/* Configure the DMA with the first TX block */
struct dma_block_config *blk_cfg = &strm->dma_block;
memset(blk_cfg, 0, sizeof(struct dma_block_config));
uint32_t data_path = strm->start_channel;
blk_cfg->dest_address = (uint32_t)&base->TDR[data_path];
blk_cfg->source_address = (uint32_t)buffer;
blk_cfg->block_size = strm->cfg.block_size;
blk_cfg->dest_scatter_en = 1;
strm->dma_cfg.block_count = 1;
strm->dma_cfg.head_block = &strm->dma_block;
strm->dma_cfg.user_data = (void *)dev;
(strm->free_tx_dma_blocks)--;
dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg); dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg);
/* put buffer in output queue */ /* put buffer in output queue */
@ -686,6 +785,14 @@ static int i2s_tx_stream_start(const struct device *dev)
return ret; return ret;
} }
uint8_t blocks_queued;
ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued);
if (ret) {
LOG_ERR("i2s_tx_reload_multiple_dma_blocks() failed (%d)", ret);
return ret;
}
ret = dma_start(dev_dma, strm->dma_channel); ret = dma_start(dev_dma, strm->dma_channel);
if (ret < 0) { if (ret < 0) {
LOG_ERR("dma_start failed (%d)", ret); LOG_ERR("dma_start failed (%d)", ret);
@ -693,15 +800,13 @@ static int i2s_tx_stream_start(const struct device *dev)
} }
/* Enable DMA enable bit */ /* Enable DMA enable bit */
SAI_TxEnableDMA(dev_cfg->base, kSAI_FIFORequestDMAEnable, true); SAI_TxEnableDMA(base, kSAI_FIFORequestDMAEnable, true);
/* Enable SAI Tx clock */
SAI_TxEnable(dev_cfg->base, true);
/* Enable the channel FIFO */ /* Enable the channel FIFO */
dev_cfg->base->TCR3 |= I2S_TCR3_TCE(1UL << strm->start_channel); base->TCR3 |= I2S_TCR3_TCE(1UL << strm->start_channel);
strm->stream_starving = false; /* Enable SAI Tx clock */
SAI_TxEnable(base, true);
return 0; return 0;
} }
@ -715,28 +820,80 @@ static int i2s_rx_stream_start(const struct device *dev)
const struct device *dev_dma = dev_data->dev_dma; const struct device *dev_dma = dev_data->dev_dma;
const struct i2s_mcux_config *dev_cfg = dev->config; const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base; I2S_Type *base = (I2S_Type *)dev_cfg->base;
uint32_t data_path = strm->start_channel; uint8_t num_of_bufs;
/* allocate receive buffer from SLAB */ num_of_bufs = k_mem_slab_num_free_get(strm->cfg.mem_slab);
ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT);
/*
* Need at least NUM_DMA_BLOCKS_RX_PREP buffers on the RX memory slab
* for reliable DMA reception.
*/
if (num_of_bufs < NUM_DMA_BLOCKS_RX_PREP) {
return -EINVAL;
}
/* allocate 1st receive buffer from SLAB */
ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer,
K_NO_WAIT);
if (ret != 0) { if (ret != 0) {
LOG_DBG("buffer alloc from mem_slab failed (%d)", ret); LOG_DBG("buffer alloc from mem_slab failed (%d)", ret);
return ret; return ret;
} }
strm->dma_block.block_size = strm->cfg.block_size; /* Configure DMA block */
strm->dma_block.dest_address = (uint32_t)buffer; struct dma_block_config *blk_cfg = &strm->dma_block;
strm->dma_block.source_address =
(uint32_t)&base->RDR[data_path]; memset(blk_cfg, 0, sizeof(struct dma_block_config));
uint32_t data_path = strm->start_channel;
blk_cfg->dest_address = (uint32_t)buffer;
blk_cfg->source_address = (uint32_t)&base->RDR[data_path];
blk_cfg->block_size = strm->cfg.block_size;
blk_cfg->source_gather_en = 1;
strm->dma_cfg.block_count = 1;
strm->dma_cfg.head_block = &strm->dma_block;
strm->dma_cfg.user_data = (void *)dev;
dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg); dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg);
/* put buffer in input queue */ /* put buffer in input queue */
ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT); ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT);
if (ret != 0) { if (ret != 0) {
LOG_ERR("failed to put buffer in output queue"); LOG_ERR("failed to put buffer in input queue, ret1 %d", ret);
return ret; return ret;
} }
/* prep DMA for each of remaining (NUM_DMA_BLOCKS_RX_PREP-1) buffers */
for (int i = 0; i < NUM_DMA_BLOCKS_RX_PREP - 1; i++) {
/* allocate receive buffer from SLAB */
ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer,
K_NO_WAIT);
if (ret != 0) {
LOG_ERR("buffer alloc from mem_slab failed (%d)", ret);
return ret;
}
ret = dma_reload(dev_dma, strm->dma_channel,
(uint32_t)&base->RDR[data_path],
(uint32_t)buffer, blk_cfg->block_size);
if (ret != 0) {
LOG_ERR("dma_reload() failed with error 0x%x", ret);
return ret;
}
/* put buffer in input queue */
ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT);
if (ret != 0) {
LOG_ERR("failed to put buffer in input queue, ret2 %d",
ret);
return ret;
}
}
LOG_DBG("Starting DMA Ch%u", strm->dma_channel); LOG_DBG("Starting DMA Ch%u", strm->dma_channel);
ret = dma_start(dev_dma, strm->dma_channel); ret = dma_start(dev_dma, strm->dma_channel);
if (ret < 0) { if (ret < 0) {
@ -746,15 +903,13 @@ static int i2s_rx_stream_start(const struct device *dev)
} }
/* Enable DMA enable bit */ /* Enable DMA enable bit */
SAI_RxEnableDMA(dev_cfg->base, kSAI_FIFORequestDMAEnable, true); SAI_RxEnableDMA(base, kSAI_FIFORequestDMAEnable, true);
/* Enable the channel FIFO */ /* Enable the channel FIFO */
dev_cfg->base->RCR3 |= I2S_RCR3_RCE(1UL << strm->start_channel); base->RCR3 |= I2S_RCR3_RCE(1UL << strm->start_channel);
/* Enable SAI Rx clock */ /* Enable SAI Rx clock */
SAI_RxEnable(dev_cfg->base, true); SAI_RxEnable(base, true);
strm->stream_starving = false;
return 0; return 0;
} }
@ -796,6 +951,7 @@ static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir,
} }
strm->state = I2S_STATE_RUNNING; strm->state = I2S_STATE_RUNNING;
strm->last_block = false;
break; break;
case I2S_TRIGGER_DROP: case I2S_TRIGGER_DROP:
@ -805,31 +961,35 @@ static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir,
ret = -EIO; ret = -EIO;
break; break;
} }
if (strm->state == I2S_STATE_READY) {
/* nothing to do in this state*/ strm->state = I2S_STATE_READY;
break;
}
if (dir == I2S_DIR_TX) { if (dir == I2S_DIR_TX) {
i2s_tx_stream_disable(dev, true); i2s_tx_stream_disable(dev, true);
} else { } else {
i2s_rx_stream_disable(dev, true, true); i2s_rx_stream_disable(dev, true, true);
} }
strm->state = I2S_STATE_READY;
break; break;
case I2S_TRIGGER_DRAIN:
case I2S_TRIGGER_STOP: case I2S_TRIGGER_STOP:
if (strm->state != I2S_STATE_RUNNING) {
LOG_ERR("STOP trigger: invalid state %d", strm->state);
ret = -EIO;
break;
}
strm->state = I2S_STATE_STOPPING;
strm->last_block = true;
break;
case I2S_TRIGGER_DRAIN:
if (strm->state != I2S_STATE_RUNNING) { if (strm->state != I2S_STATE_RUNNING) {
LOG_ERR("DRAIN/STOP trigger: invalid state %d", LOG_ERR("DRAIN/STOP trigger: invalid state %d",
strm->state); strm->state);
ret = -EIO; ret = -EIO;
break; break;
} }
if (dir == I2S_DIR_TX && strm->in_queue.used_msgs == 0) {
/* if the transmission is done, then go ready*/ strm->state = I2S_STATE_STOPPING;
strm->state = I2S_STATE_READY;
} else {
strm->state = I2S_STATE_STOPPING;
}
break; break;
case I2S_TRIGGER_PREPARE: case I2S_TRIGGER_PREPARE:
@ -859,11 +1019,10 @@ static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir,
static int i2s_mcux_read(const struct device *dev, void **mem_block, static int i2s_mcux_read(const struct device *dev, void **mem_block,
size_t *size) size_t *size)
{ {
const struct i2s_mcux_config *dev_cfg = dev->config;
struct i2s_dev_data *dev_data = dev->data; struct i2s_dev_data *dev_data = dev->data;
struct stream *strm = &dev_data->rx; struct stream *strm = &dev_data->rx;
void *buffer; void *buffer;
int ret = 0; int status, ret = 0;
LOG_DBG("i2s_mcux_read"); LOG_DBG("i2s_mcux_read");
if (strm->state == I2S_STATE_NOT_READY) { if (strm->state == I2S_STATE_NOT_READY) {
@ -871,26 +1030,18 @@ static int i2s_mcux_read(const struct device *dev, void **mem_block,
return -EIO; return -EIO;
} }
ret = k_msgq_get(&strm->out_queue, &buffer, status = k_msgq_get(&strm->out_queue, &buffer,
SYS_TIMEOUT_MS(strm->cfg.timeout)); SYS_TIMEOUT_MS(strm->cfg.timeout));
if (ret != 0) { if (status != 0) {
LOG_DBG("need retry"); if (strm->state == I2S_STATE_ERROR) {
return -EAGAIN; ret = -EIO;
} else {
LOG_DBG("need retry");
ret = -EAGAIN;
}
return ret;
} }
switch (strm->state) {
case I2S_STATE_STOPPING:
i2s_rx_stream_disable(dev, true, false);
strm->state = I2S_STATE_READY;
break;
case I2S_STATE_RUNNING:
SAI_RxEnableDMA(dev_cfg->base,
kSAI_FIFORequestDMAEnable, true);
break;
default:
case I2S_STATE_READY:
break;
}
*mem_block = buffer; *mem_block = buffer;
*size = strm->cfg.block_size; *size = strm->cfg.block_size;
return 0; return 0;
@ -913,16 +1064,10 @@ static int i2s_mcux_write(const struct device *dev, void *mem_block,
ret = k_msgq_put(&strm->in_queue, &mem_block, ret = k_msgq_put(&strm->in_queue, &mem_block,
SYS_TIMEOUT_MS(strm->cfg.timeout)); SYS_TIMEOUT_MS(strm->cfg.timeout));
if (ret) { if (ret) {
LOG_ERR("k_msgq_put failed %d", ret); LOG_DBG("k_msgq_put returned code %d", ret);
return ret; return ret;
} }
if (strm->state == I2S_STATE_RUNNING && strm->stream_starving) {
/* in running state if the stream is off turn it on*/
ret = i2s_tx_stream_start(dev);
strm->stream_starving = false;
}
return ret; return ret;
} }
@ -953,8 +1098,9 @@ static void sai_driver_irq(const struct device *dev)
} }
/* clear IRQ sources atm */ /* clear IRQ sources atm */
static void i2s_mcux_isr(const struct device *dev) static void i2s_mcux_isr(void *arg)
{ {
struct device *dev = (struct device *)arg;
const struct i2s_mcux_config *dev_cfg = dev->config; const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base; I2S_Type *base = (I2S_Type *)dev_cfg->base;
@ -983,12 +1129,23 @@ static void audio_clock_settings(const struct device *dev)
/*Clock setting for SAI*/ /*Clock setting for SAI*/
imxrt_audio_codec_pll_init(clock_name, dev_cfg->clk_src, imxrt_audio_codec_pll_init(clock_name, dev_cfg->clk_src,
dev_cfg->clk_pre_div, dev_cfg->clk_src_div); dev_cfg->clk_pre_div, dev_cfg->clk_src_div);
audioPllConfig.loopDivider = dev_cfg->pll_lp; #ifdef CONFIG_SOC_SERIES_IMX_RT11XX
audioPllConfig.postDivider = dev_cfg->pll_pd; audioPllConfig.loopDivider = dev_cfg->pll_lp;
audioPllConfig.numerator = dev_cfg->pll_num; audioPllConfig.postDivider = dev_cfg->pll_pd;
audioPllConfig.denominator = dev_cfg->pll_den; audioPllConfig.numerator = dev_cfg->pll_num;
audioPllConfig.denominator = dev_cfg->pll_den;
audioPllConfig.ssEnable = false;
#elif defined CONFIG_SOC_SERIES_IMX_RT10XX
audioPllConfig.src = dev_cfg->pll_src;
audioPllConfig.loopDivider = dev_cfg->pll_lp;
audioPllConfig.postDivider = dev_cfg->pll_pd;
audioPllConfig.numerator = dev_cfg->pll_num;
audioPllConfig.denominator = dev_cfg->pll_den;
#else
#error Initialize SOC Series-specific clock_audio_pll_config_t
#endif /* CONFIG_SOC_SERIES */
CLOCK_InitAudioPll(&audioPllConfig); CLOCK_InitAudioPll(&audioPllConfig);
} }
@ -1037,14 +1194,14 @@ static int i2s_mcux_initialize(const struct device *dev)
dev_data->tx.state = I2S_STATE_NOT_READY; dev_data->tx.state = I2S_STATE_NOT_READY;
dev_data->rx.state = I2S_STATE_NOT_READY; dev_data->rx.state = I2S_STATE_NOT_READY;
#if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \ #if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \
(defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \ (defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \
(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER)) (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER))
sai_master_clock_t mclkConfig = { sai_master_clock_t mclkConfig = {
#if defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR) #if defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)
.mclkOutputEnable = true, .mclkOutputEnable = true,
#if !(defined(FSL_FEATURE_SAI_HAS_NO_MCR_MICS) && \ #if !(defined(FSL_FEATURE_SAI_HAS_NO_MCR_MICS) && \
(FSL_FEATURE_SAI_HAS_NO_MCR_MICS)) (FSL_FEATURE_SAI_HAS_NO_MCR_MICS))
.mclkSource = kSAI_MclkSourceSysclk, .mclkSource = kSAI_MclkSourceSysclk,
#endif #endif
#endif #endif
@ -1055,7 +1212,7 @@ static int i2s_mcux_initialize(const struct device *dev)
/* master clock configurations */ /* master clock configurations */
#if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \ #if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \
(defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \ (defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \
(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER)) (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER))
#if defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \ #if defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \
(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) (FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER)
mclkConfig.mclkHz = mclk; mclkConfig.mclkHz = mclk;