drivers: i2s: mcux_sai: fixed SAI driver
Includes several driver fixes and improvements to leverage the DMA's scatter/gather mode by loading multiple DMA blocks into TCDs.

Signed-off-by: Derek Snell <derek.snell@nxp.com>
This commit is contained in:
parent 60dd576975
commit 16a09b026e
1 changed file with 314 additions and 157 deletions
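The core idea of the change is a TX reload loop: whenever the DMA finishes a block, the driver tops the channel's TCD queue back up from in_queue via dma_reload(). The fragment below is an illustrative sketch of that pattern, not code from this commit; the struct tx_stream type, its field names, and tx_reload_pending_blocks() are invented for the example, while dma_reload(), k_msgq_get(), and k_msgq_put() are the actual Zephyr APIs the driver calls in the diff that follows.

/*
 * Illustrative sketch only (not part of this commit): queue as many pending
 * TX buffers as the DMA channel has free TCD slots, appending each one to
 * the channel's scatter/gather list with dma_reload().
 */
#include <zephyr/kernel.h>
#include <zephyr/drivers/dma.h>

struct tx_stream {
	const struct device *dma_dev;  /* DMA controller servicing this stream */
	uint32_t dma_channel;          /* channel bound to the SAI TX FIFO */
	uint32_t fifo_addr;            /* destination: SAI TDR register address */
	size_t block_size;             /* bytes per audio block */
	uint8_t free_tx_dma_blocks;    /* free TCD slots in the DMA driver */
	struct k_msgq *in_queue;       /* buffers waiting to be transmitted */
	struct k_msgq *out_queue;      /* buffers currently owned by the DMA */
};

static int tx_reload_pending_blocks(struct tx_stream *strm, uint8_t *queued)
{
	void *buf;
	int ret = 0;

	*queued = 0;

	/* Keep appending blocks while the TCD queue has room */
	while (strm->free_tx_dma_blocks > 0) {
		if (k_msgq_get(strm->in_queue, &buf, K_NO_WAIT) != 0) {
			break;  /* nothing more to send right now */
		}

		/* Append this buffer as a new DMA block (memory -> SAI FIFO) */
		ret = dma_reload(strm->dma_dev, strm->dma_channel,
				 (uint32_t)buf, strm->fifo_addr,
				 strm->block_size);
		if (ret != 0) {
			break;
		}

		strm->free_tx_dma_blocks--;

		/* Remember the buffer so the TX-complete callback can free it */
		ret = k_msgq_put(strm->out_queue, &buf, K_NO_WAIT);
		if (ret != 0) {
			break;
		}

		(*queued)++;
	}

	return ret;
}

The driver's own version of this loop is i2s_tx_reload_multiple_dma_blocks(), visible in the hunks below.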
@@ -33,8 +33,22 @@
LOG_MODULE_REGISTER(LOG_DOMAIN);

#define DT_DRV_COMPAT nxp_mcux_i2s
#define NUM_DMA_BLOCKS_RX_PREP 3
#define MAX_TX_DMA_BLOCKS CONFIG_DMA_TCD_QUEUE_SIZE
#if (NUM_DMA_BLOCKS_RX_PREP >= CONFIG_DMA_TCD_QUEUE_SIZE)
#error NUM_DMA_BLOCKS_RX_PREP must be < CONFIG_DMA_TCD_QUEUE_SIZE
#endif
#if defined(CONFIG_DMA_MCUX_EDMA) && (NUM_DMA_BLOCKS_RX_PREP < 3)
#error eDMA avoids TCD coherency issue if NUM_DMA_BLOCKS_RX_PREP >= 3
#endif

/*
* SAI driver uses source_gather_en/dest_scatter_en feature of DMA, and relies
* on DMA driver managing circular list of DMA blocks. Like eDMA driver links
* Transfer Control Descriptors (TCDs) in list, and manages the tcdpool.
* Calling dma_reload() adds new DMA block to DMA channel already configured,
* into the DMA driver's circular list of blocks.

* This indicates the Tx/Rx stream.
*
* in_queue and out_queue are used as follows
@@ -58,9 +72,10 @@ struct stream {
struct i2s_config cfg;
struct dma_config dma_cfg;
struct dma_block_config dma_block;
uint8_t free_tx_dma_blocks;
bool last_block;
struct k_msgq in_queue;
struct k_msgq out_queue;
uint32_t stream_starving;
};

struct i2s_mcux_config {
@@ -97,7 +112,6 @@ struct i2s_dev_data {
void *rx_out_msgs[CONFIG_I2S_RX_BLOCK_COUNT];
};

static void i2s_dma_tx_callback(const struct device *, void *,
uint32_t, int);
static void i2s_tx_stream_disable(const struct device *, bool drop);
@@ -148,8 +162,6 @@ static void i2s_tx_stream_disable(const struct device *dev, bool drop)
/* Disable Tx */
SAI_TxEnable(dev_cfg->base, false);

strm->stream_starving = false;

/* If Tx is disabled, reset the FIFO pointer, clear error flags */
if ((dev_cfg->base->TCSR & I2S_TCSR_TE_MASK) == 0UL) {
dev_cfg->base->TCSR |=
@@ -185,35 +197,82 @@ static void i2s_rx_stream_disable(const struct device *dev,
/* Disable Rx */
SAI_RxEnable(dev_cfg->base, false);

/* wait for Receiver to disable */
while (dev_cfg->base->RCSR & I2S_RCSR_RE_MASK)
;
/* reset the FIFO pointer and clear error flags */
dev_cfg->base->RCSR |= (I2S_RCSR_FR_MASK | I2S_RCSR_SR_MASK);
dev_cfg->base->RCSR &= ~I2S_RCSR_SR_MASK;

/* purge buffers queued in the stream */
if (in_drop || out_drop) {
i2s_purge_stream_buffers(strm, dev_data->rx.cfg.mem_slab,
in_drop, out_drop);
}
}

strm->stream_starving = false;
static int i2s_tx_reload_multiple_dma_blocks(const struct device *dev,
uint8_t *blocks_queued)
{
struct i2s_dev_data *dev_data = dev->data;
const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base;
struct stream *strm = &dev_data->tx;
void *buffer = NULL;
int ret = 0;
unsigned int key;

/* If Rx is disabled, reset the FIFO pointer
* and clear error flags
*/
if ((dev_cfg->base->RCSR & I2S_RCSR_RE_MASK) == 0UL) {
dev_cfg->base->RCSR |=
(I2S_RCSR_FR_MASK | I2S_RCSR_SR_MASK);
dev_cfg->base->RCSR &= ~I2S_RCSR_SR_MASK;
*blocks_queued = 0;

key = irq_lock();

/* queue additional blocks to DMA if in_queue and DMA has free blocks */
while (strm->free_tx_dma_blocks) {
/* get the next buffer from queue */
ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
if (ret) {
/* in_queue is empty, no more blocks to send to DMA */
ret = 0;
break;
}

/* reload the DMA */
ret = dma_reload(dev_data->dev_dma, strm->dma_channel,
(uint32_t)buffer,
(uint32_t)&base->TDR[strm->start_channel],
strm->cfg.block_size);
if (ret != 0) {
LOG_ERR("dma_reload() failed with error 0x%x", ret);
break;
}

(strm->free_tx_dma_blocks)--;

ret = k_msgq_put(&strm->out_queue,
&buffer, K_NO_WAIT);
if (ret != 0) {
LOG_ERR("buffer %p -> out %p err %d",
buffer, &strm->out_queue, ret);
break;
}

(*blocks_queued)++;
}

irq_unlock(key);
return ret;
}

/* This function is executed in the interrupt context */
static void i2s_dma_tx_callback(const struct device *dma_dev,
void *arg, uint32_t channel, int status)
{
const struct device *dev = (const struct device *)arg;
const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base;
const struct device *dev = (struct device *)arg;
struct i2s_dev_data *dev_data = dev->data;
struct stream *strm = &dev_data->tx;
void *buffer = NULL;
int ret;
uint8_t blocks_queued;

LOG_DBG("tx cb");
@@ -221,64 +280,79 @@ static void i2s_dma_tx_callback(const struct device *dma_dev,
if (ret == 0) {
/* transmission complete. free the buffer */
k_mem_slab_free(strm->cfg.mem_slab, &buffer);
(strm->free_tx_dma_blocks)++;
} else {
LOG_ERR("no buf in out queue for channel %u", channel);
LOG_ERR("no buf in out_queue for channel %u", channel);
}

if (strm->free_tx_dma_blocks > MAX_TX_DMA_BLOCKS) {
strm->state = I2S_STATE_ERROR;
LOG_ERR("free_tx_dma_blocks exceeded maximum, now %d",
strm->free_tx_dma_blocks);
goto disabled_exit_no_drop;
}

/* Received a STOP trigger, terminate TX immediately */
if (strm->last_block) {
strm->state = I2S_STATE_READY;
LOG_DBG("TX STOPPED last_block set");
goto disabled_exit_no_drop;
}

if (ret) {
/* k_msgq_get() returned error, and was not last_block */
strm->state = I2S_STATE_ERROR;
goto disabled_exit_no_drop;
}

switch (strm->state) {
case I2S_STATE_RUNNING:
/* get the next buffer from queue */
if (strm->in_queue.used_msgs == 0) {
LOG_DBG("tx no more in queue data");
i2s_tx_stream_disable(dev, false);
/* stream is starving */
strm->stream_starving = true;
return;
}
ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
if (ret == 0) {
/* config the DMA */
uint32_t data_path = dev_data->tx.start_channel;

strm->dma_block.block_size = strm->cfg.block_size;
strm->dma_block.dest_address =
(uint32_t)&base->TDR[data_path];
strm->dma_block.source_address = (uint32_t)buffer;
dma_config(dev_data->dev_dma, strm->dma_channel,
&strm->dma_cfg);
ret = k_msgq_put(&strm->out_queue,
&buffer, K_NO_WAIT);
if (ret != 0) {
LOG_ERR("buffer %p -> out %p err %d",
buffer, &strm->out_queue, ret);
}
dma_start(dev_data->dev_dma, strm->dma_channel);
}

if (ret || status) {
/*
* DMA encountered an error (status != 0)
* or No buffers in input queue
*/
LOG_ERR("DMA status %08x chan %u get ret %d",
status, channel, ret);
strm->state = I2S_STATE_READY;
i2s_tx_stream_disable(dev, false);
}

break;

case I2S_STATE_STOPPING:
i2s_tx_stream_disable(dev, true);
strm->state = I2S_STATE_READY;
break;
ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued);

if (ret) {
strm->state = I2S_STATE_ERROR;
goto disabled_exit_no_drop;
}
dma_start(dev_data->dev_dma, strm->dma_channel);

if (blocks_queued ||
(strm->free_tx_dma_blocks < MAX_TX_DMA_BLOCKS)) {
goto enabled_exit;
} else {
/* all DMA blocks are free but no blocks were queued */
if (strm->state == I2S_STATE_STOPPING) {
/* TX queue has drained */
strm->state = I2S_STATE_READY;
LOG_DBG("TX stream has stopped");
} else {
strm->state = I2S_STATE_ERROR;
LOG_ERR("TX Failed to reload DMA");
}
goto disabled_exit_no_drop;
}

case I2S_STATE_ERROR:
default:
goto disabled_exit_drop;
}

disabled_exit_no_drop:
i2s_tx_stream_disable(dev, false);
return;

disabled_exit_drop:
i2s_tx_stream_disable(dev, true);
return;

enabled_exit:
return;
}

static void i2s_dma_rx_callback(const struct device *dma_dev,
void *arg, uint32_t channel, int status)
{
const struct device *dev = (const struct device *)arg;
struct device *dev = (struct device *)arg;
const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base;
struct i2s_dev_data *dev_data = dev->data;
@@ -287,21 +361,23 @@ static void i2s_dma_rx_callback(const struct device *dma_dev,
int ret;

LOG_DBG("RX cb");

switch (strm->state) {
case I2S_STATE_STOPPING:
case I2S_STATE_RUNNING:
/* retrieve buffer from input queue */
ret = k_msgq_get(&strm->in_queue, &buffer, K_NO_WAIT);
if (ret != 0) {
LOG_ERR("get buffer from in_queue %p failed (%d)",
&strm->in_queue, ret);
}
__ASSERT_NO_MSG(ret == 0);

/* put buffer to output queue */
ret = k_msgq_put(&strm->out_queue, &buffer, K_NO_WAIT);
if (ret != 0) {
LOG_ERR("buffer %p -> out_queue %p err %d",
buffer,
&strm->out_queue, ret);
i2s_rx_stream_disable(dev, false, false);
strm->state = I2S_STATE_ERROR;
return;
}
if (strm->state == I2S_STATE_RUNNING) {
/* allocate new buffer for next audio frame */
@@ -314,6 +390,21 @@ static void i2s_dma_rx_callback(const struct device *dma_dev,
strm->state = I2S_STATE_ERROR;
} else {
uint32_t data_path = strm->start_channel;

ret = dma_reload(dev_data->dev_dma,
strm->dma_channel,
(uint32_t)&base->RDR[data_path],
(uint32_t)buffer,
strm->cfg.block_size);
if (ret != 0) {
LOG_ERR("dma_reload() failed with error 0x%x",
ret);
i2s_rx_stream_disable(dev,
false, false);
strm->state = I2S_STATE_ERROR;
return;
}

/* put buffer in input queue */
ret = k_msgq_put(&strm->in_queue,
&buffer, K_NO_WAIT);
@@ -323,23 +414,12 @@ static void i2s_dma_rx_callback(const struct device *dma_dev,
ret);
}

strm->dma_block.block_size =
strm->cfg.block_size;
strm->dma_block.dest_address =
(uint32_t)buffer;
strm->dma_block.source_address = (uint32_t)
&base->RDR[data_path];
dma_config(dev_data->dev_dma,
strm->dma_channel,
&strm->dma_cfg);
/* disable rx interrupt and wait for next read*/
SAI_RxEnableDMA(dev_cfg->base,
kSAI_FIFORequestDMAEnable, false);
dma_start(dev_data->dev_dma,
strm->dma_channel);
strm->dma_channel);
}
} else {
/* Received a STOP trigger */
i2s_rx_stream_disable(dev, true, false);
/* Received a STOP/DRAIN trigger */
strm->state = I2S_STATE_READY;
}
break;
@@ -354,13 +434,15 @@ static void enable_mclk_direction(const struct device *dev, bool dir)
const struct i2s_mcux_config *dev_cfg = dev->config;
uint32_t offset = dev_cfg->mclk_pin_offset;
uint32_t mask = dev_cfg->mclk_pin_mask;
uint32_t *gpr = (uint32_t *)DT_REG_ADDR(DT_NODELABEL(iomuxcgpr)) + offset;
uint32_t *gpr = (uint32_t *)
(DT_REG_ADDR(DT_NODELABEL(iomuxcgpr)) + offset);

if (dir) {
*gpr |= mask;
} else {
*gpr &= ~mask;
}

}

static void get_mclk_rate(const struct device *dev, uint32_t *mclk)
@@ -452,9 +534,9 @@ static int i2s_mcux_config(const struct device *dev, enum i2s_dir dir,

memset(&config, 0, sizeof(config));

const bool is_mclk_master = i2s_cfg->options & I2S_OPT_BIT_CLK_MASTER;
const bool is_mclk_slave = i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE;

enable_mclk_direction(dev, is_mclk_master);
enable_mclk_direction(dev, !is_mclk_slave);

get_mclk_rate(dev, &mclk);
LOG_DBG("mclk is %d", mclk);
@@ -646,8 +728,9 @@ const struct i2s_config *i2s_mcux_config_get(const struct device *dev,
{
struct i2s_dev_data *dev_data = dev->data;

if (dir == I2S_DIR_RX)
if (dir == I2S_DIR_RX) {
return &dev_data->rx.cfg;
}

return &dev_data->tx.cfg;
}
@@ -658,7 +741,6 @@ static int i2s_tx_stream_start(const struct device *dev)
void *buffer;
struct i2s_dev_data *dev_data = dev->data;
struct stream *strm = &dev_data->tx;
uint32_t data_path = strm->start_channel;
const struct device *dev_dma = dev_data->dev_dma;
const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base;
@@ -672,11 +754,28 @@ static int i2s_tx_stream_start(const struct device *dev)

LOG_DBG("tx stream start");

strm->dma_block.block_size = strm->cfg.block_size;
strm->dma_block.dest_address =
(uint32_t)&base->RDR[data_path];
strm->dma_block.source_address = (uint32_t)buffer;
/* Driver keeps track of how many DMA blocks can be loaded to the DMA */
strm->free_tx_dma_blocks = MAX_TX_DMA_BLOCKS;

/* Configure the DMA with the first TX block */
struct dma_block_config *blk_cfg = &strm->dma_block;

memset(blk_cfg, 0, sizeof(struct dma_block_config));

uint32_t data_path = strm->start_channel;

blk_cfg->dest_address = (uint32_t)&base->TDR[data_path];
blk_cfg->source_address = (uint32_t)buffer;
blk_cfg->block_size = strm->cfg.block_size;
blk_cfg->dest_scatter_en = 1;

strm->dma_cfg.block_count = 1;

strm->dma_cfg.head_block = &strm->dma_block;
strm->dma_cfg.user_data = (void *)dev;

(strm->free_tx_dma_blocks)--;
dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg);

/* put buffer in output queue */
@@ -686,6 +785,14 @@ static int i2s_tx_stream_start(const struct device *dev)
return ret;
}

uint8_t blocks_queued;

ret = i2s_tx_reload_multiple_dma_blocks(dev, &blocks_queued);
if (ret) {
LOG_ERR("i2s_tx_reload_multiple_dma_blocks() failed (%d)", ret);
return ret;
}

ret = dma_start(dev_dma, strm->dma_channel);
if (ret < 0) {
LOG_ERR("dma_start failed (%d)", ret);
@@ -693,15 +800,13 @@ static int i2s_tx_stream_start(const struct device *dev)
}

/* Enable DMA enable bit */
SAI_TxEnableDMA(dev_cfg->base, kSAI_FIFORequestDMAEnable, true);

/* Enable SAI Tx clock */
SAI_TxEnable(dev_cfg->base, true);
SAI_TxEnableDMA(base, kSAI_FIFORequestDMAEnable, true);

/* Enable the channel FIFO */
dev_cfg->base->TCR3 |= I2S_TCR3_TCE(1UL << strm->start_channel);
base->TCR3 |= I2S_TCR3_TCE(1UL << strm->start_channel);

strm->stream_starving = false;
/* Enable SAI Tx clock */
SAI_TxEnable(base, true);

return 0;
}
@@ -715,28 +820,80 @@ static int i2s_rx_stream_start(const struct device *dev)
const struct device *dev_dma = dev_data->dev_dma;
const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base;
uint32_t data_path = strm->start_channel;
uint8_t num_of_bufs;

/* allocate receive buffer from SLAB */
ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer, K_NO_WAIT);
num_of_bufs = k_mem_slab_num_free_get(strm->cfg.mem_slab);

/*
* Need at least NUM_DMA_BLOCKS_RX_PREP buffers on the RX memory slab
* for reliable DMA reception.
*/
if (num_of_bufs < NUM_DMA_BLOCKS_RX_PREP) {
return -EINVAL;
}

/* allocate 1st receive buffer from SLAB */
ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer,
K_NO_WAIT);
if (ret != 0) {
LOG_DBG("buffer alloc from mem_slab failed (%d)", ret);
return ret;
}

strm->dma_block.block_size = strm->cfg.block_size;
strm->dma_block.dest_address = (uint32_t)buffer;
strm->dma_block.source_address =
(uint32_t)&base->RDR[data_path];
/* Configure DMA block */
struct dma_block_config *blk_cfg = &strm->dma_block;

memset(blk_cfg, 0, sizeof(struct dma_block_config));

uint32_t data_path = strm->start_channel;

blk_cfg->dest_address = (uint32_t)buffer;
blk_cfg->source_address = (uint32_t)&base->RDR[data_path];
blk_cfg->block_size = strm->cfg.block_size;

blk_cfg->source_gather_en = 1;

strm->dma_cfg.block_count = 1;
strm->dma_cfg.head_block = &strm->dma_block;
strm->dma_cfg.user_data = (void *)dev;

dma_config(dev_dma, strm->dma_channel, &strm->dma_cfg);

/* put buffer in input queue */
ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT);
if (ret != 0) {
LOG_ERR("failed to put buffer in output queue");
LOG_ERR("failed to put buffer in input queue, ret1 %d", ret);
return ret;
}

/* prep DMA for each of remaining (NUM_DMA_BLOCKS_RX_PREP-1) buffers */
for (int i = 0; i < NUM_DMA_BLOCKS_RX_PREP - 1; i++) {

/* allocate receive buffer from SLAB */
ret = k_mem_slab_alloc(strm->cfg.mem_slab, &buffer,
K_NO_WAIT);
if (ret != 0) {
LOG_ERR("buffer alloc from mem_slab failed (%d)", ret);
return ret;
}

ret = dma_reload(dev_dma, strm->dma_channel,
(uint32_t)&base->RDR[data_path],
(uint32_t)buffer, blk_cfg->block_size);
if (ret != 0) {
LOG_ERR("dma_reload() failed with error 0x%x", ret);
return ret;
}

/* put buffer in input queue */
ret = k_msgq_put(&strm->in_queue, &buffer, K_NO_WAIT);
if (ret != 0) {
LOG_ERR("failed to put buffer in input queue, ret2 %d",
ret);
return ret;
}
}

LOG_DBG("Starting DMA Ch%u", strm->dma_channel);
ret = dma_start(dev_dma, strm->dma_channel);
if (ret < 0) {
@@ -746,15 +903,13 @@ static int i2s_rx_stream_start(const struct device *dev)
}

/* Enable DMA enable bit */
SAI_RxEnableDMA(dev_cfg->base, kSAI_FIFORequestDMAEnable, true);
SAI_RxEnableDMA(base, kSAI_FIFORequestDMAEnable, true);

/* Enable the channel FIFO */
dev_cfg->base->RCR3 |= I2S_RCR3_RCE(1UL << strm->start_channel);
base->RCR3 |= I2S_RCR3_RCE(1UL << strm->start_channel);

/* Enable SAI Rx clock */
SAI_RxEnable(dev_cfg->base, true);

strm->stream_starving = false;
SAI_RxEnable(base, true);

return 0;
}
@@ -796,6 +951,7 @@ static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir,
}

strm->state = I2S_STATE_RUNNING;
strm->last_block = false;
break;

case I2S_TRIGGER_DROP:
@@ -805,31 +961,35 @@ static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir,
ret = -EIO;
break;
}
if (strm->state == I2S_STATE_READY) {
/* nothing to do in this state*/
break;
}

strm->state = I2S_STATE_READY;
if (dir == I2S_DIR_TX) {
i2s_tx_stream_disable(dev, true);
} else {
i2s_rx_stream_disable(dev, true, true);
}
strm->state = I2S_STATE_READY;
break;
case I2S_TRIGGER_DRAIN:

case I2S_TRIGGER_STOP:
if (strm->state != I2S_STATE_RUNNING) {
LOG_ERR("STOP trigger: invalid state %d", strm->state);
ret = -EIO;
break;
}

strm->state = I2S_STATE_STOPPING;
strm->last_block = true;
break;

case I2S_TRIGGER_DRAIN:
if (strm->state != I2S_STATE_RUNNING) {
LOG_ERR("DRAIN/STOP trigger: invalid state %d",
strm->state);
ret = -EIO;
break;
}
if (dir == I2S_DIR_TX && strm->in_queue.used_msgs == 0) {
/* if the transmission is done, then go ready*/
strm->state = I2S_STATE_READY;
} else {
strm->state = I2S_STATE_STOPPING;
}

strm->state = I2S_STATE_STOPPING;
break;

case I2S_TRIGGER_PREPARE:
@@ -859,11 +1019,10 @@ static int i2s_mcux_trigger(const struct device *dev, enum i2s_dir dir,
static int i2s_mcux_read(const struct device *dev, void **mem_block,
size_t *size)
{
const struct i2s_mcux_config *dev_cfg = dev->config;
struct i2s_dev_data *dev_data = dev->data;
struct stream *strm = &dev_data->rx;
void *buffer;
int ret = 0;
int status, ret = 0;

LOG_DBG("i2s_mcux_read");
if (strm->state == I2S_STATE_NOT_READY) {
@@ -871,26 +1030,18 @@ static int i2s_mcux_read(const struct device *dev, void **mem_block,
return -EIO;
}

ret = k_msgq_get(&strm->out_queue, &buffer,
status = k_msgq_get(&strm->out_queue, &buffer,
SYS_TIMEOUT_MS(strm->cfg.timeout));
if (ret != 0) {
LOG_DBG("need retry");
return -EAGAIN;
if (status != 0) {
if (strm->state == I2S_STATE_ERROR) {
ret = -EIO;
} else {
LOG_DBG("need retry");
ret = -EAGAIN;
}
return ret;
}

switch (strm->state) {
case I2S_STATE_STOPPING:
i2s_rx_stream_disable(dev, true, false);
strm->state = I2S_STATE_READY;
break;
case I2S_STATE_RUNNING:
SAI_RxEnableDMA(dev_cfg->base,
kSAI_FIFORequestDMAEnable, true);
break;
default:
case I2S_STATE_READY:
break;
}
*mem_block = buffer;
*size = strm->cfg.block_size;
return 0;
@@ -913,16 +1064,10 @@ static int i2s_mcux_write(const struct device *dev, void *mem_block,
ret = k_msgq_put(&strm->in_queue, &mem_block,
SYS_TIMEOUT_MS(strm->cfg.timeout));
if (ret) {
LOG_ERR("k_msgq_put failed %d", ret);
LOG_DBG("k_msgq_put returned code %d", ret);
return ret;
}

if (strm->state == I2S_STATE_RUNNING && strm->stream_starving) {
/* in running state if the stream is off turn it on*/
ret = i2s_tx_stream_start(dev);
strm->stream_starving = false;
}

return ret;
}
@@ -953,8 +1098,9 @@ static void sai_driver_irq(const struct device *dev)
}

/* clear IRQ sources atm */
static void i2s_mcux_isr(const struct device *dev)
static void i2s_mcux_isr(void *arg)
{
struct device *dev = (struct device *)arg;
const struct i2s_mcux_config *dev_cfg = dev->config;
I2S_Type *base = (I2S_Type *)dev_cfg->base;
@@ -983,12 +1129,23 @@ static void audio_clock_settings(const struct device *dev)

/*Clock setting for SAI*/
imxrt_audio_codec_pll_init(clock_name, dev_cfg->clk_src,
dev_cfg->clk_pre_div, dev_cfg->clk_src_div);
dev_cfg->clk_pre_div, dev_cfg->clk_src_div);

audioPllConfig.loopDivider = dev_cfg->pll_lp;
audioPllConfig.postDivider = dev_cfg->pll_pd;
audioPllConfig.numerator = dev_cfg->pll_num;
audioPllConfig.denominator = dev_cfg->pll_den;
#ifdef CONFIG_SOC_SERIES_IMX_RT11XX
audioPllConfig.loopDivider = dev_cfg->pll_lp;
audioPllConfig.postDivider = dev_cfg->pll_pd;
audioPllConfig.numerator = dev_cfg->pll_num;
audioPllConfig.denominator = dev_cfg->pll_den;
audioPllConfig.ssEnable = false;
#elif defined CONFIG_SOC_SERIES_IMX_RT10XX
audioPllConfig.src = dev_cfg->pll_src;
audioPllConfig.loopDivider = dev_cfg->pll_lp;
audioPllConfig.postDivider = dev_cfg->pll_pd;
audioPllConfig.numerator = dev_cfg->pll_num;
audioPllConfig.denominator = dev_cfg->pll_den;
#else
#error Initialize SOC Series-specific clock_audio_pll_config_t
#endif /* CONFIG_SOC_SERIES */

CLOCK_InitAudioPll(&audioPllConfig);
}
@@ -1037,14 +1194,14 @@ static int i2s_mcux_initialize(const struct device *dev)
dev_data->tx.state = I2S_STATE_NOT_READY;
dev_data->rx.state = I2S_STATE_NOT_READY;

#if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \
(defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \
(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER))
#if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \
(defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \
(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER))
sai_master_clock_t mclkConfig = {
#if defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)
.mclkOutputEnable = true,
#if !(defined(FSL_FEATURE_SAI_HAS_NO_MCR_MICS) && \
(FSL_FEATURE_SAI_HAS_NO_MCR_MICS))
#if !(defined(FSL_FEATURE_SAI_HAS_NO_MCR_MICS) && \
(FSL_FEATURE_SAI_HAS_NO_MCR_MICS))
.mclkSource = kSAI_MclkSourceSysclk,
#endif
#endif
@@ -1055,7 +1212,7 @@ static int i2s_mcux_initialize(const struct device *dev)
/* master clock configurations */
#if (defined(FSL_FEATURE_SAI_HAS_MCR) && (FSL_FEATURE_SAI_HAS_MCR)) || \
(defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \
(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER))
(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER))
#if defined(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER) && \
(FSL_FEATURE_SAI_HAS_MCLKDIV_REGISTER)
mclkConfig.mclkHz = mclk;
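For reference, this driver sits behind Zephyr's public I2S API (i2s_configure(), i2s_write(), i2s_trigger()), which the functions changed above implement. The fragment below is a hypothetical application-side TX sketch, not part of this commit: the sai1 node label, the clock and format options, and the buffer sizes are assumptions chosen for illustration. Queueing several blocks before I2S_TRIGGER_START gives the driver multiple DMA blocks to pre-load, as described in the commit message.

/*
 * Hypothetical usage example (not part of this commit): play a few blocks
 * of silence over a SAI instance assumed to be labeled "sai1" in devicetree.
 */
#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/i2s.h>

#define SAMPLE_BITS   16
#define NUM_CHANNELS  2
#define BLOCK_SIZE    (128 * NUM_CHANNELS * (SAMPLE_BITS / 8))
#define BLOCK_COUNT   6

K_MEM_SLAB_DEFINE(tx_slab, BLOCK_SIZE, BLOCK_COUNT, 4);

int play_silence(void)
{
	const struct device *i2s_dev = DEVICE_DT_GET(DT_NODELABEL(sai1));
	struct i2s_config cfg = {
		.word_size = SAMPLE_BITS,
		.channels = NUM_CHANNELS,
		.format = I2S_FMT_DATA_FORMAT_I2S,
		.options = I2S_OPT_BIT_CLK_MASTER | I2S_OPT_FRAME_CLK_MASTER,
		.frame_clk_freq = 48000,
		.mem_slab = &tx_slab,
		.block_size = BLOCK_SIZE,
		.timeout = 1000,
	};
	void *block;
	int ret;

	if (!device_is_ready(i2s_dev)) {
		return -ENODEV;
	}

	ret = i2s_configure(i2s_dev, I2S_DIR_TX, &cfg);
	if (ret < 0) {
		return ret;
	}

	/* Queue a few blocks so the driver can pre-load multiple DMA blocks */
	for (int i = 0; i < BLOCK_COUNT - 2; i++) {
		ret = k_mem_slab_alloc(&tx_slab, &block, K_FOREVER);
		if (ret < 0) {
			return ret;
		}
		memset(block, 0, BLOCK_SIZE);
		ret = i2s_write(i2s_dev, block, BLOCK_SIZE);
		if (ret < 0) {
			return ret;
		}
	}

	ret = i2s_trigger(i2s_dev, I2S_DIR_TX, I2S_TRIGGER_START);
	if (ret < 0) {
		return ret;
	}

	/* Let the queued blocks drain, then stop cleanly */
	return i2s_trigger(i2s_dev, I2S_DIR_TX, I2S_TRIGGER_DRAIN);
}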