drivers: spi_mcux_lpspi: Added DMA support to NXP LPSPI driver
Adds DMA support to NXP's LPSPI driver. This can be enabled by selecting the Kconfig symbol CONFIG_SPI_MCUX_LPSPI_DMA, and requires the LPSPI instances enabled in the devicetree to have valid DMA instances assigned. Signed-off-by: Daniel DeGrasse <daniel.degrasse@nxp.com>
This commit is contained in:
parent
aa464a9d67
commit
7b74dbb405
2 changed files with 313 additions and 3 deletions
|
@ -8,3 +8,12 @@ config SPI_MCUX_LPSPI
|
|||
depends on HAS_MCUX_LPSPI && CLOCK_CONTROL
|
||||
help
|
||||
Enable support for mcux spi driver.
|
||||
|
||||
if SPI_MCUX_LPSPI
|
||||
config SPI_MCUX_LPSPI_DMA
|
||||
bool "MCUX LPSPI SPI DMA Support"
|
||||
select DMA
|
||||
help
|
||||
Enable the SPI DMA mode for SPI instances
that enable DMA channels in their devicetree node.
|
||||
endif # SPI_MCUX_LPSPI
|
||||
|
|
|
@ -10,10 +10,12 @@
|
|||
#include <drivers/spi.h>
|
||||
#include <drivers/clock_control.h>
|
||||
#include <fsl_lpspi.h>
|
||||
|
||||
#define LOG_LEVEL CONFIG_SPI_LOG_LEVEL
|
||||
#include <logging/log.h>
|
||||
LOG_MODULE_REGISTER(spi_mcux_lpspi);
|
||||
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
|
||||
#include <drivers/dma.h>
|
||||
#endif
|
||||
|
||||
LOG_MODULE_REGISTER(spi_mcux_lpspi, CONFIG_SPI_LOG_LEVEL);
|
||||
|
||||
#include "spi_context.h"
|
||||
|
||||
|
@ -30,11 +32,35 @@ struct spi_mcux_config {
|
|||
uint32_t transfer_delay;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
/* Bits OR-ed into spi_mcux_data->status_flags by the DMA callback */
#define SPI_MCUX_LPSPI_DMA_ERROR_FLAG 0x01
#define SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG 0x02
#define SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG 0x04
/* Set when both the RX and TX halves of the transfer have completed */
#define SPI_MCUX_LPSPI_DMA_DONE_FLAG \
	(SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG | SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG)

/* Per-direction DMA state for one LPSPI instance */
struct stream {
	const struct device *dma_dev;
	uint32_t channel; /* stores the channel for dma */
	struct dma_config dma_cfg;
	struct dma_block_config dma_blk_cfg;
};
#endif

/* Per-instance run-time data for the LPSPI driver */
struct spi_mcux_data {
	const struct device *dev;
	lpspi_master_handle_t handle;
	struct spi_context ctx;
	size_t transfer_len;
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
	/* written from the DMA callback (interrupt context), read by the
	 * thread waiting in wait_dma_rx_tx_done() — hence volatile
	 */
	volatile uint32_t status_flags;
	struct stream dma_rx;
	struct stream dma_tx;
	/* dummy value used for transferring NOP when tx buf is null */
	uint32_t dummy_tx_buffer;
	/* dummy value used to read RX data into when rx buf is null */
	uint32_t dummy_rx_buffer;
#endif
};
|
||||
|
||||
static void spi_mcux_transfer_next_packet(const struct device *dev)
|
||||
|
@ -201,6 +227,231 @@ static int spi_mcux_configure(const struct device *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
|
||||
|
||||
/* This function is executed in the interrupt context */
|
||||
static void spi_mcux_dma_callback(const struct device *dev, void *arg,
|
||||
uint32_t channel, int status)
|
||||
{
|
||||
/* arg directly holds the spi device */
|
||||
struct spi_mcux_data *data = arg;
|
||||
|
||||
if (status != 0) {
|
||||
LOG_ERR("DMA callback error with channel %d.", channel);
|
||||
data->status_flags |= SPI_MCUX_LPSPI_DMA_ERROR_FLAG;
|
||||
} else {
|
||||
/* identify the origin of this callback */
|
||||
if (channel == data->dma_tx.channel) {
|
||||
/* this part of the transfer ends */
|
||||
data->status_flags |= SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG;
|
||||
LOG_DBG("DMA TX Block Complete");
|
||||
} else if (channel == data->dma_rx.channel) {
|
||||
/* this part of the transfer ends */
|
||||
data->status_flags |= SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG;
|
||||
LOG_DBG("DMA RX Block Complete");
|
||||
} else {
|
||||
LOG_ERR("DMA callback channel %d is not valid.",
|
||||
channel);
|
||||
data->status_flags |= SPI_MCUX_LPSPI_DMA_ERROR_FLAG;
|
||||
}
|
||||
}
|
||||
spi_context_complete(&data->ctx, 0);
|
||||
}
|
||||
|
||||
/* Configure the TX DMA channel to feed 'len' bytes into the LPSPI TX FIFO.
 * When 'buf' is NULL, NOP bytes are sent from a fixed dummy word instead.
 * Returns the dma_config() result (0 on success).
 */
static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, size_t len)
{
	const struct spi_mcux_config *cfg = dev->config;
	struct spi_mcux_data *data = dev->data;
	LPSPI_Type *base = cfg->base;
	/* active TX DMA channel state (also used in the DMA callback) */
	struct stream *stream = &data->dma_tx;
	struct dma_block_config *blk_cfg = &stream->dma_blk_cfg;

	/* reset the block for this TX DMA channel */
	*blk_cfg = (struct dma_block_config){0};

	if (buf != NULL) {
		/* tx direction has memory as source and periph as dest. */
		blk_cfg->source_address = (uint32_t)buf;
		stream->dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
	} else {
		/* Treat the transfer as a peripheral to peripheral one, so that DMA
		 * reads from this address each time
		 */
		blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer;
		stream->dma_cfg.channel_direction = PERIPHERAL_TO_PERIPHERAL;
	}

	/* Enable scatter/gather */
	blk_cfg->source_gather_en = 1;
	/* Dest is LPSPI tx fifo */
	blk_cfg->dest_address = LPSPI_GetTxRegisterAddress(base);
	blk_cfg->block_size = len;
	/* Transfer 1 byte each DMA loop */
	stream->dma_cfg.source_burst_length = 1;

	stream->dma_cfg.head_block = blk_cfg;
	/* the DMA callback receives this pointer back as its user data */
	stream->dma_cfg.user_data = data;

	/* hand the configured channel (data->dma_tx.channel) to the dma */
	return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg);
}
|
||||
|
||||
static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf,
|
||||
size_t len)
|
||||
{
|
||||
const struct spi_mcux_config *cfg = dev->config;
|
||||
struct spi_mcux_data *data = dev->data;
|
||||
struct dma_block_config *blk_cfg;
|
||||
LPSPI_Type *base = cfg->base;
|
||||
|
||||
/* retrieve active RX DMA channel (used in callback) */
|
||||
struct stream *stream = &data->dma_rx;
|
||||
|
||||
blk_cfg = &stream->dma_blk_cfg;
|
||||
|
||||
/* prepare the block for this RX DMA channel */
|
||||
memset(blk_cfg, 0, sizeof(struct dma_block_config));
|
||||
|
||||
if (buf == NULL) {
|
||||
/* Treat the transfer as a peripheral to peripheral one, so that DMA
|
||||
* reads from this address each time
|
||||
*/
|
||||
blk_cfg->dest_address = (uint32_t)&data->dummy_rx_buffer;
|
||||
stream->dma_cfg.channel_direction = PERIPHERAL_TO_PERIPHERAL;
|
||||
} else {
|
||||
/* rx direction has periph as source and mem as dest. */
|
||||
blk_cfg->dest_address = (uint32_t)buf;
|
||||
stream->dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
|
||||
}
|
||||
blk_cfg->block_size = len;
|
||||
/* Enable scatter/gather */
|
||||
blk_cfg->dest_scatter_en = 1;
|
||||
/* Source is LPSPI rx fifo */
|
||||
blk_cfg->source_address = LPSPI_GetRxRegisterAddress(base);
|
||||
stream->dma_cfg.source_burst_length = 1;
|
||||
|
||||
stream->dma_cfg.head_block = blk_cfg;
|
||||
stream->dma_cfg.user_data = data;
|
||||
|
||||
/* pass our client origin to the dma: data->dma_rx.channel */
|
||||
return dma_config(data->dma_rx.dma_dev, data->dma_rx.channel,
|
||||
&stream->dma_cfg);
|
||||
}
|
||||
|
||||
static int wait_dma_rx_tx_done(const struct device *dev)
|
||||
{
|
||||
struct spi_mcux_data *data = dev->data;
|
||||
int ret = -1;
|
||||
|
||||
while (1) {
|
||||
ret = spi_context_wait_for_completion(&data->ctx);
|
||||
if (ret) {
|
||||
LOG_DBG("Timed out waiting for SPI context to complete");
|
||||
return ret;
|
||||
}
|
||||
if (data->status_flags & SPI_MCUX_LPSPI_DMA_ERROR_FLAG) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if ((data->status_flags & SPI_MCUX_LPSPI_DMA_DONE_FLAG) ==
|
||||
SPI_MCUX_LPSPI_DMA_DONE_FLAG) {
|
||||
LOG_DBG("DMA block completed");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int transceive_dma(const struct device *dev,
|
||||
const struct spi_config *spi_cfg,
|
||||
const struct spi_buf_set *tx_bufs,
|
||||
const struct spi_buf_set *rx_bufs,
|
||||
bool asynchronous,
|
||||
struct k_poll_signal *sig)
|
||||
{
|
||||
const struct spi_mcux_config *config = dev->config;
|
||||
struct spi_mcux_data *data = dev->data;
|
||||
LPSPI_Type *base = config->base;
|
||||
int ret;
|
||||
size_t dma_size;
|
||||
|
||||
spi_context_lock(&data->ctx, asynchronous, sig, spi_cfg);
|
||||
|
||||
ret = spi_mcux_configure(dev, spi_cfg);
|
||||
if (ret) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
|
||||
|
||||
spi_context_cs_control(&data->ctx, true);
|
||||
|
||||
/* DMA is fast enough watermarks are not required */
|
||||
LPSPI_SetFifoWatermarks(base, 0U, 0U);
|
||||
|
||||
/* Send each spi buf via DMA, updating context as DMA completes */
|
||||
while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
|
||||
/* Clear status flags */
|
||||
data->status_flags = 0U;
|
||||
/* Load dma blocks of equal length */
|
||||
dma_size = MIN(data->ctx.tx_len, data->ctx.rx_len);
|
||||
if (dma_size == 0) {
|
||||
dma_size = MAX(data->ctx.tx_len, data->ctx.rx_len);
|
||||
}
|
||||
ret = spi_mcux_dma_tx_load(dev, data->ctx.tx_buf, dma_size);
|
||||
if (ret != 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = spi_mcux_dma_rx_load(dev, data->ctx.rx_buf, dma_size);
|
||||
if (ret != 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Start DMA */
|
||||
ret = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
|
||||
if (ret != 0) {
|
||||
goto out;
|
||||
}
|
||||
ret = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
|
||||
if (ret != 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Enable DMA Requests */
|
||||
LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
|
||||
|
||||
/* Wait for DMA to finish */
|
||||
ret = wait_dma_rx_tx_done(dev);
|
||||
if (ret != 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
while ((LPSPI_GetStatusFlags(base) & kLPSPI_ModuleBusyFlag)) {
|
||||
/* wait until module is idle */
|
||||
}
|
||||
|
||||
/* Disable DMA */
|
||||
LPSPI_DisableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
|
||||
|
||||
/* Update SPI contexts with amount of data we just sent */
|
||||
spi_context_update_tx(&data->ctx, 1, dma_size);
|
||||
spi_context_update_rx(&data->ctx, 1, dma_size);
|
||||
}
|
||||
|
||||
spi_context_cs_control(&data->ctx, false);
|
||||
|
||||
out:
|
||||
spi_context_release(&data->ctx, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static int transceive(const struct device *dev,
|
||||
const struct spi_config *spi_cfg,
|
||||
const struct spi_buf_set *tx_bufs,
|
||||
|
@ -236,6 +487,9 @@ static int spi_mcux_transceive(const struct device *dev,
|
|||
const struct spi_buf_set *tx_bufs,
|
||||
const struct spi_buf_set *rx_bufs)
|
||||
{
|
||||
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
|
||||
return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL);
|
||||
#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
|
||||
return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL);
|
||||
}
|
||||
|
||||
|
@ -277,6 +531,20 @@ static int spi_mcux_init(const struct device *dev)
|
|||
|
||||
data->dev = dev;
|
||||
|
||||
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
|
||||
if (!device_is_ready(data->dma_tx.dma_dev)) {
|
||||
LOG_ERR("%s device is not ready", data->dma_tx.dma_dev->name);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!device_is_ready(data->dma_rx.dma_dev)) {
|
||||
LOG_ERR("%s device is not ready", data->dma_rx.dma_dev->name);
|
||||
return -ENODEV;
|
||||
}
|
||||
#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
|
||||
|
||||
spi_context_unlock_unconditionally(&data->ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -288,6 +556,38 @@ static const struct spi_driver_api spi_mcux_driver_api = {
|
|||
.release = spi_mcux_release,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
/* Expands to designated initializers for the dma_tx and dma_rx stream
 * members of spi_mcux_data_##n. The DMA controller, channel ("mux" cell)
 * and request slot ("source" cell) are taken from the instance's
 * devicetree "dmas" property by name (tx / rx).
 * NOTE(review): transfers are byte-wide (source/dest_data_size = 1);
 * assumes the eDMA binding's cells are named "mux" and "source" — confirm
 * against the DMA controller binding in use.
 */
#define SPI_DMA_CHANNELS(n)						\
	.dma_tx = {							\
		.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \
		.channel =						\
			DT_INST_DMAS_CELL_BY_NAME(n, tx, mux),		\
		.dma_cfg = {						\
			.channel_direction = MEMORY_TO_PERIPHERAL,	\
			.dma_callback = spi_mcux_dma_callback,		\
			.source_data_size = 1,				\
			.dest_data_size = 1,				\
			.block_count = 1,				\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, tx, source) \
		}							\
	},								\
	.dma_rx = {							\
		.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, rx)), \
		.channel =						\
			DT_INST_DMAS_CELL_BY_NAME(n, rx, mux),		\
		.dma_cfg = {						\
			.channel_direction = PERIPHERAL_TO_MEMORY,	\
			.dma_callback = spi_mcux_dma_callback,		\
			.source_data_size = 1,				\
			.dest_data_size = 1,				\
			.block_count = 1,				\
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, rx, source) \
		}							\
	}
#else
/* No DMA support configured: expand to nothing */
#define SPI_DMA_CHANNELS(n)
#endif
|
||||
|
||||
#define SPI_MCUX_LPSPI_INIT(n) \
|
||||
static void spi_mcux_config_func_##n(const struct device *dev); \
|
||||
\
|
||||
|
@ -312,6 +612,7 @@ static const struct spi_driver_api spi_mcux_driver_api = {
|
|||
SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx), \
|
||||
SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx), \
|
||||
SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \
|
||||
SPI_DMA_CHANNELS(n) \
|
||||
}; \
|
||||
\
|
||||
DEVICE_DT_INST_DEFINE(n, &spi_mcux_init, NULL, \
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue