drivers: spi: gd32: Add support for DMA transfer

Add support for DMA-based transfers to the GD32 SPI driver.

Signed-off-by: TOKITA Hiroshi <tokita.hiroshi@gmail.com>
Author: TOKITA Hiroshi
Date: 2022-12-11 16:16:59 +09:00
Committed by: Marti Bolivar
Commit: e55cbb8ce6
2 changed files with 324 additions and 5 deletions
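
From the application side nothing changes: transfers still go through the standard Zephyr SPI API, and the driver takes the DMA path when CONFIG_SPI_GD32_DMA is enabled and the SPI node provides both an "rx" and a "tx" dmas entry (otherwise it falls back to the interrupt or polling paths). A minimal sketch, not part of this diff; the spi1 node label and the 8-bit word size are assumptions chosen for illustration:

#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/spi.h>

/* Hypothetical SPI device; the spi1 node label is an assumption. */
static const struct spi_dt_spec bus =
	SPI_DT_SPEC_GET(DT_NODELABEL(spi1), SPI_OP_MODE_MASTER | SPI_WORD_SET(8), 0);

int sample_transfer(void)
{
	uint8_t tx_data[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t rx_data[4];
	const struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
	const struct spi_buf rx_buf = { .buf = rx_data, .len = sizeof(rx_data) };
	const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
	const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };

	if (!device_is_ready(bus.bus)) {
		return -ENODEV;
	}

	/* Carried out by DMA, interrupts or polling, depending on the build. */
	return spi_transceive_dt(&bus, &tx, &rx);
}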


@@ -13,7 +13,16 @@ if SPI_GD32
config SPI_GD32_INTERRUPT
bool "GD32 MCU SPI Interrupt Support"
default y if SPI_ASYNC
default y if SPI_GD32_DMA
help
Enable the interrupt-driven mode for SPI instances.
config SPI_GD32_DMA
bool "GD32 MCU SPI DMA Support"
select DMA
select SPI_GD32_INTERRUPT
help
Use DMA for SPI transfers on instances that enable
DMA channels in their device tree node.
endif # SPI_GD32
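
For an instance to use this option, its devicetree node needs dmas entries named "rx" and "tx"; the driver refuses to use DMA (and its init returns -ENODEV) when only one direction is wired up. A hedged compile-time guard an application or board could add, assuming a hypothetical spi1 node label:

#include <zephyr/devicetree.h>
#include <zephyr/toolchain.h>

#define MY_SPI_NODE DT_NODELABEL(spi1) /* assumed node label */

/* Both named channels must exist for the DMA path to be usable. */
BUILD_ASSERT(DT_DMAS_HAS_NAME(MY_SPI_NODE, rx) &&
	     DT_DMAS_HAS_NAME(MY_SPI_NODE, tx),
	     "GD32 SPI DMA needs both 'rx' and 'tx' dmas entries");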


@@ -13,6 +13,10 @@
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/reset.h>
#include <zephyr/drivers/spi.h>
#ifdef CONFIG_SPI_GD32_DMA
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_gd32.h>
#endif
#include <gd32_spi.h>
@@ -37,11 +41,39 @@ LOG_MODULE_REGISTER(spi_gd32);
#else
#error Unknown GD32 soc series
#endif
#ifdef CONFIG_SPI_GD32_DMA
enum spi_gd32_dma_direction {
RX = 0,
TX,
NUM_OF_DIRECTION
};
struct spi_gd32_dma_config {
const struct device *dev;
uint32_t channel;
uint32_t config;
uint32_t slot;
uint32_t fifo_threshold;
};
struct spi_gd32_dma_data {
struct dma_config config;
struct dma_block_config block;
uint32_t count;
};
#endif
struct spi_gd32_config {
uint32_t reg;
uint16_t clkid;
struct reset_dt_spec reset;
const struct pinctrl_dev_config *pcfg;
#ifdef CONFIG_SPI_GD32_DMA
const struct spi_gd32_dma_config dma[NUM_OF_DIRECTION];
#endif
#ifdef CONFIG_SPI_GD32_INTERRUPT
void (*irq_configure)();
#endif
@@ -49,8 +81,34 @@ struct spi_gd32_config {
struct spi_gd32_data {
struct spi_context ctx;
#ifdef CONFIG_SPI_GD32_DMA
struct spi_gd32_dma_data dma[NUM_OF_DIRECTION];
#endif
};
#ifdef CONFIG_SPI_GD32_DMA
static uint32_t dummy_tx;
static uint32_t dummy_rx;
static bool spi_gd32_dma_enabled(const struct device *dev)
{
const struct spi_gd32_config *cfg = dev->config;
if (cfg->dma[TX].dev && cfg->dma[RX].dev) {
return true;
}
return false;
}
static size_t spi_gd32_dma_enabled_num(const struct device *dev)
{
return spi_gd32_dma_enabled(dev) ? 2 : 0;
}
#endif
static int spi_gd32_get_err(const struct spi_gd32_config *cfg)
{
uint32_t stat = SPI_STAT(cfg->reg);
@@ -195,6 +253,114 @@ static int spi_gd32_frame_exchange(const struct device *dev)
return spi_gd32_get_err(cfg);
}
#ifdef CONFIG_SPI_GD32_DMA
static void spi_gd32_dma_callback(const struct device *dma_dev, void *arg,
uint32_t channel, int status);
static uint32_t spi_gd32_dma_setup(const struct device *dev, const uint32_t dir)
{
const struct spi_gd32_config *cfg = dev->config;
struct spi_gd32_data *data = dev->data;
struct dma_config *dma_cfg = &data->dma[dir].config;
struct dma_block_config *block_cfg = &data->dma[dir].block;
const struct spi_gd32_dma_config *dma = &cfg->dma[dir];
int ret;
memset(dma_cfg, 0, sizeof(struct dma_config));
memset(block_cfg, 0, sizeof(struct dma_block_config));
dma_cfg->source_burst_length = 1;
dma_cfg->dest_burst_length = 1;
dma_cfg->user_data = (void *)dev;
dma_cfg->dma_callback = spi_gd32_dma_callback;
dma_cfg->block_count = 1U;
dma_cfg->head_block = block_cfg;
dma_cfg->dma_slot = cfg->dma[dir].slot;
dma_cfg->channel_priority =
GD32_DMA_CONFIG_PRIORITY(cfg->dma[dir].config);
dma_cfg->channel_direction =
dir == TX ? MEMORY_TO_PERIPHERAL : PERIPHERAL_TO_MEMORY;
if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
dma_cfg->source_data_size = 1;
dma_cfg->dest_data_size = 1;
} else {
dma_cfg->source_data_size = 2;
dma_cfg->dest_data_size = 2;
}
block_cfg->block_size = spi_context_max_continuous_chunk(&data->ctx);
if (dir == TX) {
block_cfg->dest_address = (uint32_t)&SPI_DATA(cfg->reg);
block_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
if (spi_context_tx_buf_on(&data->ctx)) {
block_cfg->source_address = (uint32_t)data->ctx.tx_buf;
block_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
} else {
block_cfg->source_address = (uint32_t)&dummy_tx;
block_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
}
}
if (dir == RX) {
block_cfg->source_address = (uint32_t)&SPI_DATA(cfg->reg);
block_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
if (spi_context_rx_buf_on(&data->ctx)) {
block_cfg->dest_address = (uint32_t)data->ctx.rx_buf;
block_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
} else {
block_cfg->dest_address = (uint32_t)&dummy_rx;
block_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
}
}
ret = dma_config(dma->dev, dma->channel, dma_cfg);
if (ret < 0) {
LOG_ERR("dma_config %p failed %d\n", dma->dev, ret);
return ret;
}
ret = dma_start(dma->dev, dma->channel);
if (ret < 0) {
LOG_ERR("dma_start %p failed %d\n", dma->dev, ret);
return ret;
}
return 0;
}
static int spi_gd32_start_dma_transceive(const struct device *dev)
{
const struct spi_gd32_config *cfg = dev->config;
struct spi_gd32_data *data = dev->data;
const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx);
struct dma_status stat;
int ret = 0;
for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
dma_get_status(cfg->dma[i].dev, cfg->dma[i].channel, &stat);
if ((chunk_len != data->dma[i].count) && !stat.busy) {
ret = spi_gd32_dma_setup(dev, i);
if (ret < 0) {
goto on_error;
}
}
}
SPI_CTL1(cfg->reg) |= (SPI_CTL1_DMATEN | SPI_CTL1_DMAREN);
on_error:
if (ret < 0) {
for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
dma_stop(cfg->dma[i].dev, cfg->dma[i].channel);
}
}
return ret;
}
#endif
static int spi_gd32_transceive_impl(const struct device *dev,
const struct spi_config *config,
const struct spi_buf_set *tx_bufs,
@@ -220,8 +386,24 @@ static int spi_gd32_transceive_impl(const struct device *dev,
spi_context_cs_control(&data->ctx, true);
#ifdef CONFIG_SPI_GD32_INTERRUPT
#ifdef CONFIG_SPI_GD32_DMA
if (spi_gd32_dma_enabled(dev)) {
for (size_t i = 0; i < ARRAY_SIZE(data->dma); i++) {
data->dma[i].count = 0;
}
ret = spi_gd32_start_dma_transceive(dev);
if (ret < 0) {
goto dma_error;
}
} else
#endif
{
SPI_STAT(cfg->reg) &=
~(SPI_STAT_RBNE | SPI_STAT_TBE | SPI_GD32_ERR_MASK);
SPI_CTL1(cfg->reg) |=
(SPI_CTL1_RBNEIE | SPI_CTL1_TBEIE | SPI_CTL1_ERRIE);
}
ret = spi_context_wait_for_completion(&data->ctx);
#else
do {
@@ -241,9 +423,13 @@ static int spi_gd32_transceive_impl(const struct device *dev,
/* Wait until last frame transfer complete. */
}
#ifdef CONFIG_SPI_GD32_DMA
dma_error:
#endif
spi_context_cs_control(&data->ctx, false);
SPI_CTL0(cfg->reg) &=
~(SPI_CTL0_SPIEN | SPI_CTL1_DMATEN | SPI_CTL1_DMAREN);
error:
spi_context_release(&data->ctx, ret);
@@ -278,7 +464,14 @@ static void spi_gd32_complete(const struct device *dev, int status)
struct spi_gd32_data *data = dev->data;
const struct spi_gd32_config *cfg = dev->config;
SPI_CTL1(cfg->reg) &=
~(SPI_CTL1_RBNEIE | SPI_CTL1_TBEIE | SPI_CTL1_ERRIE);
#ifdef CONFIG_SPI_GD32_DMA
for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
dma_stop(cfg->dma[i].dev, cfg->dma[i].channel);
}
#endif
spi_context_complete(&data->ctx, dev, status);
}
@@ -302,7 +495,76 @@ static void spi_gd32_isr(struct device *dev)
SPI_STAT(cfg->reg) = 0;
}
#endif /* SPI_GD32_INTERRUPT */
#ifdef CONFIG_SPI_GD32_DMA
static bool spi_gd32_chunk_transfer_finished(const struct device *dev)
{
struct spi_gd32_data *data = dev->data;
struct spi_gd32_dma_data *dma = data->dma;
const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx);
return (MIN(dma[TX].count, dma[RX].count) >= chunk_len);
}
static void spi_gd32_dma_callback(const struct device *dma_dev, void *arg,
uint32_t channel, int status)
{
const struct device *dev = (const struct device *)arg;
const struct spi_gd32_config *cfg = dev->config;
struct spi_gd32_data *data = dev->data;
const size_t chunk_len = spi_context_max_continuous_chunk(&data->ctx);
int err = 0;
if (status < 0) {
LOG_ERR("dma:%p ch:%d callback gets error: %d", dma_dev, channel,
status);
spi_gd32_complete(dev, status);
return;
}
for (size_t i = 0; i < ARRAY_SIZE(cfg->dma); i++) {
if (dma_dev == cfg->dma[i].dev &&
channel == cfg->dma[i].channel) {
data->dma[i].count += chunk_len;
}
}
/* Check whether the chunk transfer has finished.
* The chunk is complete once both dma[TX].count and dma[RX].count
* have reached chunk_len. A chunk_len of zero here means the whole
* transfer has already completed.
*/
if (spi_gd32_chunk_transfer_finished(dev)) {
if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) {
spi_context_update_tx(&data->ctx, 1, chunk_len);
spi_context_update_rx(&data->ctx, 1, chunk_len);
} else {
spi_context_update_tx(&data->ctx, 2, chunk_len);
spi_context_update_rx(&data->ctx, 2, chunk_len);
}
if (spi_gd32_transfer_ongoing(data)) {
/* Next chunk is available, reset the count and
* continue processing
*/
data->dma[TX].count = 0;
data->dma[RX].count = 0;
} else {
/* All data is processed, complete the process */
spi_context_complete(&data->ctx, dev, 0);
return;
}
}
err = spi_gd32_start_dma_transceive(dev);
if (err) {
spi_gd32_complete(dev, err);
}
}
#endif /* DMA */
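
To illustrate the counters above (a sketch only, not part of the diff): spi_context_max_continuous_chunk() yields the smaller of the remaining TX and RX buffer lengths when both directions have data, so a transfer with one 8-byte TX buffer and two 4-byte RX buffers is carried out as two 4-frame DMA chunks, with dma[TX].count and dma[RX].count reset in between. The spi1 node label is again an assumption:

#include <zephyr/devicetree.h>
#include <zephyr/drivers/spi.h>

/* Hypothetical bus spec; node label spi1 is an assumption. */
static const struct spi_dt_spec bus =
	SPI_DT_SPEC_GET(DT_NODELABEL(spi1), SPI_OP_MODE_MASTER | SPI_WORD_SET(8), 0);

int chunked_transfer(void)
{
	static uint8_t out[8], in_a[4], in_b[4];
	const struct spi_buf tx_buf[] = {
		{ .buf = out, .len = sizeof(out) },
	};
	const struct spi_buf rx_buf[] = {
		{ .buf = in_a, .len = sizeof(in_a) }, /* first 4-frame DMA chunk */
		{ .buf = in_b, .len = sizeof(in_b) }, /* second 4-frame DMA chunk */
	};
	const struct spi_buf_set tx = { .buffers = tx_buf, .count = 1 };
	const struct spi_buf_set rx = { .buffers = rx_buf, .count = 2 };

	return spi_transceive_dt(&bus, &tx, &rx);
}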
static int spi_gd32_release(const struct device *dev,
const struct spi_config *config)
@@ -327,6 +589,9 @@ int spi_gd32_init(const struct device *dev)
struct spi_gd32_data *data = dev->data;
const struct spi_gd32_config *cfg = dev->config;
int ret;
#ifdef CONFIG_SPI_GD32_DMA
uint32_t ch_filter;
#endif
(void)clock_control_on(GD32_CLOCK_CONTROLLER,
(clock_control_subsys_t *)&cfg->clkid);
@@ -339,6 +604,28 @@ int spi_gd32_init(const struct device *dev)
return ret;
}
#ifdef CONFIG_SPI_GD32_DMA
if ((cfg->dma[RX].dev && !cfg->dma[TX].dev) ||
(cfg->dma[TX].dev && !cfg->dma[RX].dev)) {
LOG_ERR("DMA must be enabled for both TX and RX channels");
return -ENODEV;
}
for (size_t i = 0; i < spi_gd32_dma_enabled_num(dev); i++) {
if (!device_is_ready(cfg->dma[i].dev)) {
LOG_ERR("DMA %s not ready", cfg->dma[i].dev->name);
return -ENODEV;
}
ch_filter = BIT(cfg->dma[i].channel);
ret = dma_request_channel(cfg->dma[i].dev, &ch_filter);
if (ret < 0) {
LOG_ERR("dma_request_channel failed %d", ret);
return ret;
}
}
#endif
ret = spi_context_cs_configure_all(&data->ctx);
if (ret < 0) {
return ret;
@@ -353,6 +640,28 @@ int spi_gd32_init(const struct device *dev)
return 0;
}
#define DMA_INITIALIZER(idx, dir) \
{ \
.dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(idx, dir)), \
.channel = DT_INST_DMAS_CELL_BY_NAME(idx, dir, channel), \
.slot = COND_CODE_1( \
DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1), \
(DT_INST_DMAS_CELL_BY_NAME(idx, dir, slot)), (0)), \
.config = DT_INST_DMAS_CELL_BY_NAME(idx, dir, config), \
.fifo_threshold = COND_CODE_1( \
DT_HAS_COMPAT_STATUS_OKAY(gd_gd32_dma_v1), \
(DT_INST_DMAS_CELL_BY_NAME(idx, dir, fifo_threshold)), \
(0)), \
}
#define DMAS_DECL(idx) \
{ \
COND_CODE_1(DT_INST_DMAS_HAS_NAME(idx, rx), \
(DMA_INITIALIZER(idx, rx)), ({0})), \
COND_CODE_1(DT_INST_DMAS_HAS_NAME(idx, tx), \
(DMA_INITIALIZER(idx, tx)), ({0})), \
}
#define GD32_IRQ_CONFIGURE(idx) \
static void spi_gd32_irq_configure_##idx(void) \
{ \
@@ -374,6 +683,7 @@ int spi_gd32_init(const struct device *dev)
.clkid = DT_INST_CLOCKS_CELL(idx, id), \
.reset = RESET_DT_SPEC_INST_GET(idx), \
.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx), \
IF_ENABLED(CONFIG_SPI_GD32_DMA, (.dma = DMAS_DECL(idx),)) \
IF_ENABLED(CONFIG_SPI_GD32_INTERRUPT, \
(.irq_configure = spi_gd32_irq_configure_##idx)) }; \
DEVICE_DT_INST_DEFINE(idx, &spi_gd32_init, NULL, \