dma/dw_common: Update dma_dw_common from sof

Ports the SOF DesignWare DMA code to Zephyr.

Effectively replaces much of what was the DesignWare driver, as this
driver enables scatter-gather, which the older driver did not.

* Enables cyclic transfer description lists when the cyclic config
  param is given (see the usage sketch below).
* Enables linear link position usage with cAVS GPDMA.
* Passes suspend/resume and scatter/gather tests.
* Provides status updates of the transfer through dma_get_status().
* Enables reloading a cyclic transfer with dma_reload().
* Enables DMA handshakes using the dma_slot config param.
* cAVS specifics remain in the dma_cavs_gpdma driver.
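Usage sketch (illustrative only, not part of the change; the device
handle, channel, FIFO address, dma_slot value, and buffer sizes are
hypothetical placeholders):

#include <device.h>
#include <drivers/dma.h>

/* Completion callback; status is 0 for a successfully completed
 * block or transfer.
 */
static void tx_done(const struct device *dev, void *user_data,
		    uint32_t channel, int status)
{
}

/* Configure and start a cyclic two-block memory-to-peripheral
 * transfer through the generic DMA API this driver implements.
 */
static int start_cyclic_tx(const struct device *dma, uint32_t channel,
			   uint8_t *buf, size_t half, uint32_t fifo_addr)
{
	struct dma_block_config blk[2] = { 0 };
	struct dma_config cfg = { 0 };
	int ret;

	blk[0].source_address = (uint32_t)(uintptr_t)buf;
	blk[0].dest_address = fifo_addr;
	blk[0].block_size = half;
	blk[0].next_block = &blk[1];

	blk[1].source_address = (uint32_t)(uintptr_t)(buf + half);
	blk[1].dest_address = fifo_addr;
	blk[1].block_size = half;

	cfg.channel_direction = MEMORY_TO_PERIPHERAL;
	cfg.source_data_size = 4;	/* must equal dest_data_size */
	cfg.dest_data_size = 4;
	cfg.source_burst_length = 8;	/* must equal dest_burst_length */
	cfg.dest_burst_length = 8;
	cfg.block_count = 2;	/* must fit CONFIG_DMA_DW_LLI_POOL_SIZE */
	cfg.cyclic = 1;		/* tail LLI links back to the head */
	cfg.dma_slot = 2;	/* hardware handshake interface, hypothetical */
	cfg.head_block = &blk[0];
	cfg.dma_callback = tx_done;

	ret = dma_config(dma, channel, &cfg);
	if (ret == 0) {
		ret = dma_start(dma, channel);
	}
	return ret;
}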

Co-authored-by: Adrian Bonislawski <adrian.bonislawski@intel.com>
Co-authored-by: Tom Burdick <thomas.burdick@intel.com>
Signed-off-by: Tom Burdick <thomas.burdick@intel.com>
commit 2f792c2de5
Tom Burdick, 2022-03-11 15:53:57 -06:00; committed by Anas Nashif
8 changed files with 1018 additions and 204 deletions


@@ -7,3 +7,15 @@ config DMA_CAVS_GPDMA
	bool "CAVS GPDMA DMA driver"
	help
	  Intel cAVS GPDMA DMA driver.

if DMA_CAVS_GPDMA

config DMA_CAVS_GPDMA_HAS_LLP
	bool "cAVS GPDMA Linear Link Position Feature"
	help
	  cAVS GPDMA may optionally have a linear link position
	  feature.

source "drivers/dma/Kconfig.dw_common"

endif # DMA_CAVS_GPDMA


@@ -7,3 +7,9 @@ config DMA_DW
	bool "DesignWare DMA driver"
	help
	  DesignWare DMA driver.

if DMA_DW

source "drivers/dma/Kconfig.dw_common"

endif # DMA_DW


@@ -0,0 +1,39 @@
# DesignWare DMA common configuration options

# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

config DMA_DW_FIFO_PARTITION
	bool "FIFO Partitioning"
	help
	  FIFO partition feature

config DMA_DW_LLI_POOL_SIZE
	int "number of LLI structs in an allocation pool"
	default 2
	help
	  The number of LLI structs in a statically allocated pool. Each channel has its own
	  LLI struct pool. If during dma_config() a log notes there are not enough LLI structs
	  then this should be increased to match the need.

config DMA_DW_HW_LLI
	bool "hardware supports scatter gather"
	default y
	help
	  The hardware is by default expected to support hardware LLI (scatter gather).
	  When not enabled the driver will still perform scatter gather, but using software
	  to run through the scatter gather list.

config DMA_DW_SUSPEND_DRAIN
	bool "channels should be suspended and drained on stop"
	help
	  Rather than immediately stopping a DMA channel, the channel is suspended
	  with the DRAIN bit flag set to allow the hardware FIFO to be drained
	  before stopping the channel.

config DMA_DW_HOST_MASK
	int "memory space mask"
	default 0
	help
	  Some instances of the DesignWare DMAC require a mask applied to source/destination
	  addresses to signify the memory space the address is in.
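
For illustration, a minimal C sketch of how two of these options
surface in the driver (constants hypothetical, not defaults): a
scatter-gather list must fit the per-channel LLI pool, and memory-side
addresses are OR-ed with the host mask before being programmed.

#include <errno.h>
#include <stdint.h>

/* Hypothetical mirror of the driver's checks: block_count is bounded
 * by CONFIG_DMA_DW_LLI_POOL_SIZE, and memory addresses are tagged with
 * CONFIG_DMA_DW_HOST_MASK (0x20000000 here is an invented example).
 */
static int sketch_validate_and_mask(uint32_t block_count, uint32_t *addr)
{
	if (block_count > 2 /* CONFIG_DMA_DW_LLI_POOL_SIZE */) {
		return -EINVAL; /* list larger than the descriptor pool */
	}
	*addr |= 0x20000000 /* CONFIG_DMA_DW_HOST_MASK */;
	return 0;
}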


@@ -4,11 +4,19 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include "drivers/dma.h"
#define DT_DRV_COMPAT intel_cavs_gpdma
#define GPDMA_CTL_OFFSET 0x004
#define GPDMA_CTL_OFFSET 0x0004
#define GPDMA_CTL_FDCGB BIT(0)
#define GPDMA_CHLLPC_OFFSET(channel) (0x0010 + (channel) * 0x10)
#define GPDMA_CHLLPC_EN BIT(7)
#define GPDMA_CHLLPC_DHRS(x) SET_BITS(6, 0, x)
#define GPDMA_CHLLPL(channel) (0x0018 + (channel) * 0x10)
#define GPDMA_CHLLPU(channel) (0x001c + (channel) * 0x10)
#include "dma_dw_common.h"
#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
@@ -36,6 +44,137 @@ static void cavs_gpdma_clock_enable(const struct device *dev)
sys_write32(GPDMA_CTL_FDCGB, reg);
}
static void cavs_gpdma_llp_config(const struct device *dev, uint32_t channel,
uint32_t addr)
{
#ifdef CONFIG_DMA_CAVS_GPDMA_HAS_LLP
const struct cavs_gpdma_cfg *const dev_cfg = dev->config;
dw_write(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel), GPDMA_CHLLPC_DHRS(addr));
#endif
}
static inline void cavs_gpdma_llp_enable(const struct device *dev,
uint32_t channel)
{
#ifdef CONFIG_DMA_CAVS_GPDMA_HAS_LLP
const struct cavs_gpdma_cfg *const dev_cfg = dev->config;
uint32_t val;
val = dw_read(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel));
if (!(val & GPDMA_CHLLPC_EN)) {
dw_write(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel), val | GPDMA_CHLLPC_EN);
}
#endif
}
static inline void cavs_gpdma_llp_disable(const struct device *dev,
uint32_t channel)
{
#ifdef CONFIG_DMA_CAVS_GPDMA_HAS_LLP
const struct cavs_gpdma_cfg *const dev_cfg = dev->config;
uint32_t val;
val = dw_read(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel));
dw_write(dev_cfg->shim, GPDMA_CHLLPC_OFFSET(channel), val & ~GPDMA_CHLLPC_EN);
#endif
}
static inline void cavs_gpdma_llp_read(const struct device *dev,
uint32_t channel,
uint32_t *llp_l,
uint32_t *llp_u)
{
#ifdef CONFIG_DMA_CAVS_GPDMA_HAS_LLP
const struct cavs_gpdma_cfg *const dev_cfg = dev->config;
*llp_l = dw_read(dev_cfg->shim, GPDMA_CHLLPL(channel));
*llp_u = dw_read(dev_cfg->shim, GPDMA_CHLLPU(channel));
#endif
}
static int cavs_gpdma_config(const struct device *dev, uint32_t channel,
struct dma_config *cfg)
{
int res = dw_dma_config(dev, channel, cfg);
if (res != 0) {
return res;
}
struct dma_block_config *block_cfg = cfg->head_block;
/* Assume all scatter/gathers are for the same device? */
switch (cfg->channel_direction) {
case MEMORY_TO_PERIPHERAL:
LOG_DBG("%s: dma %s configuring llp for destination %x",
__func__, dev->name, block_cfg->dest_address);
cavs_gpdma_llp_config(dev, channel, block_cfg->dest_address);
break;
case PERIPHERAL_TO_MEMORY:
LOG_DBG("%s: dma %s configuring llp for source %x",
__func__, dev->name, block_cfg->source_address);
cavs_gpdma_llp_config(dev, channel, block_cfg->source_address);
break;
default:
break;
}
return res;
}
static int cavs_gpdma_start(const struct device *dev, uint32_t channel)
{
int ret;
cavs_gpdma_llp_enable(dev, channel);
ret = dw_dma_start(dev, channel);
if (ret != 0) {
cavs_gpdma_llp_disable(dev, channel);
}
return ret;
}
static int cavs_gpdma_stop(const struct device *dev, uint32_t channel)
{
int ret;
ret = dw_dma_stop(dev, channel);
if (ret == 0) {
cavs_gpdma_llp_disable(dev, channel);
}
return ret;
}
int cavs_gpdma_copy(const struct device *dev, uint32_t channel,
uint32_t src, uint32_t dst, size_t size)
{
struct dw_dma_dev_data *const dev_data = dev->data;
struct dw_dma_chan_data *chan_data;
int i = 0;
if (channel >= DW_MAX_CHAN) {
return -EINVAL;
}
chan_data = &dev_data->chan[channel];
/* default action is to clear the DONE bit for all LLI making
* sure the cache is coherent between DSP and DMAC.
*/
for (i = 0; i < chan_data->lli_count; i++) {
chan_data->lli[i].ctrl_hi &= ~DW_CTLH_DONE(1);
}
chan_data->ptr_data.current_ptr += size;
if (chan_data->ptr_data.current_ptr >= chan_data->ptr_data.end_ptr) {
chan_data->ptr_data.current_ptr = chan_data->ptr_data.start_ptr +
(chan_data->ptr_data.current_ptr - chan_data->ptr_data.end_ptr);
}
return 0;
}
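/* Usage sketch (hedged; channel and byte count hypothetical): since
 * this reload hook only advances the channel's read pointer, a
 * consumer of the cyclic buffer reports drained bytes through
 * dma_reload(); src and dst are unused by this implementation and
 * may be passed as 0.
 */
static int cyclic_consumed(const struct device *dma, uint32_t channel,
			   size_t bytes)
{
	return dma_reload(dma, channel, 0, 0, bytes);
}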
int cavs_gpdma_init(const struct device *dev)
{
@@ -45,22 +184,31 @@ int cavs_gpdma_init(const struct device *dev)
cavs_gpdma_clock_enable(dev);
/* Disable all channels and Channel interrupts */
dw_dma_setup(dev);
int ret = dw_dma_setup(dev);
if (ret != 0) {
LOG_ERR("%s: dma %s failed to initialize", __func__, dev->name);
goto out;
}
/* Configure interrupts */
dev_cfg->dw_cfg.irq_config();
LOG_INF("Device %s initialized", dev->name);
LOG_INF("%s: dma %s initialized", __func__, dev->name);
out:
return ret;
}
static const struct dma_driver_api cavs_gpdma_driver_api = {
.config = dw_dma_config,
.reload = dw_dma_reload,
.start = dw_dma_transfer_start,
.stop = dw_dma_transfer_stop,
.config = cavs_gpdma_config,
.reload = cavs_gpdma_copy,
.start = cavs_gpdma_start,
.stop = cavs_gpdma_stop,
.suspend = dw_dma_suspend,
.resume = dw_dma_resume,
.get_status = dw_dma_get_status,
};
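A hedged usage sketch against this API table (the device label "DMA_0"
and channel 2 are hypothetical): the LLP-backed transfer position is
visible through dma_get_status().

#include <errno.h>
#include <device.h>
#include <drivers/dma.h>

/* Poll how many bytes a peripheral-to-memory channel has produced. */
static int gpdma_pending_bytes(void)
{
	const struct device *dma = device_get_binding("DMA_0");
	struct dma_status st;
	int ret;

	if (dma == NULL) {
		return -ENODEV;
	}

	ret = dma_get_status(dma, 2, &st);
	if (ret != 0) {
		return ret; /* -ENODATA flags an xrun under hardware LLI */
	}

	return st.pending_length; /* bytes available to read */
}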

View file

@@ -17,9 +17,8 @@
#include <soc.h>
#include "dma_dw_common.h"
#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(dma_dw);
LOG_MODULE_REGISTER(dma_dw, CONFIG_DMA_LOG_LEVEL);
/* Device constant configuration parameters */
struct dw_dma_cfg {
@@ -32,21 +31,26 @@ static int dw_dma_init(const struct device *dev)
const struct dw_dma_cfg *const dev_cfg = dev->config;
/* Disable all channels and Channel interrupts */
dw_dma_setup(dev);
int ret = dw_dma_setup(dev);
if (ret != 0) {
LOG_ERR("failed to initialize DW DMA %s", dev->name);
goto out;
}
/* Configure interrupts */
dev_cfg->irq_config();
LOG_INF("Device %s initialized", dev->name);
return 0;
out:
return ret;
}
static const struct dma_driver_api dw_dma_driver_api = {
.config = dw_dma_config,
.reload = dw_dma_reload,
.start = dw_dma_transfer_start,
.stop = dw_dma_transfer_stop,
.start = dw_dma_start,
.stop = dw_dma_stop,
};
#define DW_DMAC_INIT(inst) \

View file

@@ -19,39 +19,17 @@
#include <logging/log.h>
LOG_MODULE_REGISTER(dma_dw_common);
#define BYTE (1)
#define WORD (2)
#define DWORD (4)
/* CFG_LO */
#define DW_CFG_CLASS(x) (x << 29)
/* CFG_HI */
#define DW_CFGH_SRC_PER(x) ((x & 0xf) | ((x & 0x30) << 24))
#define DW_CFGH_DST_PER(x) (((x & 0xf) << 4) | ((x & 0x30) << 26))
/* default initial setup register values */
#define DW_CFG_LOW_DEF 0x0
static __aligned(32) struct dw_lli lli_pool[DW_MAX_CHAN][CONFIG_DMA_DW_LLI_POOL_SIZE];
/* number of tries to wait for reset */
#define DW_DMA_CFG_TRIES 10000
#define INT_MASK_ALL 0xFF00
static ALWAYS_INLINE void dw_write(uint32_t dma_base, uint32_t reg, uint32_t value)
{
*((volatile uint32_t *)(dma_base + reg)) = value;
}
static ALWAYS_INLINE uint32_t dw_read(uint32_t dma_base, uint32_t reg)
{
return *((volatile uint32_t *)(dma_base + reg));
}
void dw_dma_isr(const struct device *dev)
{
const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
struct dw_dma_dev_data *const dev_data = dev->data;
struct dma_chan_data *chan_data;
struct dw_dma_chan_data *chan_data;
uint32_t status_tfr = 0U;
uint32_t status_block = 0U;
@@ -86,6 +64,7 @@ void dw_dma_isr(const struct device *dev)
chan_data = &dev_data->chan[channel];
if (chan_data->dma_blkcallback) {
LOG_DBG("Dispatching block complete callback");
/* Ensure the linked list (chan_data->lli) is
* freed in the user callback function once
@@ -101,7 +80,15 @@
channel = find_lsb_set(status_tfr) - 1;
status_tfr &= ~(1 << channel);
chan_data = &dev_data->chan[channel];
/* Transfer complete, channel now idle, a reload
* could safely occur in the callback via dma_config
* and dma_start
*/
chan_data->state = DW_DMA_IDLE;
if (chan_data->dma_tfrcallback) {
LOG_DBG("Dispatching transfer callback");
chan_data->dma_tfrcallback(dev,
chan_data->tfruser_data,
channel, 0);
@@ -109,99 +96,295 @@
}
}
/* mask address for dma to identify memory space. */
static void dw_dma_mask_address(struct dma_block_config *block_cfg,
struct dw_lli *lli_desc, uint32_t direction)
{
lli_desc->sar = block_cfg->source_address;
lli_desc->dar = block_cfg->dest_address;
switch (direction) {
case MEMORY_TO_PERIPHERAL:
lli_desc->sar |= CONFIG_DMA_DW_HOST_MASK;
break;
case PERIPHERAL_TO_MEMORY:
lli_desc->dar |= CONFIG_DMA_DW_HOST_MASK;
break;
case MEMORY_TO_MEMORY:
lli_desc->sar |= CONFIG_DMA_DW_HOST_MASK;
lli_desc->dar |= CONFIG_DMA_DW_HOST_MASK;
break;
default:
break;
}
}
int dw_dma_config(const struct device *dev, uint32_t channel,
struct dma_config *cfg)
{
struct dw_dma_dev_data *const dev_data = dev->data;
const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
struct dma_chan_data *chan_data;
struct dma_block_config *cfg_blocks;
uint32_t m_size;
uint32_t tr_width;
uint32_t ctrl_lo;
struct dw_dma_dev_data *const dev_data = dev->data;
struct dma_block_config *block_cfg;
struct dw_lli *lli_desc;
struct dw_lli *lli_desc_head;
struct dw_lli *lli_desc_tail;
uint32_t msize = 3;/* default msize, 8 bytes */
int ret = 0;
if (channel >= DW_MAX_CHAN) {
return -EINVAL;
LOG_ERR("%s: invalid dma channel %d", __func__, channel);
ret = -EINVAL;
goto out;
}
struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];
if (chan_data->state != DW_DMA_IDLE && chan_data->state != DW_DMA_PREPARED) {
LOG_ERR("%s: dma %s channel %d must be inactive to "
"reconfigure, currently %d", __func__, dev->name,
channel, chan_data->state);
ret = -EBUSY;
goto out;
}
LOG_DBG("%s: dma %s channel %d config",
__func__, dev->name, channel);
__ASSERT_NO_MSG(cfg->source_data_size == cfg->dest_data_size);
__ASSERT_NO_MSG(cfg->source_burst_length == cfg->dest_burst_length);
__ASSERT_NO_MSG(cfg->block_count > 0);
__ASSERT_NO_MSG(cfg->head_block != NULL);
if (cfg->source_data_size != BYTE && cfg->source_data_size != WORD &&
cfg->source_data_size != DWORD) {
LOG_ERR("Invalid 'source_data_size' value");
return -EINVAL;
if (cfg->source_data_size != 1 && cfg->source_data_size != 2 &&
cfg->source_data_size != 4 && cfg->source_data_size != 8 &&
cfg->source_data_size != 16) {
LOG_ERR("%s: dma %s channel %d 'invalid source_data_size' value %d",
__func__, dev->name, channel, cfg->source_data_size);
ret = -EINVAL;
goto out;
}
cfg_blocks = cfg->head_block;
if ((cfg_blocks->next_block) || (cfg->block_count > 1)) {
/*
* return error since the application may have allocated
* memory for the buffers that may be lost when the DMA
* driver discards the buffers provided in the linked blocks
*/
LOG_ERR("block_count > 1 not supported");
return -EINVAL;
if (cfg->block_count > CONFIG_DMA_DW_LLI_POOL_SIZE) {
LOG_ERR("%s: dma %s channel %d scatter gather list larger than"
" descriptors in pool, consider increasing CONFIG_DMA_DW_LLI_POOL_SIZE",
__func__, dev->name, channel);
ret = -EINVAL;
goto out;
}
chan_data = &dev_data->chan[channel];
/* burst_size = (2 ^ msize) */
msize = find_msb_set(cfg->source_burst_length) - 1;
LOG_DBG("%s: dma %s channel %d m_size=%d", __func__, dev->name, channel, msize);
__ASSERT_NO_MSG(msize < 5);
/* default channel config */
chan_data->direction = cfg->channel_direction;
/* data_size = (2 ^ tr_width) */
tr_width = find_msb_set(cfg->source_data_size) - 1;
LOG_DBG("Ch%u: tr_width=%d", channel, tr_width);
/* burst_size = (2 ^ msize) */
m_size = find_msb_set(cfg->source_burst_length) - 1;
LOG_DBG("Ch%u: m_size=%d", channel, m_size);
/* setup a list of lli structs. we don't need to allocate */
chan_data->lli = &lli_pool[channel][0]; /* TODO allocate here */
chan_data->lli_count = cfg->block_count;
ctrl_lo = DW_CTLL_SRC_WIDTH(tr_width) | DW_CTLL_DST_WIDTH(tr_width);
ctrl_lo |= DW_CTLL_SRC_MSIZE(m_size) | DW_CTLL_DST_MSIZE(m_size);
/* zero the scatter gather list */
memset(chan_data->lli, 0, sizeof(struct dw_lli) * chan_data->lli_count);
lli_desc = chan_data->lli;
lli_desc_head = &chan_data->lli[0];
lli_desc_tail = &chan_data->lli[chan_data->lli_count - 1];
/* enable interrupt */
ctrl_lo |= DW_CTLL_INT_EN;
chan_data->ptr_data.buffer_bytes = 0;
/* copy the scatter gather list from dma_cfg to dw_lli */
block_cfg = cfg->head_block;
for (int i = 0; i < cfg->block_count; i++) {
__ASSERT_NO_MSG(block_cfg != NULL);
LOG_DBG("copying block_cfg %p to lli_desc %p", block_cfg, lli_desc);
/* write CTL_LO for each lli */
switch (cfg->source_data_size) {
case 1:
/* byte at a time transfer */
lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(0);
break;
case 2:
/* non peripheral copies are optimal using words */
switch (cfg->channel_direction) {
case MEMORY_TO_MEMORY:
ctrl_lo |= DW_CTLL_FC_M2M;
ctrl_lo |= DW_CTLL_SRC_INC | DW_CTLL_DST_INC;
/* config the src tr width for 32 bit words */
lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(2);
break;
case MEMORY_TO_PERIPHERAL:
ctrl_lo |= DW_CTLL_FC_M2P;
ctrl_lo |= DW_CTLL_SRC_INC | DW_CTLL_DST_FIX;
/* Assign a hardware handshaking interface (0-15) to the
* destination of channel
*/
dw_write(dev_cfg->base, DW_CFG_HIGH(channel),
DW_CFGH_DST_PER(cfg->dma_slot));
break;
case PERIPHERAL_TO_MEMORY:
ctrl_lo |= DW_CTLL_FC_P2M;
ctrl_lo |= DW_CTLL_SRC_FIX | DW_CTLL_DST_INC;
/* Assign a hardware handshaking interface (0-15) to the
* source of channel
*/
dw_write(dev_cfg->base, DW_CFG_HIGH(channel),
DW_CFGH_SRC_PER(cfg->dma_slot));
break;
default:
LOG_ERR("channel_direction %d is not supported",
cfg->channel_direction);
return -EINVAL;
/* config the src width for 16 bit samples */
lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(1);
break;
}
break;
case 4:
/* config the src tr width for 24, 32 bit samples */
lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(2);
break;
default:
LOG_ERR("%s: dma %s channel %d invalid src width %d",
__func__, dev->name, channel, cfg->source_data_size);
ret = -EINVAL;
goto out;
}
/* channel needs started from scratch, so write SARn, DARn */
dw_write(dev_cfg->base, DW_SAR(channel), cfg_blocks->source_address);
dw_write(dev_cfg->base, DW_DAR(channel), cfg_blocks->dest_address);
LOG_DBG("source data size: lli_desc %p, ctrl_lo %x",
lli_desc, lli_desc->ctrl_lo);
switch (cfg->dest_data_size) {
case 1:
/* byte at a time transfer */
lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(0);
break;
case 2:
/* non peripheral copies are optimal using words */
switch (cfg->channel_direction) {
case MEMORY_TO_MEMORY:
/* config the dest tr width for 32 bit words */
lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(2);
break;
default:
/* config the dest width for 16 bit samples */
lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(1);
break;
}
break;
case 4:
/* config the dest tr width for 24, 32 bit samples */
lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(2);
break;
default:
LOG_ERR("%s: dma %s channel %d invalid dest width %d",
__func__, dev->name, channel, cfg->dest_data_size);
ret = -EINVAL;
goto out;
}
LOG_DBG("dest data size: lli_desc %p, ctrl_lo %x", lli_desc, lli_desc->ctrl_lo);
lli_desc->ctrl_lo |= DW_CTLL_SRC_MSIZE(msize) |
DW_CTLL_DST_MSIZE(msize) |
DW_CTLL_INT_EN; /* enable interrupt */
LOG_DBG("msize, int_en: lli_desc %p, ctrl_lo %x", lli_desc, lli_desc->ctrl_lo);
/* config the SINC and DINC fields of CTL_LO,
* SRC/DST_PER fields of CFG_HI
*/
switch (cfg->channel_direction) {
case MEMORY_TO_MEMORY:
lli_desc->ctrl_lo |= DW_CTLL_FC_M2M | DW_CTLL_SRC_INC |
DW_CTLL_DST_INC;
#if CONFIG_DMA_DW_HW_LLI
LOG_DBG("setting LLP_D_EN, LLP_S_EN in lli_desc->ctrl_lo %x",
lli_desc->ctrl_lo);
lli_desc->ctrl_lo |=
DW_CTLL_LLP_S_EN | DW_CTLL_LLP_D_EN;
LOG_DBG("lli_desc->ctrl_lo %x", lli_desc->ctrl_lo);
#endif
break;
case MEMORY_TO_PERIPHERAL:
lli_desc->ctrl_lo |= DW_CTLL_FC_M2P | DW_CTLL_SRC_INC |
DW_CTLL_DST_FIX;
#if CONFIG_DMA_DW_HW_LLI
lli_desc->ctrl_lo |= DW_CTLL_LLP_S_EN;
chan_data->cfg_lo |= DW_CFGL_RELOAD_DST;
#endif
/* Assign a hardware handshake interface (0-15) to the
* destination of the channel
*/
chan_data->cfg_hi |= DW_CFGH_DST_PER(cfg->dma_slot);
break;
case PERIPHERAL_TO_MEMORY:
lli_desc->ctrl_lo |= DW_CTLL_FC_P2M | DW_CTLL_SRC_FIX |
DW_CTLL_DST_INC;
#if CONFIG_DMA_DW_HW_LLI
if (!block_cfg->dest_scatter_en) {
lli_desc->ctrl_lo |= DW_CTLL_LLP_D_EN;
} else {
/* Use contiguous auto-reload. Line 3 in
* table 3-3
*/
lli_desc->ctrl_lo |= DW_CTLL_D_SCAT_EN;
}
chan_data->cfg_lo |= DW_CFGL_RELOAD_SRC;
#endif
/* Assign a hardware handshake interface (0-15) to the
* source of the channel
*/
chan_data->cfg_hi |= DW_CFGH_SRC_PER(cfg->dma_slot);
break;
default:
LOG_ERR("%s: dma %s channel %d invalid direction %d",
__func__, dev->name, channel, cfg->channel_direction);
ret = -EINVAL;
goto out;
}
LOG_DBG("direction: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x",
lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi, chan_data->cfg_lo);
dw_dma_mask_address(block_cfg, lli_desc, cfg->channel_direction);
LOG_DBG("mask address: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x",
lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi, chan_data->cfg_lo);
if (block_cfg->block_size > DW_CTLH_BLOCK_TS_MASK) {
LOG_ERR("%s: dma %s channel %d block size too big %d",
__func__, dev->name, channel, block_cfg->block_size);
ret = -EINVAL;
goto out;
}
/* Set class and transfer size */
lli_desc->ctrl_hi |= DW_CTLH_CLASS(dev_data->channel_data->chan[channel].class) |
(block_cfg->block_size & DW_CTLH_BLOCK_TS_MASK);
LOG_DBG("block_size, class: lli_desc %p, ctrl_lo %x, cfg_hi %x, cfg_lo %x",
lli_desc, lli_desc->ctrl_lo, chan_data->cfg_hi, chan_data->cfg_lo);
chan_data->ptr_data.buffer_bytes += block_cfg->block_size;
/* set next descriptor in list */
lli_desc->llp = (uint32_t)(lli_desc + 1);
LOG_DBG("lli_desc llp %x", lli_desc->llp);
/* next descriptor */
lli_desc++;
block_cfg = block_cfg->next_block;
}
#if CONFIG_DMA_DW_HW_LLI
chan_data->cfg_lo |= DW_CFGL_CTL_HI_UPD_EN;
#endif
/* end of list or cyclic buffer */
if (cfg->cyclic) {
lli_desc_tail->llp = (uint32_t)lli_desc_head;
} else {
lli_desc_tail->llp = 0;
#if CONFIG_DMA_DW_HW_LLI
LOG_DBG("Clearing LLP_S_EN, LLP_D_EN from tail LLI %x", lli_desc_tail->ctrl_lo);
lli_desc_tail->ctrl_lo &= ~(DW_CTLL_LLP_S_EN | DW_CTLL_LLP_D_EN);
LOG_DBG("ctrl_lo %x", lli_desc_tail->ctrl_lo);
#endif
}
/* set the initial lli, mark the channel as prepared (ready to be started) */
chan_data->state = DW_DMA_PREPARED;
chan_data->lli_current = chan_data->lli;
/* initialize pointers */
chan_data->ptr_data.start_ptr = DW_DMA_LLI_ADDRESS(chan_data->lli,
chan_data->direction);
chan_data->ptr_data.end_ptr = chan_data->ptr_data.start_ptr +
chan_data->ptr_data.buffer_bytes;
chan_data->ptr_data.current_ptr = chan_data->ptr_data.start_ptr;
chan_data->ptr_data.hw_ptr = chan_data->ptr_data.start_ptr;
/* Configure a callback appropriately depending on whether the
* interrupt is requested at the end of transaction completion or
@@ -210,14 +393,14 @@ int dw_dma_config(const struct device *dev, uint32_t channel,
if (cfg->complete_callback_en) {
chan_data->dma_blkcallback = cfg->dma_callback;
chan_data->blkuser_data = cfg->user_data;
dw_write(dev_cfg->base, DW_MASK_BLOCK, INT_UNMASK(channel));
dw_write(dev_cfg->base, DW_MASK_BLOCK, DW_CHAN_UNMASK(channel));
} else {
chan_data->dma_tfrcallback = cfg->dma_callback;
chan_data->tfruser_data = cfg->user_data;
dw_write(dev_cfg->base, DW_MASK_TFR, INT_UNMASK(channel));
dw_write(dev_cfg->base, DW_MASK_TFR, DW_CHAN_UNMASK(channel));
}
dw_write(dev_cfg->base, DW_MASK_ERR, INT_UNMASK(channel));
dw_write(dev_cfg->base, DW_MASK_ERR, DW_CHAN_UNMASK(channel));
/* write interrupt clear registers for the channel
* ClearTfr, ClearBlock, ClearSrcTran, ClearDstTran, ClearErr
@@ -228,106 +411,382 @@ int dw_dma_config(const struct device *dev, uint32_t channel,
dw_write(dev_cfg->base, DW_CLEAR_DST_TRAN, 0x1 << channel);
dw_write(dev_cfg->base, DW_CLEAR_ERR, 0x1 << channel);
/* single transfer, must set zero */
dw_write(dev_cfg->base, DW_LLP(channel), 0);
/* program CTLn */
dw_write(dev_cfg->base, DW_CTRL_LOW(channel), ctrl_lo);
dw_write(dev_cfg->base, DW_CTRL_HIGH(channel),
DW_CFG_CLASS(dev_data->channel_data->chan[channel].class) |
cfg_blocks->block_size);
/* write channel config */
dw_write(dev_cfg->base, DW_CFG_LOW(channel), DW_CFG_LOW_DEF);
return 0;
out:
return ret;
}
int dw_dma_reload(const struct device *dev, uint32_t channel,
uint32_t src, uint32_t dst, size_t size)
{
struct dw_dma_dev_data *const dev_data = dev->data;
const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
if (channel >= DW_MAX_CHAN) {
return -EINVAL;
}
dw_write(dev_cfg->base, DW_SAR(channel), src);
dw_write(dev_cfg->base, DW_DAR(channel), dst);
dw_write(dev_cfg->base, DW_CTRL_HIGH(channel),
DW_CFG_CLASS(dev_data->channel_data->chan[channel].class) |
size);
return 0;
}
int dw_dma_transfer_start(const struct device *dev, uint32_t channel)
int dw_dma_start(const struct device *dev, uint32_t channel)
{
const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
struct dw_dma_dev_data *dev_data = dev->data;
int ret = 0;
/* validate channel */
if (channel >= DW_MAX_CHAN) {
return -EINVAL;
ret = -EINVAL;
goto out;
}
struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];
/* validate channel state */
if (chan_data->state != DW_DMA_PREPARED) {
LOG_ERR("%s: dma %s channel %d not ready ena 0x%x status 0x%x",
__func__, dev->name, channel,
dw_read(dev_cfg->base, DW_DMA_CHAN_EN),
chan_data->state);
ret = -EBUSY;
goto out;
}
/* is valid stream */
if (!chan_data->lli) {
LOG_ERR("%s: dma %s channel %d invalid stream",
__func__, dev->name, channel);
ret = -EINVAL;
goto out;
}
struct dw_lli *lli = chan_data->lli_current;
#ifdef CONFIG_DMA_DW_HW_LLI
/* LLP mode - write LLP pointer */
uint32_t masked_ctrl_lo = lli->ctrl_lo & (DW_CTLL_LLP_D_EN | DW_CTLL_LLP_S_EN);
uint32_t llp = 0;
if (masked_ctrl_lo) {
llp = (uint32_t)lli;
LOG_DBG("Setting llp");
}
dw_write(dev_cfg->base, DW_LLP(channel), llp);
LOG_DBG("ctrl_lo %x, masked ctrl_lo %x, LLP %x",
lli->ctrl_lo, masked_ctrl_lo, dw_read(dev_cfg->base, DW_LLP(channel)));
#endif /* CONFIG_DMA_DW_HW_LLI */
/* channel needs to start from scratch, so write SAR and DAR */
dw_write(dev_cfg->base, DW_SAR(channel), lli->sar);
dw_write(dev_cfg->base, DW_DAR(channel), lli->dar);
/* program CTL_LO and CTL_HI */
dw_write(dev_cfg->base, DW_CTRL_LOW(channel), lli->ctrl_lo);
dw_write(dev_cfg->base, DW_CTRL_HIGH(channel), lli->ctrl_hi);
/* program CFG_LO and CFG_HI */
dw_write(dev_cfg->base, DW_CFG_LOW(channel), chan_data->cfg_lo);
dw_write(dev_cfg->base, DW_CFG_HIGH(channel), chan_data->cfg_hi);
LOG_DBG("start: sar %x, dar %x, ctrl_lo %x, ctrl_hi %x, cfg_lo %x, cfg_hi %x, llp %x",
lli->sar, lli->dar, lli->ctrl_lo, lli->ctrl_hi, chan_data->cfg_lo,
chan_data->cfg_hi, dw_read(dev_cfg->base, DW_LLP(channel))
);
#ifdef CONFIG_DMA_DW_HW_LLI
if (lli->ctrl_lo & DW_CTLL_D_SCAT_EN) {
LOG_DBG("configuring DW_DSR");
uint32_t words_per_tfr = (lli->ctrl_hi & DW_CTLH_BLOCK_TS_MASK) >>
((lli->ctrl_lo & DW_CTLL_DST_WIDTH_MASK) >> DW_CTLL_DST_WIDTH_SHIFT);
dw_write(dev_cfg->base, DW_DSR(channel),
DW_DSR_DSC(words_per_tfr) | DW_DSR_DSI(words_per_tfr));
}
#endif /* CONFIG_DMA_DW_HW_LLI */
chan_data->state = DW_DMA_ACTIVE;
/* enable the channel */
dw_write(dev_cfg->base, DW_DMA_CHAN_EN, CHAN_ENABLE(channel));
dw_write(dev_cfg->base, DW_DMA_CHAN_EN, DW_CHAN_UNMASK(channel));
return 0;
out:
return ret;
}
int dw_dma_transfer_stop(const struct device *dev, uint32_t channel)
int dw_dma_stop(const struct device *dev, uint32_t channel)
{
const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
struct dw_dma_dev_data *dev_data = dev->data;
int ret = 0;
if (channel >= DW_MAX_CHAN) {
return -EINVAL;
ret = -EINVAL;
goto out;
}
/* disable the channel */
dw_write(dev_cfg->base, DW_DMA_CHAN_EN, CHAN_DISABLE(channel));
struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];
#ifdef CONFIG_DMA_DW_HW_LLI
struct dw_lli *lli = chan_data->lli;
int i;
#endif
LOG_DBG("%s: dma %s channel %d stop",
__func__, dev->name, channel);
/* Validate the channel state */
if (chan_data->state != DW_DMA_ACTIVE &&
chan_data->state != DW_DMA_SUSPENDED) {
ret = -EINVAL;
goto out;
}
#ifdef CONFIG_DMA_DW_SUSPEND_DRAIN
/* channel cannot be disabled right away, so first we need to
* suspend it and drain the FIFO
*/
dw_write(dev_cfg->base, DW_CFG_LOW(channel),
chan_data->cfg_lo | DW_CFGL_SUSPEND | DW_CFGL_DRAIN);
/* now we wait for FIFO to be empty */
bool fifo_empty = WAIT_FOR(dw_read(dev_cfg->base, DW_CFG_LOW(channel)) & DW_CFGL_FIFO_EMPTY,
DW_DMA_TIMEOUT, k_busy_wait(DW_DMA_TIMEOUT / 10));
if (!fifo_empty) {
LOG_ERR("%s: dma %s channel %d drain timeout", __func__, dev->name, channel);
}
#endif
dw_write(dev_cfg->base, DW_DMA_CHAN_EN, DW_CHAN_MASK(channel));
#if CONFIG_DMA_DW_HW_LLI
for (i = 0; i < chan_data->lli_count; i++) {
lli->ctrl_hi &= ~DW_CTLH_DONE(1);
lli++;
}
#endif
chan_data->state = DW_DMA_IDLE;
out:
return ret;
}
void dw_dma_setup(const struct device *dev)
int dw_dma_resume(const struct device *dev, uint32_t channel)
{
const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
struct dw_dma_dev_data *dev_data = dev->data;
int ret = 0;
/* Validate channel index */
if (channel >= DW_MAX_CHAN) {
ret = -EINVAL;
goto out;
}
struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];
/* Validate channel state */
if (chan_data->state != DW_DMA_SUSPENDED) {
ret = -EINVAL;
goto out;
}
LOG_DBG("%s: dma %s channel %d resume",
__func__, dev->name, channel);
dw_write(dev_cfg->base, DW_CFG_LOW(channel), chan_data->cfg_lo);
/* Channel is now active */
chan_data->state = DW_DMA_ACTIVE;
out:
return ret;
}
int dw_dma_suspend(const struct device *dev, uint32_t channel)
{
const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
struct dw_dma_dev_data *dev_data = dev->data;
int ret = 0;
/* Validate channel index */
if (channel >= DW_MAX_CHAN) {
ret = -EINVAL;
goto out;
}
struct dw_dma_chan_data *chan_data = &dev_data->chan[channel];
/* Validate channel state */
if (chan_data->state != DW_DMA_ACTIVE) {
ret = -EINVAL;
goto out;
}
LOG_DBG("%s: dma %s channel %d suspend",
__func__, dev->name, channel);
dw_write(dev_cfg->base, DW_CFG_LOW(channel),
chan_data->cfg_lo | DW_CFGL_SUSPEND);
/* Channel is now suspended */
chan_data->state = DW_DMA_SUSPENDED;
out:
return ret;
}
int dw_dma_setup(const struct device *dev)
{
const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
struct dw_dma_dev_data *const dev_data = dev->data;
struct dw_drv_plat_data *dp = dev_data->channel_data;
int i;
int i, ret = 0;
/* we cannot config DMAC if DMAC has been already enabled by host */
if (dw_read(dev_cfg->base, DW_DMA_CFG) != 0) {
dw_write(dev_cfg->base, DW_DMA_CFG, 0x0);
}
/* now check that it's 0 */
for (i = DW_DMA_CFG_TRIES; i > 0; i--) {
if (dw_read(dev_cfg->base, DW_DMA_CFG) == 0) {
goto found;
if (!dw_read(dev_cfg->base, DW_DMA_CFG)) {
break;
}
}
LOG_ERR("DW_DMA_CFG is non-zero\n");
return;
found:
if (!i) {
LOG_ERR("%s: dma %s setup failed",
__func__, dev->name);
ret = -EIO;
goto out;
}
LOG_DBG("%s: dma %s", __func__, dev->name);
for (i = 0; i < DW_MAX_CHAN; i++) {
dw_read(dev_cfg->base, DW_DMA_CHAN_EN);
}
/* enable the DMA controller */
dw_write(dev_cfg->base, DW_DMA_CFG, 1);
/* mask all interrupts for all 8 channels */
dw_write(dev_cfg->base, DW_MASK_TFR, INT_MASK_ALL);
dw_write(dev_cfg->base, DW_MASK_BLOCK, INT_MASK_ALL);
dw_write(dev_cfg->base, DW_MASK_SRC_TRAN, INT_MASK_ALL);
dw_write(dev_cfg->base, DW_MASK_DST_TRAN, INT_MASK_ALL);
dw_write(dev_cfg->base, DW_MASK_ERR, INT_MASK_ALL);
dw_write(dev_cfg->base, DW_MASK_TFR, DW_CHAN_MASK_ALL);
dw_write(dev_cfg->base, DW_MASK_BLOCK, DW_CHAN_MASK_ALL);
dw_write(dev_cfg->base, DW_MASK_SRC_TRAN, DW_CHAN_MASK_ALL);
dw_write(dev_cfg->base, DW_MASK_DST_TRAN, DW_CHAN_MASK_ALL);
dw_write(dev_cfg->base, DW_MASK_ERR, DW_CHAN_MASK_ALL);
/* set channel priorities */
for (i = 0; i < DW_MAX_CHAN; i++) {
dw_write(dev_cfg->base, DW_CTRL_HIGH(i),
DW_CFG_CLASS(dp->chan[i].class));
}
#ifdef CONFIG_DMA_DW_FIFO_PARTITION
/* allocate FIFO partitions for each channel */
dw_write(dev_cfg->base, DW_FIFO_PART1_HI,
DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE));
dw_write(dev_cfg->base, DW_FIFO_PART1_LO,
DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE));
dw_write(dev_cfg->base, DW_FIFO_PART0_HI,
DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE));
dw_write(dev_cfg->base, DW_FIFO_PART0_LO,
DW_FIFO_CHx(DW_FIFO_SIZE) | DW_FIFO_CHy(DW_FIFO_SIZE) |
DW_FIFO_UPD);
#endif /* CONFIG_DMA_DW_FIFO_PARTITION */
/* TODO add baytrail/cherrytrail workaround */
/* Setup context and atomics for channels */
dev_data->dma_ctx.magic = DMA_MAGIC;
dev_data->dma_ctx.dma_channels = DW_MAX_CHAN;
dev_data->dma_ctx.atomic = dev_data->channels_atomic;
out:
return ret;
}
static int dw_dma_avail_data_size(uint32_t base,
struct dw_dma_chan_data *chan_data,
uint32_t channel)
{
int32_t read_ptr = chan_data->ptr_data.current_ptr;
int32_t write_ptr = dw_read(base, DW_DAR(channel));
int32_t delta = write_ptr - chan_data->ptr_data.hw_ptr;
int size;
chan_data->ptr_data.hw_ptr = write_ptr;
size = write_ptr - read_ptr;
if (size < 0) {
size += chan_data->ptr_data.buffer_bytes;
} else if (!size) {
/*
* Buffer is either full or empty. If the DMA pointer has
* changed, then the DMA has filled the buffer.
*/
if (delta) {
size = chan_data->ptr_data.buffer_bytes;
} else {
LOG_INF("%s size is 0!", __func__);
}
}
LOG_DBG("DAR %x reader 0x%x free 0x%x avail 0x%x", write_ptr, read_ptr,
chan_data->ptr_data.buffer_bytes - size, size);
return size;
}
static int dw_dma_free_data_size(uint32_t base,
struct dw_dma_chan_data *chan_data,
uint32_t channel)
{
int32_t read_ptr = dw_read(base, DW_SAR(channel));
int32_t write_ptr = chan_data->ptr_data.current_ptr;
int32_t delta = read_ptr - chan_data->ptr_data.hw_ptr;
int size;
chan_data->ptr_data.hw_ptr = read_ptr;
size = read_ptr - write_ptr;
if (size < 0) {
size += chan_data->ptr_data.buffer_bytes;
} else if (!size) {
/*
* Buffer is either full or empty. If the DMA pointer has
* changed, then the DMA has emptied the buffer.
*/
if (delta) {
size = chan_data->ptr_data.buffer_bytes;
} else {
LOG_INF("%s size is 0!", __func__);
}
}
LOG_DBG("SAR %x writer 0x%x free 0x%x avail 0x%x", read_ptr, write_ptr, size,
chan_data->ptr_data.buffer_bytes - size);
return size;
}
int dw_dma_get_status(const struct device *dev, uint32_t channel,
struct dma_status *stat)
{
struct dw_dma_dev_data *const dev_data = dev->data;
const struct dw_dma_dev_cfg *const dev_cfg = dev->config;
struct dw_dma_chan_data *chan_data;
if (channel >= DW_MAX_CHAN) {
return -EINVAL;
}
chan_data = &dev_data->chan[channel];
if (chan_data->direction == MEMORY_TO_MEMORY ||
chan_data->direction == PERIPHERAL_TO_MEMORY) {
stat->pending_length = dw_dma_avail_data_size(dev_cfg->base, chan_data, channel);
stat->free = chan_data->ptr_data.buffer_bytes - stat->pending_length;
} else {
stat->free = dw_dma_free_data_size(dev_cfg->base, chan_data, channel);
stat->pending_length = chan_data->ptr_data.buffer_bytes - stat->free;
}
#if CONFIG_DMA_DW_HW_LLI
if (!(dw_read(dev_cfg->base, DW_DMA_CHAN_EN) & DW_CHAN(channel))) {
LOG_ERR("xrun detected");
return -ENODATA;
}
#endif
return 0;
}
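The free/avail computations above are easiest to see in isolation; a
self-contained sketch of the same wraparound arithmetic (all pointer
values hypothetical):

/* Mirror of dw_dma_avail_data_size(): bytes readable in a circular
 * buffer given the hardware write pointer and software read pointer.
 */
static int sketch_avail(int32_t write_ptr, int32_t read_ptr,
			int32_t prev_hw_ptr, int32_t buffer_bytes)
{
	int32_t delta = write_ptr - prev_hw_ptr;
	int size = write_ptr - read_ptr;

	if (size < 0) {
		size += buffer_bytes; /* writer wrapped past the buffer end */
	} else if (size == 0 && delta != 0) {
		size = buffer_bytes; /* pointers equal but writer moved: full */
	}

	return size;
}

/* e.g. buffer_bytes = 0x1000, read_ptr = 0x100, write_ptr = 0x80:
 * size = -0x80 + 0x1000 = 0xF80 bytes available.
 */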


@@ -7,56 +7,39 @@
#ifndef ZEPHYR_DRIVERS_DMA_DMA_DW_COMMON_H_
#define ZEPHYR_DRIVERS_DMA_DMA_DW_COMMON_H_
#include <sys/atomic.h>
#include <drivers/dma.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DW_CTLL_INT_EN (1 << 0)
#define DW_CTLL_DST_WIDTH(x) (x << 1)
#define DW_CTLL_SRC_WIDTH(x) (x << 4)
#define DW_CTLL_DST_INC (0 << 8)
#define DW_CTLL_DST_FIX (1 << 8)
#define DW_CTLL_SRC_INC (0 << 10)
#define DW_CTLL_SRC_FIX (1 << 10)
#define DW_CTLL_DST_MSIZE(x) (x << 11)
#define DW_CTLL_SRC_MSIZE(x) (x << 14)
#define DW_CTLL_FC(x) (x << 20)
#define DW_CTLL_FC_M2M (0 << 20)
#define DW_CTLL_FC_M2P (1 << 20)
#define DW_CTLL_FC_P2M (2 << 20)
#define DW_CTLL_FC_P2P (3 << 20)
#define DW_CTLL_LLP_D_EN (1 << 27)
#define DW_CTLL_LLP_S_EN (1 << 28)
/* data for each DMA channel */
struct dma_chan_data {
uint32_t direction;
void *blkuser_data;
dma_callback_t dma_blkcallback;
void *tfruser_data;
dma_callback_t dma_tfrcallback;
};
#define MASK(b_hi, b_lo) \
(((1ULL << ((b_hi) - (b_lo) + 1ULL)) - 1ULL) << (b_lo))
#define SET_BIT(b, x) (((x) & 1) << (b))
#define SET_BITS(b_hi, b_lo, x) \
(((x) & ((1ULL << ((b_hi) - (b_lo) + 1ULL)) - 1ULL)) << (b_lo))
#define DW_MAX_CHAN 8
#define DW_CH_SIZE 0x58
#define BYT_CHAN_OFFSET(chan) (DW_CH_SIZE * chan)
#define DW_CHAN_OFFSET(chan) (DW_CH_SIZE * chan)
#define DW_SAR(chan) \
(0x0000 + BYT_CHAN_OFFSET(chan))
(0x0000 + DW_CHAN_OFFSET(chan))
#define DW_DAR(chan) \
(0x0008 + BYT_CHAN_OFFSET(chan))
(0x0008 + DW_CHAN_OFFSET(chan))
#define DW_LLP(chan) \
(0x0010 + BYT_CHAN_OFFSET(chan))
(0x0010 + DW_CHAN_OFFSET(chan))
#define DW_CTRL_LOW(chan) \
(0x0018 + BYT_CHAN_OFFSET(chan))
(0x0018 + DW_CHAN_OFFSET(chan))
#define DW_CTRL_HIGH(chan) \
(0x001C + BYT_CHAN_OFFSET(chan))
(0x001C + DW_CHAN_OFFSET(chan))
#define DW_CFG_LOW(chan) \
(0x0040 + BYT_CHAN_OFFSET(chan))
(0x0040 + DW_CHAN_OFFSET(chan))
#define DW_CFG_HIGH(chan) \
(0x0044 + BYT_CHAN_OFFSET(chan))
(0x0044 + DW_CHAN_OFFSET(chan))
#define DW_DSR(chan) \
(0x0050 + DW_CHAN_OFFSET(chan))
/* registers */
#define DW_RAW_TFR 0x02C0
@@ -82,14 +65,96 @@ struct dma_chan_data {
#define DW_INTR_STATUS 0x0360
#define DW_DMA_CFG 0x0398
#define DW_DMA_CHAN_EN 0x03A0
#define DW_FIFO_PART0_LO 0x400
#define DW_FIFO_PART0_HI 0x404
#define DW_FIFO_PART1_LO 0x408
#define DW_FIFO_PART1_HI 0x40C
/* channel bits */
#define INT_MASK(chan) (0x100 << chan)
#define INT_UNMASK(chan) (0x101 << chan)
#define INT_MASK_ALL 0xFF00
#define INT_UNMASK_ALL 0xFFFF
#define CHAN_ENABLE(chan) (0x101 << chan)
#define CHAN_DISABLE(chan) (0x100 << chan)
#define DW_CHAN_WRITE_EN_ALL MASK(2 * DW_MAX_CHAN - 1, DW_MAX_CHAN)
#define DW_CHAN_WRITE_EN(chan) BIT((chan) + DW_MAX_CHAN)
#define DW_CHAN_ALL MASK(DW_MAX_CHAN - 1, 0)
#define DW_CHAN(chan) BIT(chan)
#define DW_CHAN_MASK_ALL DW_CHAN_WRITE_EN_ALL
#define DW_CHAN_MASK(chan) DW_CHAN_WRITE_EN(chan)
#define DW_CHAN_UNMASK_ALL (DW_CHAN_WRITE_EN_ALL | DW_CHAN_ALL)
#define DW_CHAN_UNMASK(chan) (DW_CHAN_WRITE_EN(chan) | DW_CHAN(chan))
/* CFG_LO */
#define DW_CFGL_RELOAD_DST BIT(31)
#define DW_CFGL_RELOAD_SRC BIT(30)
#define DW_CFGL_DRAIN BIT(10)
#define DW_CFGL_FIFO_EMPTY BIT(9)
#define DW_CFGL_SUSPEND BIT(8)
#define DW_CFGL_CTL_HI_UPD_EN BIT(5)
/* CFG_HI */
#define DW_CFGH_DST_PER_EXT(x) SET_BITS(31, 30, x)
#define DW_CFGH_SRC_PER_EXT(x) SET_BITS(29, 28, x)
#define DW_CFGH_DST_PER(x) SET_BITS(7, 4, x)
#define DW_CFGH_SRC_PER(x) SET_BITS(3, 0, x)
#define DW_CFGH_DST(x) \
(DW_CFGH_DST_PER_EXT((x) >> 4) | DW_CFGH_DST_PER(x))
#define DW_CFGH_SRC(x) \
(DW_CFGH_SRC_PER_EXT((x) >> 4) | DW_CFGH_SRC_PER(x))
/* CTL_LO */
#define DW_CTLL_RELOAD_DST BIT(31)
#define DW_CTLL_RELOAD_SRC BIT(30)
#define DW_CTLL_LLP_S_EN BIT(28)
#define DW_CTLL_LLP_D_EN BIT(27)
#define DW_CTLL_SMS(x) SET_BIT(25, x)
#define DW_CTLL_DMS(x) SET_BIT(23, x)
#define DW_CTLL_FC_P2P SET_BITS(21, 20, 3)
#define DW_CTLL_FC_P2M SET_BITS(21, 20, 2)
#define DW_CTLL_FC_M2P SET_BITS(21, 20, 1)
#define DW_CTLL_FC_M2M SET_BITS(21, 20, 0)
#define DW_CTLL_D_SCAT_EN BIT(18)
#define DW_CTLL_S_GATH_EN BIT(17)
#define DW_CTLL_SRC_MSIZE(x) SET_BITS(16, 14, x)
#define DW_CTLL_DST_MSIZE(x) SET_BITS(13, 11, x)
#define DW_CTLL_SRC_FIX SET_BITS(10, 9, 2)
#define DW_CTLL_SRC_DEC SET_BITS(10, 9, 1)
#define DW_CTLL_SRC_INC SET_BITS(10, 9, 0)
#define DW_CTLL_DST_FIX SET_BITS(8, 7, 2)
#define DW_CTLL_DST_DEC SET_BITS(8, 7, 1)
#define DW_CTLL_DST_INC SET_BITS(8, 7, 0)
#define DW_CTLL_SRC_WIDTH(x) SET_BITS(6, 4, x)
#define DW_CTLL_DST_WIDTH(x) SET_BITS(3, 1, x)
#define DW_CTLL_INT_EN BIT(0)
#define DW_CTLL_SRC_WIDTH_MASK MASK(6, 4)
#define DW_CTLL_SRC_WIDTH_SHIFT 4
#define DW_CTLL_DST_WIDTH_MASK MASK(3, 1)
#define DW_CTLL_DST_WIDTH_SHIFT 1
/* CTL_HI */
#define DW_CTLH_CLASS(x) SET_BITS(31, 29, x)
#define DW_CTLH_WEIGHT(x) SET_BITS(28, 18, x)
#define DW_CTLH_DONE(x) SET_BIT(17, x)
#define DW_CTLH_BLOCK_TS_MASK MASK(16, 0)
/* DSR */
#define DW_DSR_DSC(x) SET_BITS(31, 20, x)
#define DW_DSR_DSI(x) SET_BITS(19, 0, x)
/* FIFO_PART */
#define DW_FIFO_SIZE 0x80
#define DW_FIFO_UPD BIT(26)
#define DW_FIFO_CHx(x) SET_BITS(25, 13, x)
#define DW_FIFO_CHy(x) SET_BITS(12, 0, x)
/* number of tries to wait for reset */
#define DW_DMA_CFG_TRIES 10000
/* channel drain timeout in microseconds */
#define DW_DMA_TIMEOUT 1333
/* min number of elems for config with irq disabled */
#define DW_DMA_CFG_NO_IRQ_MIN_ELEMS 3
/* linked list item address */
#define DW_DMA_LLI_ADDRESS(lli, dir) \
(((dir) == MEMORY_TO_PERIPHERAL) ? ((lli)->sar) : ((lli)->dar))
/* TODO: add FIFO sizes */
struct dw_chan_arbit_data {
@@ -101,10 +166,74 @@ struct dw_drv_plat_data {
struct dw_chan_arbit_data chan[DW_MAX_CHAN];
};
/* DMA descriptor used by HW */
struct dw_lli {
uint32_t sar;
uint32_t dar;
uint32_t llp;
uint32_t ctrl_lo;
uint32_t ctrl_hi;
uint32_t sstat;
uint32_t dstat;
/* align to 32 bytes to not cross cache line
* in case of more than two items
*/
uint32_t reserved;
} __packed;
/* pointer data for DW DMA buffer */
struct dw_dma_ptr_data {
uint32_t current_ptr;
uint32_t start_ptr;
uint32_t end_ptr;
uint32_t hw_ptr;
uint32_t buffer_bytes;
};
/* State tracking for each channel */
enum dw_dma_state {
DW_DMA_IDLE,
DW_DMA_PREPARED,
DW_DMA_SUSPENDED,
DW_DMA_ACTIVE,
};
/* data for each DMA channel */
struct dw_dma_chan_data {
uint32_t direction;
enum dw_dma_state state;
struct dw_lli *lli; /* allocated array of LLI's */
uint32_t lli_count; /* number of lli's in the allocation */
struct dw_lli *lli_current; /* current LLI being used */
uint32_t cfg_lo;
uint32_t cfg_hi;
struct dw_dma_ptr_data ptr_data; /* pointer data */
dma_callback_t dma_blkcallback;
void *blkuser_data;
dma_callback_t dma_tfrcallback;
void *tfruser_data;
};
/* use array to get burst_elems for specific slot number setting.
* the relation between msize and burst_elems should be
* 2 ^ msize = burst_elems
*/
static const uint32_t burst_elems[] = {1, 2, 4, 8};
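/* Worked instance of the relation above (value hypothetical): for a
 * requested burst of 8 elements, find_msb_set(8) == 4 (1-based MSB
 * index), so msize = 4 - 1 = 3 and burst_elems[3] == 8 == 2^3; this is
 * how dw_dma_config() derives msize from source_burst_length.
 */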
#if CONFIG_DMA_DW_HW_LLI
#define DW_DMA_BUFFER_PERIOD_COUNT 4
#else
#define DW_DMA_BUFFER_PERIOD_COUNT 2
#endif
/* Device run time data */
struct dw_dma_dev_data {
struct dma_context dma_ctx;
struct dw_drv_plat_data *channel_data;
struct dma_chan_data chan[DW_MAX_CHAN];
struct dw_dma_chan_data chan[DW_MAX_CHAN];
ATOMIC_DEFINE(channels_atomic, DW_MAX_CHAN);
};
/* Device constant configuration parameters */
@@ -113,7 +242,17 @@ struct dw_dma_dev_cfg {
void (*irq_config)(void);
};
void dw_dma_setup(const struct device *dev);
static ALWAYS_INLINE void dw_write(uint32_t dma_base, uint32_t reg, uint32_t value)
{
*((volatile uint32_t *)(dma_base + reg)) = value;
}
static ALWAYS_INLINE uint32_t dw_read(uint32_t dma_base, uint32_t reg)
{
return *((volatile uint32_t *)(dma_base + reg));
}
int dw_dma_setup(const struct device *dev);
int dw_dma_config(const struct device *dev, uint32_t channel,
struct dma_config *cfg);
@@ -121,12 +260,19 @@ int dw_dma_config(const struct device *dev, uint32_t channel,
int dw_dma_reload(const struct device *dev, uint32_t channel,
uint32_t src, uint32_t dst, size_t size);
int dw_dma_transfer_start(const struct device *dev, uint32_t channel);
int dw_dma_start(const struct device *dev, uint32_t channel);
int dw_dma_transfer_stop(const struct device *dev, uint32_t channel);
int dw_dma_stop(const struct device *dev, uint32_t channel);
int dw_dma_suspend(const struct device *dev, uint32_t channel);
int dw_dma_resume(const struct device *dev, uint32_t channel);
void dw_dma_isr(const struct device *dev);
int dw_dma_get_status(const struct device *dev, uint32_t channel,
struct dma_status *stat);
#ifdef __cplusplus
}
#endif


@@ -2,4 +2,4 @@ tests:
drivers.dma.scatter_gather:
depends_on: dma
tags: drivers dma
skip: True
platform_allow: intel_adsp_cavs25 intel_adsp_cavs20 intel_adsp_cavs18 intel_adsp_cavs15