drivers: iom: define ambiq spi/i2c dma mode as a binding property

Define the Ambiq SPI/I2C DMA mode as a devicetree binding property
instead of Kconfig symbols, making it more flexible for different
SPI/I2C instances.

Signed-off-by: Hao Luo <hluo@ambiq.com>
Hao Luo, 2025-04-15 14:38:50 +08:00; committed by Benjamin Cabé
commit d89c61bd64
5 changed files with 95 additions and 102 deletions
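With this change, DMA is selected per IOM instance from devicetree instead of a global Kconfig switch. As an illustrative sketch only (the node label and property values below are assumptions, not taken from this commit), a board overlay could enable it like this:

    /* Hypothetical overlay: "iom0" is an assumed label for the parent IOM node,
     * from which the SPI/I2C instance inherits these settings.
     */
    &iom0 {
            dma-mode;                          /* new boolean property: enable DMA for this instance */
            cmdq-buffer-size = <1024>;         /* optional; the driver falls back to 1024 */
            cmdq-buffer-location = ".nocache"; /* optional; the driver's default section is ".nocache" */
    };

The drivers then read the property from the instance's parent node via DT_PROP(DT_INST_PARENT(n), dma_mode), as the hunks below show.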


@@ -16,16 +16,10 @@ menuconfig I2C_AMBIQ
if I2C_AMBIQ
config I2C_AMBIQ_DMA
bool "AMBIQ APOLLO I2C DMA Support"
help
Enable DMA for Ambiq I2C.
config I2C_AMBIQ_HANDLE_CACHE
bool "Turn on cache handling in i2c driver"
default y
depends on CACHE_MANAGEMENT && DCACHE
depends on I2C_AMBIQ_DMA
help
Disable this if cache has been handled in upper layers.


@@ -54,6 +54,7 @@ struct i2c_ambiq_data {
void *callback_data;
uint32_t transfer_status;
bool pm_policy_state_on;
bool dma_mode;
};
static void i2c_ambiq_pm_policy_state_lock_get(const struct device *dev)
@@ -82,7 +83,6 @@ static void i2c_ambiq_pm_policy_state_lock_put(const struct device *dev)
}
}
#ifdef CONFIG_I2C_AMBIQ_DMA
static void i2c_ambiq_callback(void *callback_ctxt, uint32_t status)
{
const struct device *dev = callback_ctxt;
@@ -93,7 +93,6 @@ static void i2c_ambiq_callback(void *callback_ctxt, uint32_t status)
}
data->transfer_status = status;
}
#endif
static void i2c_ambiq_isr(const struct device *dev)
{
@@ -132,7 +131,7 @@ static int i2c_ambiq_read(const struct device *dev, struct i2c_msg *hdr_msg,
trans.ui32InstrLen = hdr_msg->len;
}
#ifdef CONFIG_I2C_AMBIQ_DMA
if (data->dma_mode) {
data->transfer_status = -EFAULT;
ret = am_hal_iom_nonblocking_transfer(data->iom_handler, &trans, i2c_ambiq_callback,
(void *)dev);
@@ -152,9 +151,10 @@ static int i2c_ambiq_read(const struct device *dev, struct i2c_msg *hdr_msg,
}
#endif /* CONFIG_I2C_AMBIQ_HANDLE_CACHE */
ret = data->transfer_status;
#else
} else {
ret = am_hal_iom_blocking_transfer(data->iom_handler, &trans);
#endif
}
return (ret != AM_HAL_STATUS_SUCCESS) ? -EIO : 0;
}
@@ -184,7 +184,7 @@ static int i2c_ambiq_write(const struct device *dev, struct i2c_msg *hdr_msg,
trans.ui32InstrLen = hdr_msg->len;
}
#ifdef CONFIG_I2C_AMBIQ_DMA
if (data->dma_mode) {
data->transfer_status = -EFAULT;
#if CONFIG_I2C_AMBIQ_HANDLE_CACHE
if (!buf_in_nocache((uintptr_t)trans.pui32TxBuffer, trans.ui32NumBytes)) {
@@ -205,9 +205,9 @@ static int i2c_ambiq_write(const struct device *dev, struct i2c_msg *hdr_msg,
return -ETIMEDOUT;
}
ret = data->transfer_status;
#else
} else {
ret = am_hal_iom_blocking_transfer(data->iom_handler, &trans);
#endif
}
return (ret != AM_HAL_STATUS_SUCCESS) ? -EIO : 0;
}
@@ -391,11 +391,13 @@ static int i2c_ambiq_init(const struct device *dev)
goto end;
}
#ifdef CONFIG_I2C_AMBIQ_DMA
am_hal_iom_interrupt_clear(data->iom_handler, AM_HAL_IOM_INT_DCMP | AM_HAL_IOM_INT_CMDCMP);
am_hal_iom_interrupt_enable(data->iom_handler, AM_HAL_IOM_INT_DCMP | AM_HAL_IOM_INT_CMDCMP);
if (data->dma_mode) {
am_hal_iom_interrupt_clear(data->iom_handler,
AM_HAL_IOM_INT_DCMP | AM_HAL_IOM_INT_CMDCMP);
am_hal_iom_interrupt_enable(data->iom_handler,
AM_HAL_IOM_INT_DCMP | AM_HAL_IOM_INT_CMDCMP);
config->irq_config_func();
#endif
}
if (AM_HAL_STATUS_SUCCESS != am_hal_iom_enable(data->iom_handler)) {
LOG_ERR("Fail to enable I2C\n");
@@ -465,17 +467,18 @@ static int i2c_ambiq_pm_action(const struct device *dev, enum pm_device_action a
i2c_ambiq_isr, DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_IRQN(DT_INST_PARENT(n))); \
}; \
IF_ENABLED(CONFIG_I2C_AMBIQ_DMA, \
IF_ENABLED(DT_PROP(DT_INST_PARENT(n), dma_mode), \
(static uint32_t i2c_ambiq_cmdq##n[DT_PROP_OR(DT_INST_PARENT(n), cmdq_buffer_size, 1024)] \
__attribute__((section(DT_PROP_OR(DT_INST_PARENT(n), \
cmdq_buffer_location, ".nocache"))));) \
) \
static struct i2c_ambiq_data i2c_ambiq_data##n = { \
.iom_cfg = IOM_HAL_CFG(n, COND_CODE_1(CONFIG_I2C_AMBIQ_DMA, (i2c_ambiq_cmdq##n), \
.iom_cfg = IOM_HAL_CFG( \
n, COND_CODE_1(DT_PROP(DT_INST_PARENT(n), dma_mode), (i2c_ambiq_cmdq##n), \
(NULL)), \
COND_CODE_1(CONFIG_I2C_AMBIQ_DMA, \
(DT_INST_PROP_OR(n, cmdq_buffer_size, 1024)), (0)) \
), \
COND_CODE_1(DT_PROP(DT_INST_PARENT(n), dma_mode), \
(DT_INST_PROP_OR(n, cmdq_buffer_size, 1024)), (0))), \
.dma_mode = DT_PROP(DT_INST_PARENT(n), dma_mode), \
.bus_sem = Z_SEM_INITIALIZER(i2c_ambiq_data##n.bus_sem, 1, 1), \
.transfer_sem = Z_SEM_INITIALIZER(i2c_ambiq_data##n.transfer_sem, 0, 1), \
}; \


@@ -18,17 +18,10 @@ menuconfig SPI_AMBIQ_SPIC
if SPI_AMBIQ_SPIC
config SPI_AMBIQ_DMA
bool "AMBIQ APOLLO SPI DMA Support"
depends on SPI_AMBIQ_SPIC
help
Enable DMA for Ambiq SPI.
config SPI_AMBIQ_HANDLE_CACHE
bool "Turn on cache handling in spi driver"
default y
depends on CACHE_MANAGEMENT && DCACHE
depends on SPI_AMBIQ_DMA
help
Disable this if cache has been handled in upper layers.


@@ -40,6 +40,7 @@ struct spi_ambiq_data {
void *iom_handler;
bool cont;
bool pm_policy_state_on;
bool dma_mode;
};
typedef void (*spi_context_update_trx)(struct spi_context *ctx, uint8_t dfs, uint32_t len);
@@ -72,7 +73,6 @@ static void spi_ambiq_pm_policy_state_lock_put(const struct device *dev)
}
}
#ifdef CONFIG_SPI_AMBIQ_DMA
static void spi_ambiq_callback(void *callback_ctxt, uint32_t status)
{
const struct device *dev = callback_ctxt;
@@ -85,7 +85,6 @@ static void spi_ambiq_callback(void *callback_ctxt, uint32_t status)
}
spi_context_complete(ctx, dev, (status == AM_HAL_STATUS_SUCCESS) ? 0 : -EIO);
}
#endif
static void spi_ambiq_reset(const struct device *dev)
{
@@ -224,38 +223,36 @@ static int spi_ambiq_xfer_half_duplex(const struct device *dev, am_hal_iom_dir_e
if ((!spi_context_tx_buf_on(ctx)) && (!spi_context_rx_on(ctx))) {
is_last = true;
}
#ifdef CONFIG_SPI_AMBIQ_DMA
if (data->dma_mode) {
#if CONFIG_SPI_AMBIQ_HANDLE_CACHE
/* Clean Dcache before DMA write */
if ((trans.eDirection == AM_HAL_IOM_TX) && (trans.pui32TxBuffer)) {
if (!buf_in_nocache((uintptr_t)trans.pui32TxBuffer, trans.ui32NumBytes)) {
if ((trans.eDirection == AM_HAL_IOM_TX) && (trans.pui32TxBuffer) &&
(!buf_in_nocache((uintptr_t)trans.pui32TxBuffer, trans.ui32NumBytes))) {
sys_cache_data_flush_range((void *)trans.pui32TxBuffer,
trans.ui32NumBytes);
}
}
#endif /* CONFIG_SPI_AMBIQ_HANDLE_CACHE */
if (AM_HAL_STATUS_SUCCESS !=
am_hal_iom_nonblocking_transfer(data->iom_handler, &trans,
((is_last == true) ? spi_ambiq_callback : NULL),
(void *)dev)) {
am_hal_iom_nonblocking_transfer(
data->iom_handler, &trans,
((is_last == true) ? spi_ambiq_callback : NULL), (void *)dev)) {
return -EIO;
}
if (is_last) {
ret = spi_context_wait_for_completion(ctx);
#if CONFIG_SPI_AMBIQ_HANDLE_CACHE
/* Invalidate Dcache after DMA read */
if ((trans.eDirection == AM_HAL_IOM_RX) && (trans.pui32RxBuffer)) {
if (!buf_in_nocache((uintptr_t)trans.pui32RxBuffer,
trans.ui32NumBytes)) {
if ((trans.eDirection == AM_HAL_IOM_RX) && (trans.pui32RxBuffer) &&
(!buf_in_nocache((uintptr_t)trans.pui32RxBuffer,
trans.ui32NumBytes))) {
sys_cache_data_invd_range((void *)trans.pui32RxBuffer,
trans.ui32NumBytes);
}
}
#endif /* CONFIG_SPI_AMBIQ_HANDLE_CACHE */
}
#else
} else {
ret = am_hal_iom_blocking_transfer(data->iom_handler, &trans);
#endif
}
rem_num -= cur_num;
if (ret != 0) {
return -EIO;
@@ -360,12 +357,11 @@ static int spi_ambiq_xfer(const struct device *dev, const struct spi_config *con
}
}
#ifndef CONFIG_SPI_AMBIQ_DMA
if (!data->cont) {
if ((!data->dma_mode) && (!data->cont)) {
spi_context_cs_control(ctx, false);
spi_context_complete(ctx, dev, ret);
}
#endif
return ret;
}
@@ -450,11 +446,13 @@ static int spi_ambiq_init(const struct device *dev)
goto end;
}
#ifdef CONFIG_SPI_AMBIQ_DMA
am_hal_iom_interrupt_clear(data->iom_handler, AM_HAL_IOM_INT_CQUPD | AM_HAL_IOM_INT_ERR);
am_hal_iom_interrupt_enable(data->iom_handler, AM_HAL_IOM_INT_CQUPD | AM_HAL_IOM_INT_ERR);
if (data->dma_mode) {
am_hal_iom_interrupt_clear(data->iom_handler,
AM_HAL_IOM_INT_CQUPD | AM_HAL_IOM_INT_ERR);
am_hal_iom_interrupt_enable(data->iom_handler,
AM_HAL_IOM_INT_CQUPD | AM_HAL_IOM_INT_ERR);
cfg->irq_config_func();
#endif
}
end:
if (ret < 0) {
am_hal_iom_uninitialize(data->iom_handler);
@@ -511,17 +509,18 @@ static int spi_ambiq_pm_action(const struct device *dev, enum pm_device_action a
spi_ambiq_isr, DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_IRQN(DT_INST_PARENT(n))); \
}; \
IF_ENABLED(CONFIG_SPI_AMBIQ_DMA, \
IF_ENABLED(DT_PROP(DT_INST_PARENT(n), dma_mode), \
(static uint32_t spi_ambiq_cmdq##n[DT_PROP_OR(DT_INST_PARENT(n), cmdq_buffer_size, 1024)] \
__attribute__((section(DT_PROP_OR(DT_INST_PARENT(n), \
cmdq_buffer_location, ".nocache"))));) \
) \
static struct spi_ambiq_data spi_ambiq_data##n = { \
.iom_cfg = IOM_HAL_CFG(n, COND_CODE_1(CONFIG_SPI_AMBIQ_DMA, (spi_ambiq_cmdq##n), \
.iom_cfg = IOM_HAL_CFG( \
n, COND_CODE_1(DT_PROP(DT_INST_PARENT(n), dma_mode), (spi_ambiq_cmdq##n), \
(NULL)), \
COND_CODE_1(CONFIG_SPI_AMBIQ_DMA, \
(DT_INST_PROP_OR(n, cmdq_buffer_size, 1024)), (0)) \
), \
COND_CODE_1(DT_PROP(DT_INST_PARENT(n), dma_mode), \
(DT_INST_PROP_OR(n, cmdq_buffer_size, 1024)), (0))), \
.dma_mode = DT_PROP(DT_INST_PARENT(n), dma_mode), \
SPI_CONTEXT_INIT_LOCK(spi_ambiq_data##n, ctx), \
SPI_CONTEXT_INIT_SYNC(spi_ambiq_data##n, ctx), \
SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)}; \


@@ -14,6 +14,10 @@ properties:
interrupts:
required: true
dma-mode:
description: Enables DMA over SPI/I2C.
type: boolean
cmdq-buffer-location:
type: string
description: |