From 7b82e9f2d09ca17a97e34d11965e119f394ee39c Mon Sep 17 00:00:00 2001 From: Armando Visconti Date: Thu, 14 Jun 2018 09:27:46 +0200 Subject: [PATCH] drivers: i2s: Add support for I2S stm32 STM32 I2S driver implementation. It has been designed in the most generic way possible, with the possibility of using it in master/slave and rx/tx mode. Currenty it has been tested for master rx mode only using the microphone on ArgonKey board. The configuration file permits to compile it for STM32F4xx product family only, but it should be easy to extend it also for other families. It supports all 5 STM32F4xx I2S controllers (I2S 1/4/5 on APB2 and I2S 2/3 on APB1). It makes uses of the available DMA channels for rx/tx streams. The clock source can be selected among one of the following two choices: - PLLI2S pll, with possibility to configure PLLM/PLLN/PLLR - HSE/HSI clock Interrupt is triggered only in case of errors (FRM/OVR/UDR). Signed-off-by: Armando Visconti --- drivers/i2s/CMakeLists.txt | 1 + drivers/i2s/Kconfig.stm32 | 98 +++ drivers/i2s/i2s_ll_stm32.c | 1147 ++++++++++++++++++++++++++++++++++++ drivers/i2s/i2s_ll_stm32.h | 129 ++++ 4 files changed, 1375 insertions(+) create mode 100644 drivers/i2s/Kconfig.stm32 create mode 100644 drivers/i2s/i2s_ll_stm32.c create mode 100644 drivers/i2s/i2s_ll_stm32.h diff --git a/drivers/i2s/CMakeLists.txt b/drivers/i2s/CMakeLists.txt index 55dfbe20ef9..115b40c9ad2 100644 --- a/drivers/i2s/CMakeLists.txt +++ b/drivers/i2s/CMakeLists.txt @@ -4,3 +4,4 @@ zephyr_library_sources(i2s_common.c) zephyr_library_sources_ifdef(CONFIG_I2S_SAM_SSC i2s_sam_ssc.c) zephyr_library_sources_ifdef(CONFIG_I2S_CAVS i2s_cavs.c) zephyr_library_sources_ifdef(CONFIG_USERSPACE i2s_handlers.c) +zephyr_library_sources_ifdef(CONFIG_I2S_STM32 i2s_ll_stm32.c) diff --git a/drivers/i2s/Kconfig.stm32 b/drivers/i2s/Kconfig.stm32 new file mode 100644 index 00000000000..bdb21fcff91 --- /dev/null +++ b/drivers/i2s/Kconfig.stm32 @@ -0,0 +1,98 @@ +# Kconfig - STM32 I2S driver 
configuration options +# +# Copyright (c) 2018 STMicroelectronics +# +# SPDX-License-Identifier: Apache-2.0 +# + +menuconfig I2S_STM32 + bool "STM32 MCU I2S controller driver" + depends on I2S && SOC_SERIES_STM32F4X + select DMA + default n + help + Enable I2S support on the STM32 family of processors. + (Tested on the STM32F4 series) + +if I2S_STM32 + +config I2S_STM32_RX_BLOCK_COUNT + int "RX queue length" + default 4 + +config I2S_STM32_TX_BLOCK_COUNT + int "TX queue length" + default 4 + +config I2S_STM32_USE_PLLI2S_ENABLE + bool "Enable usage of PLL" + default n + help + Enable it if I2S clock should be provided by the PLLI2S. + If not enabled the clock will be provided by HSI/HSE. + +config I2S_STM32_PLLI2S_PLLM + int "Division factor for PLLI2S VCO input clock" + depends on I2S_STM32_USE_PLLI2S_ENABLE + default 8 + range 2 63 + help + Division factor for the audio PLL (PLLI2S) VCO input clock. + PLLM factor should be selected to ensure that the VCO + input frequency ranges from 1 to 2 MHz. It is recommended + to select a frequency of 2 MHz to limit PLL jitter. + Allowed values: 2-63 + +config I2S_STM32_PLLI2S_PLLN + int "Multiplier factor for PLLI2S VCO output clock" + depends on I2S_STM32_USE_PLLI2S_ENABLE + default 56 + range 50 432 + help + Multiply factor for the audio PLL (PLLI2S) VCO output clock. + PLLN factor should be selected to ensure that the VCO + output frequency ranges from 100 to 432 MHz. + Allowed values: 50-432 + +config I2S_STM32_PLLI2S_PLLR + int "Division factor for I2S clock" + depends on I2S_STM32_USE_PLLI2S_ENABLE + default 7 + range 2 7 + help + Division factor for the I2S clock. + PLLR factor should be selected to ensure that the I2S clock + frequency is less than or equal to 192MHz. + Allowed values: 2-7 + +config I2S_1 + bool "I2S port 1" + default n + help + Enable I2S controller port 1. + +config I2S_2 + bool "I2S port 2" + default n + help + Enable I2S controller port 2. 
+ +config I2S_3 + bool "I2S port 3" + default n + help + Enable I2S controller port 3. + +config I2S_4 + bool "I2S port 4" + default n + help + Enable I2S controller port 4. + +config I2S_5 + bool "I2S port 5" + default n + help + Enable I2S controller port 5. + +endif # I2S_STM32 diff --git a/drivers/i2s/i2s_ll_stm32.c b/drivers/i2s/i2s_ll_stm32.c new file mode 100644 index 00000000000..387d61be6fe --- /dev/null +++ b/drivers/i2s/i2s_ll_stm32.c @@ -0,0 +1,1147 @@ +/* + * Copyright (c) 2018 STMicroelectronics + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include + +#include "i2s_ll_stm32.h" +#include + +/* FIXME change to + * #if __DCACHE_PRESENT == 1 + * when cache support is added + */ +#if 0 +#define DCACHE_INVALIDATE(addr, size) \ + SCB_InvalidateDCache_by_Addr((u32_t *)addr, size) +#define DCACHE_CLEAN(addr, size) \ + SCB_CleanDCache_by_Addr((u32_t *)addr, size) +#else +#define DCACHE_INVALIDATE(addr, size) {; } +#define DCACHE_CLEAN(addr, size) {; } +#endif + +#define MODULO_INC(val, max) { val = (++val < max) ? 
val : 0; } + +static unsigned int div_round_closest(u32_t dividend, u32_t divisor) +{ + return (dividend + (divisor / 2)) / divisor; +} + +/* + * Get data from the queue + */ +static int queue_get(struct ring_buf *rb, void **mem_block, size_t *size) +{ + unsigned int key; + + key = irq_lock(); + + if (rb->tail == rb->head) { + /* Ring buffer is empty */ + irq_unlock(key); + return -ENOMEM; + } + + *mem_block = rb->buf[rb->tail].mem_block; + *size = rb->buf[rb->tail].size; + MODULO_INC(rb->tail, rb->len); + + irq_unlock(key); + + return 0; +} + +/* + * Put data in the queue + */ +static int queue_put(struct ring_buf *rb, void *mem_block, size_t size) +{ + u16_t head_next; + unsigned int key; + + key = irq_lock(); + + head_next = rb->head; + MODULO_INC(head_next, rb->len); + + if (head_next == rb->tail) { + /* Ring buffer is full */ + irq_unlock(key); + return -ENOMEM; + } + + rb->buf[rb->head].mem_block = mem_block; + rb->buf[rb->head].size = size; + rb->head = head_next; + + irq_unlock(key); + + return 0; +} + +static int i2s_stm32_enable_clock(struct device *dev) +{ + const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); + struct device *clk; + int ret; + + clk = device_get_binding(STM32_CLOCK_CONTROL_NAME); + __ASSERT_NO_MSG(clk); + + ret = clock_control_on(clk, (clock_control_subsys_t *) &cfg->pclken); + if (ret != 0) { + return -EIO; + } + + return 0; +} + +#ifdef CONFIG_I2S_STM32_USE_PLLI2S_ENABLE +#define PLLI2S_MAX_MS_TIME 1 /* PLLI2S lock time is 300us max */ +static u16_t plli2s_ms_count; +#endif + +static int i2s_stm32_set_clock(struct device *dev, u32_t bit_clk_freq) +{ + const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); + u32_t pll_src = LL_RCC_PLL_GetMainSource(); + int freq_in; + u8_t i2s_div, i2s_odd; + + freq_in = (pll_src == LL_RCC_PLLSOURCE_HSI) ? 
+ HSI_VALUE : CONFIG_CLOCK_STM32_HSE_CLOCK; + +#ifdef CONFIG_I2S_STM32_USE_PLLI2S_ENABLE + /* Set PLLI2S */ + LL_RCC_PLLI2S_Disable(); + LL_RCC_PLLI2S_ConfigDomain_I2S(pll_src, + CONFIG_I2S_STM32_PLLI2S_PLLM, + CONFIG_I2S_STM32_PLLI2S_PLLN, + CONFIG_I2S_STM32_PLLI2S_PLLR); + LL_RCC_PLLI2S_Enable(); + + /* wait until PLLI2S gets locked */ + while (!LL_RCC_PLLI2S_IsReady()) { + if (plli2s_ms_count++ > PLLI2S_MAX_MS_TIME) { + return -EIO; + } + + /* wait 1 ms */ + k_sleep(1); + } + SYS_LOG_DBG("PLLI2S is locked"); + + /* Adjust freq_in according to PLLM, PLLN, PLLR */ + float freq_tmp; + + freq_tmp = freq_in / CONFIG_I2S_STM32_PLLI2S_PLLM; + freq_tmp *= CONFIG_I2S_STM32_PLLI2S_PLLN; + freq_tmp /= CONFIG_I2S_STM32_PLLI2S_PLLR; + freq_in = (int) freq_tmp; +#endif /* CONFIG_I2S_STM32_USE_PLLI2S_ENABLE */ + + /* Select clock source */ + LL_RCC_SetI2SClockSource(cfg->i2s_clk_sel); + + /* + * The ratio between input clock (I2SxClk) and output + * clock on the pad (I2S_CK) is obtained using the + * following formula: + * (i2s_div * 2) + i2s_odd + */ + i2s_div = div_round_closest(freq_in, bit_clk_freq); + i2s_odd = (i2s_div & 0x1) ? 
1 : 0; + i2s_div >>= 1; + + SYS_LOG_DBG("i2s_div: %d - i2s_odd: %d", i2s_div, i2s_odd); + + LL_I2S_SetPrescalerLinear(cfg->i2s, i2s_div); + LL_I2S_SetPrescalerParity(cfg->i2s, i2s_odd); + + return 0; +} + +static int i2s_stm32_configure(struct device *dev, enum i2s_dir dir, + struct i2s_config *i2s_cfg) +{ + const struct i2s_stm32_cfg *const cfg = DEV_CFG(dev); + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + struct stream *stream; + u32_t bit_clk_freq; + int ret; + + if (dir == I2S_DIR_RX) { + stream = &dev_data->rx; + } else if (dir == I2S_DIR_TX) { + stream = &dev_data->tx; + } else { + SYS_LOG_ERR("Either RX or TX direction must be selected"); + return -EINVAL; + } + + if (stream->state != I2S_STATE_NOT_READY && + stream->state != I2S_STATE_READY) { + SYS_LOG_ERR("invalid state"); + return -EINVAL; + } + + stream->master = true; + if (i2s_cfg->options & I2S_OPT_FRAME_CLK_SLAVE || + i2s_cfg->options & I2S_OPT_BIT_CLK_SLAVE) { + stream->master = false; + } + + if (i2s_cfg->frame_clk_freq == 0) { + stream->queue_drop(stream); + memset(&stream->cfg, 0, sizeof(struct i2s_config)); + stream->state = I2S_STATE_NOT_READY; + return 0; + } + + memcpy(&stream->cfg, i2s_cfg, sizeof(struct i2s_config)); + + /* set I2S bitclock */ + bit_clk_freq = i2s_cfg->frame_clk_freq * + i2s_cfg->word_size * i2s_cfg->channels; + + ret = i2s_stm32_set_clock(dev, bit_clk_freq); + if (ret < 0) { + return ret; + } + + /* set I2S Data Format */ + if (i2s_cfg->word_size == 16) { + LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_16B); + } else if (i2s_cfg->word_size == 24) { + LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_24B); + } else if (i2s_cfg->word_size == 32) { + LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_32B); + } else { + SYS_LOG_ERR("invalid word size"); + return -EINVAL; + } + + /* set I2S Standard */ + switch (i2s_cfg->format & I2S_FMT_DATA_FORMAT_MASK) { + case I2S_FMT_DATA_FORMAT_I2S: + LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PHILIPS); + break; + + case 
I2S_FMT_DATA_FORMAT_PCM_SHORT: + LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PCM_SHORT); + break; + + case I2S_FMT_DATA_FORMAT_PCM_LONG: + LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_PCM_LONG); + break; + + case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED: + LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_MSB); + break; + + case I2S_FMT_DATA_FORMAT_RIGHT_JUSTIFIED: + LL_I2S_SetStandard(cfg->i2s, LL_I2S_STANDARD_LSB); + break; + + default: + SYS_LOG_ERR("Unsupported I2S data format"); + return -EINVAL; + } + + /* set I2S clock polarity */ + if ((i2s_cfg->format & I2S_FMT_CLK_FORMAT_MASK) == I2S_FMT_BIT_CLK_INV) + LL_I2S_SetClockPolarity(cfg->i2s, LL_I2S_POLARITY_HIGH); + else + LL_I2S_SetClockPolarity(cfg->i2s, LL_I2S_POLARITY_LOW); + + stream->state = I2S_STATE_READY; + return 0; +} + +static int i2s_stm32_trigger(struct device *dev, enum i2s_dir dir, + enum i2s_trigger_cmd cmd) +{ + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + struct stream *stream; + unsigned int key; + int ret; + + if (dir == I2S_DIR_RX) { + stream = &dev_data->rx; + } else if (dir == I2S_DIR_TX) { + stream = &dev_data->tx; + } else { + SYS_LOG_ERR("Either RX or TX direction must be selected"); + return -EINVAL; + } + + switch (cmd) { + case I2S_TRIGGER_START: + if (stream->state != I2S_STATE_READY) { + SYS_LOG_ERR("START trigger: invalid state %d", + stream->state); + return -EIO; + } + + __ASSERT_NO_MSG(stream->mem_block == NULL); + + ret = stream->stream_start(stream, dev); + if (ret < 0) { + SYS_LOG_ERR("START trigger failed %d", ret); + return ret; + } + + stream->state = I2S_STATE_RUNNING; + stream->last_block = false; + break; + + case I2S_TRIGGER_STOP: + key = irq_lock(); + if (stream->state != I2S_STATE_RUNNING) { + irq_unlock(key); + SYS_LOG_ERR("STOP trigger: invalid state"); + return -EIO; + } + irq_unlock(key); + stream->stream_disable(stream, dev); + stream->queue_drop(stream); + stream->state = I2S_STATE_READY; + stream->last_block = true; + break; + + case I2S_TRIGGER_DRAIN: + 
key = irq_lock(); + if (stream->state != I2S_STATE_RUNNING) { + irq_unlock(key); + SYS_LOG_ERR("DRAIN trigger: invalid state"); + return -EIO; + } + stream->stream_disable(stream, dev); + stream->queue_drop(stream); + stream->state = I2S_STATE_READY; + irq_unlock(key); + break; + + case I2S_TRIGGER_DROP: + if (stream->state == I2S_STATE_NOT_READY) { + SYS_LOG_ERR("DROP trigger: invalid state"); + return -EIO; + } + stream->stream_disable(stream, dev); + stream->queue_drop(stream); + stream->state = I2S_STATE_READY; + break; + + case I2S_TRIGGER_PREPARE: + if (stream->state != I2S_STATE_ERROR) { + SYS_LOG_ERR("PREPARE trigger: invalid state"); + return -EIO; + } + stream->state = I2S_STATE_READY; + stream->queue_drop(stream); + break; + + default: + SYS_LOG_ERR("Unsupported trigger command"); + return -EINVAL; + } + + return 0; +} + +static int i2s_stm32_read(struct device *dev, void **mem_block, size_t *size) +{ + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + int ret; + + if (dev_data->rx.state == I2S_STATE_NOT_READY) { + SYS_LOG_DBG("invalid state"); + return -EIO; + } + + if (dev_data->rx.state != I2S_STATE_ERROR) { + ret = k_sem_take(&dev_data->rx.sem, dev_data->rx.cfg.timeout); + if (ret < 0) { + return ret; + } + } + + /* Get data from the beginning of RX queue */ + ret = queue_get(&dev_data->rx.mem_block_queue, mem_block, size); + if (ret < 0) { + return -EIO; + } + + return 0; +} + +static int i2s_stm32_write(struct device *dev, void *mem_block, size_t size) +{ + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + int ret; + + if (dev_data->tx.state != I2S_STATE_RUNNING && + dev_data->tx.state != I2S_STATE_READY) { + SYS_LOG_DBG("invalid state"); + return -EIO; + } + + ret = k_sem_take(&dev_data->tx.sem, dev_data->tx.cfg.timeout); + if (ret < 0) { + return ret; + } + + /* Add data to the end of the TX queue */ + queue_put(&dev_data->tx.mem_block_queue, mem_block, size); + + return 0; +} + +static const struct i2s_driver_api 
i2s_stm32_driver_api = { + .configure = i2s_stm32_configure, + .read = i2s_stm32_read, + .write = i2s_stm32_write, + .trigger = i2s_stm32_trigger, +}; + +#define STM32_DMA_NUM_CHANNELS 8 +static struct device *active_dma_rx_channel[STM32_DMA_NUM_CHANNELS]; +static struct device *active_dma_tx_channel[STM32_DMA_NUM_CHANNELS]; + +static int start_dma(struct device *dev_dma, u32_t channel, + struct dma_config *dcfg, void *src, void *dst, + u32_t blk_size) +{ + struct dma_block_config blk_cfg; + int ret; + + memset(&blk_cfg, 0, sizeof(blk_cfg)); + blk_cfg.block_size = blk_size / sizeof(u16_t); + blk_cfg.source_address = (u32_t)src; + blk_cfg.dest_address = (u32_t)dst; + + dcfg->head_block = &blk_cfg; + + ret = dma_config(dev_dma, channel, dcfg); + if (ret < 0) { + return ret; + } + + ret = dma_start(dev_dma, channel); + + return ret; +} + +static struct device *get_dev_from_rx_dma_channel(u32_t dma_channel); +static struct device *get_dev_from_tx_dma_channel(u32_t dma_channel); +static void rx_stream_disable(struct stream *stream, struct device *dev); +static void tx_stream_disable(struct stream *stream, struct device *dev); + +/* This function is executed in the interrupt context */ +static void dma_rx_callback(struct device *dev_dma, u32_t channel, int status) +{ + struct device *dev = get_dev_from_rx_dma_channel(channel); + const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + struct stream *stream = &dev_data->rx; + void *mblk_tmp; + int ret; + + if (status != 0) { + ret = -EIO; + stream->state = I2S_STATE_ERROR; + goto rx_disable; + } + + __ASSERT_NO_MSG(stream->mem_block != NULL); + + /* Stop reception if there was an error */ + if (stream->state == I2S_STATE_ERROR) { + goto rx_disable; + } + + mblk_tmp = stream->mem_block; + + /* Prepare to receive the next data block */ + ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block, + K_NO_WAIT); + if (ret < 0) { + stream->state = I2S_STATE_ERROR; + goto 
rx_disable; + } + + ret = start_dma(dev_data->dev_dma, stream->dma_channel, + &stream->dma_cfg, + (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s), + stream->mem_block, + stream->cfg.block_size); + if (ret < 0) { + SYS_LOG_DBG("Failed to start RX DMA transfer: %d", ret); + goto rx_disable; + } + + /* Assure cache coherency after DMA write operation */ + DCACHE_INVALIDATE(mblk_tmp, stream->cfg.block_size); + + /* All block data received */ + ret = queue_put(&stream->mem_block_queue, mblk_tmp, + stream->cfg.block_size); + if (ret < 0) { + stream->state = I2S_STATE_ERROR; + goto rx_disable; + } + k_sem_give(&stream->sem); + + /* Stop reception if we were requested */ + if (stream->state == I2S_STATE_STOPPING) { + stream->state = I2S_STATE_READY; + goto rx_disable; + } + + return; + +rx_disable: + rx_stream_disable(stream, dev); +} + +static void dma_tx_callback(struct device *dev_dma, u32_t channel, int status) +{ + struct device *dev = get_dev_from_tx_dma_channel(channel); + const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + struct stream *stream = &dev_data->tx; + size_t mem_block_size; + int ret; + + if (status != 0) { + ret = -EIO; + stream->state = I2S_STATE_ERROR; + goto tx_disable; + } + + __ASSERT_NO_MSG(stream->mem_block != NULL); + + /* All block data sent */ + k_mem_slab_free(stream->cfg.mem_slab, &stream->mem_block); + stream->mem_block = NULL; + + /* Stop transmission if there was an error */ + if (stream->state == I2S_STATE_ERROR) { + SYS_LOG_ERR("TX error detected"); + goto tx_disable; + } + + /* Stop transmission if we were requested */ + if (stream->last_block) { + stream->state = I2S_STATE_READY; + goto tx_disable; + } + + /* Prepare to send the next data block */ + ret = queue_get(&stream->mem_block_queue, &stream->mem_block, + &mem_block_size); + if (ret < 0) { + if (stream->state == I2S_STATE_STOPPING) { + stream->state = I2S_STATE_READY; + } else { + stream->state = I2S_STATE_ERROR; + } + goto 
tx_disable; + } + k_sem_give(&stream->sem); + + /* Assure cache coherency before DMA read operation */ + DCACHE_CLEAN(stream->mem_block, mem_block_size); + + ret = start_dma(dev_data->dev_dma, stream->dma_channel, + &stream->dma_cfg, + stream->mem_block, + (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s), + stream->cfg.block_size); + if (ret < 0) { + SYS_LOG_DBG("Failed to start TX DMA transfer: %d", ret); + goto tx_disable; + } + + return; + +tx_disable: + tx_stream_disable(stream, dev); +} + +static u32_t i2s_stm32_irq_count; +static u32_t i2s_stm32_irq_ovr_count; + +static void i2s_stm32_isr(void *arg) +{ + struct device *const dev = (struct device *) arg; + const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + struct stream *stream = &dev_data->rx; + + SYS_LOG_ERR("%s: err=%d", __func__, LL_I2S_ReadReg(cfg->i2s, SR)); + stream->state = I2S_STATE_ERROR; + + /* OVR error must be explicitly cleared */ + if (LL_I2S_IsActiveFlag_OVR(cfg->i2s)) { + i2s_stm32_irq_ovr_count++; + LL_I2S_ClearFlag_OVR(cfg->i2s); + } + + i2s_stm32_irq_count++; +} + +static int i2s_stm32_initialize(struct device *dev) +{ + const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + int ret, i; + + /* Enable I2S clock propagation */ + ret = i2s_stm32_enable_clock(dev); + if (ret < 0) { + SYS_LOG_ERR("%s: clock enabling failed: %d", __func__, ret); + return -EIO; + } + + cfg->irq_config(dev); + + k_sem_init(&dev_data->rx.sem, 0, CONFIG_I2S_STM32_RX_BLOCK_COUNT); + k_sem_init(&dev_data->tx.sem, CONFIG_I2S_STM32_TX_BLOCK_COUNT, + CONFIG_I2S_STM32_TX_BLOCK_COUNT); + + for (i = 0; i < STM32_DMA_NUM_CHANNELS; i++) { + active_dma_rx_channel[i] = NULL; + active_dma_tx_channel[i] = NULL; + } + + /* Get the binding to the DMA device */ + dev_data->dev_dma = device_get_binding(dev_data->dma_name); + if (!dev_data->dev_dma) { + SYS_LOG_ERR("%s device not found", dev_data->dma_name); + return -ENODEV; + } + + 
SYS_LOG_INF("%s inited", dev->config->name); + + return 0; +} + +static int rx_stream_start(struct stream *stream, struct device *dev) +{ + const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + int ret; + + ret = k_mem_slab_alloc(stream->cfg.mem_slab, &stream->mem_block, + K_NO_WAIT); + if (ret < 0) { + return ret; + } + + if (stream->master) { + LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_MASTER_RX); + } else { + LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_SLAVE_RX); + } + + /* remember active RX DMA channel (used in callback) */ + active_dma_rx_channel[stream->dma_channel] = dev; + + ret = start_dma(dev_data->dev_dma, stream->dma_channel, + &stream->dma_cfg, + (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s), + stream->mem_block, + stream->cfg.block_size); + if (ret < 0) { + SYS_LOG_ERR("Failed to start RX DMA transfer: %d", ret); + return ret; + } + + LL_I2S_EnableDMAReq_RX(cfg->i2s); + + LL_I2S_EnableIT_ERR(cfg->i2s); + LL_I2S_Enable(cfg->i2s); + + return 0; +} + +static int tx_stream_start(struct stream *stream, struct device *dev) +{ + const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + size_t mem_block_size; + int ret; + + ret = queue_get(&stream->mem_block_queue, &stream->mem_block, + &mem_block_size); + if (ret < 0) { + return ret; + } + k_sem_give(&stream->sem); + + /* Assure cache coherency before DMA read operation */ + DCACHE_CLEAN(stream->mem_block, mem_block_size); + + if (stream->master) { + LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_MASTER_TX); + } else { + LL_I2S_SetTransferMode(cfg->i2s, LL_I2S_MODE_SLAVE_TX); + } + + /* remember active TX DMA channel (used in callback) */ + active_dma_tx_channel[stream->dma_channel] = dev; + + ret = start_dma(dev_data->dev_dma, stream->dma_channel, + &stream->dma_cfg, + stream->mem_block, + (void *)LL_SPI_DMA_GetRegAddr(cfg->i2s), + stream->cfg.block_size); + if (ret < 0) { + SYS_LOG_ERR("Failed to start TX DMA 
transfer: %d", ret); + return ret; + } + + LL_I2S_EnableDMAReq_TX(cfg->i2s); + + LL_I2S_EnableIT_ERR(cfg->i2s); + LL_I2S_Enable(cfg->i2s); + + return 0; +} + +static void rx_stream_disable(struct stream *stream, struct device *dev) +{ + const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + struct device *dev_dma = dev_data->dev_dma; + + LL_I2S_DisableDMAReq_RX(cfg->i2s); + LL_I2S_DisableIT_ERR(cfg->i2s); + + dma_stop(dev_dma, stream->dma_channel); + if (stream->mem_block != NULL) { + k_mem_slab_free(stream->cfg.mem_slab, &stream->mem_block); + stream->mem_block = NULL; + } + + LL_I2S_Disable(cfg->i2s); + + active_dma_rx_channel[stream->dma_channel] = NULL; +} + +static void tx_stream_disable(struct stream *stream, struct device *dev) +{ + const struct i2s_stm32_cfg *cfg = DEV_CFG(dev); + struct i2s_stm32_data *const dev_data = DEV_DATA(dev); + struct device *dev_dma = dev_data->dev_dma; + + LL_I2S_DisableDMAReq_TX(cfg->i2s); + LL_I2S_DisableIT_ERR(cfg->i2s); + + dma_stop(dev_dma, stream->dma_channel); + if (stream->mem_block != NULL) { + k_mem_slab_free(stream->cfg.mem_slab, &stream->mem_block); + stream->mem_block = NULL; + } + + LL_I2S_Disable(cfg->i2s); + + active_dma_tx_channel[stream->dma_channel] = NULL; +} + +static void rx_queue_drop(struct stream *stream) +{ + size_t size; + void *mem_block; + + while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) { + k_mem_slab_free(stream->cfg.mem_slab, &mem_block); + } + + k_sem_reset(&stream->sem); +} + +static void tx_queue_drop(struct stream *stream) +{ + size_t size; + void *mem_block; + unsigned int n = 0; + + while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) { + k_mem_slab_free(stream->cfg.mem_slab, &mem_block); + n++; + } + + for (; n > 0; n--) { + k_sem_give(&stream->sem); + } +} + +static struct device *get_dev_from_rx_dma_channel(u32_t dma_channel) +{ + return active_dma_rx_channel[dma_channel]; +} + +static struct device 
*get_dev_from_tx_dma_channel(u32_t dma_channel) +{ + return active_dma_tx_channel[dma_channel]; +} + +#ifdef CONFIG_I2S_1 +static struct device DEVICE_NAME_GET(i2s_stm32_1); + +static void i2s_stm32_irq_config_func_1(struct device *dev); + +static const struct i2s_stm32_cfg i2s_stm32_config_1 = { + .i2s = (SPI_TypeDef *) CONFIG_I2S_1_BASE_ADDRESS, + .pclken = { + .enr = CONFIG_I2S_1_CLOCK_BITS, + .bus = CONFIG_I2S_1_CLOCK_BUS, + }, + .i2s_clk_sel = CLK_SEL_2, + .irq_config = i2s_stm32_irq_config_func_1, +}; + +struct queue_item rx_1_ring_buf[CONFIG_I2S_STM32_RX_BLOCK_COUNT + 1]; +struct queue_item tx_1_ring_buf[CONFIG_I2S_STM32_TX_BLOCK_COUNT + 1]; + +static struct i2s_stm32_data i2s_stm32_data_1 = { + .dma_name = I2S1_DMA_NAME, + .rx = { + .dma_channel = I2S1_DMA_CHAN_RX, + .dma_cfg = { + .block_count = 1, + .dma_slot = I2S1_DMA_SLOT_RX, + .channel_direction = PERIPHERAL_TO_MEMORY, + .source_data_size = 1, /* 16bit default */ + .dest_data_size = 1, /* 16bit default */ + .source_burst_length = 0, /* SINGLE transfer */ + .dest_burst_length = 1, + .dma_callback = dma_rx_callback, + }, + .stream_start = rx_stream_start, + .stream_disable = rx_stream_disable, + .queue_drop = rx_queue_drop, + .mem_block_queue.buf = rx_1_ring_buf, + .mem_block_queue.len = ARRAY_SIZE(rx_1_ring_buf), + }, + .tx = { + .dma_channel = I2S1_DMA_CHAN_TX, + .dma_cfg = { + .block_count = 1, + .dma_slot = I2S1_DMA_SLOT_TX, + .channel_direction = MEMORY_TO_PERIPHERAL, + .source_data_size = 1, /* 16bit default */ + .dest_data_size = 1, /* 16bit default */ + .source_burst_length = 1, + .dest_burst_length = 0, /* SINGLE transfer */ + .dma_callback = dma_tx_callback, + }, + .stream_start = tx_stream_start, + .stream_disable = tx_stream_disable, + .queue_drop = tx_queue_drop, + .mem_block_queue.buf = tx_1_ring_buf, + .mem_block_queue.len = ARRAY_SIZE(tx_1_ring_buf), + }, +}; +DEVICE_AND_API_INIT(i2s_stm32_1, CONFIG_I2S_1_NAME, &i2s_stm32_initialize, + &i2s_stm32_data_1, &i2s_stm32_config_1, POST_KERNEL, 
+ CONFIG_I2S_INIT_PRIORITY, &i2s_stm32_driver_api); + +static void i2s_stm32_irq_config_func_1(struct device *dev) +{ + IRQ_CONNECT(CONFIG_I2S_1_IRQ, CONFIG_I2S_1_IRQ_PRI, i2s_stm32_isr, + DEVICE_GET(i2s_stm32_1), 0); + irq_enable(CONFIG_I2S_1_IRQ); +} + +#endif /* CONFIG_I2S_1 */ + +#ifdef CONFIG_I2S_2 +static struct device DEVICE_NAME_GET(i2s_stm32_2); + +static void i2s_stm32_irq_config_func_2(struct device *dev); + +static const struct i2s_stm32_cfg i2s_stm32_config_2 = { + .i2s = (SPI_TypeDef *) CONFIG_I2S_2_BASE_ADDRESS, + .pclken = { + .enr = CONFIG_I2S_2_CLOCK_BITS, + .bus = CONFIG_I2S_2_CLOCK_BUS, + }, + .i2s_clk_sel = CLK_SEL_1, + .irq_config = i2s_stm32_irq_config_func_2, +}; + +struct queue_item rx_2_ring_buf[CONFIG_I2S_STM32_RX_BLOCK_COUNT + 1]; +struct queue_item tx_2_ring_buf[CONFIG_I2S_STM32_TX_BLOCK_COUNT + 1]; + +static struct i2s_stm32_data i2s_stm32_data_2 = { + .dma_name = I2S2_DMA_NAME, + .rx = { + .dma_channel = I2S2_DMA_CHAN_RX, + .dma_cfg = { + .block_count = 1, + .dma_slot = I2S2_DMA_SLOT_RX, + .channel_direction = PERIPHERAL_TO_MEMORY, + .source_data_size = 1, /* 16bit default */ + .dest_data_size = 1, /* 16bit default */ + .source_burst_length = 0, /* SINGLE transfer */ + .dest_burst_length = 1, + .dma_callback = dma_rx_callback, + }, + .stream_start = rx_stream_start, + .stream_disable = rx_stream_disable, + .queue_drop = rx_queue_drop, + .mem_block_queue.buf = rx_2_ring_buf, + .mem_block_queue.len = ARRAY_SIZE(rx_2_ring_buf), + }, + .tx = { + .dma_channel = I2S2_DMA_CHAN_TX, + .dma_cfg = { + .block_count = 1, + .dma_slot = I2S2_DMA_SLOT_TX, + .channel_direction = MEMORY_TO_PERIPHERAL, + .source_data_size = 1, /* 16bit default */ + .dest_data_size = 1, /* 16bit default */ + .source_burst_length = 1, + .dest_burst_length = 0, /* SINGLE transfer */ + .dma_callback = dma_tx_callback, + }, + .stream_start = tx_stream_start, + .stream_disable = tx_stream_disable, + .queue_drop = tx_queue_drop, + .mem_block_queue.buf = tx_2_ring_buf, + 
.mem_block_queue.len = ARRAY_SIZE(tx_2_ring_buf), + }, +}; +DEVICE_AND_API_INIT(i2s_stm32_2, CONFIG_I2S_2_NAME, &i2s_stm32_initialize, + &i2s_stm32_data_2, &i2s_stm32_config_2, POST_KERNEL, + CONFIG_I2S_INIT_PRIORITY, &i2s_stm32_driver_api); + +static void i2s_stm32_irq_config_func_2(struct device *dev) +{ + IRQ_CONNECT(CONFIG_I2S_2_IRQ, CONFIG_I2S_2_IRQ_PRI, i2s_stm32_isr, + DEVICE_GET(i2s_stm32_2), 0); + irq_enable(CONFIG_I2S_2_IRQ); +} + +#endif /* CONFIG_I2S_2 */ + +#ifdef CONFIG_I2S_3 +static struct device DEVICE_NAME_GET(i2s_stm32_3); + +static void i2s_stm32_irq_config_func_3(struct device *dev); + +static const struct i2s_stm32_cfg i2s_stm32_config_3 = { + .i2s = (SPI_TypeDef *) CONFIG_I2S_3_BASE_ADDRESS, + .pclken = { + .enr = CONFIG_I2S_3_CLOCK_BITS, + .bus = CONFIG_I2S_3_CLOCK_BUS, + }, + .i2s_clk_sel = CLK_SEL_1, + .irq_config = i2s_stm32_irq_config_func_3, +}; + +struct queue_item rx_3_ring_buf[CONFIG_I2S_STM32_RX_BLOCK_COUNT + 1]; +struct queue_item tx_3_ring_buf[CONFIG_I2S_STM32_TX_BLOCK_COUNT + 1]; + +static struct i2s_stm32_data i2s_stm32_data_3 = { + .dma_name = I2S3_DMA_NAME, + .rx = { + .dma_channel = I2S3_DMA_CHAN_RX, + .dma_cfg = { + .block_count = 1, + .dma_slot = I2S3_DMA_SLOT_RX, + .channel_direction = PERIPHERAL_TO_MEMORY, + .source_data_size = 1, /* 16bit default */ + .dest_data_size = 1, /* 16bit default */ + .source_burst_length = 0, /* SINGLE transfer */ + .dest_burst_length = 1, + .dma_callback = dma_rx_callback, + }, + .stream_start = rx_stream_start, + .stream_disable = rx_stream_disable, + .queue_drop = rx_queue_drop, + .mem_block_queue.buf = rx_3_ring_buf, + .mem_block_queue.len = ARRAY_SIZE(rx_3_ring_buf), + }, + .tx = { + .dma_channel = I2S3_DMA_CHAN_TX, + .dma_cfg = { + .block_count = 1, + .dma_slot = I2S3_DMA_SLOT_TX, + .channel_direction = MEMORY_TO_PERIPHERAL, + .source_data_size = 1, /* 16bit default */ + .dest_data_size = 1, /* 16bit default */ + .source_burst_length = 1, + .dest_burst_length = 0, /* SINGLE transfer */ + 
.dma_callback = dma_tx_callback, + }, + .stream_start = tx_stream_start, + .stream_disable = tx_stream_disable, + .queue_drop = tx_queue_drop, + .mem_block_queue.buf = tx_3_ring_buf, + .mem_block_queue.len = ARRAY_SIZE(tx_3_ring_buf), + }, +}; +DEVICE_AND_API_INIT(i2s_stm32_3, CONFIG_I2S_3_NAME, &i2s_stm32_initialize, + &i2s_stm32_data_3, &i2s_stm32_config_3, POST_KERNEL, + CONFIG_I2S_INIT_PRIORITY, &i2s_stm32_driver_api); + +static void i2s_stm32_irq_config_func_3(struct device *dev) +{ + IRQ_CONNECT(CONFIG_I2S_3_IRQ, CONFIG_I2S_3_IRQ_PRI, i2s_stm32_isr, + DEVICE_GET(i2s_stm32_3), 0); + irq_enable(CONFIG_I2S_3_IRQ); +} + +#endif /* CONFIG_I2S_3 */ + +#ifdef CONFIG_I2S_4 +static struct device DEVICE_NAME_GET(i2s_stm32_4); + +static void i2s_stm32_irq_config_func_4(struct device *dev); + +static const struct i2s_stm32_cfg i2s_stm32_config_4 = { + .i2s = (SPI_TypeDef *) CONFIG_I2S_4_BASE_ADDRESS, + .pclken = { + .enr = CONFIG_I2S_4_CLOCK_BITS, + .bus = CONFIG_I2S_4_CLOCK_BUS, + }, + .i2s_clk_sel = CLK_SEL_2, + .irq_config = i2s_stm32_irq_config_func_4, +}; + +struct queue_item rx_4_ring_buf[CONFIG_I2S_STM32_RX_BLOCK_COUNT + 1]; +struct queue_item tx_4_ring_buf[CONFIG_I2S_STM32_TX_BLOCK_COUNT + 1]; + +static struct i2s_stm32_data i2s_stm32_data_4 = { + .dma_name = I2S4_DMA_NAME, + .rx = { + .dma_channel = I2S4_DMA_CHAN_RX, + .dma_cfg = { + .block_count = 1, + .dma_slot = I2S4_DMA_SLOT_RX, + .channel_direction = PERIPHERAL_TO_MEMORY, + .source_data_size = 1, /* 16bit default */ + .dest_data_size = 1, /* 16bit default */ + .source_burst_length = 0, /* SINGLE transfer */ + .dest_burst_length = 1, + .dma_callback = dma_rx_callback, + }, + .stream_start = rx_stream_start, + .stream_disable = rx_stream_disable, + .queue_drop = rx_queue_drop, + .mem_block_queue.buf = rx_4_ring_buf, + .mem_block_queue.len = ARRAY_SIZE(rx_4_ring_buf), + }, + .tx = { + .dma_channel = I2S4_DMA_CHAN_TX, + .dma_cfg = { + .block_count = 1, + .dma_slot = I2S4_DMA_SLOT_TX, + .channel_direction = 
MEMORY_TO_PERIPHERAL, + .source_data_size = 1, /* 16bit default */ + .dest_data_size = 1, /* 16bit default */ + .source_burst_length = 1, + .dest_burst_length = 0, /* SINGLE transfer */ + .dma_callback = dma_tx_callback, + }, + .stream_start = tx_stream_start, + .stream_disable = tx_stream_disable, + .queue_drop = tx_queue_drop, + .mem_block_queue.buf = tx_4_ring_buf, + .mem_block_queue.len = ARRAY_SIZE(tx_4_ring_buf), + }, +}; +DEVICE_AND_API_INIT(i2s_stm32_4, CONFIG_I2S_4_NAME, &i2s_stm32_initialize, + &i2s_stm32_data_4, &i2s_stm32_config_4, POST_KERNEL, + CONFIG_I2S_INIT_PRIORITY, &i2s_stm32_driver_api); + +static void i2s_stm32_irq_config_func_4(struct device *dev) +{ + IRQ_CONNECT(CONFIG_I2S_4_IRQ, CONFIG_I2S_4_IRQ_PRI, i2s_stm32_isr, + DEVICE_GET(i2s_stm32_4), 0); + irq_enable(CONFIG_I2S_4_IRQ); +} + +#endif /* CONFIG_I2S_4 */ + +#ifdef CONFIG_I2S_5 +static struct device DEVICE_NAME_GET(i2s_stm32_5); + +static void i2s_stm32_irq_config_func_5(struct device *dev); + +static const struct i2s_stm32_cfg i2s_stm32_config_5 = { + .i2s = (SPI_TypeDef *) CONFIG_I2S_5_BASE_ADDRESS, + .pclken = { + .enr = CONFIG_I2S_5_CLOCK_BITS, + .bus = CONFIG_I2S_5_CLOCK_BUS, + }, + .i2s_clk_sel = CLK_SEL_2, + .irq_config = i2s_stm32_irq_config_func_5, +}; + +struct queue_item rx_5_ring_buf[CONFIG_I2S_STM32_RX_BLOCK_COUNT + 1]; +struct queue_item tx_5_ring_buf[CONFIG_I2S_STM32_TX_BLOCK_COUNT + 1]; + +static struct i2s_stm32_data i2s_stm32_data_5 = { + .dma_name = I2S5_DMA_NAME, + .rx = { + .dma_channel = I2S5_DMA_CHAN_RX, + .dma_cfg = { + .block_count = 1, + .dma_slot = I2S5_DMA_SLOT_RX, + .channel_direction = PERIPHERAL_TO_MEMORY, + .source_data_size = 1, /* 16bit default */ + .dest_data_size = 1, /* 16bit default */ + .source_burst_length = 0, /* SINGLE transfer */ + .dest_burst_length = 1, + .dma_callback = dma_rx_callback, + }, + .stream_start = rx_stream_start, + .stream_disable = rx_stream_disable, + .queue_drop = rx_queue_drop, + .mem_block_queue.buf = rx_5_ring_buf, + 
+		.mem_block_queue.len = ARRAY_SIZE(rx_5_ring_buf),
+	},
+	.tx = {
+		.dma_channel = I2S5_DMA_CHAN_TX,
+		.dma_cfg = {
+			.block_count = 1,
+			.dma_slot = I2S5_DMA_SLOT_TX,
+			.channel_direction = MEMORY_TO_PERIPHERAL,
+			.source_data_size = 1, /* 16bit default */
+			.dest_data_size = 1, /* 16bit default */
+			.source_burst_length = 1,
+			.dest_burst_length = 0, /* SINGLE transfer */
+			.dma_callback = dma_tx_callback,
+		},
+		.stream_start = tx_stream_start,
+		.stream_disable = tx_stream_disable,
+		.queue_drop = tx_queue_drop,
+		.mem_block_queue.buf = tx_5_ring_buf,
+		.mem_block_queue.len = ARRAY_SIZE(tx_5_ring_buf),
+	},
+};
+DEVICE_AND_API_INIT(i2s_stm32_5, CONFIG_I2S_5_NAME, &i2s_stm32_initialize,
+		    &i2s_stm32_data_5, &i2s_stm32_config_5, POST_KERNEL,
+		    CONFIG_I2S_INIT_PRIORITY, &i2s_stm32_driver_api);
+
+static void i2s_stm32_irq_config_func_5(struct device *dev)
+{
+	IRQ_CONNECT(CONFIG_I2S_5_IRQ, CONFIG_I2S_5_IRQ_PRI, i2s_stm32_isr,
+		    DEVICE_GET(i2s_stm32_5), 0);
+	irq_enable(CONFIG_I2S_5_IRQ);
+}
+
+#endif /* CONFIG_I2S_5 */
diff --git a/drivers/i2s/i2s_ll_stm32.h b/drivers/i2s/i2s_ll_stm32.h
new file mode 100644
index 00000000000..fb8edd0fcf3
--- /dev/null
+++ b/drivers/i2s/i2s_ll_stm32.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2018 STMicroelectronics
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _STM32_I2S_H_
+#define _STM32_I2S_H_
+
+#ifdef CONFIG_I2S_STM32_USE_PLLI2S_ENABLE
+
+#if defined(RCC_CFGR_I2SSRC)
+/* single selector for the I2S clock source (SEL_1 == SEL_2) */
+#define CLK_SEL_1 LL_RCC_I2S1_CLKSOURCE_PLLI2S
+#define CLK_SEL_2 LL_RCC_I2S1_CLKSOURCE_PLLI2S
+#else
+#if defined(RCC_DCKCFGR_I2SSRC)
+/* single selector for the I2S clock source (SEL_1 == SEL_2) */
+#define CLK_SEL_1 LL_RCC_I2S1_CLKSOURCE_PLL
+#define CLK_SEL_2 LL_RCC_I2S1_CLKSOURCE_PLL
+#else
+#if defined(RCC_DCKCFGR_I2S1SRC) && defined(RCC_DCKCFGR_I2S2SRC)
+/* double selector for the I2S clock source (SEL_1 != SEL_2) */
+#define CLK_SEL_1 LL_RCC_I2S1_CLKSOURCE_PLLI2S
+#define CLK_SEL_2 LL_RCC_I2S2_CLKSOURCE_PLLI2S
+#endif /* RCC_DCKCFGR_I2S1SRC && RCC_DCKCFGR_I2S2SRC */
+#endif /* RCC_DCKCFGR_I2SSRC */
+#endif /* RCC_CFGR_I2SSRC */
+
+#else
+
+#if defined(RCC_CFGR_I2SSRC)
+/* single selector for the I2S clock source (SEL_1 == SEL_2) */
+#define CLK_SEL_1 LL_RCC_I2S1_CLKSOURCE_PIN
+#define CLK_SEL_2 LL_RCC_I2S1_CLKSOURCE_PIN
+#else
+#if defined(RCC_DCKCFGR_I2SSRC)
+/* single selector for the I2S clock source (SEL_1 == SEL_2) */
+#define CLK_SEL_1 LL_RCC_I2S1_CLKSOURCE_PLLSRC
+#define CLK_SEL_2 LL_RCC_I2S1_CLKSOURCE_PLLSRC
+#else
+#if defined(RCC_DCKCFGR_I2S1SRC) && defined(RCC_DCKCFGR_I2S2SRC)
+/* double selector for the I2S clock source (SEL_1 != SEL_2) */
+#define CLK_SEL_1 LL_RCC_I2S1_CLKSOURCE_PLLSRC
+#define CLK_SEL_2 LL_RCC_I2S2_CLKSOURCE_PLLSRC
+#endif /* RCC_DCKCFGR_I2S1SRC && RCC_DCKCFGR_I2S2SRC */
+#endif /* RCC_DCKCFGR_I2SSRC */
+#endif /* RCC_CFGR_I2SSRC */
+
+#endif /* CONFIG_I2S_STM32_USE_PLLI2S_ENABLE */
+
+#ifdef CONFIG_SOC_SERIES_STM32F4X
+#define I2S1_DMA_NAME CONFIG_DMA_2_NAME
+#define I2S1_DMA_CHAN_RX 2
+#define I2S1_DMA_SLOT_RX 3
+#define I2S1_DMA_CHAN_TX 3
+#define I2S1_DMA_SLOT_TX 3
+#define I2S2_DMA_NAME CONFIG_DMA_1_NAME
+#define I2S2_DMA_CHAN_RX 3
+#define I2S2_DMA_SLOT_RX 0
+#define I2S2_DMA_CHAN_TX 4
+#define I2S2_DMA_SLOT_TX 0
+#define I2S3_DMA_NAME CONFIG_DMA_1_NAME
+#define I2S3_DMA_CHAN_RX 0
+#define I2S3_DMA_SLOT_RX 0
+#define I2S3_DMA_CHAN_TX 5
+#define I2S3_DMA_SLOT_TX 0
+#define I2S4_DMA_NAME CONFIG_DMA_2_NAME
+#define I2S4_DMA_CHAN_RX 0
+#define I2S4_DMA_SLOT_RX 4
+#define I2S4_DMA_CHAN_TX 1
+#define I2S4_DMA_SLOT_TX 4
+#define I2S5_DMA_NAME CONFIG_DMA_2_NAME
+#define I2S5_DMA_CHAN_RX 5
+#define I2S5_DMA_SLOT_RX 7
+#define I2S5_DMA_CHAN_TX 6
+#define I2S5_DMA_SLOT_TX 7
+#endif
+
+#define DEV_CFG(dev) \
+	(const struct i2s_stm32_cfg * const)((dev)->config->config_info)
+#define DEV_DATA(dev) \
+	((struct i2s_stm32_data *const)(dev)->driver_data)
+
+struct queue_item {
+	void *mem_block;
+	size_t size;
+};
+
+/* Minimal ring buffer implementation */
+struct ring_buf {
+	struct queue_item *buf;
+	u16_t len;  /* capacity of buf, in elements */
+	u16_t head; /* consumer index */
+	u16_t tail; /* producer index */
+};
+
+/* Device constant configuration parameters */
+struct i2s_stm32_cfg {
+	SPI_TypeDef *i2s;              /* I2S/SPI controller registers */
+	struct stm32_pclken pclken;    /* peripheral clock gate */
+	u32_t i2s_clk_sel;             /* CLK_SEL_1 / CLK_SEL_2 source */
+	void (*irq_config)(struct device *dev);
+};
+
+struct stream {
+	s32_t state;
+	struct k_sem sem;
+	u32_t dma_channel;
+	struct dma_config dma_cfg;
+	struct i2s_config cfg;
+	struct ring_buf mem_block_queue; /* queued mem blocks for this stream */
+	void *mem_block;                 /* block currently owned by DMA */
+	bool last_block;
+	bool master;
+	int (*stream_start)(struct stream *, struct device *dev);
+	void (*stream_disable)(struct stream *, struct device *dev);
+	void (*queue_drop)(struct stream *);
+};
+
+/* Device run time data */
+struct i2s_stm32_data {
+	struct device *dev_dma;
+	const char *dma_name;
+	struct stream rx;
+	struct stream tx;
+};
+
+#endif /* _STM32_I2S_H_ */