spi_nxp_lpspi: Move RTIO driver into own file

There is (almost) a whole separate driver for RTIO,
move this code to its own file and move shared code
and definitions to a common file.

Signed-off-by: Declan Snyder <declan.snyder@nxp.com>
This commit is contained in:
Declan Snyder 2024-12-09 11:35:48 -06:00 committed by Benjamin Cabé
commit 1d5cd0308c
6 changed files with 548 additions and 389 deletions

View file

@ -1,3 +1,5 @@
# Copyright 2024 NXP
zephyr_library_sources_ifdef(CONFIG_SPI_MCUX_LPSPI spi_mcux_lpspi.c)
zephyr_library_sources_ifdef(CONFIG_SPI_MCUX_LPSPI spi_nxp_lpspi_common.c)
zephyr_library_sources_ifdef(CONFIG_SPI_MCUX_LPSPI_NORMAL spi_mcux_lpspi.c)
zephyr_library_sources_ifdef(CONFIG_SPI_MCUX_LPSPI_RTIO spi_mcux_lpspi_rtio.c)

View file

@ -2,15 +2,17 @@
# SPDX-License-Identifier: Apache-2.0
config SPI_MCUX_LPSPI
bool "MCUX LPSPI driver"
bool "NXP LPSPI peripheral"
default y
depends on DT_HAS_NXP_LPSPI_ENABLED
depends on CLOCK_CONTROL
select PINCTRL
help
Enable support for MCUX LPSPI driver.
Enable support for NXP LPSPI.
if SPI_MCUX_LPSPI
if !SPI_RTIO
config SPI_MCUX_LPSPI_DMA
bool "MCUX LPSPI SPI DMA Support"
select DMA
@ -18,7 +20,20 @@ config SPI_MCUX_LPSPI_DMA
Enable the SPI DMA mode for SPI instances
that enable dma channels in their device tree node.
config SPI_MCUX_LPSPI_NORMAL
bool "NXP MCUX LPSPI driver"
default y
help
Use the traditional (non-RTIO) SPI driver for NXP LPSPI.
endif # !SPI_RTIO
if SPI_RTIO
config SPI_MCUX_LPSPI_RTIO
bool "NXP MCUX LPSPI RTIO based driver"
default y
help
Use the RTIO-based SPI driver for the NXP LPSPI.
config SPI_MCUX_RTIO_SQ_SIZE
int "number of available submission queue entries"
default 8 # sensible default that covers most common spi transactions

View file

@ -6,110 +6,10 @@
#define DT_DRV_COMPAT nxp_lpspi
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/irq.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_mcux_lpspi, CONFIG_SPI_LOG_LEVEL);
#ifdef CONFIG_SPI_RTIO
#include <zephyr/drivers/spi/rtio.h>
#endif
#include "../spi_context.h"
#if CONFIG_NXP_LP_FLEXCOMM
#include <zephyr/drivers/mfd/nxp_lp_flexcomm.h>
#endif
#include <fsl_lpspi.h>
/* If any hardware revisions change this, make it into a DT property.
 * DON'T make #ifdefs here by platform.
 */
#define LPSPI_CHIP_SELECT_COUNT 4
#define LPSPI_MIN_FRAME_SIZE_BITS 8
/* Required by DEVICE_MMIO_NAMED_* macros */
#define DEV_CFG(_dev) ((const struct spi_mcux_config *)(_dev)->config)
#define DEV_DATA(_dev) ((struct spi_mcux_data *)(_dev)->data)
/* Argument to MCUX SDK IRQ handler */
#define LPSPI_IRQ_HANDLE_ARG COND_CODE_1(CONFIG_NXP_LP_FLEXCOMM, (LPSPI_GetInstance(base)), (base))
/* flag for SDK API for master transfers */
#define LPSPI_MASTER_XFER_CFG_FLAGS(slave) \
kLPSPI_MasterPcsContinuous | (slave << LPSPI_MASTER_PCS_SHIFT)
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
#include <zephyr/drivers/dma.h>
/* These flags are arbitrary */
#define LPSPI_DMA_ERROR_FLAG BIT(0)
#define LPSPI_DMA_RX_DONE_FLAG BIT(1)
#define LPSPI_DMA_TX_DONE_FLAG BIT(2)
#define LPSPI_DMA_DONE_FLAG (LPSPI_DMA_RX_DONE_FLAG | LPSPI_DMA_TX_DONE_FLAG)
struct spi_dma_stream {
const struct device *dma_dev;
uint32_t channel; /* stores the channel for dma */
struct dma_config dma_cfg;
struct dma_block_config dma_blk_cfg;
};
#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
struct spi_mcux_config {
DEVICE_MMIO_NAMED_ROM(reg_base);
const struct device *clock_dev;
clock_control_subsys_t clock_subsys;
void (*irq_config_func)(const struct device *dev);
uint32_t pcs_sck_delay;
uint32_t sck_pcs_delay;
uint32_t transfer_delay;
const struct pinctrl_dev_config *pincfg;
lpspi_pin_config_t data_pin_config;
};
struct spi_mcux_data {
DEVICE_MMIO_NAMED_RAM(reg_base);
const struct device *dev;
lpspi_master_handle_t handle;
struct spi_context ctx;
size_t transfer_len;
#ifdef CONFIG_SPI_RTIO
struct spi_rtio *rtio_ctx;
#endif
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
volatile uint32_t status_flags;
struct spi_dma_stream dma_rx;
struct spi_dma_stream dma_tx;
/* dummy value used for transferring NOP when tx buf is null */
uint32_t dummy_buffer;
#endif
};
static int spi_mcux_transfer_next_packet(const struct device *dev);
static void spi_mcux_isr(const struct device *dev)
{
struct spi_mcux_data *data = dev->data;
LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
LPSPI_MasterTransferHandleIRQ(LPSPI_IRQ_HANDLE_ARG, &data->handle);
}
static void spi_mcux_master_callback(LPSPI_Type *base, lpspi_master_handle_t *handle,
status_t status, void *userData)
{
struct spi_mcux_data *data = userData;
spi_context_update_tx(&data->ctx, 1, data->transfer_len);
spi_context_update_rx(&data->ctx, 1, data->transfer_len);
spi_mcux_transfer_next_packet(data->dev);
}
#include "spi_nxp_lpspi_priv.h"
static int spi_mcux_transfer_next_packet(const struct device *dev)
{
@ -142,88 +42,31 @@ static int spi_mcux_transfer_next_packet(const struct device *dev)
return 0;
}
static int spi_mcux_configure(const struct device *dev, const struct spi_config *spi_cfg)
static void lpspi_isr(const struct device *dev)
{
const struct spi_mcux_config *config = dev->config;
struct spi_mcux_data *data = dev->data;
LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
uint32_t word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
lpspi_master_config_t master_config;
uint32_t clock_freq;
int ret;
if (spi_cfg->operation & SPI_HALF_DUPLEX) {
/* the IP DOES support half duplex, need to implement driver support */
LOG_ERR("Half-duplex not supported");
return -ENOTSUP;
}
if (word_size < 8 || (word_size % 32 == 1)) {
/* Zephyr word size == hardware FRAME size (not word size)
* Max frame size: 4096 bits
* (zephyr field is 6 bit wide for max 64 bit size, no need to check)
* Min frame size: 8 bits.
* Minimum hardware word size is 2. Since this driver is intended to work
* for 32 bit platforms, and 64 bits is max size, then only 33 and 1 are invalid.
*/
LOG_ERR("Word size %d not allowed", word_size);
return -EINVAL;
}
if (spi_cfg->slave > LPSPI_CHIP_SELECT_COUNT) {
LOG_ERR("Peripheral %d select exceeds max %d", spi_cfg->slave,
LPSPI_CHIP_SELECT_COUNT - 1);
return -EINVAL;
}
ret = clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq);
if (ret) {
return ret;
}
if (data->ctx.config != NULL) {
/* Setting the baud rate in LPSPI_MasterInit requires module to be disabled. Only
* disable if already configured, otherwise the clock is not enabled and the
* CR register cannot be written.
*/
LPSPI_Enable(base, false);
while ((base->CR & LPSPI_CR_MEN_MASK) != 0U) {
/* Wait until LPSPI is disabled. Datasheet:
* After writing 0, MEN (Module Enable) remains set until the LPSPI has
* completed the current transfer and is idle.
*/
}
}
data->ctx.config = spi_cfg;
LPSPI_MasterGetDefaultConfig(&master_config);
master_config.bitsPerFrame = word_size;
master_config.cpol = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
? kLPSPI_ClockPolarityActiveLow
: kLPSPI_ClockPolarityActiveHigh;
master_config.cpha = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
? kLPSPI_ClockPhaseSecondEdge
: kLPSPI_ClockPhaseFirstEdge;
master_config.direction =
(spi_cfg->operation & SPI_TRANSFER_LSB) ? kLPSPI_LsbFirst : kLPSPI_MsbFirst;
master_config.baudRate = spi_cfg->frequency;
master_config.pcsToSckDelayInNanoSec = config->pcs_sck_delay;
master_config.lastSckToPcsDelayInNanoSec = config->sck_pcs_delay;
master_config.betweenTransferDelayInNanoSec = config->transfer_delay;
master_config.pinCfg = config->data_pin_config;
LPSPI_MasterInit(base, &master_config, clock_freq);
LPSPI_SetDummyData(base, 0);
if (IS_ENABLED(CONFIG_DEBUG)) {
base->CR |= LPSPI_CR_DBGEN_MASK;
}
return 0;
LPSPI_MasterTransferHandleIRQ(LPSPI_IRQ_HANDLE_ARG, &data->handle);
}
static void spi_mcux_master_callback(LPSPI_Type *base, lpspi_master_handle_t *handle,
status_t status, void *userData)
{
struct spi_mcux_data *data = userData;
spi_context_update_tx(&data->ctx, 1, data->transfer_len);
spi_context_update_rx(&data->ctx, 1, data->transfer_len);
spi_mcux_transfer_next_packet(data->dev);
}
/* These flags are arbitrary */
#define LPSPI_DMA_ERROR_FLAG BIT(0)
#define LPSPI_DMA_RX_DONE_FLAG BIT(1)
#define LPSPI_DMA_TX_DONE_FLAG BIT(2)
#define LPSPI_DMA_DONE_FLAG (LPSPI_DMA_RX_DONE_FLAG | LPSPI_DMA_TX_DONE_FLAG)
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
static bool lpspi_inst_has_dma(const struct spi_mcux_data *data)
{
@ -524,131 +367,6 @@ out:
#define transceive_dma(...) 0
#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
#ifdef CONFIG_SPI_RTIO
static void spi_mcux_iodev_complete(const struct device *dev, int status);
static void spi_mcux_master_rtio_callback(LPSPI_Type *base, lpspi_master_handle_t *handle,
status_t status, void *userData)
{
struct spi_mcux_data *data = userData;
struct spi_rtio *rtio_ctx = data->rtio_ctx;
if (rtio_ctx->txn_head != NULL) {
spi_mcux_iodev_complete(data->dev, status);
return;
}
spi_mcux_master_callback(base, handle, status, userData);
}
static void spi_mcux_iodev_start(const struct device *dev)
{
struct spi_mcux_data *data = dev->data;
struct spi_rtio *rtio_ctx = data->rtio_ctx;
struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe;
struct spi_dt_spec *spi_dt_spec = sqe->iodev->data;
struct spi_config *spi_cfg = &spi_dt_spec->config;
LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
lpspi_transfer_t transfer;
status_t status;
status = spi_mcux_configure(dev, spi_cfg);
if (status) {
LOG_ERR("Error configuring lpspi");
return;
}
LPSPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_master_rtio_callback, data);
transfer.configFlags = LPSPI_MASTER_XFER_CFG_FLAGS(spi_cfg->slave);
switch (sqe->op) {
case RTIO_OP_RX:
transfer.txData = NULL;
transfer.rxData = sqe->rx.buf;
transfer.dataSize = sqe->rx.buf_len;
break;
case RTIO_OP_TX:
transfer.rxData = NULL;
transfer.txData = sqe->tx.buf;
transfer.dataSize = sqe->tx.buf_len;
break;
case RTIO_OP_TINY_TX:
transfer.rxData = NULL;
transfer.txData = sqe->tiny_tx.buf;
transfer.dataSize = sqe->tiny_tx.buf_len;
break;
case RTIO_OP_TXRX:
transfer.txData = sqe->txrx.tx_buf;
transfer.rxData = sqe->txrx.rx_buf;
transfer.dataSize = sqe->txrx.buf_len;
break;
default:
LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);
spi_mcux_iodev_complete(dev, -EINVAL);
return;
}
data->transfer_len = transfer.dataSize;
spi_context_cs_control(&data->ctx, true);
status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
if (status != kStatus_Success) {
LOG_ERR("Transfer could not start on %s: %d", dev->name, status);
spi_mcux_iodev_complete(dev, -EIO);
}
}
static void spi_mcux_iodev_complete(const struct device *dev, int status)
{
struct spi_mcux_data *data = dev->data;
struct spi_rtio *rtio_ctx = data->rtio_ctx;
if (!status && rtio_ctx->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION) {
rtio_ctx->txn_curr = rtio_txn_next(rtio_ctx->txn_curr);
spi_mcux_iodev_start(dev);
return;
}
/** De-assert CS-line to space from next transaction */
spi_context_cs_control(&data->ctx, false);
if (spi_rtio_complete(rtio_ctx, status)) {
spi_mcux_iodev_start(dev);
}
}
static void spi_mcux_iodev_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
{
struct spi_mcux_data *data = dev->data;
struct spi_rtio *rtio_ctx = data->rtio_ctx;
if (spi_rtio_submit(rtio_ctx, iodev_sqe)) {
spi_mcux_iodev_start(dev);
}
}
static inline int transceive_rtio(const struct device *dev, const struct spi_config *spi_cfg,
const struct spi_buf_set *tx_bufs,
const struct spi_buf_set *rx_bufs)
{
struct spi_mcux_data *data = dev->data;
struct spi_rtio *rtio_ctx = data->rtio_ctx;
int ret;
spi_context_lock(&data->ctx, false, NULL, NULL, spi_cfg);
ret = spi_rtio_transceive(rtio_ctx, spi_cfg, tx_bufs, rx_bufs);
spi_context_release(&data->ctx, ret);
return ret;
}
#else
#define transceive_rtio(...) 0
#endif /* CONFIG_SPI_RTIO */
static int transceive(const struct device *dev, const struct spi_config *spi_cfg,
const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
bool asynchronous, spi_callback_t cb, void *userdata)
@ -692,10 +410,6 @@ static int spi_mcux_transceive(const struct device *dev, const struct spi_config
return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, async, cb, userdata);
}
if (IS_ENABLED(CONFIG_SPI_RTIO)) {
return transceive_rtio(dev, spi_cfg, tx_bufs, rx_bufs);
}
return transceive(dev, spi_cfg, tx_bufs, rx_bufs, async, cb, userdata);
}
@ -716,22 +430,10 @@ static int spi_mcux_transceive_async(const struct device *dev, const struct spi_
}
#endif /* CONFIG_SPI_ASYNC */
static int spi_mcux_release(const struct device *dev, const struct spi_config *spi_cfg)
{
struct spi_mcux_data *data = dev->data;
spi_context_unlock_unconditionally(&data->ctx);
return 0;
}
static DEVICE_API(spi, spi_mcux_driver_api) = {
.transceive = spi_mcux_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
.transceive_async = spi_mcux_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
.iodev_submit = spi_mcux_iodev_submit,
#endif
.release = spi_mcux_release,
};
@ -758,19 +460,9 @@ static int lpspi_dma_devs_ready(struct spi_mcux_data *data)
static int spi_mcux_init(const struct device *dev)
{
const struct spi_mcux_config *config = dev->config;
struct spi_mcux_data *data = dev->data;
int err = 0;
DEVICE_MMIO_NAMED_MAP(dev, reg_base, K_MEM_CACHE_NONE | K_MEM_DIRECT_MAP);
data->dev = dev;
if (!device_is_ready(config->clock_dev)) {
LOG_ERR("clock control device not ready");
return -ENODEV;
}
if (IS_ENABLED(CONFIG_SPI_MCUX_LPSPI_DMA) && lpspi_inst_has_dma(data)) {
err = lpspi_dma_devs_ready(data);
}
@ -778,32 +470,18 @@ static int spi_mcux_init(const struct device *dev)
return err;
}
err = spi_context_cs_configure_all(&data->ctx);
if (err < 0) {
return err;
}
err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
err = spi_nxp_init_common(dev);
if (err) {
return err;
}
config->irq_config_func(dev);
#ifdef CONFIG_SPI_RTIO
spi_rtio_init(data->rtio_ctx, dev);
#endif
spi_context_unlock_unconditionally(&data->ctx);
return 0;
}
#define SPI_MCUX_RTIO_DEFINE(n) \
SPI_RTIO_DEFINE(spi_mcux_rtio_##n, CONFIG_SPI_MCUX_RTIO_SQ_SIZE, \
CONFIG_SPI_MCUX_RTIO_SQ_SIZE)
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
#define SPI_DMA_CHANNELS(n) \
#define SPI_DMA_CHANNELS(n) \
IF_ENABLED( \
DT_INST_DMAS_HAS_NAME(n, tx), \
(.dma_tx = {.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \
@ -813,7 +491,7 @@ static int spi_mcux_init(const struct device *dev)
.source_data_size = 1, \
.dest_data_size = 1, \
.block_count = 1, \
.dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, tx, source)}},)) \
.dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, tx, source)}},)) \
IF_ENABLED( \
DT_INST_DMAS_HAS_NAME(n, rx), \
(.dma_rx = {.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, rx)), \
@ -828,49 +506,15 @@ static int spi_mcux_init(const struct device *dev)
#define SPI_DMA_CHANNELS(n)
#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
#if defined(CONFIG_NXP_LP_FLEXCOMM)
#define SPI_MCUX_LPSPI_IRQ_FUNC(n) \
nxp_lp_flexcomm_setirqhandler(DEVICE_DT_GET(DT_INST_PARENT(n)), DEVICE_DT_INST_GET(n), \
LP_FLEXCOMM_PERIPH_LPSPI, spi_mcux_isr);
#else
#define SPI_MCUX_LPSPI_IRQ_FUNC(n) \
IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_mcux_isr, \
DEVICE_DT_INST_GET(n), 0); \
irq_enable(DT_INST_IRQN(n));
#endif
#define SPI_MCUX_LPSPI_INIT(n) \
PINCTRL_DT_INST_DEFINE(n); \
COND_CODE_1(CONFIG_SPI_RTIO, (SPI_MCUX_RTIO_DEFINE(n)), ()); \
SPI_NXP_LPSPI_COMMON_INIT(n) \
SPI_MCUX_LPSPI_CONFIG_INIT(n) \
\
static void spi_mcux_config_func_##n(const struct device *dev) \
{ \
SPI_MCUX_LPSPI_IRQ_FUNC(n) \
} \
static struct spi_mcux_data spi_mcux_data_##n = {SPI_NXP_LPSPI_COMMON_DATA_INIT(n) \
SPI_DMA_CHANNELS(n)}; \
\
static const struct spi_mcux_config spi_mcux_config_##n = { \
DEVICE_MMIO_NAMED_ROM_INIT(reg_base, DT_DRV_INST(n)), \
.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \
.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \
.irq_config_func = spi_mcux_config_func_##n, \
.pcs_sck_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, pcs_sck_delay), \
DT_INST_PROP(n, pcs_sck_delay)), \
.sck_pcs_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, sck_pcs_delay), \
DT_INST_PROP(n, sck_pcs_delay)), \
.transfer_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, transfer_delay), \
DT_INST_PROP(n, transfer_delay)), \
.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
.data_pin_config = DT_INST_ENUM_IDX(n, data_pin_config), \
}; \
\
static struct spi_mcux_data spi_mcux_data_##n = { \
SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx), \
SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx), \
SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) SPI_DMA_CHANNELS(n) \
IF_ENABLED(CONFIG_SPI_RTIO, (.rtio_ctx = &spi_mcux_rtio_##n,)) \
}; \
\
SPI_DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL, &spi_mcux_data_##n, &spi_mcux_config_##n,\
POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, &spi_mcux_driver_api);
SPI_DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL, &spi_mcux_data_##n, \
&spi_mcux_config_##n, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \
&spi_mcux_driver_api);
DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_LPSPI_INIT)

View file

@ -0,0 +1,236 @@
/*
* Copyright 2023-2024 NXP
*
* SPDX-License-Identifier: Apache-2.0
*/
#define DT_DRV_COMPAT nxp_lpspi
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_mcux_lpspi_rtio, CONFIG_SPI_LOG_LEVEL);
#include <zephyr/drivers/spi/rtio.h>
#include "spi_nxp_lpspi_priv.h"
/* Start the next continuous chunk of the ongoing spi_context transfer, or
 * finish the whole transfer (deassert CS, signal completion) when no data
 * remains. Called from the master callback after each chunk completes.
 * Returns 0 on success or a negative errno if the SDK refuses the transfer.
 */
static int spi_mcux_transfer_next_packet(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	struct spi_context *ctx = &data->ctx;
	size_t max_chunk = spi_context_max_continuous_chunk(ctx);
	lpspi_transfer_t transfer;
	status_t status;
	if (max_chunk == 0) {
		/* Nothing left to move: release CS and wake any waiter. */
		spi_context_cs_control(ctx, false);
		spi_context_complete(ctx, dev, 0);
		return 0;
	}
	data->transfer_len = max_chunk;
	transfer.configFlags = LPSPI_MASTER_XFER_CFG_FLAGS(ctx->config->slave);
	/* A NULL buffer tells the SDK to send dummy data / discard rx. */
	transfer.txData = (ctx->tx_len == 0 ? NULL : ctx->tx_buf);
	transfer.rxData = (ctx->rx_len == 0 ? NULL : ctx->rx_buf);
	transfer.dataSize = max_chunk;
	status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start on %s: %d", dev->name, status);
		return status == kStatus_LPSPI_Busy ? -EBUSY : -EINVAL;
	}
	return 0;
}
static void spi_mcux_iodev_complete(const struct device *dev, int status);
/* SDK master-transfer completion callback. Routes completion either to the
 * RTIO submission path (when an RTIO transaction is in flight, i.e.
 * txn_head is set) or to the classic spi_context chunked-transfer path
 * used by transceive_rtio().
 */
static void spi_mcux_master_rtio_callback(LPSPI_Type *base, lpspi_master_handle_t *handle,
					  status_t status, void *userData)
{
	struct spi_mcux_data *data = userData;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	if (rtio_ctx->txn_head != NULL) {
		spi_mcux_iodev_complete(data->dev, status);
		return;
	}
	/* spi_context path: account for the chunk just transferred, then
	 * start the next chunk (or complete the context).
	 */
	spi_context_update_tx(&data->ctx, 1, data->transfer_len);
	spi_context_update_rx(&data->ctx, 1, data->transfer_len);
	spi_mcux_transfer_next_packet(data->dev);
}
/* Start the LPSPI transfer described by the current RTIO SQE. Configures
 * the peripheral from the iodev's spi_config, maps the RTIO op code onto
 * an SDK transfer descriptor, asserts CS and kicks off a non-blocking
 * master transfer. On any failure the submission is completed with an
 * error so the queue keeps draining.
 */
static void spi_mcux_iodev_start(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe;
	struct spi_dt_spec *spi_dt_spec = sqe->iodev->data;
	struct spi_config *spi_cfg = &spi_dt_spec->config;
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	lpspi_transfer_t transfer;
	status_t status;
	status = spi_mcux_configure(dev, spi_cfg);
	if (status) {
		LOG_ERR("Error configuring lpspi");
		return;
	}
	/* (Re)register the RTIO completion callback for this transfer. */
	LPSPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_master_rtio_callback, data);
	transfer.configFlags = LPSPI_MASTER_XFER_CFG_FLAGS(spi_cfg->slave);
	/* Translate the RTIO op code into tx/rx buffers for the SDK. */
	switch (sqe->op) {
	case RTIO_OP_RX:
		transfer.txData = NULL;
		transfer.rxData = sqe->rx.buf;
		transfer.dataSize = sqe->rx.buf_len;
		break;
	case RTIO_OP_TX:
		transfer.rxData = NULL;
		transfer.txData = sqe->tx.buf;
		transfer.dataSize = sqe->tx.buf_len;
		break;
	case RTIO_OP_TINY_TX:
		transfer.rxData = NULL;
		transfer.txData = sqe->tiny_tx.buf;
		transfer.dataSize = sqe->tiny_tx.buf_len;
		break;
	case RTIO_OP_TXRX:
		transfer.txData = sqe->txrx.tx_buf;
		transfer.rxData = sqe->txrx.rx_buf;
		transfer.dataSize = sqe->txrx.buf_len;
		break;
	default:
		LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);
		spi_mcux_iodev_complete(dev, -EINVAL);
		return;
	}
	data->transfer_len = transfer.dataSize;
	/* Assert CS; released again in spi_mcux_iodev_complete(). */
	spi_context_cs_control(&data->ctx, true);
	status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer);
	if (status != kStatus_Success) {
		LOG_ERR("Transfer could not start on %s: %d", dev->name, status);
		spi_mcux_iodev_complete(dev, -EIO);
	}
}
/* Finish the current SQE: on success within a multi-SQE transaction chain
 * straight into the next SQE (CS stays asserted); otherwise deassert CS,
 * complete the submission, and restart the queue if more work is pending.
 */
static void spi_mcux_iodev_complete(const struct device *dev, int status)
{
	struct spi_mcux_data *data = dev->data;
	struct spi_rtio *rtio_ctx = data->rtio_ctx;
	if (!status && rtio_ctx->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION) {
		/* More SQEs in this transaction: continue without toggling CS. */
		rtio_ctx->txn_curr = rtio_txn_next(rtio_ctx->txn_curr);
		spi_mcux_iodev_start(dev);
		return;
	}
	/** De-assert CS-line to space from next transaction */
	spi_context_cs_control(&data->ctx, false);
	if (spi_rtio_complete(rtio_ctx, status)) {
		spi_mcux_iodev_start(dev);
	}
}
static void spi_mcux_iodev_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
{
struct spi_mcux_data *data = dev->data;
struct spi_rtio *rtio_ctx = data->rtio_ctx;
if (spi_rtio_submit(rtio_ctx, iodev_sqe)) {
spi_mcux_iodev_start(dev);
}
}
/* Blocking transceive implemented on top of the RTIO work queue: takes the
 * context lock, runs the buffer sets through spi_rtio_transceive(), and
 * releases the lock with the result.
 */
static int transceive_rtio(const struct device *dev, const struct spi_config *spi_cfg,
			   const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs)
{
	struct spi_mcux_data *dev_data = dev->data;
	int ret;

	spi_context_lock(&dev_data->ctx, false, NULL, NULL, spi_cfg);
	ret = spi_rtio_transceive(dev_data->rtio_ctx, spi_cfg, tx_bufs, rx_bufs);
	spi_context_release(&dev_data->ctx, ret);

	return ret;
}
#ifdef CONFIG_SPI_ASYNC
/* Async transceive is not supported by the RTIO driver; users should
 * submit work through the RTIO iodev interface instead.
 */
static int transceive_rtio_async(const struct device *dev, const struct spi_config *spi_cfg,
				 const struct spi_buf_set *tx_bufs,
				 const struct spi_buf_set *rx_bufs, spi_callback_t cb,
				 void *userdata)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(spi_cfg);
	ARG_UNUSED(tx_bufs);
	ARG_UNUSED(rx_bufs);
	ARG_UNUSED(cb);
	ARG_UNUSED(userdata);
	return -ENOTSUP;
}
#endif
/* SPI driver API table for the RTIO-based LPSPI driver. */
static DEVICE_API(spi, spi_mcux_rtio_driver_api) = {
	.transceive = transceive_rtio,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = transceive_rtio_async,
#endif
	.iodev_submit = spi_mcux_iodev_submit,
	.release = spi_mcux_release,
};
/* Device init: run the shared LPSPI bring-up, then initialize the RTIO
 * context and make the spi_context lock available.
 */
static int spi_mcux_rtio_init(const struct device *dev)
{
	struct spi_mcux_data *dev_data = dev->data;
	int ret = spi_nxp_init_common(dev);

	if (ret != 0) {
		return ret;
	}

	spi_rtio_init(dev_data->rtio_ctx, dev);
	spi_context_unlock_unconditionally(&dev_data->ctx);

	return 0;
}
/* LPSPI interrupt handler: delegates to the SDK IRQ handler, which in turn
 * invokes the registered master callback.
 * NOTE: the local must be named "base" — LPSPI_IRQ_HANDLE_ARG expands to
 * either (base) or LPSPI_GetInstance(base).
 */
static void lpspi_isr(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	LPSPI_MasterTransferHandleIRQ(LPSPI_IRQ_HANDLE_ARG, &data->handle);
}
/* Define the per-instance RTIO context; both SQ and CQ are sized by
 * CONFIG_SPI_MCUX_RTIO_SQ_SIZE.
 */
#define SPI_MCUX_RTIO_DEFINE(n)                                                                   \
	SPI_RTIO_DEFINE(spi_mcux_rtio_##n, CONFIG_SPI_MCUX_RTIO_SQ_SIZE,                          \
			CONFIG_SPI_MCUX_RTIO_SQ_SIZE)
/* Instantiate one RTIO LPSPI device: RTIO context, shared pinctrl/IRQ
 * plumbing, config struct, driver data, and the device definition itself.
 */
#define SPI_MCUX_LPSPI_RTIO_INIT(n)                                                               \
	SPI_MCUX_RTIO_DEFINE(n);                                                                  \
	SPI_NXP_LPSPI_COMMON_INIT(n)                                                              \
	SPI_MCUX_LPSPI_CONFIG_INIT(n)                                                             \
                                                                                                  \
	static struct spi_mcux_data spi_mcux_data_##n = {.rtio_ctx = &spi_mcux_rtio_##n,          \
							 SPI_NXP_LPSPI_COMMON_DATA_INIT(n)};      \
                                                                                                  \
	SPI_DEVICE_DT_INST_DEFINE(n, spi_mcux_rtio_init, NULL, &spi_mcux_data_##n,                \
				  &spi_mcux_config_##n, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,    \
				  &spi_mcux_rtio_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_LPSPI_RTIO_INIT)

View file

@ -0,0 +1,131 @@
/*
* Copyright 2018, 2024 NXP
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_mcux_lpspi_common, CONFIG_SPI_LOG_LEVEL);
#include "spi_nxp_lpspi_priv.h"
/* SPI API release hook: unconditionally drop the context lock so the bus
 * can be claimed by another user. Always succeeds.
 */
int spi_mcux_release(const struct device *dev, const struct spi_config *spi_cfg)
{
	struct spi_mcux_data *dev_data = dev->data;

	ARG_UNUSED(spi_cfg);
	spi_context_unlock_unconditionally(&dev_data->ctx);

	return 0;
}
/* Validate spi_cfg and program the LPSPI peripheral accordingly.
 * Checks duplex mode, frame size and chip-select range, fetches the module
 * clock rate, and runs LPSPI_MasterInit() with the derived master config.
 * Returns 0 on success, -ENOTSUP/-EINVAL on invalid config, or a clock
 * driver error code.
 */
int spi_mcux_configure(const struct device *dev, const struct spi_config *spi_cfg)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
	uint32_t word_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	lpspi_master_config_t master_config;
	uint32_t clock_freq;
	int ret;
	if (spi_cfg->operation & SPI_HALF_DUPLEX) {
		/* the IP DOES support half duplex, need to implement driver support */
		LOG_ERR("Half-duplex not supported");
		return -ENOTSUP;
	}
	if (word_size < 8 || (word_size % 32 == 1)) {
		/* Zephyr word size == hardware FRAME size (not word size)
		 * Max frame size: 4096 bits
		 * (zephyr field is 6 bit wide for max 64 bit size, no need to check)
		 * Min frame size: 8 bits.
		 * Minimum hardware word size is 2. Since this driver is intended to work
		 * for 32 bit platforms, and 64 bits is max size, then only 33 and 1 are invalid.
		 */
		LOG_ERR("Word size %d not allowed", word_size);
		return -EINVAL;
	}
	/* Valid PCS lines are 0 .. LPSPI_CHIP_SELECT_COUNT - 1, so reject
	 * slave == LPSPI_CHIP_SELECT_COUNT too (was an off-by-one: `>`
	 * let slave == 4 through even though the max is 3).
	 */
	if (spi_cfg->slave >= LPSPI_CHIP_SELECT_COUNT) {
		LOG_ERR("Peripheral %d select exceeds max %d", spi_cfg->slave,
			LPSPI_CHIP_SELECT_COUNT - 1);
		return -EINVAL;
	}
	ret = clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq);
	if (ret) {
		return ret;
	}
	if (data->ctx.config != NULL) {
		/* Setting the baud rate in LPSPI_MasterInit requires module to be disabled. Only
		 * disable if already configured, otherwise the clock is not enabled and the
		 * CR register cannot be written.
		 */
		LPSPI_Enable(base, false);
		while ((base->CR & LPSPI_CR_MEN_MASK) != 0U) {
			/* Wait until LPSPI is disabled. Datasheet:
			 * After writing 0, MEN (Module Enable) remains set until the LPSPI has
			 * completed the current transfer and is idle.
			 */
		}
	}
	data->ctx.config = spi_cfg;
	LPSPI_MasterGetDefaultConfig(&master_config);
	master_config.bitsPerFrame = word_size;
	master_config.cpol = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL)
				     ? kLPSPI_ClockPolarityActiveLow
				     : kLPSPI_ClockPolarityActiveHigh;
	master_config.cpha = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA)
				     ? kLPSPI_ClockPhaseSecondEdge
				     : kLPSPI_ClockPhaseFirstEdge;
	master_config.direction =
		(spi_cfg->operation & SPI_TRANSFER_LSB) ? kLPSPI_LsbFirst : kLPSPI_MsbFirst;
	master_config.baudRate = spi_cfg->frequency;
	master_config.pcsToSckDelayInNanoSec = config->pcs_sck_delay;
	master_config.lastSckToPcsDelayInNanoSec = config->sck_pcs_delay;
	master_config.betweenTransferDelayInNanoSec = config->transfer_delay;
	master_config.pinCfg = config->data_pin_config;
	LPSPI_MasterInit(base, &master_config, clock_freq);
	/* Transmit zeros (not SDK default 0xFF) when tx buffer is NULL. */
	LPSPI_SetDummyData(base, 0);
	if (IS_ENABLED(CONFIG_DEBUG)) {
		/* Keep the module running while the core is halted by a debugger. */
		base->CR |= LPSPI_CR_DBGEN_MASK;
	}
	return 0;
}
/* Shared init for both LPSPI driver flavors: map MMIO, record the device
 * back-pointer, verify the clock controller, configure CS GPIOs, apply
 * default pinctrl state, and hook up the instance IRQ.
 * Returns 0 on success or a negative errno from the first failing step.
 */
int spi_nxp_init_common(const struct device *dev)
{
	const struct spi_mcux_config *config = dev->config;
	struct spi_mcux_data *data = dev->data;
	int err = 0;
	DEVICE_MMIO_NAMED_MAP(dev, reg_base, K_MEM_CACHE_NONE | K_MEM_DIRECT_MAP);
	/* Needed by the transfer callbacks, which only receive driver data. */
	data->dev = dev;
	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}
	err = spi_context_cs_configure_all(&data->ctx);
	if (err < 0) {
		return err;
	}
	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}
	/* Connect and enable the instance interrupt (per-instance macro). */
	config->irq_config_func(dev);
	return err;
}

View file

@ -0,0 +1,131 @@
/*
* Copyright 2018, 2024 NXP
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/irq.h>
#include "../spi_context.h"
#if CONFIG_NXP_LP_FLEXCOMM
#include <zephyr/drivers/mfd/nxp_lp_flexcomm.h>
#endif
#include <fsl_lpspi.h>
/* If any hardware revisions change this, make it into a DT property.
 * DON'T make #ifdefs here by platform.
 */
#define LPSPI_CHIP_SELECT_COUNT 4
#define LPSPI_MIN_FRAME_SIZE_BITS 8
/* Required by DEVICE_MMIO_NAMED_* macros */
#define DEV_CFG(_dev) ((const struct spi_mcux_config *)(_dev)->config)
#define DEV_DATA(_dev) ((struct spi_mcux_data *)(_dev)->data)
/* flag for SDK API for master transfers */
#define LPSPI_MASTER_XFER_CFG_FLAGS(slave) \
kLPSPI_MasterPcsContinuous | (slave << LPSPI_MASTER_PCS_SHIFT)
/* Static (ROM) per-instance configuration taken from devicetree. */
struct spi_mcux_config {
	DEVICE_MMIO_NAMED_ROM(reg_base);
	const struct device *clock_dev;                /* module clock controller */
	clock_control_subsys_t clock_subsys;           /* clock subsystem handle */
	void (*irq_config_func)(const struct device *dev); /* per-instance IRQ hookup */
	uint32_t pcs_sck_delay;                        /* PCS-to-SCK delay [ns] */
	uint32_t sck_pcs_delay;                        /* last SCK-to-PCS delay [ns] */
	uint32_t transfer_delay;                       /* delay between transfers [ns] */
	const struct pinctrl_dev_config *pincfg;       /* pinctrl states */
	lpspi_pin_config_t data_pin_config;            /* SDI/SDO pin routing */
};
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
#include <zephyr/drivers/dma.h>
/* State for one DMA direction (tx or rx) of an LPSPI instance. */
struct spi_dma_stream {
	const struct device *dma_dev;        /* DMA controller servicing this stream */
	uint32_t channel; /* stores the channel for dma */
	struct dma_config dma_cfg;           /* channel configuration */
	struct dma_block_config dma_blk_cfg; /* single-block transfer descriptor */
};
#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
/* Mutable per-instance driver data. */
struct spi_mcux_data {
	DEVICE_MMIO_NAMED_RAM(reg_base);
	const struct device *dev;     /* back-pointer for use in SDK callbacks */
	lpspi_master_handle_t handle; /* MCUX SDK transfer handle */
	struct spi_context ctx;       /* generic SPI context (lock, buffers, CS) */
	size_t transfer_len;          /* length of the chunk currently in flight */
#ifdef CONFIG_SPI_RTIO
	struct spi_rtio *rtio_ctx;    /* RTIO queue state (RTIO driver only) */
#endif
#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
	volatile uint32_t status_flags; /* LPSPI_DMA_*_FLAG bits set from DMA callbacks */
	struct spi_dma_stream dma_rx;
	struct spi_dma_stream dma_tx;
	/* dummy value used for transferring NOP when tx buf is null */
	uint32_t dummy_buffer;
#endif
};
/* common configure function that verifies spi_cfg validity and set up configuration parameters */
int spi_mcux_configure(const struct device *dev, const struct spi_config *spi_cfg);
/* Does these things:
* Set data.dev
* Check clocks device is ready
* Configure cs gpio pin if needed
* Mux pinctrl to lpspi
* Enable LPSPI IRQ at system level
*/
int spi_nxp_init_common(const struct device *dev);
/* common api function for now */
int spi_mcux_release(const struct device *dev, const struct spi_config *spi_cfg);
/* Argument to MCUX SDK IRQ handler */
#define LPSPI_IRQ_HANDLE_ARG COND_CODE_1(CONFIG_NXP_LP_FLEXCOMM, (LPSPI_GetInstance(base)), (base))
/* Hook the LPSPI interrupt up: via the LP Flexcomm MFD when the instance
 * lives behind a flexcomm, otherwise directly with IRQ_CONNECT.
 * Both variants register lpspi_isr, which each driver flavor defines.
 */
#if defined(CONFIG_NXP_LP_FLEXCOMM)
#define SPI_MCUX_LPSPI_IRQ_FUNC(n)                                                                \
	nxp_lp_flexcomm_setirqhandler(DEVICE_DT_GET(DT_INST_PARENT(n)), DEVICE_DT_INST_GET(n),    \
				      LP_FLEXCOMM_PERIPH_LPSPI, lpspi_isr);
#else
#define SPI_MCUX_LPSPI_IRQ_FUNC(n)                                                                \
	IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), lpspi_isr, DEVICE_DT_INST_GET(n),  \
		    0);                                                                           \
	irq_enable(DT_INST_IRQN(n));
#endif
/* Define the per-instance const config struct from devicetree properties.
 * Delay properties default to 0 when absent from the node.
 */
#define SPI_MCUX_LPSPI_CONFIG_INIT(n)                                                             \
	static const struct spi_mcux_config spi_mcux_config_##n = {                               \
		DEVICE_MMIO_NAMED_ROM_INIT(reg_base, DT_DRV_INST(n)),                             \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),                               \
		.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),             \
		.irq_config_func = spi_mcux_config_func_##n,                                      \
		.pcs_sck_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, pcs_sck_delay),                \
					  DT_INST_PROP(n, pcs_sck_delay)),                        \
		.sck_pcs_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, sck_pcs_delay),                \
					  DT_INST_PROP(n, sck_pcs_delay)),                        \
		.transfer_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, transfer_delay),              \
					   DT_INST_PROP(n, transfer_delay)),                      \
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),                                      \
		.data_pin_config = DT_INST_ENUM_IDX(n, data_pin_config),                          \
	};
/* Shared per-instance plumbing: pinctrl definition and the IRQ config
 * function referenced by SPI_MCUX_LPSPI_CONFIG_INIT.
 */
#define SPI_NXP_LPSPI_COMMON_INIT(n)                                                              \
	PINCTRL_DT_INST_DEFINE(n);                                                                \
                                                                                                  \
	static void spi_mcux_config_func_##n(const struct device *dev)                            \
	{                                                                                         \
		SPI_MCUX_LPSPI_IRQ_FUNC(n)                                                        \
	}
/* Shared initializers for struct spi_mcux_data: context lock/sync and
 * CS GPIO setup.
 */
#define SPI_NXP_LPSPI_COMMON_DATA_INIT(n)                                                         \
	SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx),                                            \
	SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx),                                            \
	SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)