drivers: spi_sifive: Optimize xfer to minimize downtime between frames.

The SPI peripheral on SiFive parts uses FIFOs for Rx and Tx (FIFO size
undocumented, but empirically found to be 8 bytes on FE310, likely
identical on FU540 / FU740). Make use of these FIFOs in order to
continuously feed Tx data as available.

Verified to transmit 1 MHz SPI @ 200 MHz coreclk / tlclk on FE310
continuously without downtime between frames.

Signed-off-by: Shawn Nematbakhsh <shawn@rivosinc.com>
Shawn Nematbakhsh, 2022-05-16 11:47:58 -07:00 (committed by Carles Cufí)
commit 3386b853ac

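The core idea, sketched below in self-contained form: instead of sending one frame and busy-waiting for its reply, keep the Tx FIFO topped up and drain the Rx FIFO opportunistically, counting how many frames are in flight. This is an illustrative sketch, not the driver code itself: the helpers tx_fifo_full(), rx_fifo_empty(), tx_write() and rx_read() are hypothetical stand-ins for the driver's SF_TXDATA_FULL / SF_RXDATA_EMPTY register reads, and the flat buffers stand in for the spi_context bookkeeping seen in the diff.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical FIFO accessors; the real driver reads status bits over MMIO. */
bool tx_fifo_full(void);
bool rx_fifo_empty(void);
void tx_write(uint8_t frame);
uint8_t rx_read(void);

/* Pipelined full-duplex transfer: in_flight counts frames queued on Tx but
 * not yet collected from Rx, so the loop never reads more frames than it
 * has clocked out, and never idles while the Tx FIFO has room.
 */
void xfer_pipelined(const uint8_t *tx, size_t tx_len, uint8_t *rx, size_t rx_len)
{
	size_t total = tx_len > rx_len ? tx_len : rx_len;
	size_t sent = 0, recvd = 0, in_flight = 0;

	while (sent < total || in_flight > 0) {
		/* Queue real data while it lasts; once Tx is exhausted, queue
		 * dummy frames one at a time so the remaining Rx data can
		 * still be clocked in.
		 */
		if (sent < total && !tx_fifo_full() &&
		    (sent < tx_len || in_flight == 0)) {
			tx_write(sent < tx_len ? tx[sent] : 0U);
			sent++;
			in_flight++;
		}

		/* Drain whatever has completed; frames beyond rx_len are
		 * clocked in but discarded.
		 */
		if (in_flight > 0 && !rx_fifo_empty()) {
			uint8_t frame = rx_read();

			if (recvd < rx_len) {
				rx[recvd] = frame;
			}
			recvd++;
			in_flight--;
		}
	}
}

The payoff is that with the Tx FIFO kept non-empty the peripheral always has the next frame ready when the current one finishes, which is what eliminates the inter-frame gap the commit message measures.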

@@ -17,7 +17,8 @@ LOG_MODULE_REGISTER(spi_sifive);
 
 /* Helper Functions */
-static inline void sys_set_mask(mem_addr_t addr, uint32_t mask, uint32_t value)
+static ALWAYS_INLINE
+void sys_set_mask(mem_addr_t addr, uint32_t mask, uint32_t value)
 {
 	uint32_t temp = sys_read32(addr);
@@ -27,7 +28,7 @@ static inline void sys_set_mask(mem_addr_t addr, uint32_t mask, uint32_t value)
 	sys_write32(temp, addr);
 }
 
-int spi_config(const struct device *dev, uint32_t frequency,
+static int spi_config(const struct device *dev, uint32_t frequency,
 	       uint16_t operation)
 {
 	uint32_t div;
@@ -100,50 +101,64 @@ int spi_config(const struct device *dev, uint32_t frequency,
 	return 0;
 }
 
+static ALWAYS_INLINE bool spi_sifive_send_available(const struct device *dev)
+{
+	return !(sys_read32(SPI_REG(dev, REG_TXDATA)) & SF_TXDATA_FULL);
+}
+
+static ALWAYS_INLINE
 void spi_sifive_send(const struct device *dev, uint8_t frame)
 {
-	while (sys_read32(SPI_REG(dev, REG_TXDATA)) & SF_TXDATA_FULL) {
-	}
-
 	sys_write32((uint32_t) frame, SPI_REG(dev, REG_TXDATA));
 }
 
-uint8_t spi_sifive_recv(const struct device *dev)
+static ALWAYS_INLINE
+bool spi_sifive_recv(const struct device *dev, uint8_t *val)
 {
-	uint32_t val;
+	uint32_t reg = sys_read32(SPI_REG(dev, REG_RXDATA));
 
-	while ((val = sys_read32(SPI_REG(dev, REG_RXDATA))) & SF_RXDATA_EMPTY) {
+	if (reg & SF_RXDATA_EMPTY) {
+		return false;
 	}
-	return (uint8_t) val;
+	*val = (uint8_t) reg;
+	return true;
 }
 
-void spi_sifive_xfer(const struct device *dev, const bool hw_cs_control)
+static void spi_sifive_xfer(const struct device *dev, const bool hw_cs_control)
 {
 	struct spi_context *ctx = &SPI_DATA(dev)->ctx;
 	uint8_t txd, rxd;
+	int queued_frames = 0;
 
-	do {
-		/* Send a frame */
+	while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx) || queued_frames > 0) {
+		bool send = false;
+
+		/* As long as frames remain to be sent, attempt to queue them on Tx FIFO. If
+		 * the FIFO is full then another attempt will be made next pass. If Rx length
+		 * > Tx length then queue dummy Tx in order to read the requested Rx data.
+		 */
 		if (spi_context_tx_buf_on(ctx)) {
+			send = true;
 			txd = *ctx->tx_buf;
-		} else {
+		} else if (queued_frames == 0) { /* Implies spi_context_rx_on(). */
+			send = true;
 			txd = 0U;
 		}
 
-		spi_sifive_send(dev, txd);
-		spi_context_update_tx(ctx, 1, 1);
-
-		/* Receive a frame */
-		rxd = spi_sifive_recv(dev);
-
-		if (spi_context_rx_buf_on(ctx)) {
-			*ctx->rx_buf = rxd;
+		if (send && spi_sifive_send_available(dev)) {
+			spi_sifive_send(dev, txd);
+			queued_frames++;
+			spi_context_update_tx(ctx, 1, 1);
 		}
 
-		spi_context_update_rx(ctx, 1, 1);
-	} while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx));
+		if (queued_frames > 0 && spi_sifive_recv(dev, &rxd)) {
+			if (spi_context_rx_buf_on(ctx)) {
+				*ctx->rx_buf = rxd;
+			}
+			queued_frames--;
+			spi_context_update_rx(ctx, 1, 1);
+		}
+	}
 
 	/* Deassert the CS line */
 	if (!hw_cs_control) {
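The queued_frames == 0 guard on the dummy-Tx path above is worth noting: once the Tx buffer is exhausted, at most one dummy frame is ever in flight, so a longer Rx request is clocked out one frame at a time and the loop cannot run past the end of the requested Rx data.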
@@ -157,7 +172,7 @@ void spi_sifive_xfer(const struct device *dev, const bool hw_cs_control)
 
 /* API Functions */
-int spi_sifive_init(const struct device *dev)
+static int spi_sifive_init(const struct device *dev)
 {
 	int err;
 
 #ifdef CONFIG_PINCTRL
@@ -183,7 +198,7 @@ int spi_sifive_init(const struct device *dev)
 	return 0;
 }
 
-int spi_sifive_transceive(const struct device *dev,
+static int spi_sifive_transceive(const struct device *dev,
 			  const struct spi_config *config,
 			  const struct spi_buf_set *tx_bufs,
 			  const struct spi_buf_set *rx_bufs)
@@ -248,7 +263,7 @@ int spi_sifive_transceive(const struct device *dev,
 	return rc;
 }
 
-int spi_sifive_release(const struct device *dev,
+static int spi_sifive_release(const struct device *dev,
 		       const struct spi_config *config)
 {
 	spi_context_unlock_unconditionally(&SPI_DATA(dev)->ctx);