drivers: spi_sifive: Optimize xfer to minimize downtime between frames.
The SPI peripheral on SiFive parts uses FIFOs for Rx and Tx (FIFO size undocumented, but empirically found to be 8 bytes on FE310, likely identical on FU540 / FU740). Make use of these FIFOs in order to continuously feed Tx data as it becomes available. Verified to transmit 1 MHz SPI @ 200 MHz coreclk / tlclk on FE310 continuously, without downtime between frames. Signed-off-by: Shawn Nematbakhsh <shawn@rivosinc.com>
This commit is contained in:
parent
e17d5ed282
commit
3386b853ac
1 changed file with 43 additions and 28 deletions
|
@ -17,7 +17,8 @@ LOG_MODULE_REGISTER(spi_sifive);
|
||||||
|
|
||||||
/* Helper Functions */
|
/* Helper Functions */
|
||||||
|
|
||||||
static inline void sys_set_mask(mem_addr_t addr, uint32_t mask, uint32_t value)
|
static ALWAYS_INLINE
|
||||||
|
void sys_set_mask(mem_addr_t addr, uint32_t mask, uint32_t value)
|
||||||
{
|
{
|
||||||
uint32_t temp = sys_read32(addr);
|
uint32_t temp = sys_read32(addr);
|
||||||
|
|
||||||
|
@ -27,7 +28,7 @@ static inline void sys_set_mask(mem_addr_t addr, uint32_t mask, uint32_t value)
|
||||||
sys_write32(temp, addr);
|
sys_write32(temp, addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
int spi_config(const struct device *dev, uint32_t frequency,
|
static int spi_config(const struct device *dev, uint32_t frequency,
|
||||||
uint16_t operation)
|
uint16_t operation)
|
||||||
{
|
{
|
||||||
uint32_t div;
|
uint32_t div;
|
||||||
|
@ -100,50 +101,64 @@ int spi_config(const struct device *dev, uint32_t frequency,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void spi_sifive_send(const struct device *dev, uint8_t frame)
|
static ALWAYS_INLINE bool spi_sifive_send_available(const struct device *dev)
|
||||||
{
|
{
|
||||||
while (sys_read32(SPI_REG(dev, REG_TXDATA)) & SF_TXDATA_FULL) {
|
return !(sys_read32(SPI_REG(dev, REG_TXDATA)) & SF_TXDATA_FULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static ALWAYS_INLINE
|
||||||
|
void spi_sifive_send(const struct device *dev, uint8_t frame)
|
||||||
|
{
|
||||||
sys_write32((uint32_t) frame, SPI_REG(dev, REG_TXDATA));
|
sys_write32((uint32_t) frame, SPI_REG(dev, REG_TXDATA));
|
||||||
}
|
}
|
||||||
|
|
||||||
uint8_t spi_sifive_recv(const struct device *dev)
|
static ALWAYS_INLINE
|
||||||
|
bool spi_sifive_recv(const struct device *dev, uint8_t *val)
|
||||||
{
|
{
|
||||||
uint32_t val;
|
uint32_t reg = sys_read32(SPI_REG(dev, REG_RXDATA));
|
||||||
|
|
||||||
while ((val = sys_read32(SPI_REG(dev, REG_RXDATA))) & SF_RXDATA_EMPTY) {
|
if (reg & SF_RXDATA_EMPTY) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
*val = (uint8_t) reg;
|
||||||
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
return (uint8_t) val;
|
static void spi_sifive_xfer(const struct device *dev, const bool hw_cs_control)
|
||||||
}
|
|
||||||
|
|
||||||
void spi_sifive_xfer(const struct device *dev, const bool hw_cs_control)
|
|
||||||
{
|
{
|
||||||
struct spi_context *ctx = &SPI_DATA(dev)->ctx;
|
struct spi_context *ctx = &SPI_DATA(dev)->ctx;
|
||||||
uint8_t txd, rxd;
|
uint8_t txd, rxd;
|
||||||
|
int queued_frames = 0;
|
||||||
|
|
||||||
do {
|
while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx) || queued_frames > 0) {
|
||||||
/* Send a frame */
|
bool send = false;
|
||||||
|
|
||||||
|
/* As long as frames remain to be sent, attempt to queue them on Tx FIFO. If
|
||||||
|
* the FIFO is full then another attempt will be made next pass. If Rx length
|
||||||
|
* > Tx length then queue dummy Tx in order to read the requested Rx data.
|
||||||
|
*/
|
||||||
if (spi_context_tx_buf_on(ctx)) {
|
if (spi_context_tx_buf_on(ctx)) {
|
||||||
|
send = true;
|
||||||
txd = *ctx->tx_buf;
|
txd = *ctx->tx_buf;
|
||||||
} else {
|
} else if (queued_frames == 0) { /* Implies spi_context_rx_on(). */
|
||||||
|
send = true;
|
||||||
txd = 0U;
|
txd = 0U;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (send && spi_sifive_send_available(dev)) {
|
||||||
spi_sifive_send(dev, txd);
|
spi_sifive_send(dev, txd);
|
||||||
|
queued_frames++;
|
||||||
spi_context_update_tx(ctx, 1, 1);
|
spi_context_update_tx(ctx, 1, 1);
|
||||||
|
}
|
||||||
|
|
||||||
/* Receive a frame */
|
if (queued_frames > 0 && spi_sifive_recv(dev, &rxd)) {
|
||||||
rxd = spi_sifive_recv(dev);
|
|
||||||
|
|
||||||
if (spi_context_rx_buf_on(ctx)) {
|
if (spi_context_rx_buf_on(ctx)) {
|
||||||
*ctx->rx_buf = rxd;
|
*ctx->rx_buf = rxd;
|
||||||
}
|
}
|
||||||
|
queued_frames--;
|
||||||
spi_context_update_rx(ctx, 1, 1);
|
spi_context_update_rx(ctx, 1, 1);
|
||||||
} while (spi_context_tx_on(ctx) || spi_context_rx_on(ctx));
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/* Deassert the CS line */
|
/* Deassert the CS line */
|
||||||
if (!hw_cs_control) {
|
if (!hw_cs_control) {
|
||||||
|
@ -157,7 +172,7 @@ void spi_sifive_xfer(const struct device *dev, const bool hw_cs_control)
|
||||||
|
|
||||||
/* API Functions */
|
/* API Functions */
|
||||||
|
|
||||||
int spi_sifive_init(const struct device *dev)
|
static int spi_sifive_init(const struct device *dev)
|
||||||
{
|
{
|
||||||
int err;
|
int err;
|
||||||
#ifdef CONFIG_PINCTRL
|
#ifdef CONFIG_PINCTRL
|
||||||
|
@ -183,7 +198,7 @@ int spi_sifive_init(const struct device *dev)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int spi_sifive_transceive(const struct device *dev,
|
static int spi_sifive_transceive(const struct device *dev,
|
||||||
const struct spi_config *config,
|
const struct spi_config *config,
|
||||||
const struct spi_buf_set *tx_bufs,
|
const struct spi_buf_set *tx_bufs,
|
||||||
const struct spi_buf_set *rx_bufs)
|
const struct spi_buf_set *rx_bufs)
|
||||||
|
@ -248,7 +263,7 @@ int spi_sifive_transceive(const struct device *dev,
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
int spi_sifive_release(const struct device *dev,
|
static int spi_sifive_release(const struct device *dev,
|
||||||
const struct spi_config *config)
|
const struct spi_config *config)
|
||||||
{
|
{
|
||||||
spi_context_unlock_unconditionally(&SPI_DATA(dev)->ctx);
|
spi_context_unlock_unconditionally(&SPI_DATA(dev)->ctx);
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue