This is mainly a preparation patch before adding support for one-by-one
transmission of the DMA SG entries. But logically the Tx and Rx DMA channels
setup should be performed in the dma_setup() callback anyway. So let's move
the DMA slave channels' src/dst burst lengths, address and address width
configuration to
the DMA setup stage. While at it, make sure the return value of the
dmaengine_slave_config() method is checked. That check has been unnecessary
when the DW DMAC is utilized as the DMA engine, since its device_config()
callback always returns zero (though that might change in the future). But
since the DW APB SSI driver now supports any DMA back-end, we must make sure
the DMA device configuration has been successful before proceeding with
further setups.

Signed-off-by: Serge Semin <sergey.se...@baikalelectronics.ru>
---
 drivers/spi/spi-dw-dma.c | 42 +++++++++++++++++++++++++++++-----------
 1 file changed, 31 insertions(+), 11 deletions(-)

diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index ec721af61663..56496b659d62 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -257,11 +257,9 @@ static void dw_spi_dma_tx_done(void *arg)
        complete(&dws->dma_completion);
 }
 
-static struct dma_async_tx_descriptor *
-dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
+static int dw_spi_dma_config_tx(struct dw_spi *dws)
 {
        struct dma_slave_config txconf;
-       struct dma_async_tx_descriptor *txdesc;
 
        memset(&txconf, 0, sizeof(txconf));
        txconf.direction = DMA_MEM_TO_DEV;
@@ -271,7 +269,13 @@ dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
        txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
        txconf.device_fc = false;
 
-       dmaengine_slave_config(dws->txchan, &txconf);
+       return dmaengine_slave_config(dws->txchan, &txconf);
+}
+
+static struct dma_async_tx_descriptor *
+dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+       struct dma_async_tx_descriptor *txdesc;
 
        txdesc = dmaengine_prep_slave_sg(dws->txchan,
                                xfer->tx_sg.sgl,
@@ -346,14 +350,9 @@ static void dw_spi_dma_rx_done(void *arg)
        complete(&dws->dma_completion);
 }
 
-static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
-               struct spi_transfer *xfer)
+static int dw_spi_dma_config_rx(struct dw_spi *dws)
 {
        struct dma_slave_config rxconf;
-       struct dma_async_tx_descriptor *rxdesc;
-
-       if (!xfer->rx_buf)
-               return NULL;
 
        memset(&rxconf, 0, sizeof(rxconf));
        rxconf.direction = DMA_DEV_TO_MEM;
@@ -363,7 +362,16 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
        rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
        rxconf.device_fc = false;
 
-       dmaengine_slave_config(dws->rxchan, &rxconf);
+       return dmaengine_slave_config(dws->rxchan, &rxconf);
+}
+
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
+               struct spi_transfer *xfer)
+{
+       struct dma_async_tx_descriptor *rxdesc;
+
+       if (!xfer->rx_buf)
+               return NULL;
 
        rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
                                xfer->rx_sg.sgl,
@@ -382,10 +390,22 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
 static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
 {
        u16 imr, dma_ctrl;
+       int ret;
 
        if (!xfer->tx_buf)
                return -EINVAL;
 
+       /* Setup DMA channels */
+       ret = dw_spi_dma_config_tx(dws);
+       if (ret)
+               return ret;
+
+       if (xfer->rx_buf) {
+               ret = dw_spi_dma_config_rx(dws);
+               if (ret)
+                       return ret;
+       }
+
        /* Set the DMA handshaking interface */
        dma_ctrl = SPI_DMA_TDMAE;
        if (xfer->rx_buf)
-- 
2.27.0

Reply via email to