spi: dw-dma: Move DMA transfers submission to the channels prep methods
Indeed, we can freely move the dmaengine_submit() invocation, together with the Tx and Rx busy flag setting, into the DMA Tx/Rx prepare methods. Since those methods are now mainly used to submit the DMA transfers, rename them to carry the _submit_{r,t}x suffix instead.

This alteration serves three purposes. First, it is another piece of code preparation before the one-by-one DMA SG entries transmission is added. Second, the dma_async_tx_descriptor descriptor is now used locally only in the new DMA transfer submission methods (this will be cleaned up a bit later). Third, the generic transfer method becomes more readable: the submission, execution and wait procedures are now transparently split up, instead of a preparation stage, intermixed submission/execution, and a wait procedure.

Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru>
Link: https://lore.kernel.org/r/20200920112322.24585-6-Sergey.Semin@baikalelectronics.ru
Signed-off-by: Mark Brown <broonie@kernel.org>
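For context, the submit half that the renamed helpers now own follows the standard dmaengine slave pattern: prepare a descriptor, hook the completion callback, queue it with dmaengine_submit(), and mark the channel busy. Below is a minimal sketch of that pattern; demo_dma_submit_tx() and its parameter list are hypothetical stand-ins (the real driver hard-codes its struct dw_spi state), while all the dmaengine calls are the stock kernel API:

#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Queue one Tx SG list on a slave DMA channel. Note dmaengine_submit()
 * only places the descriptor on the engine's pending queue; nothing
 * moves until the caller invokes dma_async_issue_pending().
 */
static struct dma_async_tx_descriptor *
demo_dma_submit_tx(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int nents, dma_async_tx_callback done,
		   void *param, unsigned long *busy, int busy_bit)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return NULL;

	desc->callback = done;
	desc->callback_param = param;

	/* Submission and busy-flag setting now live together here */
	dmaengine_submit(desc);
	set_bit(busy_bit, busy);

	return desc;
}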
Parent: be3034d9f9
Commit: ab7a4d758b
@@ -272,7 +272,7 @@ static int dw_spi_dma_config_tx(struct dw_spi *dws)
 }
 
 static struct dma_async_tx_descriptor *
-dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
+dw_spi_dma_submit_tx(struct dw_spi *dws, struct spi_transfer *xfer)
 {
 	struct dma_async_tx_descriptor *txdesc;
 
@@ -287,6 +287,9 @@ dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
 	txdesc->callback = dw_spi_dma_tx_done;
 	txdesc->callback_param = dws;
 
+	dmaengine_submit(txdesc);
+	set_bit(TX_BUSY, &dws->dma_chan_busy);
+
 	return txdesc;
 }
 
@@ -364,7 +367,7 @@ static int dw_spi_dma_config_rx(struct dw_spi *dws)
 	return dmaengine_slave_config(dws->rxchan, &rxconf);
 }
 
-static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
+static struct dma_async_tx_descriptor *dw_spi_dma_submit_rx(struct dw_spi *dws,
 		struct spi_transfer *xfer)
 {
 	struct dma_async_tx_descriptor *rxdesc;
@@ -380,6 +383,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
 	rxdesc->callback = dw_spi_dma_rx_done;
 	rxdesc->callback_param = dws;
 
+	dmaengine_submit(rxdesc);
+	set_bit(RX_BUSY, &dws->dma_chan_busy);
+
 	return rxdesc;
 }
 
@@ -426,25 +432,21 @@ static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
 	struct dma_async_tx_descriptor *txdesc, *rxdesc;
 	int ret;
 
-	/* Prepare the TX dma transfer */
-	txdesc = dw_spi_dma_prepare_tx(dws, xfer);
+	/* Submit the DMA Tx transfer */
+	txdesc = dw_spi_dma_submit_tx(dws, xfer);
 	if (!txdesc)
 		return -EINVAL;
 
-	/* Prepare the RX dma transfer */
+	/* Submit the DMA Rx transfer if required */
 	if (xfer->rx_buf) {
-		rxdesc = dw_spi_dma_prepare_rx(dws, xfer);
+		rxdesc = dw_spi_dma_submit_rx(dws, xfer);
 		if (!rxdesc)
 			return -EINVAL;
 
 		/* rx must be started before tx due to spi instinct */
-		set_bit(RX_BUSY, &dws->dma_chan_busy);
-		dmaengine_submit(rxdesc);
 		dma_async_issue_pending(dws->rxchan);
 	}
 
-	set_bit(TX_BUSY, &dws->dma_chan_busy);
-	dmaengine_submit(txdesc);
 	dma_async_issue_pending(dws->txchan);
 
 	ret = dw_spi_dma_wait(dws, xfer);
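The hunks above leave dw_spi_dma_transfer() with only the execution and wait steps. As a rough illustration of that remaining half, here is a hedged sketch, assuming a completion that the Tx/Rx done callbacks signal; the demo_* names and the fixed one-second timeout are inventions (the real driver sizes its timeout from the transfer length inside dw_spi_dma_wait()):

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Demo stand-in for the driver's completion object */
static DECLARE_COMPLETION(demo_dma_done);

/*
 * Execution plus wait: kick the already-submitted descriptors on both
 * channels, then block until the done callbacks complete() the above.
 * Rx is issued before Tx, mirroring the ordering comment in the diff.
 */
static int demo_dma_execute_and_wait(struct dma_chan *rxchan,
				     struct dma_chan *txchan)
{
	if (rxchan)
		dma_async_issue_pending(rxchan);
	dma_async_issue_pending(txchan);

	if (!wait_for_completion_timeout(&demo_dma_done,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}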