author    Peter Ujfalusi <peter.ujfalusi@ti.com>    2019-07-16 11:26:55 +0300
committer Vinod Koul <vkoul@kernel.org>             2019-07-29 12:11:38 +0530
commit    aa3c6ce4eab8fb0e967954be1ba1cad3b715f63b (patch)
tree      bd01cc1b347fa53e617ea734ad5497c46c41c0cb /drivers
parent    097ffdc75259139ba157b7f924cfeb0d6b00559e (diff)
dmaengine: ti: edma: Support for polled (memcpy) completion
When a DMA client driver does not set DMA_PREP_INTERRUPT, either because it does not want to use interrupts for DMA completion or because it cannot rely on DMA interrupts (for example when executing the memcpy while interrupts are disabled), it will poll the status of the transfer.

Since no EDMA register can tell us that the transfer is completed (we can only tell that the paRAM set has been sent to the TPTC for processing), we need to check the residue of the transfer instead: if it is 0, the transfer is completed.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Link: https://lore.kernel.org/r/20190716082655.1620-4-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
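For context, this is roughly how a dmaengine client would drive such a polled memcpy (a minimal sketch, not part of this patch; polled_memcpy is a hypothetical name, and chan/dst/src/len are assumed to come from the usual dma_request_chan()/dma_map_*() setup):

#include <linux/dmaengine.h>

/* Hypothetical client helper: memcpy without DMA_PREP_INTERRUPT. */
static int polled_memcpy(struct dma_chan *chan, dma_addr_t dst,
                         dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        enum dma_status status;
        dma_cookie_t cookie;

        /* flags == 0: no DMA_PREP_INTERRUPT, so the driver marks the
         * descriptor as polled */
        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
        if (!tx)
                return -ENOMEM;

        cookie = dmaengine_submit(tx);
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);

        /* No completion interrupt will fire; spin on the status until
         * the residue-based check reports completion */
        do {
                cpu_relax();
                status = dmaengine_tx_status(chan, cookie, NULL);
        } while (status == DMA_IN_PROGRESS);

        return status == DMA_COMPLETE ? 0 : -EIO;
}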
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/ti/edma.c  37
1 file changed, 33 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 201b838ec808..fe468e2f7e67 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -179,6 +179,7 @@ struct edma_desc {
         struct list_head            node;
         enum dma_transfer_direction direction;
         int                         cyclic;
+        bool                        polled;
         int                         absync;
         int                         pset_nr;
         struct edma_chan            *echan;
@@ -1226,8 +1227,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 
         edesc->pset[0].param.opt |= ITCCHEN;
         if (nslots == 1) {
-                /* Enable transfer complete interrupt */
-                edesc->pset[0].param.opt |= TCINTEN;
+                /* Enable transfer complete interrupt if requested */
+                if (tx_flags & DMA_PREP_INTERRUPT)
+                        edesc->pset[0].param.opt |= TCINTEN;
         } else {
                 /* Enable transfer complete chaining for the first slot */
                 edesc->pset[0].param.opt |= TCCHEN;
@@ -1254,9 +1256,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
                 }
 
                 edesc->pset[1].param.opt |= ITCCHEN;
-                edesc->pset[1].param.opt |= TCINTEN;
+                /* Enable transfer complete interrupt if requested */
+                if (tx_flags & DMA_PREP_INTERRUPT)
+                        edesc->pset[1].param.opt |= TCINTEN;
         }
 
+        if (!(tx_flags & DMA_PREP_INTERRUPT))
+                edesc->polled = true;
+
         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
 
@@ -1826,18 +1833,40 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
 {
         struct edma_chan *echan = to_edma_chan(chan);
         struct virt_dma_desc *vdesc;
+        struct dma_tx_state txstate_tmp;
         enum dma_status ret;
         unsigned long flags;
 
         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_COMPLETE || !txstate)
+
+        if (ret == DMA_COMPLETE)
                 return ret;
 
+        /* Provide a dummy dma_tx_state for completion checking */
+        if (!txstate)
+                txstate = &txstate_tmp;
+
+        txstate->residue = 0;
         spin_lock_irqsave(&echan->vchan.lock, flags);
         if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
                 txstate->residue = edma_residue(echan->edesc);
         else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
                 txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+
+        /*
+         * Mark the cookie completed if the residue is 0 for non cyclic
+         * transfers
+         */
+        if (ret != DMA_COMPLETE && !txstate->residue &&
+            echan->edesc && echan->edesc->polled &&
+            echan->edesc->vdesc.tx.cookie == cookie) {
+                edma_stop(echan);
+                vchan_cookie_complete(&echan->edesc->vdesc);
+                echan->edesc = NULL;
+                edma_execute(echan);
+                ret = DMA_COMPLETE;
+        }
+
         spin_unlock_irqrestore(&echan->vchan.lock, flags);
 
         return ret;
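Since a polled descriptor is only retired from inside edma_tx_status(), a client has to keep querying the status; the dummy txstate_tmp above is what allows such a query to work even with a NULL dma_tx_state. A bounded busy-wait could look like this (hypothetical helper, not part of the patch; the name and the timeout granularity are illustrative):

#include <linux/delay.h>
#include <linux/dmaengine.h>

/* Hypothetical: poll a cookie for up to timeout_us microseconds. */
static int edma_poll_done(struct dma_chan *chan, dma_cookie_t cookie,
                          unsigned int timeout_us)
{
        enum dma_status status;

        while (timeout_us--) {
                /* NULL state is fine: the driver substitutes txstate_tmp */
                status = dmaengine_tx_status(chan, cookie, NULL);
                if (status != DMA_IN_PROGRESS)
                        return status == DMA_COMPLETE ? 0 : -EIO;
                udelay(1);
        }

        return -ETIMEDOUT;
}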