Diffstat (limited to 'drivers/dma/mxs-dma.c')
-rw-r--r--  drivers/dma/mxs-dma.c  25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 22cc7f68ef6e..20a9cb7cb6d3 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -24,6 +24,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>
+#include <linux/dma/mxs-dma.h>
#include <asm/irq.h>
@@ -77,6 +78,7 @@
#define BM_CCW_COMMAND (3 << 0)
#define CCW_CHAIN (1 << 2)
#define CCW_IRQ (1 << 3)
+#define CCW_WAIT4RDY (1 << 5)
#define CCW_DEC_SEM (1 << 6)
#define CCW_WAIT4END (1 << 7)
#define CCW_HALT_ON_TERM (1 << 8)
@@ -477,16 +479,16 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
* ......
* ->device_prep_slave_sg(0);
* ......
- * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ * ->device_prep_slave_sg(DMA_CTRL_ACK);
* ......
* [3] If there are more than two DMA commands in the DMA chain, the code
* should be:
* ......
* ->device_prep_slave_sg(0); // First
* ......
- * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
+ * ->device_prep_slave_sg(DMA_CTRL_ACK);
* ......
- * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
+ * ->device_prep_slave_sg(DMA_CTRL_ACK); // Last
* ......
*/
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
@@ -500,13 +502,12 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct scatterlist *sg;
u32 i, j;
u32 *pio;
- bool append = flags & DMA_PREP_INTERRUPT;
- int idx = append ? mxs_chan->desc_count : 0;
+ int idx = 0;
- if (mxs_chan->status == DMA_IN_PROGRESS && !append)
- return NULL;
+ if (mxs_chan->status == DMA_IN_PROGRESS)
+ idx = mxs_chan->desc_count;
- if (sg_len + (append ? idx : 0) > NUM_CCW) {
+ if (sg_len + idx > NUM_CCW) {
dev_err(mxs_dma->dma_device.dev,
"maximum number of sg exceeded: %d > %d\n",
sg_len, NUM_CCW);
@@ -520,7 +521,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
* If the sg is prepared with append flag set, the sg
* will be appended to the last prepared sg.
*/
- if (append) {
+ if (idx) {
BUG_ON(idx < 1);
ccw = &mxs_chan->ccw[idx - 1];
ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
@@ -541,12 +542,14 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits = 0;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
- if (flags & DMA_CTRL_ACK)
+ if (flags & MXS_DMA_CTRL_WAIT4END)
ccw->bits |= CCW_WAIT4END;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
ccw->bits |= BF_CCW(sg_len, PIO_NUM);
ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
+ if (flags & MXS_DMA_CTRL_WAIT4RDY)
+ ccw->bits |= CCW_WAIT4RDY;
} else {
for_each_sg(sgl, sg, sg_len, i) {
if (sg_dma_len(sg) > MAX_XFER_BYTES) {
@@ -573,7 +576,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits &= ~CCW_CHAIN;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
- if (flags & DMA_CTRL_ACK)
+ if (flags & MXS_DMA_CTRL_WAIT4END)
ccw->bits |= CCW_WAIT4END;
}
}
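
For readers of the new API: below is a minimal, hypothetical sketch of how a client driver might chain two DMA commands on an MXS channel using the MXS_DMA_CTRL_WAIT4END and MXS_DMA_CTRL_WAIT4RDY flags introduced by this patch. The function and variable names (example_issue_chained_xfer, cmd_pio, data_sgl) are illustrative and do not appear in the patch; the call sequence follows the usage pattern described in the driver comment above, where consecutive prep calls on a channel that is already DMA_IN_PROGRESS are appended to the running chain and only the final descriptor is submitted.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/dma/mxs-dma.h>

static int example_issue_chained_xfer(struct dma_chan *chan,
				      struct scatterlist *data_sgl,
				      unsigned int data_ents,
				      u32 *cmd_pio, unsigned int pio_words)
{
	struct dma_async_tx_descriptor *desc;

	/*
	 * First command in the chain: PIO words for the peripheral,
	 * passed with DMA_TRANS_NONE as is the mxs-dma convention.
	 * MXS_DMA_CTRL_WAIT4RDY asks the controller to wait for the
	 * peripheral's ready signal (CCW_WAIT4RDY) before running it.
	 */
	desc = dmaengine_prep_slave_sg(chan, (struct scatterlist *)cmd_pio,
				       pio_words, DMA_TRANS_NONE,
				       MXS_DMA_CTRL_WAIT4RDY);
	if (!desc)
		return -EINVAL;

	/*
	 * Last command in the chain: the data phase, appended to the
	 * chain because the channel is already DMA_IN_PROGRESS.
	 * MXS_DMA_CTRL_WAIT4END sets CCW_WAIT4END so the channel waits
	 * for the peripheral's end-of-command signal before completing.
	 */
	desc = dmaengine_prep_slave_sg(chan, data_sgl, data_ents,
				       DMA_DEV_TO_MEM,
				       MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		return -EINVAL;

	/* Submit only the final descriptor, then kick off the chain. */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}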