author    | Andy Shevchenko <andriy.shevchenko@linux.intel.com> | 2012-06-19 13:46:32 +0300
committer | Vinod Koul <vinod.koul@linux.intel.com>             | 2012-06-21 08:05:39 +0530
commit    | 3f9362078b101bd57bfed635e49c8ecb8b6d465d (patch)
tree      | 452c1ed2b153d7b954f52f4b325ff86dd4b25474
parent    | 0272e93f364eac1a30f2831adcaca3dd633d5f14 (diff)
dw_dmac: introduce dwc_chan_disable
This piece of code is repeated in several places; factor it out into a separate helper function.
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Viresh Kumar <viresh.linux@gmail.com>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
-rw-r--r-- | drivers/dma/dw_dmac.c | 32
1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 83f63b38dcc0..85ed5f20e027 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -217,6 +217,14 @@ static void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
 		channel_readl(dwc, CTL_LO));
 }
 
+
+static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+	channel_clear_bit(dw, CH_EN, dwc->mask);
+	while (dma_readl(dw, CH_EN) & dwc->mask)
+		cpu_relax();
+}
+
 /*----------------------------------------------------------------------*/
 
 /* Called with dwc->lock held and bh disabled */
@@ -312,9 +320,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 			"BUG: XFER bit set, but channel not idle!\n");
 
 		/* Try to continue after resetting the channel... */
-		channel_clear_bit(dw, CH_EN, dwc->mask);
-		while (dma_readl(dw, CH_EN) & dwc->mask)
-			cpu_relax();
+		dwc_chan_disable(dw, dwc);
 	}
 
 	/*
@@ -396,9 +402,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 			"BUG: All descriptors done, but channel not idle!\n");
 
 		/* Try to continue after resetting the channel... */
-		channel_clear_bit(dw, CH_EN, dwc->mask);
-		while (dma_readl(dw, CH_EN) & dwc->mask)
-			cpu_relax();
+		dwc_chan_disable(dw, dwc);
 
 		if (!list_empty(&dwc->queue)) {
 			list_move(dwc->queue.next, &dwc->active_list);
@@ -514,9 +518,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
 		dwc_dump_chan_regs(dwc);
 
-		channel_clear_bit(dw, CH_EN, dwc->mask);
-		while (dma_readl(dw, CH_EN) & dwc->mask)
-			cpu_relax();
+		dwc_chan_disable(dw, dwc);
 
 		/* make sure DMA does not restart by loading a new list */
 		channel_writel(dwc, LLP, 0);
@@ -946,9 +948,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	} else if (cmd == DMA_TERMINATE_ALL) {
 		spin_lock_irqsave(&dwc->lock, flags);
 
-		channel_clear_bit(dw, CH_EN, dwc->mask);
-		while (dma_readl(dw, CH_EN) & dwc->mask)
-			cpu_relax();
+		dwc_chan_disable(dw, dwc);
 
 		dwc->paused = false;
 
@@ -1156,9 +1156,7 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
 
 	spin_lock_irqsave(&dwc->lock, flags);
 
-	channel_clear_bit(dw, CH_EN, dwc->mask);
-	while (dma_readl(dw, CH_EN) & dwc->mask)
-		cpu_relax();
+	dwc_chan_disable(dw, dwc);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 }
@@ -1336,9 +1334,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 
 	spin_lock_irqsave(&dwc->lock, flags);
 
-	channel_clear_bit(dw, CH_EN, dwc->mask);
-	while (dma_readl(dw, CH_EN) & dwc->mask)
-		cpu_relax();
+	dwc_chan_disable(dw, dwc);
 
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
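For readers outside the kernel tree, the sequence being factored out is a "clear the channel-enable bit, then busy-wait until the controller reports the channel idle" pattern. The sketch below models that pattern in plain user-space C; the register array, the dma_read()/dma_write() helpers and the chan_disable() function are hypothetical stand-ins for the driver's dma_readl(), channel_clear_bit() and the new dwc_chan_disable(), not the dw_dmac code itself.

```c
/*
 * Minimal user-space sketch of the disable-and-poll pattern that
 * dwc_chan_disable() factors out.  Everything here is an illustrative
 * stand-in for the driver's real register accessors.
 */
#include <stdint.h>
#include <stdio.h>

#define DW_CH_EN_REG 0                 /* index of the channel-enable register */

/* Fake "hardware": one 32-bit register per index. */
static volatile uint32_t regs[4];

static uint32_t dma_read(unsigned int reg)              { return regs[reg]; }
static void     dma_write(unsigned int reg, uint32_t v) { regs[reg] = v; }

/* Stand-in for the kernel's cpu_relax(): a polite no-op in user space. */
static void cpu_relax(void) { }

/*
 * Disable one channel: clear its enable bit, then spin until the
 * controller no longer reports the channel as enabled.  On the real
 * controller the status bit is cleared by hardware once the channel
 * has actually stopped; in this simulation the write itself clears
 * the bit, so the loop exits immediately.
 */
static void chan_disable(uint32_t chan_mask)
{
	dma_write(DW_CH_EN_REG, dma_read(DW_CH_EN_REG) & ~chan_mask);
	while (dma_read(DW_CH_EN_REG) & chan_mask)
		cpu_relax();
}

int main(void)
{
	uint32_t chan0_mask = 1u << 0;

	dma_write(DW_CH_EN_REG, chan0_mask);        /* "enable" channel 0 */
	printf("CH_EN before: 0x%08x\n", (unsigned int)dma_read(DW_CH_EN_REG));

	chan_disable(chan0_mask);                   /* the factored-out pattern */
	printf("CH_EN after:  0x%08x\n", (unsigned int)dma_read(DW_CH_EN_REG));
	return 0;
}
```

In the kernel the loop body calls cpu_relax(), which hints to the CPU that it is spinning (for example the PAUSE instruction on x86) without sleeping. That choice fits the call sites shown in the diff: they all run under dwc->lock, so sleeping is not an option and a short busy-wait until the hardware reports the channel disabled is the appropriate tool.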