author		Vinod Koul <vkoul@kernel.org>	2018-12-31 19:31:31 +0530
committer	Vinod Koul <vkoul@kernel.org>	2018-12-31 19:31:31 +0530
commit		77ee1aacdd5bad4617a9617bb838d79840cf1f20 (patch)
tree		bf85bcdc1a1e0c5bad209f5dfa432f865bf1e839 /drivers/dma
parent		5f443256e3a1e22a0969878428bcfbd3639439d9 (diff)
parent		9e314ef35c3844b53a72c588028383e5e678f4d2 (diff)
Merge branch 'topic/dirn_remove' into for-linus
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/imx-sdma.c	56
-rw-r--r--	drivers/dma/mmp_pdma.c	28
-rw-r--r--	drivers/dma/pl330.c	28
-rw-r--r--	drivers/dma/ste_dma40.c	31
4 files changed, 111 insertions, 32 deletions
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b4ec2d20e661..d6a5d6ddfdad 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -335,6 +335,7 @@ struct sdma_desc {
  * @sdma: pointer to the SDMA engine for this channel
  * @channel: the channel number, matches dmaengine chan_id + 1
  * @direction: transfer type. Needed for setting SDMA script
+ * @slave_config Slave configuration
  * @peripheral_type: Peripheral type. Needed for setting SDMA script
  * @event_id0: aka dma request line
  * @event_id1: for channels that use 2 events
@@ -362,6 +363,7 @@ struct sdma_channel {
 	struct sdma_engine *sdma;
 	unsigned int channel;
 	enum dma_transfer_direction direction;
+	struct dma_slave_config slave_config;
 	enum sdma_peripheral_type peripheral_type;
 	unsigned int event_id0;
 	unsigned int event_id1;
@@ -440,6 +442,10 @@ struct sdma_engine {
 	struct sdma_buffer_descriptor *bd0;
 };
 
+static int sdma_config_write(struct dma_chan *chan,
+		       struct dma_slave_config *dmaengine_cfg,
+		       enum dma_transfer_direction direction);
+
 static struct sdma_driver_data sdma_imx31 = {
 	.chnenbl0 = SDMA_CHNENBL0_IMX31,
 	.num_events = 32,
@@ -1104,18 +1110,6 @@ static int sdma_config_channel(struct dma_chan *chan)
 	sdmac->shp_addr = 0;
 	sdmac->per_addr = 0;
 
-	if (sdmac->event_id0) {
-		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
-			return -EINVAL;
-		sdma_event_enable(sdmac, sdmac->event_id0);
-	}
-
-	if (sdmac->event_id1) {
-		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
-			return -EINVAL;
-		sdma_event_enable(sdmac, sdmac->event_id1);
-	}
-
 	switch (sdmac->peripheral_type) {
 	case IMX_DMATYPE_DSP:
 		sdma_config_ownership(sdmac, false, true, true);
@@ -1415,6 +1409,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 	struct scatterlist *sg;
 	struct sdma_desc *desc;
 
+	sdma_config_write(chan, &sdmac->slave_config, direction);
+
 	desc = sdma_transfer_init(sdmac, direction, sg_len);
 	if (!desc)
 		goto err_out;
@@ -1499,6 +1495,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
 
+	sdma_config_write(chan, &sdmac->slave_config, direction);
+
 	desc = sdma_transfer_init(sdmac, direction, num_periods);
 	if (!desc)
 		goto err_out;
@@ -1554,17 +1552,18 @@ err_out:
 	return NULL;
 }
 
-static int sdma_config(struct dma_chan *chan,
-		       struct dma_slave_config *dmaengine_cfg)
+static int sdma_config_write(struct dma_chan *chan,
+		       struct dma_slave_config *dmaengine_cfg,
+		       enum dma_transfer_direction direction)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 
-	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+	if (direction == DMA_DEV_TO_MEM) {
 		sdmac->per_address = dmaengine_cfg->src_addr;
 		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
 			dmaengine_cfg->src_addr_width;
 		sdmac->word_size = dmaengine_cfg->src_addr_width;
-	} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
+	} else if (direction == DMA_DEV_TO_DEV) {
 		sdmac->per_address2 = dmaengine_cfg->src_addr;
 		sdmac->per_address = dmaengine_cfg->dst_addr;
 		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
@@ -1578,10 +1577,33 @@ static int sdma_config(struct dma_chan *chan,
 			dmaengine_cfg->dst_addr_width;
 		sdmac->word_size = dmaengine_cfg->dst_addr_width;
 	}
-	sdmac->direction = dmaengine_cfg->direction;
+	sdmac->direction = direction;
 	return sdma_config_channel(chan);
 }
 
+static int sdma_config(struct dma_chan *chan,
+		       struct dma_slave_config *dmaengine_cfg)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
+
+	/* Set ENBLn earlier to make sure dma request triggered after that */
+	if (sdmac->event_id0) {
+		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+			return -EINVAL;
+		sdma_event_enable(sdmac, sdmac->event_id0);
+	}
+
+	if (sdmac->event_id1) {
+		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
+			return -EINVAL;
+		sdma_event_enable(sdmac, sdmac->event_id1);
+	}
+
+	return 0;
+}
+
 static enum dma_status sdma_tx_status(struct dma_chan *chan,
 				      dma_cookie_t cookie,
 				      struct dma_tx_state *txstate)
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index eb3a1f42ab06..334bab92d26d 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -96,6 +96,7 @@ struct mmp_pdma_chan {
 	struct dma_async_tx_descriptor desc;
 	struct mmp_pdma_phy *phy;
 	enum dma_transfer_direction dir;
+	struct dma_slave_config slave_config;
 
 	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
 						 * is in cyclic mode */
@@ -140,6 +141,10 @@ struct mmp_pdma_device {
 #define to_mmp_pdma_dev(dmadev) \
 	container_of(dmadev, struct mmp_pdma_device, device)
 
+static int mmp_pdma_config_write(struct dma_chan *dchan,
+			   struct dma_slave_config *cfg,
+			   enum dma_transfer_direction direction);
+
 static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
 {
 	u32 reg = (phy->idx << 4) + DDADR;
@@ -537,6 +542,8 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 
 	chan->byte_align = false;
 
+	mmp_pdma_config_write(dchan, &chan->slave_config, dir);
+
 	for_each_sg(sgl, sg, sg_len, i) {
 		addr = sg_dma_address(sg);
 		avail = sg_dma_len(sgl);
@@ -619,6 +626,7 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
 		return NULL;
 
 	chan = to_mmp_pdma_chan(dchan);
+	mmp_pdma_config_write(dchan, &chan->slave_config, direction);
 
 	switch (direction) {
 	case DMA_MEM_TO_DEV:
@@ -684,8 +692,9 @@ fail:
 	return NULL;
 }
 
-static int mmp_pdma_config(struct dma_chan *dchan,
-			   struct dma_slave_config *cfg)
+static int mmp_pdma_config_write(struct dma_chan *dchan,
+			   struct dma_slave_config *cfg,
+			   enum dma_transfer_direction direction)
 {
 	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
 	u32 maxburst = 0, addr = 0;
@@ -694,12 +703,12 @@ static int mmp_pdma_config(struct dma_chan *dchan,
 	if (!dchan)
 		return -EINVAL;
 
-	if (cfg->direction == DMA_DEV_TO_MEM) {
+	if (direction == DMA_DEV_TO_MEM) {
 		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
 		maxburst = cfg->src_maxburst;
 		width = cfg->src_addr_width;
 		addr = cfg->src_addr;
-	} else if (cfg->direction == DMA_MEM_TO_DEV) {
+	} else if (direction == DMA_MEM_TO_DEV) {
 		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
 		maxburst = cfg->dst_maxburst;
 		width = cfg->dst_addr_width;
@@ -720,7 +729,7 @@ static int mmp_pdma_config(struct dma_chan *dchan,
 	else if (maxburst == 32)
 		chan->dcmd |= DCMD_BURST32;
 
-	chan->dir = cfg->direction;
+	chan->dir = direction;
 	chan->dev_addr = addr;
 	/* FIXME: drivers should be ported over to use the filter
 	 * function. Once that's done, the following two lines can
@@ -732,6 +741,15 @@ static int mmp_pdma_config(struct dma_chan *dchan,
 	return 0;
 }
 
+static int mmp_pdma_config(struct dma_chan *dchan,
+			   struct dma_slave_config *cfg)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+
+	memcpy(&chan->slave_config, cfg, sizeof(*cfg));
+	return 0;
+}
+
 static int mmp_pdma_terminate_all(struct dma_chan *dchan)
 {
 	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 88750a34e859..cff1b143fff5 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -448,6 +448,7 @@ struct dma_pl330_chan {
 	/* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
 	dma_addr_t fifo_dma;
 	enum dma_data_direction dir;
+	struct dma_slave_config slave_config;
 
 	/* for cyclic capability */
 	bool cyclic;
@@ -542,6 +543,10 @@ struct _xfer_spec {
 	struct dma_pl330_desc *desc;
 };
 
+static int pl330_config_write(struct dma_chan *chan,
+			struct dma_slave_config *slave_config,
+			enum dma_transfer_direction direction);
+
 static inline bool _queue_full(struct pl330_thread *thrd)
 {
 	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
@@ -2220,20 +2225,21 @@ static int fixup_burst_len(int max_burst_len, int quirks)
 		return max_burst_len;
 }
 
-static int pl330_config(struct dma_chan *chan,
-			struct dma_slave_config *slave_config)
+static int pl330_config_write(struct dma_chan *chan,
+			struct dma_slave_config *slave_config,
+			enum dma_transfer_direction direction)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
 
 	pl330_unprep_slave_fifo(pch);
-	if (slave_config->direction == DMA_MEM_TO_DEV) {
+	if (direction == DMA_MEM_TO_DEV) {
 		if (slave_config->dst_addr)
 			pch->fifo_addr = slave_config->dst_addr;
 		if (slave_config->dst_addr_width)
 			pch->burst_sz = __ffs(slave_config->dst_addr_width);
 		pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
 			pch->dmac->quirks);
-	} else if (slave_config->direction == DMA_DEV_TO_MEM) {
+	} else if (direction == DMA_DEV_TO_MEM) {
 		if (slave_config->src_addr)
 			pch->fifo_addr = slave_config->src_addr;
 		if (slave_config->src_addr_width)
@@ -2245,6 +2251,16 @@ static int pl330_config(struct dma_chan *chan,
 	return 0;
 }
 
+static int pl330_config(struct dma_chan *chan,
+			struct dma_slave_config *slave_config)
+{
+	struct dma_pl330_chan *pch = to_pchan(chan);
+
+	memcpy(&pch->slave_config, slave_config, sizeof(*slave_config));
+
+	return 0;
+}
+
 static int pl330_terminate_all(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
@@ -2661,6 +2677,8 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 		return NULL;
 	}
 
+	pl330_config_write(chan, &pch->slave_config, direction);
+
 	if (!pl330_prep_slave_fifo(pch, direction))
 		return NULL;
 
@@ -2815,6 +2833,8 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (unlikely(!pch || !sgl || !sg_len))
 		return NULL;
 
+	pl330_config_write(chan, &pch->slave_config, direction);
+
 	if (!pl330_prep_slave_fifo(pch, direction))
 		return NULL;
 
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 5e328bd10c27..907ae97a3ef4 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -442,6 +442,7 @@ struct d40_base;
  * @queue: Queued jobs.
  * @prepare_queue: Prepared jobs.
  * @dma_cfg: The client configuration of this dma channel.
+ * @slave_config: DMA slave configuration.
  * @configured: whether the dma_cfg configuration is valid
  * @base: Pointer to the device instance struct.
  * @src_def_cfg: Default cfg register setting for src.
@@ -468,6 +469,7 @@ struct d40_chan {
 	struct list_head queue;
 	struct list_head prepare_queue;
 	struct stedma40_chan_cfg dma_cfg;
+	struct dma_slave_config slave_config;
 	bool configured;
 	struct d40_base *base;
 	/* Default register configurations */
@@ -625,6 +627,10 @@ static void __iomem *chan_base(struct d40_chan *chan)
 #define chan_err(d40c, format, arg...) \
 	d40_err(chan2dev(d40c), format, ## arg)
 
+static int d40_set_runtime_config_write(struct dma_chan *chan,
+				  struct dma_slave_config *config,
+				  enum dma_transfer_direction direction);
+
 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
 			      int lli_len)
 {
@@ -2216,6 +2222,8 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 		return NULL;
 	}
 
+	d40_set_runtime_config_write(dchan, &chan->slave_config, direction);
+
 	spin_lock_irqsave(&chan->lock, flags);
 
 	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
@@ -2634,11 +2642,22 @@ dma40_config_to_halfchannel(struct d40_chan *d40c,
 	return 0;
 }
 
-/* Runtime reconfiguration extension */
 static int d40_set_runtime_config(struct dma_chan *chan,
 				  struct dma_slave_config *config)
 {
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+
+	memcpy(&d40c->slave_config, config, sizeof(*config));
+
+	return 0;
+}
+
+/* Runtime reconfiguration extension */
+static int d40_set_runtime_config_write(struct dma_chan *chan,
+				  struct dma_slave_config *config,
+				  enum dma_transfer_direction direction)
+{
+	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
 	enum dma_slave_buswidth src_addr_width, dst_addr_width;
 	dma_addr_t config_addr;
@@ -2655,7 +2674,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	dst_addr_width = config->dst_addr_width;
 	dst_maxburst = config->dst_maxburst;
 
-	if (config->direction == DMA_DEV_TO_MEM) {
+	if (direction == DMA_DEV_TO_MEM) {
 		config_addr = config->src_addr;
 
 		if (cfg->dir != DMA_DEV_TO_MEM)
@@ -2671,7 +2690,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 		if (dst_maxburst == 0)
 			dst_maxburst = src_maxburst;
 
-	} else if (config->direction == DMA_MEM_TO_DEV) {
+	} else if (direction == DMA_MEM_TO_DEV) {
 		config_addr = config->dst_addr;
 
 		if (cfg->dir != DMA_MEM_TO_DEV)
@@ -2689,7 +2708,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	} else {
 		dev_err(d40c->base->dev,
 			"unrecognized channel direction %d\n",
-			config->direction);
+			direction);
 		return -EINVAL;
 	}
 
@@ -2746,12 +2765,12 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 
 	/* These settings will take precedence later */
 	d40c->runtime_addr = config_addr;
-	d40c->runtime_direction = config->direction;
+	d40c->runtime_direction = direction;
 	dev_dbg(d40c->base->dev,
 		"configured channel %s for %s, data width %d/%d, "
 		"maxburst %d/%d elements, LE, no flow control\n",
 		dma_chan_name(chan),
-		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
+		(direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
 		src_addr_width, dst_addr_width,
 		src_maxburst, dst_maxburst);
 
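All four conversions follow the same pattern: the channel gains a cached struct dma_slave_config, the device_config callback only stores it, and a new *_config_write() helper applies it at prep time using the enum dma_transfer_direction argument of the prep call rather than the deprecated dma_slave_config.direction field. Below is a minimal sketch of that pattern; the foo_* names and the foo_chan layout are illustrative only and are not taken from any of the drivers above.

/*
 * Minimal sketch of the conversion pattern shared by the drivers in this
 * merge. The "foo_*" names and the foo_chan layout are illustrative only;
 * struct dma_chan, struct dma_slave_config and enum dma_transfer_direction
 * come from <linux/dmaengine.h>.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

struct foo_chan {
	struct dma_chan chan;
	struct dma_slave_config slave_config;	/* cached client config */
	dma_addr_t dev_addr;			/* programmed at prep time */
};

#define to_foo_chan(c)	container_of(c, struct foo_chan, chan)

/* Apply the cached config for the direction of the transfer being prepared. */
static int foo_config_write(struct dma_chan *chan,
			    struct dma_slave_config *cfg,
			    enum dma_transfer_direction direction)
{
	struct foo_chan *fc = to_foo_chan(chan);

	/* Use the prep call's direction, not the deprecated cfg->direction. */
	if (direction == DMA_DEV_TO_MEM)
		fc->dev_addr = cfg->src_addr;
	else if (direction == DMA_MEM_TO_DEV)
		fc->dev_addr = cfg->dst_addr;
	else
		return -EINVAL;

	return 0;
}

/* .device_config callback: only cache the config, apply it at prep time. */
static int foo_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct foo_chan *fc = to_foo_chan(chan);

	memcpy(&fc->slave_config, cfg, sizeof(*cfg));
	return 0;
}

/* .device_prep_slave_sg callback: its direction argument is authoritative. */
static struct dma_async_tx_descriptor *
foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction direction,
		  unsigned long flags, void *context)
{
	struct foo_chan *fc = to_foo_chan(chan);

	if (foo_config_write(chan, &fc->slave_config, direction))
		return NULL;

	/* ...build and return the transfer descriptor here... */
	return NULL;
}

From a client's point of view nothing needs to change: the direction passed to dmaengine_prep_slave_sg() or dmaengine_prep_slave_single() is what these drivers now honor, so filling in the deprecated direction field of struct dma_slave_config before calling dmaengine_slave_config() is no longer necessary for them.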