From ff0e0f4f568e8d7593e0035c0c58067bcaf4ab07 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 22 Apr 2013 10:33:32 +0200 Subject: dma: of: Remove restriction that #dma-cells can't be 0 There is no sensible reason why #dma-cells shouldn't be allowed to be 0. It is completely up to the DMA controller how many additional parameters, besides the phandle, it needs to identify a channel. E.g. for a DMA controller with only one channel, or for DMA controllers which don't have a restriction on which channel can be used for which peripheral, it is completely legitimate to not require any additional parameters. Also fixes the following warning: drivers/dma/of-dma.c: In function 'of_dma_controller_register': drivers/dma/of-dma.c:67:7: warning: 'nbcells' may be used uninitialized in this function Signed-off-by: Lars-Peter Clausen Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/of-dma.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 7aa0864cd487..268cc8ab34e8 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -64,7 +64,6 @@ int of_dma_controller_register(struct device_node *np, void *data) { struct of_dma *ofdma; - int nbcells; const __be32 *prop; if (!np || !of_dma_xlate) { @@ -77,18 +76,16 @@ int of_dma_controller_register(struct device_node *np, return -ENOMEM; prop = of_get_property(np, "#dma-cells", NULL); - if (prop) - nbcells = be32_to_cpup(prop); - - if (!prop || !nbcells) { - pr_err("%s: #dma-cells property is missing or invalid\n", + if (!prop) { + pr_err("%s: #dma-cells property is missing\n", __func__); kfree(ofdma); return -EINVAL; } + ofdma->of_node = np; - ofdma->of_dma_nbcells = nbcells; + ofdma->of_dma_nbcells = be32_to_cpup(prop); ofdma->of_dma_xlate = of_dma_xlate; ofdma->of_dma_data = data; -- cgit v1.2.3-58-ga151 From 8552bb4f16800d5ebc176a2cf5f2aa55b22731ea Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 22 Apr 2013 10:33:33 +0200 Subject: dma: of: Remove check on always true condition Both the of_dma_nbcells field of the of_dma_controller and the args_count field of the dma_spec are initialized by parsing the #dma-cells attribute of their device tree node. So if the device tree nodes of a DMA controller and the dma_spec match, this means that of_dma_nbcells and args_count will also match. So the second test in the of_dma_find_controller loop is redundant, because if the first test yields true the second test will also yield true. So we can safely remove the test whether of_dma_nbcells matches args_count. Since this was the last user of the of_dma_nbcells field we can remove it altogether.
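For a controller that needs no extra specifier cells, the xlate callback simply ignores the (empty) argument list. A minimal sketch under that assumption — struct foo_dma, foo_dma_filter and the probe fragment are hypothetical stand-ins modelled on the imx-dma xlate that appears later on this page, not code from these commits:

#include <linux/dmaengine.h>
#include <linux/of_dma.h>

struct foo_dma {				/* hypothetical driver state */
	struct dma_device dma_device;
};

static bool foo_dma_filter(struct dma_chan *chan, void *param)
{
	struct foo_dma *fdma = param;

	/* accept only channels belonging to this controller instance */
	return chan->device == &fdma->dma_device;
}

static struct dma_chan *foo_dma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct foo_dma *fdma = ofdma->of_dma_data;	/* 'data' from registration */

	/* #dma-cells = <0>: args_count is 0, there is nothing to decode */
	if (dma_spec->args_count != 0)
		return NULL;

	return dma_request_channel(fdma->dma_device.cap_mask,
				   foo_dma_filter, fdma);
}

/* and in probe, after dma_async_device_register(&fdma->dma_device):
 *	of_dma_controller_register(pdev->dev.of_node, foo_dma_xlate, fdma);
 */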
Signed-off-by: Lars-Peter Clausen Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/of-dma.c | 14 +------------- include/linux/of_dma.h | 1 - 2 files changed, 1 insertion(+), 14 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 268cc8ab34e8..75334bdd2c56 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -35,8 +35,7 @@ static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec) struct of_dma *ofdma; list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) - if ((ofdma->of_node == dma_spec->np) && - (ofdma->of_dma_nbcells == dma_spec->args_count)) + if (ofdma->of_node == dma_spec->np) return ofdma; pr_debug("%s: can't find DMA controller %s\n", __func__, @@ -64,7 +63,6 @@ int of_dma_controller_register(struct device_node *np, void *data) { struct of_dma *ofdma; - const __be32 *prop; if (!np || !of_dma_xlate) { pr_err("%s: not enough information provided\n", __func__); @@ -75,17 +73,7 @@ int of_dma_controller_register(struct device_node *np, if (!ofdma) return -ENOMEM; - prop = of_get_property(np, "#dma-cells", NULL); - if (!prop) { - pr_err("%s: #dma-cells property is missing\n", - __func__); - kfree(ofdma); - return -EINVAL; - } - - ofdma->of_node = np; - ofdma->of_dma_nbcells = be32_to_cpup(prop); ofdma->of_dma_xlate = of_dma_xlate; ofdma->of_dma_data = data; diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index 364dda734877..ae36298ba076 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h @@ -21,7 +21,6 @@ struct device_node; struct of_dma { struct list_head of_dma_controllers; struct device_node *of_node; - int of_dma_nbcells; struct dma_chan *(*of_dma_xlate) (struct of_phandle_args *, struct of_dma *); void *of_dma_data; -- cgit v1.2.3-58-ga151 From 290ad0f9d954b445788bf26652b239c59cec2060 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Sun, 26 May 2013 11:53:20 +0200 Subject: dma: imx-dma: Add oftree support Adding devicetree support for imx-dma driver. Use driver name for function 'imx_dma_is_general_purpose' because the devicename for devicetree initialized devices is different. Signed-off-by: Markus Pargmann Reviewed-by: Arnd Bergmann Reviewed-by: Shawn Guo Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/fsl-imx-dma.txt | 48 ++++++++++++++ drivers/dma/imx-dma.c | 75 ++++++++++++++++++++++ include/linux/platform_data/dma-imx.h | 6 +- 3 files changed, 125 insertions(+), 4 deletions(-) create mode 100644 Documentation/devicetree/bindings/dma/fsl-imx-dma.txt (limited to 'drivers/dma') diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-dma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-dma.txt new file mode 100644 index 000000000000..2717ecb47db9 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/fsl-imx-dma.txt @@ -0,0 +1,48 @@ +* Freescale Direct Memory Access (DMA) Controller for i.MX + +This document will only describe differences to the generic DMA Controller and +DMA request bindings as described in dma/dma.txt . + +* DMA controller + +Required properties: +- compatible : Should be "fsl,-dma". chip can be imx1, imx21 or imx27 +- reg : Should contain DMA registers location and length +- interrupts : First item should be DMA interrupt, second one is optional and + should contain DMA Error interrupt +- #dma-cells : Has to be 1. imx-dma does not support anything else. + +Optional properties: +- #dma-channels : Number of DMA channels supported. Should be 16. 
+- #dma-requests : Number of DMA requests supported. + +Example: + + dma: dma@10001000 { + compatible = "fsl,imx27-dma"; + reg = <0x10001000 0x1000>; + interrupts = <32 33>; + #dma-cells = <1>; + #dma-channels = <16>; + }; + + +* DMA client + +Clients have to specify the DMA requests with phandles in a list. + +Required properties: +- dmas: List of one or more DMA request specifiers. One DMA request specifier + consists of a phandle to the DMA controller followed by the integer + specifiying the request line. +- dma-names: List of string identifiers for the DMA requests. For the correct + names, have a look at the specific client driver. + +Example: + + sdhci1: sdhci@10013000 { + ... + dmas = <&dma 7>; + dma-names = "rx-tx"; + ... + }; diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index f28583370d00..34c54cf08231 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include @@ -186,6 +188,11 @@ struct imxdma_engine { enum imx_dma_type devtype; }; +struct imxdma_filter_data { + struct imxdma_engine *imxdma; + int request; +}; + static struct platform_device_id imx_dma_devtype[] = { { .name = "imx1-dma", @@ -202,6 +209,22 @@ static struct platform_device_id imx_dma_devtype[] = { }; MODULE_DEVICE_TABLE(platform, imx_dma_devtype); +static const struct of_device_id imx_dma_of_dev_id[] = { + { + .compatible = "fsl,imx1-dma", + .data = &imx_dma_devtype[IMX1_DMA], + }, { + .compatible = "fsl,imx21-dma", + .data = &imx_dma_devtype[IMX21_DMA], + }, { + .compatible = "fsl,imx27-dma", + .data = &imx_dma_devtype[IMX27_DMA], + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id); + static inline int is_imx1_dma(struct imxdma_engine *imxdma) { return imxdma->devtype == IMX1_DMA; @@ -996,13 +1019,50 @@ static void imxdma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&imxdma->lock, flags); } +static bool imxdma_filter_fn(struct dma_chan *chan, void *param) +{ + struct imxdma_filter_data *fdata = param; + struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan); + + if (chan->device->dev != fdata->imxdma->dev) + return false; + + imxdma_chan->dma_request = fdata->request; + chan->private = NULL; + + return true; +} + +static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + int count = dma_spec->args_count; + struct imxdma_engine *imxdma = ofdma->of_dma_data; + struct imxdma_filter_data fdata = { + .imxdma = imxdma, + }; + + if (count != 1) + return NULL; + + fdata.request = dma_spec->args[0]; + + return dma_request_channel(imxdma->dma_device.cap_mask, + imxdma_filter_fn, &fdata); +} + static int __init imxdma_probe(struct platform_device *pdev) { struct imxdma_engine *imxdma; struct resource *res; + const struct of_device_id *of_id; int ret, i; int irq, irq_err; + of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev); + if (of_id) + pdev->id_entry = of_id->data; + imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL); if (!imxdma) return -ENOMEM; @@ -1136,8 +1196,19 @@ static int __init imxdma_probe(struct platform_device *pdev) goto err; } + if (pdev->dev.of_node) { + ret = of_dma_controller_register(pdev->dev.of_node, + imxdma_xlate, imxdma); + if (ret) { + dev_err(&pdev->dev, "unable to register of_dma_controller\n"); + goto err_of_dma_controller; + } + } + return 0; +err_of_dma_controller: + dma_async_device_unregister(&imxdma->dma_device); err: clk_disable_unprepare(imxdma->dma_ipg); 
clk_disable_unprepare(imxdma->dma_ahb); @@ -1150,6 +1221,9 @@ static int imxdma_remove(struct platform_device *pdev) dma_async_device_unregister(&imxdma->dma_device); + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); + clk_disable_unprepare(imxdma->dma_ipg); clk_disable_unprepare(imxdma->dma_ahb); @@ -1159,6 +1233,7 @@ static int imxdma_remove(struct platform_device *pdev) static struct platform_driver imxdma_driver = { .driver = { .name = "imx-dma", + .of_match_table = imx_dma_of_dev_id, }, .id_table = imx_dma_devtype, .remove = imxdma_remove, diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index f6d30cc1cb77..beac6b8b6a7b 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -60,10 +60,8 @@ static inline int imx_dma_is_ipu(struct dma_chan *chan) static inline int imx_dma_is_general_purpose(struct dma_chan *chan) { - return strstr(dev_name(chan->device->dev), "sdma") || - !strcmp(dev_name(chan->device->dev), "imx1-dma") || - !strcmp(dev_name(chan->device->dev), "imx21-dma") || - !strcmp(dev_name(chan->device->dev), "imx27-dma"); + return !strcmp(chan->device->dev->driver->name, "imx-sdma") || + !strcmp(chan->device->dev->driver->name, "imx-dma"); } #endif -- cgit v1.2.3-58-ga151 From 5c6b3e7725384f02418d80e7dc32b1a690497004 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Sun, 26 May 2013 11:53:21 +0200 Subject: DMA: imx-dma: imxdma->dev used uninitialized imxdma->dev is used for dev_warn before it was set. Signed-off-by: Markus Pargmann Reviewed-by: Shawn Guo Acked-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 34c54cf08231..ff2aab973b45 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -1067,6 +1067,7 @@ static int __init imxdma_probe(struct platform_device *pdev) if (!imxdma) return -ENOMEM; + imxdma->dev = &pdev->dev; imxdma->devtype = pdev->id_entry->driver_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1171,7 +1172,6 @@ static int __init imxdma_probe(struct platform_device *pdev) &imxdma->dma_device.channels); } - imxdma->dev = &pdev->dev; imxdma->dma_device.dev = &pdev->dev; imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources; -- cgit v1.2.3-58-ga151 From ea7e79063e604c89b16b819d2e88b20c421d9514 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 10 May 2013 15:19:13 +0200 Subject: dmaengine: at_hdmac/trivial: correct typo in comment Signed-off-by: Nicolas Ferre Acked-by: Jean-Christophe PLAGNIOL-VILLARD Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index e923cda930f9..cd494209352a 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1120,7 +1120,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) */ BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); - /* if cfg configuration specified take it instad of default */ + /* if cfg configuration specified take it instead of default */ if (atslave->cfg) cfg = atslave->cfg; } -- cgit v1.2.3-58-ga151 From 72ae6e4b31e40397eaa81007b39a1074638a6798 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 10 May 2013 15:19:14 +0200 Subject: dmaengine: at_hdmac: extend hardware handshaking interface identification 
Peripheral handshaking identification numbers can be bigger than 15, so new fields have been created in the CFG register. Add macros to take this modification into account and use them in at_dma_xlate() function. Signed-off-by: Nicolas Ferre Acked-by: Jean-Christophe PLAGNIOL-VILLARD Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 2 ++ include/linux/platform_data/dma-atmel.h | 4 ++++ 2 files changed, 6 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index cd494209352a..78c3fb4b4e40 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1230,6 +1230,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, per_id = dma_spec->args[1]; atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id) + | ATC_DST_PER_MSB(per_id) + | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); atslave->dma_dev = &dmac_pdev->dev; diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h index cab0997be3de..e95f19c65873 100644 --- a/include/linux/platform_data/dma-atmel.h +++ b/include/linux/platform_data/dma-atmel.h @@ -35,16 +35,20 @@ struct at_dma_slave { /* Platform-configurable bits in CFG */ +#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ + #define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */ #define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */ #define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */ #define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */ #define ATC_SRC_H2SEL_SW (0x0 << 9) #define ATC_SRC_H2SEL_HW (0x1 << 9) +#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */ #define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */ #define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */ #define ATC_DST_H2SEL_SW (0x0 << 13) #define ATC_DST_H2SEL_HW (0x1 << 13) +#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */ #define ATC_SOD (0x1 << 16) /* Stop On Done */ #define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */ #define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */ -- cgit v1.2.3-58-ga151 From 6c22770f644bf23aecc11fedd7b305488a861bfc Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 10 May 2013 15:19:15 +0200 Subject: dmaengine: at_hdmac/trivial: rearrange CFG register bits assignment No modification in CFG register configuration, just rearrange bits directives to group logically and make it more readable. Signed-off-by: Nicolas Ferre Acked-by: Jean-Christophe PLAGNIOL-VILLARD Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 78c3fb4b4e40..9e1ad73a78cf 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1228,11 +1228,10 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, * ignored depending on DMA transfer direction. 
*/ per_id = dma_spec->args[1]; - atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW - | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id) - | ATC_DST_PER_MSB(per_id) - | ATC_SRC_PER_MSB(per_id) - | ATC_SRC_PER(per_id); + atslave->cfg = ATC_FIFOCFG_HALFFIFO + | ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW + | ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) + | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); atslave->dma_dev = &dmac_pdev->dev; chan = dma_request_channel(mask, at_dma_filter, atslave); -- cgit v1.2.3-58-ga151 From d088c33b646e9f3564eea7a057a2cb697c18bcd0 Mon Sep 17 00:00:00 2001 From: Elen Song Date: Fri, 10 May 2013 11:00:50 +0800 Subject: DMA: AT91: Get transfer width In one dma transfer, the data transfer width can be configured and it is limited by source or destination peripheral width, tx_width will save the transfer width, but for memcpy, either source or destination transfer width is taken as tx_width. Signed-off-by: Elen Song Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 3 +++ drivers/dma/at_hdmac_regs.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 9e1ad73a78cf..4c101a9dd3cb 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -615,6 +615,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->len = len; + first->tx_width = src_width; /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -761,6 +762,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->len = total_len; + first->tx_width = reg_width; /* first link descriptor of list is responsible of flags */ first->txd.flags = flags; /* client is in control of this ack */ @@ -919,6 +921,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->len = buf_len; + first->tx_width = reg_width; return &first->txd; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index c604d26fd4d3..3679933fb646 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -182,6 +182,7 @@ struct at_lli { * @txd: support for the async_tx api * @desc_node: node on the channed descriptors list * @len: total transaction bytecount + * @tx_width: transfer width */ struct at_desc { /* FIRST values the hardware uses */ @@ -192,6 +193,7 @@ struct at_desc { struct dma_async_tx_descriptor txd; struct list_head desc_node; size_t len; + u32 tx_width; }; static inline struct at_desc * -- cgit v1.2.3-58-ga151 From d48de6f1a81b3d10de0f5765aff1b3bd788617b0 Mon Sep 17 00:00:00 2001 From: Elen Song Date: Fri, 10 May 2013 11:01:46 +0800 Subject: DMA: AT91: Get residual bytes in dma buffer Add support for returning the residue for current transfer cookie by reading the transfered buffer size(BTSIZE) in CTRLA register. For a single buffer cookie, the descriptor length minus BTSIZE can get the residue. For a lli cookie, remain_desc will record remain descriptor length when last descriptor finish, the remain_desc minus BTSIZE can get the current residue. If the cookie has completed successfully, the residue will be zero. If the cookie is in progress, it will be the number of bytes yet to be transferred. 
If get residue error, the cookie will be turn into error status. Check dma fifo to see if data remain, let issue pending finish remain work if there is. Signed-off-by: Elen Song Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 130 ++++++++++++++++++++++++++++++++++++++------ drivers/dma/at_hdmac_regs.h | 3 + 2 files changed, 115 insertions(+), 18 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 4c101a9dd3cb..5ce89368a8db 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -54,6 +54,7 @@ MODULE_PARM_DESC(init_nr_desc_per_channel, /* prototypes */ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); +static void atc_issue_pending(struct dma_chan *chan); /*----------------------------------------------------------------------*/ @@ -230,6 +231,94 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) vdbg_dump_regs(atchan); } +/* + * atc_get_current_descriptors - + * locate the descriptor which equal to physical address in DSCR + * @atchan: the channel we want to start + * @dscr_addr: physical descriptor address in DSCR + */ +static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, + u32 dscr_addr) +{ + struct at_desc *desc, *_desc, *child, *desc_cur = NULL; + + list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { + if (desc->lli.dscr == dscr_addr) { + desc_cur = desc; + break; + } + + list_for_each_entry(child, &desc->tx_list, desc_node) { + if (child->lli.dscr == dscr_addr) { + desc_cur = child; + break; + } + } + } + + return desc_cur; +} + +/* + * atc_get_bytes_left - + * Get the number of bytes residue in dma buffer, + * @chan: the channel we want to start + */ +static int atc_get_bytes_left(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + int chan_id = atchan->chan_common.chan_id; + struct at_desc *desc_first = atc_first_active(atchan); + struct at_desc *desc_cur; + int ret = 0, count = 0; + + /* + * Initialize necessary values in the first time. + * remain_desc record remain desc length. + */ + if (atchan->remain_desc == 0) + /* First descriptor embedds the transaction length */ + atchan->remain_desc = desc_first->len; + + /* + * This happens when current descriptor transfer complete. + * The residual buffer size should reduce current descriptor length. + */ + if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) { + clear_bit(ATC_IS_BTC, &atchan->status); + desc_cur = atc_get_current_descriptors(atchan, + channel_readl(atchan, DSCR)); + if (!desc_cur) { + ret = -EINVAL; + goto out; + } + atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) + << (desc_first->tx_width); + if (atchan->remain_desc < 0) { + ret = -EINVAL; + goto out; + } else + ret = atchan->remain_desc; + } else { + /* + * Get residual bytes when current + * descriptor transfer in progress. + */ + count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) + << (desc_first->tx_width); + ret = atchan->remain_desc - count; + } + /* + * Check fifo empty. 
+ */ + if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) + atc_issue_pending(chan); + +out: + return ret; +} + /** * atc_chain_complete - finish work for one transaction chain * @atchan: channel we work on @@ -496,6 +585,8 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) /* Give information to tasklet */ set_bit(ATC_IS_ERROR, &atchan->status); } + if (pending & AT_DMA_BTC(i)) + set_bit(ATC_IS_BTC, &atchan->status); tasklet_schedule(&atchan->tasklet); ret = IRQ_HANDLED; } @@ -1035,34 +1126,35 @@ atc_tx_status(struct dma_chan *chan, struct dma_tx_state *txstate) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - dma_cookie_t last_used; - dma_cookie_t last_complete; unsigned long flags; enum dma_status ret; - - spin_lock_irqsave(&atchan->lock, flags); + int bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { - atc_cleanup_descriptors(atchan); + if (ret == DMA_SUCCESS) + return ret; + /* + * There's no point calculating the residue if there's + * no txstate to store the value. + */ + if (!txstate) + return DMA_ERROR; - ret = dma_cookie_status(chan, cookie, txstate); - } + spin_lock_irqsave(&atchan->lock, flags); - last_complete = chan->completed_cookie; - last_used = chan->cookie; + /* Get number of bytes left in the active transactions */ + bytes = atc_get_bytes_left(chan); spin_unlock_irqrestore(&atchan->lock, flags); - if (ret != DMA_SUCCESS) - dma_set_residue(txstate, atc_first_active(atchan)->len); - - if (atc_chan_is_paused(atchan)) - ret = DMA_PAUSED; + if (unlikely(bytes < 0)) { + dev_vdbg(chan2dev(chan), "get residual bytes error\n"); + return DMA_ERROR; + } else + dma_set_residue(txstate, bytes); - dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", - ret, cookie, last_complete ? last_complete : 0, - last_used ? 
last_used : 0); + dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n", + ret, cookie, bytes); return ret; } @@ -1146,6 +1238,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) spin_lock_irqsave(&atchan->lock, flags); atchan->descs_allocated = i; + atchan->remain_desc = 0; list_splice(&tmp_list, &atchan->free_list); dma_cookie_init(chan); spin_unlock_irqrestore(&atchan->lock, flags); @@ -1188,6 +1281,7 @@ static void atc_free_chan_resources(struct dma_chan *chan) list_splice_init(&atchan->free_list, &list); atchan->descs_allocated = 0; atchan->status = 0; + atchan->remain_desc = 0; dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); } diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 3679933fb646..f31d647acdfa 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -213,6 +213,7 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd) enum atc_status { ATC_IS_ERROR = 0, ATC_IS_PAUSED = 1, + ATC_IS_BTC = 2, ATC_IS_CYCLIC = 24, }; @@ -230,6 +231,7 @@ enum atc_status { * @save_cfg: configuration register that is saved on suspend/resume cycle * @save_dscr: for cyclic operations, preserve next descriptor address in * the cyclic list on suspend/resume cycle + * @remain_desc: to save remain desc length * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG * @lock: serializes enqueue/dequeue operations to descriptors lists * @active_list: list of descriptors dmaengine is being running on @@ -248,6 +250,7 @@ struct at_dma_chan { struct tasklet_struct tasklet; u32 save_cfg; u32 save_dscr; + u32 remain_desc; struct dma_slave_config dma_sconfig; spinlock_t lock; -- cgit v1.2.3-58-ga151 From dd3daca162f7411448dd80a8872a002c43cfd8e5 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 24 May 2013 10:10:13 +0900 Subject: dma: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. Also, unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. 
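The conversion itself is mechanical. A minimal sketch of the pattern being applied, assuming a hypothetical foo platform driver (struct foo_dev is invented; only the drvdata helpers are the real API):

#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_dev {				/* hypothetical driver state */
	int id;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;

	/* was: dev_set_drvdata(&pdev->dev, foo); */
	platform_set_drvdata(pdev, foo);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_dev *foo = platform_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "removing, drvdata %p\n", foo);
	/* no platform_set_drvdata(pdev, NULL) needed: the driver core clears it */
	return 0;
}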
Signed-off-by: Jingoo Han Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 5 ++--- drivers/dma/ppc4xx/adma.c | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 4fc2980556ad..49e8fbdb8983 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1368,7 +1368,7 @@ static int fsldma_of_probe(struct platform_device *op) dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); - dev_set_drvdata(&op->dev, fdev); + platform_set_drvdata(op, fdev); /* * We cannot use of_platform_bus_probe() because there is no @@ -1417,7 +1417,7 @@ static int fsldma_of_remove(struct platform_device *op) struct fsldma_device *fdev; unsigned int i; - fdev = dev_get_drvdata(&op->dev); + fdev = platform_get_drvdata(op); dma_async_device_unregister(&fdev->common); fsldma_free_irqs(fdev); @@ -1428,7 +1428,6 @@ static int fsldma_of_remove(struct platform_device *op) } iounmap(fdev->regs); - dev_set_drvdata(&op->dev, NULL); kfree(fdev); return 0; diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 5d3d95569a1e..e68c51d13cb1 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4481,7 +4481,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) adev->dev = &ofdev->dev; adev->common.dev = &ofdev->dev; INIT_LIST_HEAD(&adev->common.channels); - dev_set_drvdata(&ofdev->dev, adev); + platform_set_drvdata(ofdev, adev); /* create a channel */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); @@ -4594,14 +4594,13 @@ out: */ static int ppc440spe_adma_remove(struct platform_device *ofdev) { - struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev); + struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev); struct device_node *np = ofdev->dev.of_node; struct resource res; struct dma_chan *chan, *_chan; struct ppc_dma_chan_ref *ref, *_ref; struct ppc440spe_adma_chan *ppc440spe_chan; - dev_set_drvdata(&ofdev->dev, NULL); if (adev->id < PPC440SPE_ADMA_ENGINES_NUM) ppc440spe_adma_devices[adev->id] = -1; -- cgit v1.2.3-58-ga151 From 3208b3701b98dcd14f0d5f0a36dd33d21b0b458f Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 24 May 2013 16:37:27 -0300 Subject: dma: mxs-dma: Staticize mxs_dma_xlate Fix the following sparse warning: drivers/dma/mxs-dma.c:696:17: warning: symbol 'mxs_dma_xlate' was not declared. Should it be static? Signed-off-by: Fabio Estevam Acked-by: Shawn Guo Signed-off-by: Vinod Koul --- drivers/dma/mxs-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index b48a79c28845..719593002ab7 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -693,7 +693,7 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param) return true; } -struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, +static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data; -- cgit v1.2.3-58-ga151 From 36c6df5062568f0b923930b63e2e477cb3a391bd Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 6 May 2013 12:53:33 +0900 Subject: dma: at_hdmac: remove unnecessary platform_set_drvdata() The driver core clears the driver data to NULL after device_release or on probe failure, since commit 0998d0631001288a5974afc0b2a5f568bcdecb4d (device-core: Ensure drvdata = NULL when no driver is bound). Thus, it is not needed to manually clear the device driver data to NULL. 
Signed-off-by: Jingoo Han Acked-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 5ce89368a8db..6db5228f4134 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1570,7 +1570,6 @@ err_of_dma_controller_register: dma_async_device_unregister(&atdma->dma_common); dma_pool_destroy(atdma->dma_desc_pool); err_pool_create: - platform_set_drvdata(pdev, NULL); free_irq(platform_get_irq(pdev, 0), atdma); err_irq: clk_disable(atdma->clk); @@ -1595,7 +1594,6 @@ static int at_dma_remove(struct platform_device *pdev) dma_async_device_unregister(&atdma->dma_common); dma_pool_destroy(atdma->dma_desc_pool); - platform_set_drvdata(pdev, NULL); free_irq(platform_get_irq(pdev, 0), atdma); list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, -- cgit v1.2.3-58-ga151 From c1a9d391adc7feb219edd354deacb587b26cad06 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 6 May 2013 12:54:48 +0900 Subject: dma: timb_dma: remove unnecessary platform_set_drvdata() The driver core clears the driver data to NULL after device_release or on probe failure, since commit 0998d0631001288a5974afc0b2a5f568bcdecb4d (device-core: Ensure drvdata = NULL when no driver is bound). Thus, it is not needed to manually clear the device driver data to NULL. Signed-off-by: Jingoo Han Signed-off-by: Vinod Koul --- drivers/dma/timb_dma.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 26107ba6edb3..0ef43c136aa7 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -811,8 +811,6 @@ static int td_remove(struct platform_device *pdev) kfree(td); release_mem_region(iomem->start, resource_size(iomem)); - platform_set_drvdata(pdev, NULL); - dev_dbg(&pdev->dev, "Removed...\n"); return 0; } -- cgit v1.2.3-58-ga151 From 8004cbb481494c166596b0d469a6c777415e18f6 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Thu, 9 May 2013 13:19:40 +0400 Subject: dw_dmac: remove inline marking of EXPORT_SYMBOL functions EXPORT_SYMBOL and inline directives are contradictory to each other. The patch fixes this inconsistency. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Denis Efremov Acked-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 2e5deaa82b60..724083d02b34 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -556,14 +556,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) /* --------------------- Cyclic DMA API extensions -------------------- */ -inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); return channel_readl(dwc, SAR); } EXPORT_SYMBOL(dw_dma_get_src_addr); -inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); return channel_readl(dwc, DAR); -- cgit v1.2.3-58-ga151 From ac7ae754d592571478959833796b7bdf1a3c08da Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sat, 11 May 2013 20:30:52 +0400 Subject: dma: tegra20-apbdma: err message correction Fixed err msg params order on irq request fail. 
Signed-off-by: Dmitry Osipenko Acked-by: Stephen Warren Acked-by: Laxman Dewangan Signed-off-by: Vinod Koul --- drivers/dma/tegra20-apb-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 33f59ecd256e..5953547a5f75 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1334,7 +1334,7 @@ static int tegra_dma_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "request_irq failed with err %d channel %d\n", - i, ret); + ret, i); goto err_irq; } -- cgit v1.2.3-58-ga151 From 7bdc1e272a471062e8f310137c896e2355b46d13 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sat, 11 May 2013 20:30:53 +0400 Subject: dma: tegra: avoid channel lock up after free Lock scenario: Channel 1 was allocated and prepared as slave_sg, used and freed. Now preparation of cyclic dma on channel 1 will fail with err "DMA configuration conflict" because tdc->isr_handler still setted to handle_once_dma_done. This happens because tegra_dma_abort_all() won't be called on channel freeing if pending list is empty and channel not busy. We need to clear isr_handler on channel freeing to avoid locking. Signed-off-by: Dmitry Osipenko Acked-by: Stephen Warren Acked-by: Laxman Dewangan Signed-off-by: Vinod Koul --- drivers/dma/tegra20-apb-dma.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/dma') diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 5953547a5f75..f137914d7b16 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1191,6 +1191,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) list_splice_init(&tdc->free_dma_desc, &dma_desc_list); INIT_LIST_HEAD(&tdc->cb_desc); tdc->config_init = false; + tdc->isr_handler = NULL; spin_unlock_irqrestore(&tdc->lock, flags); while (!list_empty(&dma_desc_list)) { -- cgit v1.2.3-58-ga151 From add93b578edda2a952b9b481ce8da2a9dc412cee Mon Sep 17 00:00:00 2001 From: Rongjun Ying Date: Tue, 14 May 2013 23:03:20 +0800 Subject: dmaengine: sirf: set dma residue based on the current dma transfer position read SIRFSOC_DMA_CH_ADDR register to get current dma transfer position, then update dma residue so that things like ALSA drivers work as ALSA drivers need the right residue value. 
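A minimal sketch of how a client such as an ALSA PCM driver consumes that residue — the helper name and buf_bytes parameter are illustrative, while dmaengine_tx_status() and struct dma_tx_state are the real dmaengine API:

#include <linux/dmaengine.h>

/* current position inside a cyclic buffer of buf_bytes total size */
static unsigned int foo_pcm_pointer_bytes(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  unsigned int buf_bytes)
{
	struct dma_tx_state state;

	dmaengine_tx_status(chan, cookie, &state);

	/* residue counts the bytes not yet transferred for this cookie */
	return buf_bytes - state.residue;
}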
Signed-off-by: Rongjun Ying Signed-off-by: Barry Song Signed-off-by: Vinod Koul --- drivers/dma/sirf-dma.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 1765a0a2736d..716b23e4f327 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -466,12 +466,29 @@ static enum dma_status sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { + struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); unsigned long flags; enum dma_status ret; + struct sirfsoc_dma_desc *sdesc; + int cid = schan->chan.chan_id; + unsigned long dma_pos; + unsigned long dma_request_bytes; + unsigned long residue; spin_lock_irqsave(&schan->lock, flags); + + sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, + node); + dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) * + (sdesc->width * SIRFSOC_DMA_WORD_LEN); + ret = dma_cookie_status(chan, cookie, txstate); + dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) + << 2; + residue = dma_request_bytes - (dma_pos - sdesc->addr); + dma_set_residue(txstate, residue); + spin_unlock_irqrestore(&schan->lock, flags); return ret; -- cgit v1.2.3-58-ga151 From 9479e17c9bb455c01b369d294e01de8fa9b0a8d3 Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Thu, 30 May 2013 22:23:32 +0800 Subject: dma: imx-sdma: move to generic device tree bindings Update imx-sdma driver to adopt generic DMA device tree bindings. It calls of_dma_controller_register() with imx-sdma specific of_dma_xlate to get the generic DMA device tree helper support. The #dma-cells for imx-sdma must be 3, which includes request ID, peripheral type and priority. The existing way of requesting channel, clients directly call dma_request_channel(), still work there, and will be removed after all imx-sdma clients get converted to generic DMA device tree helper. Signed-off-by: Shawn Guo Signed-off-by: Vinod Koul Acked-by: Arnd Bergmann --- .../devicetree/bindings/dma/fsl-imx-sdma.txt | 56 ++++++++++++++++++++++ drivers/dma/imx-sdma.c | 40 ++++++++++++++++ 2 files changed, 96 insertions(+) (limited to 'drivers/dma') diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt index d1e3f443e205..68cee4f5539f 100644 --- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt +++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt @@ -4,14 +4,70 @@ Required properties: - compatible : Should be "fsl,-sdma" - reg : Should contain SDMA registers location and length - interrupts : Should contain SDMA interrupt +- #dma-cells : Must be <3>. + The first cell specifies the DMA request/event ID. See details below + about the second and third cell. - fsl,sdma-ram-script-name : Should contain the full path of SDMA RAM scripts firmware +The second cell of dma phandle specifies the peripheral type of DMA transfer. +The full ID of peripheral types can be found below. + + ID transfer type + --------------------- + 0 MCU domain SSI + 1 Shared SSI + 2 MMC + 3 SDHC + 4 MCU domain UART + 5 Shared UART + 6 FIRI + 7 MCU domain CSPI + 8 Shared CSPI + 9 SIM + 10 ATA + 11 CCM + 12 External peripheral + 13 Memory Stick Host Controller + 14 Shared Memory Stick Host Controller + 15 DSP + 16 Memory + 17 FIFO type Memory + 18 SPDIF + 19 IPU Memory + 20 ASRC + 21 ESAI + +The third cell specifies the transfer priority as below. 
+ + ID transfer priority + ------------------------- + 0 High + 1 Medium + 2 Low + Examples: sdma@83fb0000 { compatible = "fsl,imx51-sdma", "fsl,imx35-sdma"; reg = <0x83fb0000 0x4000>; interrupts = <6>; + #dma-cells = <3>; fsl,sdma-ram-script-name = "sdma-imx51.bin"; }; + +DMA clients connected to the i.MX SDMA controller must use the format +described in the dma.txt file. + +Examples: + +ssi2: ssi@70014000 { + compatible = "fsl,imx51-ssi", "fsl,imx21-ssi"; + reg = <0x70014000 0x4000>; + interrupts = <30>; + clocks = <&clks 49>; + dmas = <&sdma 24 1 0>, + <&sdma 25 1 0>; + dma-names = "rx", "tx"; + fsl,fifo-depth = <15>; +}; diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 092867bf795c..1e44b8cf95da 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -1296,6 +1297,35 @@ err_dma_alloc: return ret; } +static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) +{ + struct imx_dma_data *data = fn_param; + + if (!imx_dma_is_general_purpose(chan)) + return false; + + chan->private = data; + + return true; +} + +static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct sdma_engine *sdma = ofdma->of_dma_data; + dma_cap_mask_t mask = sdma->dma_device.cap_mask; + struct imx_dma_data data; + + if (dma_spec->args_count != 3) + return NULL; + + data.dma_request = dma_spec->args[0]; + data.peripheral_type = dma_spec->args[1]; + data.priority = dma_spec->args[2]; + + return dma_request_channel(mask, sdma_filter_fn, &data); +} + static int __init sdma_probe(struct platform_device *pdev) { const struct of_device_id *of_id = @@ -1443,10 +1473,20 @@ static int __init sdma_probe(struct platform_device *pdev) goto err_init; } + if (np) { + ret = of_dma_controller_register(np, sdma_xlate, sdma); + if (ret) { + dev_err(&pdev->dev, "failed to register controller\n"); + goto err_register; + } + } + dev_info(sdma->dev, "initialized\n"); return 0; +err_register: + dma_async_device_unregister(&sdma->dma_device); err_init: kfree(sdma->script_addrs); err_alloc: -- cgit v1.2.3-58-ga151 From 09677176610e7c3ed8ddb302fd24bbb59bdbf205 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 10 Jun 2013 19:34:37 +0100 Subject: dma: pl330: rip out broken, redundant ID probing The PL330 driver probes the peripheral and primecell IDs of the device to make sure that it is indeed an AMBA PL330. However, it does this by making byte accesses to a device mapping of the word-aligned ID registers, which is either UNPREDICTABLE or generates an alignment fault (depending on the presence of the virtualisation extensions). Rather than fix this code, we can actually rip most of it out and let the AMBA bus driver correctly do the probing for us. 
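A minimal sketch of why the in-driver ID probing is redundant: the AMBA bus core reads the PrimeCell/peripheral ID registers itself and binds a driver only when its id_table matches, after which the driver can simply trust adev->periphid. The foo names and the id/mask values below are illustrative, not taken from the pl330 driver:

#include <linux/amba/bus.h>
#include <linux/module.h>

static const struct amba_id foo_ids[] = {
	{ .id = 0x00041330, .mask = 0x000fffff },
	{ 0, 0 },
};

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* the bus has already verified the ID registers at this point */
	dev_info(&adev->dev, "bound, periphid 0x%08x\n", adev->periphid);
	return 0;
}

static int foo_remove(struct amba_device *adev)
{
	return 0;
}

static struct amba_driver foo_driver = {
	.drv		= { .name = "foo" },
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
};
module_amba_driver(foo_driver);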
Cc: Jassi Brar Cc: Vinod Koul Signed-off-by: Will Deacon Acked-by: Jassi Brar Acked-by: Grant Likely Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 27 +++------------------------ 1 file changed, 3 insertions(+), 24 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index a17553f7c028..ac04335ef444 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -157,7 +157,6 @@ enum pl330_reqtype { #define PERIPH_REV_R0P0 0 #define PERIPH_REV_R1P0 1 #define PERIPH_REV_R1P1 2 -#define PCELL_ID 0xff0 #define CR0_PERIPH_REQ_SET (1 << 0) #define CR0_BOOT_EN_SET (1 << 1) @@ -193,8 +192,6 @@ enum pl330_reqtype { #define INTEG_CFG 0x0 #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12)) -#define PCELL_ID_VAL 0xb105f00d - #define PL330_STATE_STOPPED (1 << 0) #define PL330_STATE_EXECUTING (1 << 1) #define PL330_STATE_WFE (1 << 2) @@ -292,7 +289,6 @@ static unsigned cmd_line; /* Populated by the PL330 core driver for DMA API driver's info */ struct pl330_config { u32 periph_id; - u32 pcell_id; #define DMAC_MODE_NS (1 << 0) unsigned int mode; unsigned int data_bus_width:10; /* In number of bits */ @@ -650,19 +646,6 @@ static inline bool _manager_ns(struct pl330_thread *thrd) return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; } -static inline u32 get_id(struct pl330_info *pi, u32 off) -{ - void __iomem *regs = pi->base; - u32 id = 0; - - id |= (readb(regs + off + 0x0) << 0); - id |= (readb(regs + off + 0x4) << 8); - id |= (readb(regs + off + 0x8) << 16); - id |= (readb(regs + off + 0xc) << 24); - - return id; -} - static inline u32 get_revision(u32 periph_id) { return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK; @@ -1986,9 +1969,6 @@ static void read_dmac_config(struct pl330_info *pi) pi->pcfg.num_events = val; pi->pcfg.irq_ns = readl(regs + CR3); - - pi->pcfg.periph_id = get_id(pi, PERIPH_ID); - pi->pcfg.pcell_id = get_id(pi, PCELL_ID); } static inline void _reset_thread(struct pl330_thread *thrd) @@ -2098,10 +2078,8 @@ static int pl330_add(struct pl330_info *pi) regs = pi->base; /* Check if we can handle this DMAC */ - if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL - || get_id(pi, PCELL_ID) != PCELL_ID_VAL) { - dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n", - get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID)); + if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { + dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id); return -EINVAL; } @@ -2916,6 +2894,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) if (ret) return ret; + pi->pcfg.periph_id = adev->periphid; ret = pl330_add(pi); if (ret) goto probe_err1; -- cgit v1.2.3-58-ga151 From fed8c45727abd273fd74b3e78b35be4929121334 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 10 Jun 2013 19:34:38 +0100 Subject: dma: pl330: use dma_addr_t for describing bus addresses The microcode bus address (pl330_dmac.mcode_bus) is currently a u32, which fails to compile when building on a system with 64-bit bus addresses. This patch uses dma_addr_t to represent the address instead. 
Cc: Jassi Brar Cc: Vinod Koul Signed-off-by: Will Deacon Acked-by: Jassi Brar Acked-by: Grant Likely Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index ac04335ef444..bd69cc47150e 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -501,7 +501,7 @@ struct pl330_dmac { /* Maximum possible events/irqs */ int events[32]; /* BUS address of MicroCode buffer */ - u32 mcode_bus; + dma_addr_t mcode_bus; /* CPU address of MicroCode buffer */ void *mcode_cpu; /* List of all Channel threads */ -- cgit v1.2.3-58-ga151 From 0b95961e03ecee31d6151db79cc0826e702d1e0a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 5 Jun 2013 15:26:43 +0300 Subject: dw_dmac: don't check resource with devm_ioremap_resource devm_ioremap_resource does sanity checks on the given resource. No need to duplicate this in the driver. Signed-off-by: Andy Shevchenko Acked-by: Viresh Kumar Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 724083d02b34..2b65ba614e60 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1667,14 +1667,11 @@ static int dw_probe(struct platform_device *pdev) int err; int i; - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!io) - return -EINVAL; - irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(&pdev->dev, io); if (IS_ERR(regs)) return PTR_ERR(regs); -- cgit v1.2.3-58-ga151 From 61a7649620d54a037c612f9a713abe5178cddc65 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 5 Jun 2013 15:26:44 +0300 Subject: dma: move dw_dmac driver to an own directory The dw_dmac driver is going to be split into multiple files. To make this more convenient move it to an own directory. Signed-off-by: Andy Shevchenko Acked-by: Viresh Kumar Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- MAINTAINERS | 3 +- drivers/dma/Kconfig | 20 +- drivers/dma/Makefile | 2 +- drivers/dma/dw/Kconfig | 23 + drivers/dma/dw/Makefile | 1 + drivers/dma/dw/dw_dmac.c | 1969 +++++++++++++++++++++++++++++++++++++++++ drivers/dma/dw/dw_dmac_regs.h | 311 +++++++ drivers/dma/dw_dmac.c | 1969 ----------------------------------------- drivers/dma/dw_dmac_regs.h | 311 ------- 9 files changed, 2307 insertions(+), 2302 deletions(-) create mode 100644 drivers/dma/dw/Kconfig create mode 100644 drivers/dma/dw/Makefile create mode 100644 drivers/dma/dw/dw_dmac.c create mode 100644 drivers/dma/dw/dw_dmac_regs.h delete mode 100644 drivers/dma/dw_dmac.c delete mode 100644 drivers/dma/dw_dmac_regs.h (limited to 'drivers/dma') diff --git a/MAINTAINERS b/MAINTAINERS index 250dc970c62d..e325809fd412 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6990,8 +6990,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER M: Viresh Kumar S: Maintained F: include/linux/dw_dmac.h -F: drivers/dma/dw_dmac_regs.h -F: drivers/dma/dw_dmac.c +F: drivers/dma/dw/ SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER M: Seungwon Jeon diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index e9924898043a..146a1d864a71 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -79,25 +79,7 @@ config INTEL_IOP_ADMA help Enable support for the Intel(R) IOP Series RAID engines. 
-config DW_DMAC - tristate "Synopsys DesignWare AHB DMA support" - depends on GENERIC_HARDIRQS - select DMA_ENGINE - default y if CPU_AT32AP7000 - help - Support the Synopsys DesignWare AHB DMA controller. This - can be integrated in chips such as the Atmel AT32ap7000. - -config DW_DMAC_BIG_ENDIAN_IO - bool "Use big endian I/O register access" - default y if AVR32 - depends on DW_DMAC - help - Say yes here to use big endian I/O access when reading and writing - to the DMA controller registers. This is needed on some platforms, - like the Atmel AVR32 architecture. - - If unsure, use the default setting. +source "drivers/dma/dw/Kconfig" config AT_HDMAC tristate "Atmel AHB DMA support" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index a2b0df591f95..ac44ca0d468a 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -15,7 +15,7 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_MV_XOR) += mv_xor.o -obj-$(CONFIG_DW_DMAC) += dw_dmac.o +obj-$(CONFIG_DW_DMAC) += dw/ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig new file mode 100644 index 000000000000..38a215af5ccc --- /dev/null +++ b/drivers/dma/dw/Kconfig @@ -0,0 +1,23 @@ +# +# DMA engine configuration for dw +# + +config DW_DMAC + tristate "Synopsys DesignWare AHB DMA support" + depends on GENERIC_HARDIRQS + select DMA_ENGINE + default y if CPU_AT32AP7000 + help + Support the Synopsys DesignWare AHB DMA controller. This + can be integrated in chips such as the Atmel AT32ap7000. + +config DW_DMAC_BIG_ENDIAN_IO + bool "Use big endian I/O register access" + default y if AVR32 + depends on DW_DMAC + help + Say yes here to use big endian I/O access when reading and writing + to the DMA controller registers. This is needed on some platforms, + like the Atmel AVR32 architecture. + + If unsure, use the default setting. diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile new file mode 100644 index 000000000000..dd8d9936beef --- /dev/null +++ b/drivers/dma/dw/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_DW_DMAC) += dw_dmac.o diff --git a/drivers/dma/dw/dw_dmac.c b/drivers/dma/dw/dw_dmac.c new file mode 100644 index 000000000000..15f3f4f79c10 --- /dev/null +++ b/drivers/dma/dw/dw_dmac.c @@ -0,0 +1,1969 @@ +/* + * Core driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2007-2008 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../dmaengine.h" +#include "dw_dmac_regs.h" + +/* + * This supports the Synopsys "DesignWare AHB Central DMA Controller", + * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all + * of which use ARM any more). See the "Databook" from Synopsys for + * information beyond what licensees probably provide. + * + * The driver has currently been tested only with the Atmel AT32AP7000, + * which does not support descriptor writeback. + */ + +static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) +{ + return slave ? 
slave->dst_master : 0; +} + +static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) +{ + return slave ? slave->src_master : 1; +} + +static inline void dwc_set_masters(struct dw_dma_chan *dwc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_dma_slave *dws = dwc->chan.private; + unsigned char mmax = dw->nr_masters - 1; + + if (dwc->request_line == ~0) { + dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); + dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); + } +} + +#define DWC_DEFAULT_CTLLO(_chan) ({ \ + struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ + struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ + bool _is_slave = is_slave_direction(_dwc->direction); \ + u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ + DW_DMA_MSIZE_16; \ + u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ + DW_DMA_MSIZE_16; \ + \ + (DWC_CTLL_DST_MSIZE(_dmsize) \ + | DWC_CTLL_SRC_MSIZE(_smsize) \ + | DWC_CTLL_LLP_D_EN \ + | DWC_CTLL_LLP_S_EN \ + | DWC_CTLL_DMS(_dwc->dst_master) \ + | DWC_CTLL_SMS(_dwc->src_master)); \ + }) + +/* + * Number of descriptors to allocate for each channel. This should be + * made configurable somehow; preferably, the clients (at least the + * ones using slave transfers) should be able to give us a hint. + */ +#define NR_DESCS_PER_CHANNEL 64 + +/*----------------------------------------------------------------------*/ + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} +static struct device *chan2parent(struct dma_chan *chan) +{ + return chan->dev->device.parent; +} + +static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) +{ + return to_dw_desc(dwc->active_list.next); +} + +static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) +{ + struct dw_desc *desc, *_desc; + struct dw_desc *ret = NULL; + unsigned int i = 0; + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { + i++; + if (async_tx_test_ack(&desc->txd)) { + list_del(&desc->desc_node); + ret = desc; + break; + } + dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); + } + spin_unlock_irqrestore(&dwc->lock, flags); + + dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); + + return ret; +} + +/* + * Move a descriptor, including any children, to the free list. + * `desc' must not be on any lists. + */ +static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) +{ + unsigned long flags; + + if (desc) { + struct dw_desc *child; + + spin_lock_irqsave(&dwc->lock, flags); + list_for_each_entry(child, &desc->tx_list, desc_node) + dev_vdbg(chan2dev(&dwc->chan), + "moving child desc %p to freelist\n", + child); + list_splice_init(&desc->tx_list, &dwc->free_list); + dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); + list_add(&desc->desc_node, &dwc->free_list); + spin_unlock_irqrestore(&dwc->lock, flags); + } +} + +static void dwc_initialize(struct dw_dma_chan *dwc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_dma_slave *dws = dwc->chan.private; + u32 cfghi = DWC_CFGH_FIFO_MODE; + u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); + + if (dwc->initialized == true) + return; + + if (dws) { + /* + * We need controller-specific data to set up slave + * transfers. 
+ */ + BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); + + cfghi = dws->cfg_hi; + cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; + } else { + if (dwc->direction == DMA_MEM_TO_DEV) + cfghi = DWC_CFGH_DST_PER(dwc->request_line); + else if (dwc->direction == DMA_DEV_TO_MEM) + cfghi = DWC_CFGH_SRC_PER(dwc->request_line); + } + + channel_writel(dwc, CFG_LO, cfglo); + channel_writel(dwc, CFG_HI, cfghi); + + /* Enable interrupts */ + channel_set_bit(dw, MASK.XFER, dwc->mask); + channel_set_bit(dw, MASK.ERROR, dwc->mask); + + dwc->initialized = true; +} + +/*----------------------------------------------------------------------*/ + +static inline unsigned int dwc_fast_fls(unsigned long long v) +{ + /* + * We can be a lot more clever here, but this should take care + * of the most common optimization. + */ + if (!(v & 7)) + return 3; + else if (!(v & 3)) + return 2; + else if (!(v & 1)) + return 1; + return 0; +} + +static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) +{ + dev_err(chan2dev(&dwc->chan), + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", + channel_readl(dwc, SAR), + channel_readl(dwc, DAR), + channel_readl(dwc, LLP), + channel_readl(dwc, CTL_HI), + channel_readl(dwc, CTL_LO)); +} + +static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); +} + +/*----------------------------------------------------------------------*/ + +/* Perform single block transfer */ +static inline void dwc_do_single_block(struct dw_dma_chan *dwc, + struct dw_desc *desc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + u32 ctllo; + + /* Software emulation of LLP mode relies on interrupts to continue + * multi block transfer. */ + ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; + + channel_writel(dwc, SAR, desc->lli.sar); + channel_writel(dwc, DAR, desc->lli.dar); + channel_writel(dwc, CTL_LO, ctllo); + channel_writel(dwc, CTL_HI, desc->lli.ctlhi); + channel_set_bit(dw, CH_EN, dwc->mask); + + /* Move pointer to next descriptor */ + dwc->tx_node_active = dwc->tx_node_active->next; +} + +/* Called with dwc->lock held and bh disabled */ +static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + unsigned long was_soft_llp; + + /* ASSERT: channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(chan2dev(&dwc->chan), + "BUG: Attempted to start non-idle channel\n"); + dwc_dump_chan_regs(dwc); + + /* The tasklet will hopefully advance the queue... 
*/ + return; + } + + if (dwc->nollp) { + was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, + &dwc->flags); + if (was_soft_llp) { + dev_err(chan2dev(&dwc->chan), + "BUG: Attempted to start new LLP transfer " + "inside ongoing one\n"); + return; + } + + dwc_initialize(dwc); + + dwc->residue = first->total_len; + dwc->tx_node_active = &first->tx_list; + + /* Submit first block */ + dwc_do_single_block(dwc, first); + + return; + } + + dwc_initialize(dwc); + + channel_writel(dwc, LLP, first->txd.phys); + channel_writel(dwc, CTL_LO, + DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); + channel_writel(dwc, CTL_HI, 0); + channel_set_bit(dw, CH_EN, dwc->mask); +} + +/*----------------------------------------------------------------------*/ + +static void +dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, + bool callback_required) +{ + dma_async_tx_callback callback = NULL; + void *param = NULL; + struct dma_async_tx_descriptor *txd = &desc->txd; + struct dw_desc *child; + unsigned long flags; + + dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); + + spin_lock_irqsave(&dwc->lock, flags); + dma_cookie_complete(txd); + if (callback_required) { + callback = txd->callback; + param = txd->callback_param; + } + + /* async_tx_ack */ + list_for_each_entry(child, &desc->tx_list, desc_node) + async_tx_ack(&child->txd); + async_tx_ack(&desc->txd); + + list_splice_init(&desc->tx_list, &dwc->free_list); + list_move(&desc->desc_node, &dwc->free_list); + + if (!is_slave_direction(dwc->direction)) { + struct device *parent = chan2parent(&dwc->chan); + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) + dma_unmap_single(parent, desc->lli.dar, + desc->total_len, DMA_FROM_DEVICE); + else + dma_unmap_page(parent, desc->lli.dar, + desc->total_len, DMA_FROM_DEVICE); + } + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) + dma_unmap_single(parent, desc->lli.sar, + desc->total_len, DMA_TO_DEVICE); + else + dma_unmap_page(parent, desc->lli.sar, + desc->total_len, DMA_TO_DEVICE); + } + } + + spin_unlock_irqrestore(&dwc->lock, flags); + + if (callback) + callback(param); +} + +static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + struct dw_desc *desc, *_desc; + LIST_HEAD(list); + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(chan2dev(&dwc->chan), + "BUG: XFER bit set, but channel not idle!\n"); + + /* Try to continue after resetting the channel... */ + dwc_chan_disable(dw, dwc); + } + + /* + * Submit queued descriptors ASAP, i.e. before we go through + * the completed ones. 
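+ * This keeps the channel busy while the completion callbacks run.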
+ */ + list_splice_init(&dwc->active_list, &list); + if (!list_empty(&dwc->queue)) { + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); + } + + spin_unlock_irqrestore(&dwc->lock, flags); + + list_for_each_entry_safe(desc, _desc, &list, desc_node) + dwc_descriptor_complete(dwc, desc, true); +} + +/* Returns how many bytes were already received from source */ +static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) +{ + u32 ctlhi = channel_readl(dwc, CTL_HI); + u32 ctllo = channel_readl(dwc, CTL_LO); + + return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); +} + +static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + dma_addr_t llp; + struct dw_desc *desc, *_desc; + struct dw_desc *child; + u32 status_xfer; + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + llp = channel_readl(dwc, LLP); + status_xfer = dma_readl(dw, RAW.XFER); + + if (status_xfer & dwc->mask) { + /* Everything we've submitted is done */ + dma_writel(dw, CLEAR.XFER, dwc->mask); + + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { + struct list_head *head, *active = dwc->tx_node_active; + + /* + * We are inside first active descriptor. + * Otherwise something is really wrong. + */ + desc = dwc_first_active(dwc); + + head = &desc->tx_list; + if (active != head) { + /* Update desc to reflect last sent one */ + if (active != head->next) + desc = to_dw_desc(active->prev); + + dwc->residue -= desc->len; + + child = to_dw_desc(active); + + /* Submit next block */ + dwc_do_single_block(dwc, child); + + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + /* We are done here */ + clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); + } + + dwc->residue = 0; + + spin_unlock_irqrestore(&dwc->lock, flags); + + dwc_complete_all(dw, dwc); + return; + } + + if (list_empty(&dwc->active_list)) { + dwc->residue = 0; + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { + dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, + (unsigned long long)llp); + + list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { + /* Initial residue value */ + dwc->residue = desc->total_len; + + /* Check first descriptors addr */ + if (desc->txd.phys == llp) { + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + /* Check first descriptors llp */ + if (desc->lli.llp == llp) { + /* This one is currently in progress */ + dwc->residue -= dwc_get_sent(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + dwc->residue -= desc->len; + list_for_each_entry(child, &desc->tx_list, desc_node) { + if (child->lli.llp == llp) { + /* Currently in progress */ + dwc->residue -= dwc_get_sent(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + dwc->residue -= child->len; + } + + /* + * No descriptors so far seem to be in progress, i.e. + * this one must be done. + */ + spin_unlock_irqrestore(&dwc->lock, flags); + dwc_descriptor_complete(dwc, desc, true); + spin_lock_irqsave(&dwc->lock, flags); + } + + dev_err(chan2dev(&dwc->chan), + "BUG: All descriptors done, but channel not idle!\n"); + + /* Try to continue after resetting the channel... 
*/ + dwc_chan_disable(dw, dwc); + + if (!list_empty(&dwc->queue)) { + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); + } + spin_unlock_irqrestore(&dwc->lock, flags); +} + +static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) +{ + dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", + lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); +} + +static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + struct dw_desc *bad_desc; + struct dw_desc *child; + unsigned long flags; + + dwc_scan_descriptors(dw, dwc); + + spin_lock_irqsave(&dwc->lock, flags); + + /* + * The descriptor currently at the head of the active list is + * borked. Since we don't have any way to report errors, we'll + * just have to scream loudly and try to carry on. + */ + bad_desc = dwc_first_active(dwc); + list_del_init(&bad_desc->desc_node); + list_move(dwc->queue.next, dwc->active_list.prev); + + /* Clear the error flag and try to restart the controller */ + dma_writel(dw, CLEAR.ERROR, dwc->mask); + if (!list_empty(&dwc->active_list)) + dwc_dostart(dwc, dwc_first_active(dwc)); + + /* + * WARN may seem harsh, but since this only happens + * when someone submits a bad physical address in a + * descriptor, we should consider ourselves lucky that the + * controller flagged an error instead of scribbling over + * random memory locations. + */ + dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" + " cookie: %d\n", bad_desc->txd.cookie); + dwc_dump_lli(dwc, &bad_desc->lli); + list_for_each_entry(child, &bad_desc->tx_list, desc_node) + dwc_dump_lli(dwc, &child->lli); + + spin_unlock_irqrestore(&dwc->lock, flags); + + /* Pretend the descriptor completed successfully */ + dwc_descriptor_complete(dwc, bad_desc, true); +} + +/* --------------------- Cyclic DMA API extensions -------------------- */ + +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + return channel_readl(dwc, SAR); +} +EXPORT_SYMBOL(dw_dma_get_src_addr); + +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + return channel_readl(dwc, DAR); +} +EXPORT_SYMBOL(dw_dma_get_dst_addr); + +/* Called with dwc->lock held and all DMAC interrupts disabled */ +static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, + u32 status_err, u32 status_xfer) +{ + unsigned long flags; + + if (dwc->mask) { + void (*callback)(void *param); + void *callback_param; + + dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", + channel_readl(dwc, LLP)); + + callback = dwc->cdesc->period_callback; + callback_param = dwc->cdesc->period_callback_param; + + if (callback) + callback(callback_param); + } + + /* + * Error and transfer complete are highly unlikely, and will most + * likely be due to a configuration error by the user. + */ + if (unlikely(status_err & dwc->mask) || + unlikely(status_xfer & dwc->mask)) { + int i; + + dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " + "interrupt, stopping DMA transfer\n", + status_xfer ? 
"xfer" : "error"); + + spin_lock_irqsave(&dwc->lock, flags); + + dwc_dump_chan_regs(dwc); + + dwc_chan_disable(dw, dwc); + + /* Make sure DMA does not restart by loading a new list */ + channel_writel(dwc, LLP, 0); + channel_writel(dwc, CTL_LO, 0); + channel_writel(dwc, CTL_HI, 0); + + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + for (i = 0; i < dwc->cdesc->periods; i++) + dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); + + spin_unlock_irqrestore(&dwc->lock, flags); + } +} + +/* ------------------------------------------------------------------------- */ + +static void dw_dma_tasklet(unsigned long data) +{ + struct dw_dma *dw = (struct dw_dma *)data; + struct dw_dma_chan *dwc; + u32 status_xfer; + u32 status_err; + int i; + + status_xfer = dma_readl(dw, RAW.XFER); + status_err = dma_readl(dw, RAW.ERROR); + + dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); + + for (i = 0; i < dw->dma.chancnt; i++) { + dwc = &dw->chan[i]; + if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) + dwc_handle_cyclic(dw, dwc, status_err, status_xfer); + else if (status_err & (1 << i)) + dwc_handle_error(dw, dwc); + else if (status_xfer & (1 << i)) + dwc_scan_descriptors(dw, dwc); + } + + /* + * Re-enable interrupts. + */ + channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); + channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); +} + +static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) +{ + struct dw_dma *dw = dev_id; + u32 status; + + dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, + dma_readl(dw, STATUS_INT)); + + /* + * Just disable the interrupts. We'll turn them back on in the + * softirq handler. + */ + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); + + status = dma_readl(dw, STATUS_INT); + if (status) { + dev_err(dw->dma.dev, + "BUG: Unexpected interrupts pending: 0x%x\n", + status); + + /* Try to recover */ + channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); + channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); + channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); + channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); + } + + tasklet_schedule(&dw->tasklet); + + return IRQ_HANDLED; +} + +/*----------------------------------------------------------------------*/ + +static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct dw_desc *desc = txd_to_dw_desc(tx); + struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); + dma_cookie_t cookie; + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + cookie = dma_cookie_assign(tx); + + /* + * REVISIT: We should attempt to chain as many descriptors as + * possible, perhaps even appending to those already submitted + * for DMA. But this is hard to do in a race-free manner. 
+ */ + if (list_empty(&dwc->active_list)) { + dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, + desc->txd.cookie); + list_add_tail(&desc->desc_node, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); + } else { + dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, + desc->txd.cookie); + + list_add_tail(&desc->desc_node, &dwc->queue); + } + + spin_unlock_irqrestore(&dwc->lock, flags); + + return cookie; +} + +static struct dma_async_tx_descriptor * +dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc; + struct dw_desc *first; + struct dw_desc *prev; + size_t xfer_count; + size_t offset; + unsigned int src_width; + unsigned int dst_width; + unsigned int data_width; + u32 ctllo; + + dev_vdbg(chan2dev(chan), + "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, + (unsigned long long)dest, (unsigned long long)src, + len, flags); + + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); + return NULL; + } + + dwc->direction = DMA_MEM_TO_MEM; + + data_width = min_t(unsigned int, dw->data_width[dwc->src_master], + dw->data_width[dwc->dst_master]); + + src_width = dst_width = min_t(unsigned int, data_width, + dwc_fast_fls(src | dest | len)); + + ctllo = DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_DST_WIDTH(dst_width) + | DWC_CTLL_SRC_WIDTH(src_width) + | DWC_CTLL_DST_INC + | DWC_CTLL_SRC_INC + | DWC_CTLL_FC_M2M; + prev = first = NULL; + + for (offset = 0; offset < len; offset += xfer_count << src_width) { + xfer_count = min_t(size_t, (len - offset) >> src_width, + dwc->block_size); + + desc = dwc_desc_get(dwc); + if (!desc) + goto err_desc_get; + + desc->lli.sar = src + offset; + desc->lli.dar = dest + offset; + desc->lli.ctllo = ctllo; + desc->lli.ctlhi = xfer_count; + desc->len = xfer_count << src_width; + + if (!first) { + first = desc; + } else { + prev->lli.llp = desc->txd.phys; + list_add_tail(&desc->desc_node, + &first->tx_list); + } + prev = desc; + } + + if (flags & DMA_PREP_INTERRUPT) + /* Trigger interrupt after last block */ + prev->lli.ctllo |= DWC_CTLL_INT_EN; + + prev->lli.llp = 0; + first->txd.flags = flags; + first->total_len = len; + + return &first->txd; + +err_desc_get: + dwc_desc_put(dwc, first); + return NULL; +} + +static struct dma_async_tx_descriptor * +dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dma_slave_config *sconfig = &dwc->dma_sconfig; + struct dw_desc *prev; + struct dw_desc *first; + u32 ctllo; + dma_addr_t reg; + unsigned int reg_width; + unsigned int mem_width; + unsigned int data_width; + unsigned int i; + struct scatterlist *sg; + size_t total_len = 0; + + dev_vdbg(chan2dev(chan), "%s\n", __func__); + + if (unlikely(!is_slave_direction(direction) || !sg_len)) + return NULL; + + dwc->direction = direction; + + prev = first = NULL; + + switch (direction) { + case DMA_MEM_TO_DEV: + reg_width = __fls(sconfig->dst_addr_width); + reg = sconfig->dst_addr; + ctllo = (DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_DST_FIX + | DWC_CTLL_SRC_INC); + + ctllo |= sconfig->device_fc ? 
DWC_CTLL_FC(DW_DMA_FC_P_M2P) : + DWC_CTLL_FC(DW_DMA_FC_D_M2P); + + data_width = dw->data_width[dwc->src_master]; + + for_each_sg(sgl, sg, sg_len, i) { + struct dw_desc *desc; + u32 len, dlen, mem; + + mem = sg_dma_address(sg); + len = sg_dma_len(sg); + + mem_width = min_t(unsigned int, + data_width, dwc_fast_fls(mem | len)); + +slave_sg_todev_fill_desc: + desc = dwc_desc_get(dwc); + if (!desc) { + dev_err(chan2dev(chan), + "not enough descriptors available\n"); + goto err_desc_get; + } + + desc->lli.sar = mem; + desc->lli.dar = reg; + desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); + if ((len >> mem_width) > dwc->block_size) { + dlen = dwc->block_size << mem_width; + mem += dlen; + len -= dlen; + } else { + dlen = len; + len = 0; + } + + desc->lli.ctlhi = dlen >> mem_width; + desc->len = dlen; + + if (!first) { + first = desc; + } else { + prev->lli.llp = desc->txd.phys; + list_add_tail(&desc->desc_node, + &first->tx_list); + } + prev = desc; + total_len += dlen; + + if (len) + goto slave_sg_todev_fill_desc; + } + break; + case DMA_DEV_TO_MEM: + reg_width = __fls(sconfig->src_addr_width); + reg = sconfig->src_addr; + ctllo = (DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_INC + | DWC_CTLL_SRC_FIX); + + ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : + DWC_CTLL_FC(DW_DMA_FC_D_P2M); + + data_width = dw->data_width[dwc->dst_master]; + + for_each_sg(sgl, sg, sg_len, i) { + struct dw_desc *desc; + u32 len, dlen, mem; + + mem = sg_dma_address(sg); + len = sg_dma_len(sg); + + mem_width = min_t(unsigned int, + data_width, dwc_fast_fls(mem | len)); + +slave_sg_fromdev_fill_desc: + desc = dwc_desc_get(dwc); + if (!desc) { + dev_err(chan2dev(chan), + "not enough descriptors available\n"); + goto err_desc_get; + } + + desc->lli.sar = reg; + desc->lli.dar = mem; + desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); + if ((len >> reg_width) > dwc->block_size) { + dlen = dwc->block_size << reg_width; + mem += dlen; + len -= dlen; + } else { + dlen = len; + len = 0; + } + desc->lli.ctlhi = dlen >> reg_width; + desc->len = dlen; + + if (!first) { + first = desc; + } else { + prev->lli.llp = desc->txd.phys; + list_add_tail(&desc->desc_node, + &first->tx_list); + } + prev = desc; + total_len += dlen; + + if (len) + goto slave_sg_fromdev_fill_desc; + } + break; + default: + return NULL; + } + + if (flags & DMA_PREP_INTERRUPT) + /* Trigger interrupt after last block */ + prev->lli.ctllo |= DWC_CTLL_INT_EN; + + prev->lli.llp = 0; + first->total_len = total_len; + + return &first->txd; + +err_desc_get: + dwc_desc_put(dwc, first); + return NULL; +} + +/* + * Fix sconfig's burst size according to dw_dmac. We need to convert them as: + * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. + * + * NOTE: burst size 2 is not supported by controller. 
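+ * A maxburst of 2 therefore falls back to the single-element encoding (0).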
+ * + * This can be done by finding least significant bit set: n & (n - 1) + */ +static inline void convert_burst(u32 *maxburst) +{ + if (*maxburst > 1) + *maxburst = fls(*maxburst) - 2; + else + *maxburst = 0; +} + +static int +set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + + /* Check if chan will be configured for slave transfers */ + if (!is_slave_direction(sconfig->direction)) + return -EINVAL; + + memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); + dwc->direction = sconfig->direction; + + /* Take the request line from slave_id member */ + if (dwc->request_line == ~0) + dwc->request_line = sconfig->slave_id; + + convert_burst(&dwc->dma_sconfig.src_maxburst); + convert_burst(&dwc->dma_sconfig.dst_maxburst); + + return 0; +} + +static inline void dwc_chan_pause(struct dw_dma_chan *dwc) +{ + u32 cfglo = channel_readl(dwc, CFG_LO); + unsigned int count = 20; /* timeout iterations */ + + channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); + while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) + udelay(2); + + dwc->paused = true; +} + +static inline void dwc_chan_resume(struct dw_dma_chan *dwc) +{ + u32 cfglo = channel_readl(dwc, CFG_LO); + + channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); + + dwc->paused = false; +} + +static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc, *_desc; + unsigned long flags; + LIST_HEAD(list); + + if (cmd == DMA_PAUSE) { + spin_lock_irqsave(&dwc->lock, flags); + + dwc_chan_pause(dwc); + + spin_unlock_irqrestore(&dwc->lock, flags); + } else if (cmd == DMA_RESUME) { + if (!dwc->paused) + return 0; + + spin_lock_irqsave(&dwc->lock, flags); + + dwc_chan_resume(dwc); + + spin_unlock_irqrestore(&dwc->lock, flags); + } else if (cmd == DMA_TERMINATE_ALL) { + spin_lock_irqsave(&dwc->lock, flags); + + clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); + + dwc_chan_disable(dw, dwc); + + dwc_chan_resume(dwc); + + /* active_list entries will end up before queued entries */ + list_splice_init(&dwc->queue, &list); + list_splice_init(&dwc->active_list, &list); + + spin_unlock_irqrestore(&dwc->lock, flags); + + /* Flush all pending and queued descriptors */ + list_for_each_entry_safe(desc, _desc, &list, desc_node) + dwc_descriptor_complete(dwc, desc, false); + } else if (cmd == DMA_SLAVE_CONFIG) { + return set_runtime_config(chan, (struct dma_slave_config *)arg); + } else { + return -ENXIO; + } + + return 0; +} + +static inline u32 dwc_get_residue(struct dw_dma_chan *dwc) +{ + unsigned long flags; + u32 residue; + + spin_lock_irqsave(&dwc->lock, flags); + + residue = dwc->residue; + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) + residue -= dwc_get_sent(dwc); + + spin_unlock_irqrestore(&dwc->lock, flags); + return residue; +} + +static enum dma_status +dwc_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + enum dma_status ret; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret != DMA_SUCCESS) { + dwc_scan_descriptors(to_dw_dma(chan->device), dwc); + + ret = dma_cookie_status(chan, cookie, txstate); + } + + if (ret != DMA_SUCCESS) + dma_set_residue(txstate, dwc_get_residue(dwc)); + + if (dwc->paused) + return DMA_PAUSED; + + return ret; +} + +static void dwc_issue_pending(struct dma_chan *chan) +{ + 
struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + + if (!list_empty(&dwc->queue)) + dwc_scan_descriptors(to_dw_dma(chan->device), dwc); +} + +static int dwc_alloc_chan_resources(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc; + int i; + unsigned long flags; + + dev_vdbg(chan2dev(chan), "%s\n", __func__); + + /* ASSERT: channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); + return -EIO; + } + + dma_cookie_init(chan); + + /* + * NOTE: some controllers may have additional features that we + * need to initialize here, like "scatter-gather" (which + * doesn't mean what you think it means), and status writeback. + */ + + dwc_set_masters(dwc); + + spin_lock_irqsave(&dwc->lock, flags); + i = dwc->descs_allocated; + while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { + dma_addr_t phys; + + spin_unlock_irqrestore(&dwc->lock, flags); + + desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); + if (!desc) + goto err_desc_alloc; + + memset(desc, 0, sizeof(struct dw_desc)); + + INIT_LIST_HEAD(&desc->tx_list); + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.tx_submit = dwc_tx_submit; + desc->txd.flags = DMA_CTRL_ACK; + desc->txd.phys = phys; + + dwc_desc_put(dwc, desc); + + spin_lock_irqsave(&dwc->lock, flags); + i = ++dwc->descs_allocated; + } + + spin_unlock_irqrestore(&dwc->lock, flags); + + dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); + + return i; + +err_desc_alloc: + dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); + + return i; +} + +static void dwc_free_chan_resources(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc, *_desc; + unsigned long flags; + LIST_HEAD(list); + + dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, + dwc->descs_allocated); + + /* ASSERT: channel is idle */ + BUG_ON(!list_empty(&dwc->active_list)); + BUG_ON(!list_empty(&dwc->queue)); + BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); + + spin_lock_irqsave(&dwc->lock, flags); + list_splice_init(&dwc->free_list, &list); + dwc->descs_allocated = 0; + dwc->initialized = false; + dwc->request_line = ~0; + + /* Disable interrupts */ + channel_clear_bit(dw, MASK.XFER, dwc->mask); + channel_clear_bit(dw, MASK.ERROR, dwc->mask); + + spin_unlock_irqrestore(&dwc->lock, flags); + + list_for_each_entry_safe(desc, _desc, &list, desc_node) { + dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); + dma_pool_free(dw->desc_pool, desc, desc->txd.phys); + } + + dev_vdbg(chan2dev(chan), "%s: done\n", __func__); +} + +/*----------------------------------------------------------------------*/ + +struct dw_dma_of_filter_args { + struct dw_dma *dw; + unsigned int req; + unsigned int src; + unsigned int dst; +}; + +static bool dw_dma_of_filter(struct dma_chan *chan, void *param) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma_of_filter_args *fargs = param; + + /* Ensure the device matches our channel */ + if (chan->device != &fargs->dw->dma) + return false; + + dwc->request_line = fargs->req; + dwc->src_master = fargs->src; + dwc->dst_master = fargs->dst; + + return true; +} + +static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct dw_dma *dw = ofdma->of_dma_data; + struct dw_dma_of_filter_args fargs = { + .dw = dw, + }; + 
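+ /* The three cells in the DT specifier are: request line, source master, destination master. */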
dma_cap_mask_t cap; + + if (dma_spec->args_count != 3) + return NULL; + + fargs.req = dma_spec->args[0]; + fargs.src = dma_spec->args[1]; + fargs.dst = dma_spec->args[2]; + + if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || + fargs.src >= dw->nr_masters || + fargs.dst >= dw->nr_masters)) + return NULL; + + dma_cap_zero(cap); + dma_cap_set(DMA_SLAVE, cap); + + /* TODO: there should be a simpler way to do this */ + return dma_request_channel(cap, dw_dma_of_filter, &fargs); +} + +#ifdef CONFIG_ACPI +static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct acpi_dma_spec *dma_spec = param; + + if (chan->device->dev != dma_spec->dev || + chan->chan_id != dma_spec->chan_id) + return false; + + dwc->request_line = dma_spec->slave_id; + dwc->src_master = dwc_get_sms(NULL); + dwc->dst_master = dwc_get_dms(NULL); + + return true; +} + +static void dw_dma_acpi_controller_register(struct dw_dma *dw) +{ + struct device *dev = dw->dma.dev; + struct acpi_dma_filter_info *info; + int ret; + + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); + if (!info) + return; + + dma_cap_zero(info->dma_cap); + dma_cap_set(DMA_SLAVE, info->dma_cap); + info->filter_fn = dw_dma_acpi_filter; + + ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, + info); + if (ret) + dev_err(dev, "could not register acpi_dma_controller\n"); +} +#else /* !CONFIG_ACPI */ +static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {} +#endif /* !CONFIG_ACPI */ + +/* --------------------- Cyclic DMA API extensions -------------------- */ + +/** + * dw_dma_cyclic_start - start the cyclic DMA transfer + * @chan: the DMA channel to start + * + * Must be called with soft interrupts disabled. Returns zero on success or + * -errno on failure. + */ +int dw_dma_cyclic_start(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + unsigned long flags; + + if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { + dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); + return -ENODEV; + } + + spin_lock_irqsave(&dwc->lock, flags); + + /* Assert channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(chan2dev(&dwc->chan), + "BUG: Attempted to start non-idle channel\n"); + dwc_dump_chan_regs(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + return -EBUSY; + } + + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + /* Setup DMAC channel registers */ + channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); + channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); + channel_writel(dwc, CTL_HI, 0); + + channel_set_bit(dw, CH_EN, dwc->mask); + + spin_unlock_irqrestore(&dwc->lock, flags); + + return 0; +} +EXPORT_SYMBOL(dw_dma_cyclic_start); + +/** + * dw_dma_cyclic_stop - stop the cyclic DMA transfer + * @chan: the DMA channel to stop + * + * Must be called with soft interrupts disabled. 
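+ * Only the channel is disabled here; the prepared cyclic descriptors stay allocated until dw_dma_cyclic_free().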
+ */ +void dw_dma_cyclic_stop(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + + dwc_chan_disable(dw, dwc); + + spin_unlock_irqrestore(&dwc->lock, flags); +} +EXPORT_SYMBOL(dw_dma_cyclic_stop); + +/** + * dw_dma_cyclic_prep - prepare the cyclic DMA transfer + * @chan: the DMA channel to prepare + * @buf_addr: physical DMA address where the buffer starts + * @buf_len: total number of bytes for the entire buffer + * @period_len: number of bytes for each period + * @direction: transfer direction, to or from device + * + * Must be called before trying to start the transfer. Returns a valid struct + * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. + */ +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_transfer_direction direction) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dma_slave_config *sconfig = &dwc->dma_sconfig; + struct dw_cyclic_desc *cdesc; + struct dw_cyclic_desc *retval = NULL; + struct dw_desc *desc; + struct dw_desc *last = NULL; + unsigned long was_cyclic; + unsigned int reg_width; + unsigned int periods; + unsigned int i; + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + if (dwc->nollp) { + spin_unlock_irqrestore(&dwc->lock, flags); + dev_dbg(chan2dev(&dwc->chan), + "channel doesn't support LLP transfers\n"); + return ERR_PTR(-EINVAL); + } + + if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { + spin_unlock_irqrestore(&dwc->lock, flags); + dev_dbg(chan2dev(&dwc->chan), + "queue and/or active list are not empty\n"); + return ERR_PTR(-EBUSY); + } + + was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); + spin_unlock_irqrestore(&dwc->lock, flags); + if (was_cyclic) { + dev_dbg(chan2dev(&dwc->chan), + "channel already prepared for cyclic DMA\n"); + return ERR_PTR(-EBUSY); + } + + retval = ERR_PTR(-EINVAL); + + if (unlikely(!is_slave_direction(direction))) + goto out_err; + + dwc->direction = direction; + + if (direction == DMA_MEM_TO_DEV) + reg_width = __ffs(sconfig->dst_addr_width); + else + reg_width = __ffs(sconfig->src_addr_width); + + periods = buf_len / period_len; + + /* Check for too big/unaligned periods and unaligned DMA buffer. */ + if (period_len > (dwc->block_size << reg_width)) + goto out_err; + if (unlikely(period_len & ((1 << reg_width) - 1))) + goto out_err; + if (unlikely(buf_addr & ((1 << reg_width) - 1))) + goto out_err; + + retval = ERR_PTR(-ENOMEM); + + if (periods > NR_DESCS_PER_CHANNEL) + goto out_err; + + cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); + if (!cdesc) + goto out_err; + + cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); + if (!cdesc->desc) + goto out_err_alloc; + + for (i = 0; i < periods; i++) { + desc = dwc_desc_get(dwc); + if (!desc) + goto out_err_desc_get; + + switch (direction) { + case DMA_MEM_TO_DEV: + desc->lli.dar = sconfig->dst_addr; + desc->lli.sar = buf_addr + (period_len * i); + desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_FIX + | DWC_CTLL_SRC_INC + | DWC_CTLL_INT_EN); + + desc->lli.ctllo |= sconfig->device_fc ? 
+ DWC_CTLL_FC(DW_DMA_FC_P_M2P) : + DWC_CTLL_FC(DW_DMA_FC_D_M2P); + + break; + case DMA_DEV_TO_MEM: + desc->lli.dar = buf_addr + (period_len * i); + desc->lli.sar = sconfig->src_addr; + desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_DST_INC + | DWC_CTLL_SRC_FIX + | DWC_CTLL_INT_EN); + + desc->lli.ctllo |= sconfig->device_fc ? + DWC_CTLL_FC(DW_DMA_FC_P_P2M) : + DWC_CTLL_FC(DW_DMA_FC_D_P2M); + + break; + default: + break; + } + + desc->lli.ctlhi = (period_len >> reg_width); + cdesc->desc[i] = desc; + + if (last) + last->lli.llp = desc->txd.phys; + + last = desc; + } + + /* Let's make a cyclic list */ + last->lli.llp = cdesc->desc[0]->txd.phys; + + dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " + "period %zu periods %d\n", (unsigned long long)buf_addr, + buf_len, period_len, periods); + + cdesc->periods = periods; + dwc->cdesc = cdesc; + + return cdesc; + +out_err_desc_get: + while (i--) + dwc_desc_put(dwc, cdesc->desc[i]); +out_err_alloc: + kfree(cdesc); +out_err: + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); + return (struct dw_cyclic_desc *)retval; +} +EXPORT_SYMBOL(dw_dma_cyclic_prep); + +/** + * dw_dma_cyclic_free - free a prepared cyclic DMA transfer + * @chan: the DMA channel to free + */ +void dw_dma_cyclic_free(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_cyclic_desc *cdesc = dwc->cdesc; + int i; + unsigned long flags; + + dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); + + if (!cdesc) + return; + + spin_lock_irqsave(&dwc->lock, flags); + + dwc_chan_disable(dw, dwc); + + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + spin_unlock_irqrestore(&dwc->lock, flags); + + for (i = 0; i < cdesc->periods; i++) + dwc_desc_put(dwc, cdesc->desc[i]); + + kfree(cdesc->desc); + kfree(cdesc); + + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); +} +EXPORT_SYMBOL(dw_dma_cyclic_free); + +/*----------------------------------------------------------------------*/ + +static void dw_dma_off(struct dw_dma *dw) +{ + int i; + + dma_writel(dw, CFG, 0); + + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); + channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); + channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); + + while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) + cpu_relax(); + + for (i = 0; i < dw->dma.chancnt; i++) + dw->chan[i].initialized = false; +} + +#ifdef CONFIG_OF +static struct dw_dma_platform_data * +dw_dma_parse_dt(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct dw_dma_platform_data *pdata; + u32 tmp, arr[4]; + + if (!np) { + dev_err(&pdev->dev, "Missing DT data\n"); + return NULL; + } + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels)) + return NULL; + + if (of_property_read_bool(np, "is_private")) + pdata->is_private = true; + + if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) + pdata->chan_allocation_order = (unsigned char)tmp; + + if (!of_property_read_u32(np, "chan_priority", &tmp)) + pdata->chan_priority = tmp; + + if (!of_property_read_u32(np, "block_size", &tmp)) + pdata->block_size = tmp; + + if (!of_property_read_u32(np, "dma-masters", &tmp)) { + if (tmp > 4) + return NULL; + + pdata->nr_masters = tmp; + } + + if 
(!of_property_read_u32_array(np, "data_width", arr, + pdata->nr_masters)) + for (tmp = 0; tmp < pdata->nr_masters; tmp++) + pdata->data_width[tmp] = arr[tmp]; + + return pdata; +} +#else +static inline struct dw_dma_platform_data * +dw_dma_parse_dt(struct platform_device *pdev) +{ + return NULL; +} +#endif + +static int dw_probe(struct platform_device *pdev) +{ + struct dw_dma_platform_data *pdata; + struct resource *io; + struct dw_dma *dw; + size_t size; + void __iomem *regs; + bool autocfg; + unsigned int dw_params; + unsigned int nr_channels; + unsigned int max_blk_size = 0; + int irq; + int err; + int i; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&pdev->dev, io); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + /* Apply default dma_mask if needed */ + if (!pdev->dev.dma_mask) { + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + } + + dw_params = dma_read_byaddr(regs, DW_PARAMS); + autocfg = dw_params >> DW_PARAMS_EN & 0x1; + + dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params); + + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) + pdata = dw_dma_parse_dt(pdev); + + if (!pdata && autocfg) { + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + /* Fill platform data with the default values */ + pdata->is_private = true; + pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; + pdata->chan_priority = CHAN_PRIORITY_ASCENDING; + } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) + return -EINVAL; + + if (autocfg) + nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; + else + nr_channels = pdata->nr_channels; + + size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); + dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!dw) + return -ENOMEM; + + dw->clk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(dw->clk)) + return PTR_ERR(dw->clk); + clk_prepare_enable(dw->clk); + + dw->regs = regs; + + /* Get hardware configuration parameters */ + if (autocfg) { + max_blk_size = dma_readl(dw, MAX_BLK_SIZE); + + dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; + for (i = 0; i < dw->nr_masters; i++) { + dw->data_width[i] = + (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; + } + } else { + dw->nr_masters = pdata->nr_masters; + memcpy(dw->data_width, pdata->data_width, 4); + } + + /* Calculate all channel mask before DMA setup */ + dw->all_chan_mask = (1 << nr_channels) - 1; + + /* Force dma off, just in case */ + dw_dma_off(dw); + + /* Disable BLOCK interrupts as well */ + channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); + + err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, + "dw_dmac", dw); + if (err) + return err; + + platform_set_drvdata(pdev, dw); + + /* Create a pool of consistent memory blocks for hardware descriptors */ + dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, + sizeof(struct dw_desc), 4, 0); + if (!dw->desc_pool) { + dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); + return -ENOMEM; + } + + tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); + + INIT_LIST_HEAD(&dw->dma.channels); + for (i = 0; i < nr_channels; i++) { + struct dw_dma_chan *dwc = &dw->chan[i]; + int r = nr_channels - i - 1; + + dwc->chan.device = &dw->dma; + dma_cookie_init(&dwc->chan); + if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) + list_add_tail(&dwc->chan.device_node, + 
&dw->dma.channels); + else + list_add(&dwc->chan.device_node, &dw->dma.channels); + + /* 7 is highest priority & 0 is lowest. */ + if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) + dwc->priority = r; + else + dwc->priority = i; + + dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; + spin_lock_init(&dwc->lock); + dwc->mask = 1 << i; + + INIT_LIST_HEAD(&dwc->active_list); + INIT_LIST_HEAD(&dwc->queue); + INIT_LIST_HEAD(&dwc->free_list); + + channel_clear_bit(dw, CH_EN, dwc->mask); + + dwc->direction = DMA_TRANS_NONE; + dwc->request_line = ~0; + + /* Hardware configuration */ + if (autocfg) { + unsigned int dwc_params; + + dwc_params = dma_read_byaddr(regs + r * sizeof(u32), + DWC_PARAMS); + + dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, + dwc_params); + + /* Decode maximum block size for given channel. The + * stored 4 bit value represents blocks from 0x00 for 3 + * up to 0x0a for 4095. */ + dwc->block_size = + (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; + dwc->nollp = + (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; + } else { + dwc->block_size = pdata->block_size; + + /* Check if channel supports multi block transfer */ + channel_writel(dwc, LLP, 0xfffffffc); + dwc->nollp = + (channel_readl(dwc, LLP) & 0xfffffffc) == 0; + channel_writel(dwc, LLP, 0); + } + } + + /* Clear all interrupts on all channels. */ + dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); + dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); + dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); + dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); + dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); + + dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); + dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); + if (pdata->is_private) + dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); + dw->dma.dev = &pdev->dev; + dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; + dw->dma.device_free_chan_resources = dwc_free_chan_resources; + + dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; + + dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; + dw->dma.device_control = dwc_control; + + dw->dma.device_tx_status = dwc_tx_status; + dw->dma.device_issue_pending = dwc_issue_pending; + + dma_writel(dw, CFG, DW_CFG_DMA_EN); + + dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n", + nr_channels); + + dma_async_device_register(&dw->dma); + + if (pdev->dev.of_node) { + err = of_dma_controller_register(pdev->dev.of_node, + dw_dma_of_xlate, dw); + if (err) + dev_err(&pdev->dev, + "could not register of_dma_controller\n"); + } + + if (ACPI_HANDLE(&pdev->dev)) + dw_dma_acpi_controller_register(dw); + + return 0; +} + +static int dw_remove(struct platform_device *pdev) +{ + struct dw_dma *dw = platform_get_drvdata(pdev); + struct dw_dma_chan *dwc, *_dwc; + + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); + dw_dma_off(dw); + dma_async_device_unregister(&dw->dma); + + tasklet_kill(&dw->tasklet); + + list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, + chan.device_node) { + list_del(&dwc->chan.device_node); + channel_clear_bit(dw, CH_EN, dwc->mask); + } + + return 0; +} + +static void dw_shutdown(struct platform_device *pdev) +{ + struct dw_dma *dw = platform_get_drvdata(pdev); + + dw_dma_off(dw); + clk_disable_unprepare(dw->clk); +} + +static int dw_suspend_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dw_dma *dw = platform_get_drvdata(pdev); + + dw_dma_off(dw); + clk_disable_unprepare(dw->clk); + + return 0; +} + +static int dw_resume_noirq(struct device *dev) +{ + struct 
platform_device *pdev = to_platform_device(dev); + struct dw_dma *dw = platform_get_drvdata(pdev); + + clk_prepare_enable(dw->clk); + dma_writel(dw, CFG, DW_CFG_DMA_EN); + + return 0; +} + +static const struct dev_pm_ops dw_dev_pm_ops = { + .suspend_noirq = dw_suspend_noirq, + .resume_noirq = dw_resume_noirq, + .freeze_noirq = dw_suspend_noirq, + .thaw_noirq = dw_resume_noirq, + .restore_noirq = dw_resume_noirq, + .poweroff_noirq = dw_suspend_noirq, +}; + +#ifdef CONFIG_OF +static const struct of_device_id dw_dma_of_id_table[] = { + { .compatible = "snps,dma-spear1340" }, + {} +}; +MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id dw_dma_acpi_id_table[] = { + { "INTL9C60", 0 }, + { } +}; +#endif + +static struct platform_driver dw_driver = { + .probe = dw_probe, + .remove = dw_remove, + .shutdown = dw_shutdown, + .driver = { + .name = "dw_dmac", + .pm = &dw_dev_pm_ops, + .of_match_table = of_match_ptr(dw_dma_of_id_table), + .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), + }, +}; + +static int __init dw_init(void) +{ + return platform_driver_register(&dw_driver); +} +subsys_initcall(dw_init); + +static void __exit dw_exit(void) +{ + platform_driver_unregister(&dw_driver); +} +module_exit(dw_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); +MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); +MODULE_AUTHOR("Viresh Kumar "); diff --git a/drivers/dma/dw/dw_dmac_regs.h b/drivers/dma/dw/dw_dmac_regs.h new file mode 100644 index 000000000000..9d417200bd57 --- /dev/null +++ b/drivers/dma/dw/dw_dmac_regs.h @@ -0,0 +1,311 @@ +/* + * Driver for the Synopsys DesignWare AHB DMA Controller + * + * Copyright (C) 2005-2007 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +#define DW_DMA_MAX_NR_CHANNELS 8 +#define DW_DMA_MAX_NR_REQUESTS 16 + +/* flow controller */ +enum dw_dma_fc { + DW_DMA_FC_D_M2M, + DW_DMA_FC_D_M2P, + DW_DMA_FC_D_P2M, + DW_DMA_FC_D_P2P, + DW_DMA_FC_P_P2M, + DW_DMA_FC_SP_P2P, + DW_DMA_FC_P_M2P, + DW_DMA_FC_DP_P2P, +}; + +/* + * Redefine this macro to handle differences between 32- and 64-bit + * addressing, big vs. little endian, etc. + */ +#define DW_REG(name) u32 name; u32 __pad_##name + +/* Hardware register definitions. 
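+ * Each DW_REG slot is a 32-bit register followed by 32 bits of padding, matching the controller's 64-bit register spacing.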
*/ +struct dw_dma_chan_regs { + DW_REG(SAR); /* Source Address Register */ + DW_REG(DAR); /* Destination Address Register */ + DW_REG(LLP); /* Linked List Pointer */ + u32 CTL_LO; /* Control Register Low */ + u32 CTL_HI; /* Control Register High */ + DW_REG(SSTAT); + DW_REG(DSTAT); + DW_REG(SSTATAR); + DW_REG(DSTATAR); + u32 CFG_LO; /* Configuration Register Low */ + u32 CFG_HI; /* Configuration Register High */ + DW_REG(SGR); + DW_REG(DSR); +}; + +struct dw_dma_irq_regs { + DW_REG(XFER); + DW_REG(BLOCK); + DW_REG(SRC_TRAN); + DW_REG(DST_TRAN); + DW_REG(ERROR); +}; + +struct dw_dma_regs { + /* per-channel registers */ + struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS]; + + /* irq handling */ + struct dw_dma_irq_regs RAW; /* r */ + struct dw_dma_irq_regs STATUS; /* r (raw & mask) */ + struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */ + struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */ + + DW_REG(STATUS_INT); /* r */ + + /* software handshaking */ + DW_REG(REQ_SRC); + DW_REG(REQ_DST); + DW_REG(SGL_REQ_SRC); + DW_REG(SGL_REQ_DST); + DW_REG(LAST_SRC); + DW_REG(LAST_DST); + + /* miscellaneous */ + DW_REG(CFG); + DW_REG(CH_EN); + DW_REG(ID); + DW_REG(TEST); + + /* reserved */ + DW_REG(__reserved0); + DW_REG(__reserved1); + + /* optional encoded params, 0x3c8..0x3f7 */ + u32 __reserved; + + /* per-channel configuration registers */ + u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS]; + u32 MULTI_BLK_TYPE; + u32 MAX_BLK_SIZE; + + /* top-level parameters */ + u32 DW_PARAMS; +}; + +#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO +#define dma_readl_native ioread32be +#define dma_writel_native iowrite32be +#else +#define dma_readl_native readl +#define dma_writel_native writel +#endif + +/* To access the registers in early stage of probe */ +#define dma_read_byaddr(addr, name) \ + dma_readl_native((addr) + offsetof(struct dw_dma_regs, name)) + +/* Bitfields in DW_PARAMS */ +#define DW_PARAMS_NR_CHAN 8 /* number of channels */ +#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ +#define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n)) +#define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */ +#define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */ +#define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */ +#define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */ +#define DW_PARAMS_EN 28 /* encoded parameters */ + +/* Bitfields in DWC_PARAMS */ +#define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */ + +/* Bitfields in CTL_LO */ +#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? 
*/ +#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ +#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4) +#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */ +#define DWC_CTLL_DST_DEC (1<<7) +#define DWC_CTLL_DST_FIX (2<<7) +#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */ +#define DWC_CTLL_SRC_DEC (1<<9) +#define DWC_CTLL_SRC_FIX (2<<9) +#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */ +#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) +#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ +#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ +#define DWC_CTLL_FC(n) ((n) << 20) +#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ +#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ +#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ +#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */ +/* plus 4 transfer types for peripheral-as-flow-controller */ +#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */ +#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */ +#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */ +#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */ + +/* Bitfields in CTL_HI */ +#define DWC_CTLH_DONE 0x00001000 +#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff + +/* Bitfields in CFG_LO. Platform-configurable bits are in */ +#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ +#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ +#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ +#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ +#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ +#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */ +#define DWC_CFGL_MAX_BURST(x) ((x) << 20) +#define DWC_CFGL_RELOAD_SAR (1 << 30) +#define DWC_CFGL_RELOAD_DAR (1 << 31) + +/* Bitfields in CFG_HI. 
Platform-configurable bits are in */ +#define DWC_CFGH_DS_UPD_EN (1 << 5) +#define DWC_CFGH_SS_UPD_EN (1 << 6) + +/* Bitfields in SGR */ +#define DWC_SGR_SGI(x) ((x) << 0) +#define DWC_SGR_SGC(x) ((x) << 20) + +/* Bitfields in DSR */ +#define DWC_DSR_DSI(x) ((x) << 0) +#define DWC_DSR_DSC(x) ((x) << 20) + +/* Bitfields in CFG */ +#define DW_CFG_DMA_EN (1 << 0) + +enum dw_dmac_flags { + DW_DMA_IS_CYCLIC = 0, + DW_DMA_IS_SOFT_LLP = 1, +}; + +struct dw_dma_chan { + struct dma_chan chan; + void __iomem *ch_regs; + u8 mask; + u8 priority; + enum dma_transfer_direction direction; + bool paused; + bool initialized; + + /* software emulation of the LLP transfers */ + struct list_head *tx_node_active; + + spinlock_t lock; + + /* these other elements are all protected by lock */ + unsigned long flags; + struct list_head active_list; + struct list_head queue; + struct list_head free_list; + u32 residue; + struct dw_cyclic_desc *cdesc; + + unsigned int descs_allocated; + + /* hardware configuration */ + unsigned int block_size; + bool nollp; + + /* custom slave configuration */ + unsigned int request_line; + unsigned char src_master; + unsigned char dst_master; + + /* configuration passed via DMA_SLAVE_CONFIG */ + struct dma_slave_config dma_sconfig; +}; + +static inline struct dw_dma_chan_regs __iomem * +__dwc_regs(struct dw_dma_chan *dwc) +{ + return dwc->ch_regs; +} + +#define channel_readl(dwc, name) \ + dma_readl_native(&(__dwc_regs(dwc)->name)) +#define channel_writel(dwc, name, val) \ + dma_writel_native((val), &(__dwc_regs(dwc)->name)) + +static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct dw_dma_chan, chan); +} + +struct dw_dma { + struct dma_device dma; + void __iomem *regs; + struct dma_pool *desc_pool; + struct tasklet_struct tasklet; + struct clk *clk; + + u8 all_chan_mask; + + /* hardware configuration */ + unsigned char nr_masters; + unsigned char data_width[4]; + + struct dw_dma_chan chan[0]; +}; + +static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) +{ + return dw->regs; +} + +#define dma_readl(dw, name) \ + dma_readl_native(&(__dw_regs(dw)->name)) +#define dma_writel(dw, name, val) \ + dma_writel_native((val), &(__dw_regs(dw)->name)) + +#define channel_set_bit(dw, reg, mask) \ + dma_writel(dw, reg, ((mask) << 8) | (mask)) +#define channel_clear_bit(dw, reg, mask) \ + dma_writel(dw, reg, ((mask) << 8) | 0) + +static inline struct dw_dma *to_dw_dma(struct dma_device *ddev) +{ + return container_of(ddev, struct dw_dma, dma); +} + +/* LLI == Linked List Item; a.k.a. DMA block descriptor */ +struct dw_lli { + /* values that are not changed by hardware */ + u32 sar; + u32 dar; + u32 llp; /* chain to next lli */ + u32 ctllo; + /* values that may get written back: */ + u32 ctlhi; + /* sstat and dstat can snapshot peripheral register state. + * silicon config may discard either or both... 
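+ * Neither field is used by this driver.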
+ */ + u32 sstat; + u32 dstat; +}; + +struct dw_desc { + /* FIRST values the hardware uses */ + struct dw_lli lli; + + /* THEN values for driver housekeeping */ + struct list_head desc_node; + struct list_head tx_list; + struct dma_async_tx_descriptor txd; + size_t len; + size_t total_len; +}; + +#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) + +static inline struct dw_desc * +txd_to_dw_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct dw_desc, txd); +} diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c deleted file mode 100644 index 2b65ba614e60..000000000000 --- a/drivers/dma/dw_dmac.c +++ /dev/null @@ -1,1969 +0,0 @@ -/* - * Core driver for the Synopsys DesignWare DMA Controller - * - * Copyright (C) 2007-2008 Atmel Corporation - * Copyright (C) 2010-2011 ST Microelectronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "dw_dmac_regs.h" -#include "dmaengine.h" - -/* - * This supports the Synopsys "DesignWare AHB Central DMA Controller", - * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all - * of which use ARM any more). See the "Databook" from Synopsys for - * information beyond what licensees probably provide. - * - * The driver has currently been tested only with the Atmel AT32AP7000, - * which does not support descriptor writeback. - */ - -static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) -{ - return slave ? slave->dst_master : 0; -} - -static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) -{ - return slave ? slave->src_master : 1; -} - -static inline void dwc_set_masters(struct dw_dma_chan *dwc) -{ - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - struct dw_dma_slave *dws = dwc->chan.private; - unsigned char mmax = dw->nr_masters - 1; - - if (dwc->request_line == ~0) { - dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); - dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); - } -} - -#define DWC_DEFAULT_CTLLO(_chan) ({ \ - struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ - struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ - bool _is_slave = is_slave_direction(_dwc->direction); \ - u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ - DW_DMA_MSIZE_16; \ - u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ - DW_DMA_MSIZE_16; \ - \ - (DWC_CTLL_DST_MSIZE(_dmsize) \ - | DWC_CTLL_SRC_MSIZE(_smsize) \ - | DWC_CTLL_LLP_D_EN \ - | DWC_CTLL_LLP_S_EN \ - | DWC_CTLL_DMS(_dwc->dst_master) \ - | DWC_CTLL_SMS(_dwc->src_master)); \ - }) - -/* - * Number of descriptors to allocate for each channel. This should be - * made configurable somehow; preferably, the clients (at least the - * ones using slave transfers) should be able to give us a hint. 
- */ -#define NR_DESCS_PER_CHANNEL 64 - -/*----------------------------------------------------------------------*/ - -static struct device *chan2dev(struct dma_chan *chan) -{ - return &chan->dev->device; -} -static struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} - -static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) -{ - return to_dw_desc(dwc->active_list.next); -} - -static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) -{ - struct dw_desc *desc, *_desc; - struct dw_desc *ret = NULL; - unsigned int i = 0; - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { - i++; - if (async_tx_test_ack(&desc->txd)) { - list_del(&desc->desc_node); - ret = desc; - break; - } - dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); - } - spin_unlock_irqrestore(&dwc->lock, flags); - - dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); - - return ret; -} - -/* - * Move a descriptor, including any children, to the free list. - * `desc' must not be on any lists. - */ -static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) -{ - unsigned long flags; - - if (desc) { - struct dw_desc *child; - - spin_lock_irqsave(&dwc->lock, flags); - list_for_each_entry(child, &desc->tx_list, desc_node) - dev_vdbg(chan2dev(&dwc->chan), - "moving child desc %p to freelist\n", - child); - list_splice_init(&desc->tx_list, &dwc->free_list); - dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); - list_add(&desc->desc_node, &dwc->free_list); - spin_unlock_irqrestore(&dwc->lock, flags); - } -} - -static void dwc_initialize(struct dw_dma_chan *dwc) -{ - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - struct dw_dma_slave *dws = dwc->chan.private; - u32 cfghi = DWC_CFGH_FIFO_MODE; - u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); - - if (dwc->initialized == true) - return; - - if (dws) { - /* - * We need controller-specific data to set up slave - * transfers. - */ - BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); - - cfghi = dws->cfg_hi; - cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; - } else { - if (dwc->direction == DMA_MEM_TO_DEV) - cfghi = DWC_CFGH_DST_PER(dwc->request_line); - else if (dwc->direction == DMA_DEV_TO_MEM) - cfghi = DWC_CFGH_SRC_PER(dwc->request_line); - } - - channel_writel(dwc, CFG_LO, cfglo); - channel_writel(dwc, CFG_HI, cfghi); - - /* Enable interrupts */ - channel_set_bit(dw, MASK.XFER, dwc->mask); - channel_set_bit(dw, MASK.ERROR, dwc->mask); - - dwc->initialized = true; -} - -/*----------------------------------------------------------------------*/ - -static inline unsigned int dwc_fast_fls(unsigned long long v) -{ - /* - * We can be a lot more clever here, but this should take care - * of the most common optimization. 
- */ - if (!(v & 7)) - return 3; - else if (!(v & 3)) - return 2; - else if (!(v & 1)) - return 1; - return 0; -} - -static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) -{ - dev_err(chan2dev(&dwc->chan), - " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", - channel_readl(dwc, SAR), - channel_readl(dwc, DAR), - channel_readl(dwc, LLP), - channel_readl(dwc, CTL_HI), - channel_readl(dwc, CTL_LO)); -} - -static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) -{ - channel_clear_bit(dw, CH_EN, dwc->mask); - while (dma_readl(dw, CH_EN) & dwc->mask) - cpu_relax(); -} - -/*----------------------------------------------------------------------*/ - -/* Perform single block transfer */ -static inline void dwc_do_single_block(struct dw_dma_chan *dwc, - struct dw_desc *desc) -{ - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - u32 ctllo; - - /* Software emulation of LLP mode relies on interrupts to continue - * multi block transfer. */ - ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; - - channel_writel(dwc, SAR, desc->lli.sar); - channel_writel(dwc, DAR, desc->lli.dar); - channel_writel(dwc, CTL_LO, ctllo); - channel_writel(dwc, CTL_HI, desc->lli.ctlhi); - channel_set_bit(dw, CH_EN, dwc->mask); - - /* Move pointer to next descriptor */ - dwc->tx_node_active = dwc->tx_node_active->next; -} - -/* Called with dwc->lock held and bh disabled */ -static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) -{ - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - unsigned long was_soft_llp; - - /* ASSERT: channel is idle */ - if (dma_readl(dw, CH_EN) & dwc->mask) { - dev_err(chan2dev(&dwc->chan), - "BUG: Attempted to start non-idle channel\n"); - dwc_dump_chan_regs(dwc); - - /* The tasklet will hopefully advance the queue... 
*/ - return; - } - - if (dwc->nollp) { - was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, - &dwc->flags); - if (was_soft_llp) { - dev_err(chan2dev(&dwc->chan), - "BUG: Attempted to start new LLP transfer " - "inside ongoing one\n"); - return; - } - - dwc_initialize(dwc); - - dwc->residue = first->total_len; - dwc->tx_node_active = &first->tx_list; - - /* Submit first block */ - dwc_do_single_block(dwc, first); - - return; - } - - dwc_initialize(dwc); - - channel_writel(dwc, LLP, first->txd.phys); - channel_writel(dwc, CTL_LO, - DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); - channel_writel(dwc, CTL_HI, 0); - channel_set_bit(dw, CH_EN, dwc->mask); -} - -/*----------------------------------------------------------------------*/ - -static void -dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, - bool callback_required) -{ - dma_async_tx_callback callback = NULL; - void *param = NULL; - struct dma_async_tx_descriptor *txd = &desc->txd; - struct dw_desc *child; - unsigned long flags; - - dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); - - spin_lock_irqsave(&dwc->lock, flags); - dma_cookie_complete(txd); - if (callback_required) { - callback = txd->callback; - param = txd->callback_param; - } - - /* async_tx_ack */ - list_for_each_entry(child, &desc->tx_list, desc_node) - async_tx_ack(&child->txd); - async_tx_ack(&desc->txd); - - list_splice_init(&desc->tx_list, &dwc->free_list); - list_move(&desc->desc_node, &dwc->free_list); - - if (!is_slave_direction(dwc->direction)) { - struct device *parent = chan2parent(&dwc->chan); - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - else - dma_unmap_page(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - else - dma_unmap_page(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - } - } - - spin_unlock_irqrestore(&dwc->lock, flags); - - if (callback) - callback(param); -} - -static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) -{ - struct dw_desc *desc, *_desc; - LIST_HEAD(list); - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - if (dma_readl(dw, CH_EN) & dwc->mask) { - dev_err(chan2dev(&dwc->chan), - "BUG: XFER bit set, but channel not idle!\n"); - - /* Try to continue after resetting the channel... */ - dwc_chan_disable(dw, dwc); - } - - /* - * Submit queued descriptors ASAP, i.e. before we go through - * the completed ones. 
- */ - list_splice_init(&dwc->active_list, &list); - if (!list_empty(&dwc->queue)) { - list_move(dwc->queue.next, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } - - spin_unlock_irqrestore(&dwc->lock, flags); - - list_for_each_entry_safe(desc, _desc, &list, desc_node) - dwc_descriptor_complete(dwc, desc, true); -} - -/* Returns how many bytes were already received from source */ -static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) -{ - u32 ctlhi = channel_readl(dwc, CTL_HI); - u32 ctllo = channel_readl(dwc, CTL_LO); - - return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); -} - -static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) -{ - dma_addr_t llp; - struct dw_desc *desc, *_desc; - struct dw_desc *child; - u32 status_xfer; - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - llp = channel_readl(dwc, LLP); - status_xfer = dma_readl(dw, RAW.XFER); - - if (status_xfer & dwc->mask) { - /* Everything we've submitted is done */ - dma_writel(dw, CLEAR.XFER, dwc->mask); - - if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { - struct list_head *head, *active = dwc->tx_node_active; - - /* - * We are inside first active descriptor. - * Otherwise something is really wrong. - */ - desc = dwc_first_active(dwc); - - head = &desc->tx_list; - if (active != head) { - /* Update desc to reflect last sent one */ - if (active != head->next) - desc = to_dw_desc(active->prev); - - dwc->residue -= desc->len; - - child = to_dw_desc(active); - - /* Submit next block */ - dwc_do_single_block(dwc, child); - - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - - /* We are done here */ - clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); - } - - dwc->residue = 0; - - spin_unlock_irqrestore(&dwc->lock, flags); - - dwc_complete_all(dw, dwc); - return; - } - - if (list_empty(&dwc->active_list)) { - dwc->residue = 0; - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - - if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { - dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - - dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, - (unsigned long long)llp); - - list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { - /* Initial residue value */ - dwc->residue = desc->total_len; - - /* Check first descriptors addr */ - if (desc->txd.phys == llp) { - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - - /* Check first descriptors llp */ - if (desc->lli.llp == llp) { - /* This one is currently in progress */ - dwc->residue -= dwc_get_sent(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - - dwc->residue -= desc->len; - list_for_each_entry(child, &desc->tx_list, desc_node) { - if (child->lli.llp == llp) { - /* Currently in progress */ - dwc->residue -= dwc_get_sent(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - dwc->residue -= child->len; - } - - /* - * No descriptors so far seem to be in progress, i.e. - * this one must be done. - */ - spin_unlock_irqrestore(&dwc->lock, flags); - dwc_descriptor_complete(dwc, desc, true); - spin_lock_irqsave(&dwc->lock, flags); - } - - dev_err(chan2dev(&dwc->chan), - "BUG: All descriptors done, but channel not idle!\n"); - - /* Try to continue after resetting the channel... 
*/ - dwc_chan_disable(dw, dwc); - - if (!list_empty(&dwc->queue)) { - list_move(dwc->queue.next, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } - spin_unlock_irqrestore(&dwc->lock, flags); -} - -static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) -{ - dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", - lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); -} - -static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) -{ - struct dw_desc *bad_desc; - struct dw_desc *child; - unsigned long flags; - - dwc_scan_descriptors(dw, dwc); - - spin_lock_irqsave(&dwc->lock, flags); - - /* - * The descriptor currently at the head of the active list is - * borked. Since we don't have any way to report errors, we'll - * just have to scream loudly and try to carry on. - */ - bad_desc = dwc_first_active(dwc); - list_del_init(&bad_desc->desc_node); - list_move(dwc->queue.next, dwc->active_list.prev); - - /* Clear the error flag and try to restart the controller */ - dma_writel(dw, CLEAR.ERROR, dwc->mask); - if (!list_empty(&dwc->active_list)) - dwc_dostart(dwc, dwc_first_active(dwc)); - - /* - * WARN may seem harsh, but since this only happens - * when someone submits a bad physical address in a - * descriptor, we should consider ourselves lucky that the - * controller flagged an error instead of scribbling over - * random memory locations. - */ - dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" - " cookie: %d\n", bad_desc->txd.cookie); - dwc_dump_lli(dwc, &bad_desc->lli); - list_for_each_entry(child, &bad_desc->tx_list, desc_node) - dwc_dump_lli(dwc, &child->lli); - - spin_unlock_irqrestore(&dwc->lock, flags); - - /* Pretend the descriptor completed successfully */ - dwc_descriptor_complete(dwc, bad_desc, true); -} - -/* --------------------- Cyclic DMA API extensions -------------------- */ - -dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - return channel_readl(dwc, SAR); -} -EXPORT_SYMBOL(dw_dma_get_src_addr); - -dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - return channel_readl(dwc, DAR); -} -EXPORT_SYMBOL(dw_dma_get_dst_addr); - -/* Called with dwc->lock held and all DMAC interrupts disabled */ -static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, - u32 status_err, u32 status_xfer) -{ - unsigned long flags; - - if (dwc->mask) { - void (*callback)(void *param); - void *callback_param; - - dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", - channel_readl(dwc, LLP)); - - callback = dwc->cdesc->period_callback; - callback_param = dwc->cdesc->period_callback_param; - - if (callback) - callback(callback_param); - } - - /* - * Error and transfer complete are highly unlikely, and will most - * likely be due to a configuration error by the user. - */ - if (unlikely(status_err & dwc->mask) || - unlikely(status_xfer & dwc->mask)) { - int i; - - dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " - "interrupt, stopping DMA transfer\n", - status_xfer ? 
"xfer" : "error"); - - spin_lock_irqsave(&dwc->lock, flags); - - dwc_dump_chan_regs(dwc); - - dwc_chan_disable(dw, dwc); - - /* Make sure DMA does not restart by loading a new list */ - channel_writel(dwc, LLP, 0); - channel_writel(dwc, CTL_LO, 0); - channel_writel(dwc, CTL_HI, 0); - - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - - for (i = 0; i < dwc->cdesc->periods; i++) - dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); - - spin_unlock_irqrestore(&dwc->lock, flags); - } -} - -/* ------------------------------------------------------------------------- */ - -static void dw_dma_tasklet(unsigned long data) -{ - struct dw_dma *dw = (struct dw_dma *)data; - struct dw_dma_chan *dwc; - u32 status_xfer; - u32 status_err; - int i; - - status_xfer = dma_readl(dw, RAW.XFER); - status_err = dma_readl(dw, RAW.ERROR); - - dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); - - for (i = 0; i < dw->dma.chancnt; i++) { - dwc = &dw->chan[i]; - if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) - dwc_handle_cyclic(dw, dwc, status_err, status_xfer); - else if (status_err & (1 << i)) - dwc_handle_error(dw, dwc); - else if (status_xfer & (1 << i)) - dwc_scan_descriptors(dw, dwc); - } - - /* - * Re-enable interrupts. - */ - channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); - channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); -} - -static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) -{ - struct dw_dma *dw = dev_id; - u32 status; - - dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, - dma_readl(dw, STATUS_INT)); - - /* - * Just disable the interrupts. We'll turn them back on in the - * softirq handler. - */ - channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); - channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); - - status = dma_readl(dw, STATUS_INT); - if (status) { - dev_err(dw->dma.dev, - "BUG: Unexpected interrupts pending: 0x%x\n", - status); - - /* Try to recover */ - channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); - channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); - channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); - channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); - } - - tasklet_schedule(&dw->tasklet); - - return IRQ_HANDLED; -} - -/*----------------------------------------------------------------------*/ - -static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct dw_desc *desc = txd_to_dw_desc(tx); - struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); - dma_cookie_t cookie; - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - cookie = dma_cookie_assign(tx); - - /* - * REVISIT: We should attempt to chain as many descriptors as - * possible, perhaps even appending to those already submitted - * for DMA. But this is hard to do in a race-free manner. 
- */ - if (list_empty(&dwc->active_list)) { - dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, - desc->txd.cookie); - list_add_tail(&desc->desc_node, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } else { - dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, - desc->txd.cookie); - - list_add_tail(&desc->desc_node, &dwc->queue); - } - - spin_unlock_irqrestore(&dwc->lock, flags); - - return cookie; -} - -static struct dma_async_tx_descriptor * -dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, - size_t len, unsigned long flags) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_desc *desc; - struct dw_desc *first; - struct dw_desc *prev; - size_t xfer_count; - size_t offset; - unsigned int src_width; - unsigned int dst_width; - unsigned int data_width; - u32 ctllo; - - dev_vdbg(chan2dev(chan), - "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, - (unsigned long long)dest, (unsigned long long)src, - len, flags); - - if (unlikely(!len)) { - dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); - return NULL; - } - - dwc->direction = DMA_MEM_TO_MEM; - - data_width = min_t(unsigned int, dw->data_width[dwc->src_master], - dw->data_width[dwc->dst_master]); - - src_width = dst_width = min_t(unsigned int, data_width, - dwc_fast_fls(src | dest | len)); - - ctllo = DWC_DEFAULT_CTLLO(chan) - | DWC_CTLL_DST_WIDTH(dst_width) - | DWC_CTLL_SRC_WIDTH(src_width) - | DWC_CTLL_DST_INC - | DWC_CTLL_SRC_INC - | DWC_CTLL_FC_M2M; - prev = first = NULL; - - for (offset = 0; offset < len; offset += xfer_count << src_width) { - xfer_count = min_t(size_t, (len - offset) >> src_width, - dwc->block_size); - - desc = dwc_desc_get(dwc); - if (!desc) - goto err_desc_get; - - desc->lli.sar = src + offset; - desc->lli.dar = dest + offset; - desc->lli.ctllo = ctllo; - desc->lli.ctlhi = xfer_count; - desc->len = xfer_count << src_width; - - if (!first) { - first = desc; - } else { - prev->lli.llp = desc->txd.phys; - list_add_tail(&desc->desc_node, - &first->tx_list); - } - prev = desc; - } - - if (flags & DMA_PREP_INTERRUPT) - /* Trigger interrupt after last block */ - prev->lli.ctllo |= DWC_CTLL_INT_EN; - - prev->lli.llp = 0; - first->txd.flags = flags; - first->total_len = len; - - return &first->txd; - -err_desc_get: - dwc_desc_put(dwc, first); - return NULL; -} - -static struct dma_async_tx_descriptor * -dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dma_slave_config *sconfig = &dwc->dma_sconfig; - struct dw_desc *prev; - struct dw_desc *first; - u32 ctllo; - dma_addr_t reg; - unsigned int reg_width; - unsigned int mem_width; - unsigned int data_width; - unsigned int i; - struct scatterlist *sg; - size_t total_len = 0; - - dev_vdbg(chan2dev(chan), "%s\n", __func__); - - if (unlikely(!is_slave_direction(direction) || !sg_len)) - return NULL; - - dwc->direction = direction; - - prev = first = NULL; - - switch (direction) { - case DMA_MEM_TO_DEV: - reg_width = __fls(sconfig->dst_addr_width); - reg = sconfig->dst_addr; - ctllo = (DWC_DEFAULT_CTLLO(chan) - | DWC_CTLL_DST_WIDTH(reg_width) - | DWC_CTLL_DST_FIX - | DWC_CTLL_SRC_INC); - - ctllo |= sconfig->device_fc ? 
DWC_CTLL_FC(DW_DMA_FC_P_M2P) : - DWC_CTLL_FC(DW_DMA_FC_D_M2P); - - data_width = dw->data_width[dwc->src_master]; - - for_each_sg(sgl, sg, sg_len, i) { - struct dw_desc *desc; - u32 len, dlen, mem; - - mem = sg_dma_address(sg); - len = sg_dma_len(sg); - - mem_width = min_t(unsigned int, - data_width, dwc_fast_fls(mem | len)); - -slave_sg_todev_fill_desc: - desc = dwc_desc_get(dwc); - if (!desc) { - dev_err(chan2dev(chan), - "not enough descriptors available\n"); - goto err_desc_get; - } - - desc->lli.sar = mem; - desc->lli.dar = reg; - desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); - if ((len >> mem_width) > dwc->block_size) { - dlen = dwc->block_size << mem_width; - mem += dlen; - len -= dlen; - } else { - dlen = len; - len = 0; - } - - desc->lli.ctlhi = dlen >> mem_width; - desc->len = dlen; - - if (!first) { - first = desc; - } else { - prev->lli.llp = desc->txd.phys; - list_add_tail(&desc->desc_node, - &first->tx_list); - } - prev = desc; - total_len += dlen; - - if (len) - goto slave_sg_todev_fill_desc; - } - break; - case DMA_DEV_TO_MEM: - reg_width = __fls(sconfig->src_addr_width); - reg = sconfig->src_addr; - ctllo = (DWC_DEFAULT_CTLLO(chan) - | DWC_CTLL_SRC_WIDTH(reg_width) - | DWC_CTLL_DST_INC - | DWC_CTLL_SRC_FIX); - - ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : - DWC_CTLL_FC(DW_DMA_FC_D_P2M); - - data_width = dw->data_width[dwc->dst_master]; - - for_each_sg(sgl, sg, sg_len, i) { - struct dw_desc *desc; - u32 len, dlen, mem; - - mem = sg_dma_address(sg); - len = sg_dma_len(sg); - - mem_width = min_t(unsigned int, - data_width, dwc_fast_fls(mem | len)); - -slave_sg_fromdev_fill_desc: - desc = dwc_desc_get(dwc); - if (!desc) { - dev_err(chan2dev(chan), - "not enough descriptors available\n"); - goto err_desc_get; - } - - desc->lli.sar = reg; - desc->lli.dar = mem; - desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); - if ((len >> reg_width) > dwc->block_size) { - dlen = dwc->block_size << reg_width; - mem += dlen; - len -= dlen; - } else { - dlen = len; - len = 0; - } - desc->lli.ctlhi = dlen >> reg_width; - desc->len = dlen; - - if (!first) { - first = desc; - } else { - prev->lli.llp = desc->txd.phys; - list_add_tail(&desc->desc_node, - &first->tx_list); - } - prev = desc; - total_len += dlen; - - if (len) - goto slave_sg_fromdev_fill_desc; - } - break; - default: - return NULL; - } - - if (flags & DMA_PREP_INTERRUPT) - /* Trigger interrupt after last block */ - prev->lli.ctllo |= DWC_CTLL_INT_EN; - - prev->lli.llp = 0; - first->total_len = total_len; - - return &first->txd; - -err_desc_get: - dwc_desc_put(dwc, first); - return NULL; -} - -/* - * Fix sconfig's burst size according to dw_dmac. We need to convert them as: - * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. - * - * NOTE: burst size 2 is not supported by controller. 
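For reference, a minimal standalone sketch (compilable outside the kernel, with a local stand-in for fls()) of the burst-size encoding described above; the convert_burst() helper that follows applies the same mapping to the slave config values:

#include <stdio.h>

/* Illustration only: the 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3 encoding is
 * fls(maxburst) - 2 for values above 1, using the kernel convention
 * that fls(1) == 1. */
static unsigned int fls_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v) {
		r++;
		v >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int bursts[] = { 1, 4, 8, 16 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int b = bursts[i];
		unsigned int enc = b > 1 ? fls_u32(b) - 2 : 0;

		printf("maxburst %2u -> CTL msize encoding %u\n", b, enc);
	}
	return 0;
}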
- * - * This can be done by finding least significant bit set: n & (n - 1) - */ -static inline void convert_burst(u32 *maxburst) -{ - if (*maxburst > 1) - *maxburst = fls(*maxburst) - 2; - else - *maxburst = 0; -} - -static int -set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - - /* Check if chan will be configured for slave transfers */ - if (!is_slave_direction(sconfig->direction)) - return -EINVAL; - - memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); - dwc->direction = sconfig->direction; - - /* Take the request line from slave_id member */ - if (dwc->request_line == ~0) - dwc->request_line = sconfig->slave_id; - - convert_burst(&dwc->dma_sconfig.src_maxburst); - convert_burst(&dwc->dma_sconfig.dst_maxburst); - - return 0; -} - -static inline void dwc_chan_pause(struct dw_dma_chan *dwc) -{ - u32 cfglo = channel_readl(dwc, CFG_LO); - unsigned int count = 20; /* timeout iterations */ - - channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); - while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) - udelay(2); - - dwc->paused = true; -} - -static inline void dwc_chan_resume(struct dw_dma_chan *dwc) -{ - u32 cfglo = channel_readl(dwc, CFG_LO); - - channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); - - dwc->paused = false; -} - -static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_desc *desc, *_desc; - unsigned long flags; - LIST_HEAD(list); - - if (cmd == DMA_PAUSE) { - spin_lock_irqsave(&dwc->lock, flags); - - dwc_chan_pause(dwc); - - spin_unlock_irqrestore(&dwc->lock, flags); - } else if (cmd == DMA_RESUME) { - if (!dwc->paused) - return 0; - - spin_lock_irqsave(&dwc->lock, flags); - - dwc_chan_resume(dwc); - - spin_unlock_irqrestore(&dwc->lock, flags); - } else if (cmd == DMA_TERMINATE_ALL) { - spin_lock_irqsave(&dwc->lock, flags); - - clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); - - dwc_chan_disable(dw, dwc); - - dwc_chan_resume(dwc); - - /* active_list entries will end up before queued entries */ - list_splice_init(&dwc->queue, &list); - list_splice_init(&dwc->active_list, &list); - - spin_unlock_irqrestore(&dwc->lock, flags); - - /* Flush all pending and queued descriptors */ - list_for_each_entry_safe(desc, _desc, &list, desc_node) - dwc_descriptor_complete(dwc, desc, false); - } else if (cmd == DMA_SLAVE_CONFIG) { - return set_runtime_config(chan, (struct dma_slave_config *)arg); - } else { - return -ENXIO; - } - - return 0; -} - -static inline u32 dwc_get_residue(struct dw_dma_chan *dwc) -{ - unsigned long flags; - u32 residue; - - spin_lock_irqsave(&dwc->lock, flags); - - residue = dwc->residue; - if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) - residue -= dwc_get_sent(dwc); - - spin_unlock_irqrestore(&dwc->lock, flags); - return residue; -} - -static enum dma_status -dwc_tx_status(struct dma_chan *chan, - dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - enum dma_status ret; - - ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { - dwc_scan_descriptors(to_dw_dma(chan->device), dwc); - - ret = dma_cookie_status(chan, cookie, txstate); - } - - if (ret != DMA_SUCCESS) - dma_set_residue(txstate, dwc_get_residue(dwc)); - - if (dwc->paused) - return DMA_PAUSED; - - return ret; -} - -static void dwc_issue_pending(struct dma_chan *chan) -{ - 
struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - - if (!list_empty(&dwc->queue)) - dwc_scan_descriptors(to_dw_dma(chan->device), dwc); -} - -static int dwc_alloc_chan_resources(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_desc *desc; - int i; - unsigned long flags; - - dev_vdbg(chan2dev(chan), "%s\n", __func__); - - /* ASSERT: channel is idle */ - if (dma_readl(dw, CH_EN) & dwc->mask) { - dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); - return -EIO; - } - - dma_cookie_init(chan); - - /* - * NOTE: some controllers may have additional features that we - * need to initialize here, like "scatter-gather" (which - * doesn't mean what you think it means), and status writeback. - */ - - dwc_set_masters(dwc); - - spin_lock_irqsave(&dwc->lock, flags); - i = dwc->descs_allocated; - while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { - dma_addr_t phys; - - spin_unlock_irqrestore(&dwc->lock, flags); - - desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); - if (!desc) - goto err_desc_alloc; - - memset(desc, 0, sizeof(struct dw_desc)); - - INIT_LIST_HEAD(&desc->tx_list); - dma_async_tx_descriptor_init(&desc->txd, chan); - desc->txd.tx_submit = dwc_tx_submit; - desc->txd.flags = DMA_CTRL_ACK; - desc->txd.phys = phys; - - dwc_desc_put(dwc, desc); - - spin_lock_irqsave(&dwc->lock, flags); - i = ++dwc->descs_allocated; - } - - spin_unlock_irqrestore(&dwc->lock, flags); - - dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); - - return i; - -err_desc_alloc: - dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); - - return i; -} - -static void dwc_free_chan_resources(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_desc *desc, *_desc; - unsigned long flags; - LIST_HEAD(list); - - dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, - dwc->descs_allocated); - - /* ASSERT: channel is idle */ - BUG_ON(!list_empty(&dwc->active_list)); - BUG_ON(!list_empty(&dwc->queue)); - BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); - - spin_lock_irqsave(&dwc->lock, flags); - list_splice_init(&dwc->free_list, &list); - dwc->descs_allocated = 0; - dwc->initialized = false; - dwc->request_line = ~0; - - /* Disable interrupts */ - channel_clear_bit(dw, MASK.XFER, dwc->mask); - channel_clear_bit(dw, MASK.ERROR, dwc->mask); - - spin_unlock_irqrestore(&dwc->lock, flags); - - list_for_each_entry_safe(desc, _desc, &list, desc_node) { - dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); - dma_pool_free(dw->desc_pool, desc, desc->txd.phys); - } - - dev_vdbg(chan2dev(chan), "%s: done\n", __func__); -} - -/*----------------------------------------------------------------------*/ - -struct dw_dma_of_filter_args { - struct dw_dma *dw; - unsigned int req; - unsigned int src; - unsigned int dst; -}; - -static bool dw_dma_of_filter(struct dma_chan *chan, void *param) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma_of_filter_args *fargs = param; - - /* Ensure the device matches our channel */ - if (chan->device != &fargs->dw->dma) - return false; - - dwc->request_line = fargs->req; - dwc->src_master = fargs->src; - dwc->dst_master = fargs->dst; - - return true; -} - -static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, - struct of_dma *ofdma) -{ - struct dw_dma *dw = ofdma->of_dma_data; - struct dw_dma_of_filter_args fargs = { - .dw = dw, - }; - 
dma_cap_mask_t cap; - - if (dma_spec->args_count != 3) - return NULL; - - fargs.req = dma_spec->args[0]; - fargs.src = dma_spec->args[1]; - fargs.dst = dma_spec->args[2]; - - if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || - fargs.src >= dw->nr_masters || - fargs.dst >= dw->nr_masters)) - return NULL; - - dma_cap_zero(cap); - dma_cap_set(DMA_SLAVE, cap); - - /* TODO: there should be a simpler way to do this */ - return dma_request_channel(cap, dw_dma_of_filter, &fargs); -} - -#ifdef CONFIG_ACPI -static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct acpi_dma_spec *dma_spec = param; - - if (chan->device->dev != dma_spec->dev || - chan->chan_id != dma_spec->chan_id) - return false; - - dwc->request_line = dma_spec->slave_id; - dwc->src_master = dwc_get_sms(NULL); - dwc->dst_master = dwc_get_dms(NULL); - - return true; -} - -static void dw_dma_acpi_controller_register(struct dw_dma *dw) -{ - struct device *dev = dw->dma.dev; - struct acpi_dma_filter_info *info; - int ret; - - info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); - if (!info) - return; - - dma_cap_zero(info->dma_cap); - dma_cap_set(DMA_SLAVE, info->dma_cap); - info->filter_fn = dw_dma_acpi_filter; - - ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, - info); - if (ret) - dev_err(dev, "could not register acpi_dma_controller\n"); -} -#else /* !CONFIG_ACPI */ -static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {} -#endif /* !CONFIG_ACPI */ - -/* --------------------- Cyclic DMA API extensions -------------------- */ - -/** - * dw_dma_cyclic_start - start the cyclic DMA transfer - * @chan: the DMA channel to start - * - * Must be called with soft interrupts disabled. Returns zero on success or - * -errno on failure. - */ -int dw_dma_cyclic_start(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - unsigned long flags; - - if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { - dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); - return -ENODEV; - } - - spin_lock_irqsave(&dwc->lock, flags); - - /* Assert channel is idle */ - if (dma_readl(dw, CH_EN) & dwc->mask) { - dev_err(chan2dev(&dwc->chan), - "BUG: Attempted to start non-idle channel\n"); - dwc_dump_chan_regs(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); - return -EBUSY; - } - - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - - /* Setup DMAC channel registers */ - channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); - channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); - channel_writel(dwc, CTL_HI, 0); - - channel_set_bit(dw, CH_EN, dwc->mask); - - spin_unlock_irqrestore(&dwc->lock, flags); - - return 0; -} -EXPORT_SYMBOL(dw_dma_cyclic_start); - -/** - * dw_dma_cyclic_stop - stop the cyclic DMA transfer - * @chan: the DMA channel to stop - * - * Must be called with soft interrupts disabled. 
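Below is a hedged usage sketch of the cyclic API implemented in this file (dw_dma_cyclic_prep(), dw_dma_cyclic_start(), and the stop/free pair); the channel, buffer and period arguments are hypothetical placeholders, the channel is assumed to already be configured through dmaengine_slave_config(), and the declarations lived in linux/dw_dmac.h at the time of this patch:

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/dw_dmac.h>

static int example_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	/* Ring the peripheral RX data into 'buf', one period per block */
	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_DEV_TO_MEM);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	return dw_dma_cyclic_start(chan);
}

static void example_stop_cyclic_rx(struct dma_chan *chan)
{
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);
}

A per-period notification can also be requested by setting period_callback on the returned descriptor before starting.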
- */ -void dw_dma_cyclic_stop(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - - dwc_chan_disable(dw, dwc); - - spin_unlock_irqrestore(&dwc->lock, flags); -} -EXPORT_SYMBOL(dw_dma_cyclic_stop); - -/** - * dw_dma_cyclic_prep - prepare the cyclic DMA transfer - * @chan: the DMA channel to prepare - * @buf_addr: physical DMA address where the buffer starts - * @buf_len: total number of bytes for the entire buffer - * @period_len: number of bytes for each period - * @direction: transfer direction, to or from device - * - * Must be called before trying to start the transfer. Returns a valid struct - * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. - */ -struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, - dma_addr_t buf_addr, size_t buf_len, size_t period_len, - enum dma_transfer_direction direction) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dma_slave_config *sconfig = &dwc->dma_sconfig; - struct dw_cyclic_desc *cdesc; - struct dw_cyclic_desc *retval = NULL; - struct dw_desc *desc; - struct dw_desc *last = NULL; - unsigned long was_cyclic; - unsigned int reg_width; - unsigned int periods; - unsigned int i; - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - if (dwc->nollp) { - spin_unlock_irqrestore(&dwc->lock, flags); - dev_dbg(chan2dev(&dwc->chan), - "channel doesn't support LLP transfers\n"); - return ERR_PTR(-EINVAL); - } - - if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { - spin_unlock_irqrestore(&dwc->lock, flags); - dev_dbg(chan2dev(&dwc->chan), - "queue and/or active list are not empty\n"); - return ERR_PTR(-EBUSY); - } - - was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); - spin_unlock_irqrestore(&dwc->lock, flags); - if (was_cyclic) { - dev_dbg(chan2dev(&dwc->chan), - "channel already prepared for cyclic DMA\n"); - return ERR_PTR(-EBUSY); - } - - retval = ERR_PTR(-EINVAL); - - if (unlikely(!is_slave_direction(direction))) - goto out_err; - - dwc->direction = direction; - - if (direction == DMA_MEM_TO_DEV) - reg_width = __ffs(sconfig->dst_addr_width); - else - reg_width = __ffs(sconfig->src_addr_width); - - periods = buf_len / period_len; - - /* Check for too big/unaligned periods and unaligned DMA buffer. */ - if (period_len > (dwc->block_size << reg_width)) - goto out_err; - if (unlikely(period_len & ((1 << reg_width) - 1))) - goto out_err; - if (unlikely(buf_addr & ((1 << reg_width) - 1))) - goto out_err; - - retval = ERR_PTR(-ENOMEM); - - if (periods > NR_DESCS_PER_CHANNEL) - goto out_err; - - cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); - if (!cdesc) - goto out_err; - - cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); - if (!cdesc->desc) - goto out_err_alloc; - - for (i = 0; i < periods; i++) { - desc = dwc_desc_get(dwc); - if (!desc) - goto out_err_desc_get; - - switch (direction) { - case DMA_MEM_TO_DEV: - desc->lli.dar = sconfig->dst_addr; - desc->lli.sar = buf_addr + (period_len * i); - desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) - | DWC_CTLL_DST_WIDTH(reg_width) - | DWC_CTLL_SRC_WIDTH(reg_width) - | DWC_CTLL_DST_FIX - | DWC_CTLL_SRC_INC - | DWC_CTLL_INT_EN); - - desc->lli.ctllo |= sconfig->device_fc ? 
- DWC_CTLL_FC(DW_DMA_FC_P_M2P) : - DWC_CTLL_FC(DW_DMA_FC_D_M2P); - - break; - case DMA_DEV_TO_MEM: - desc->lli.dar = buf_addr + (period_len * i); - desc->lli.sar = sconfig->src_addr; - desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) - | DWC_CTLL_SRC_WIDTH(reg_width) - | DWC_CTLL_DST_WIDTH(reg_width) - | DWC_CTLL_DST_INC - | DWC_CTLL_SRC_FIX - | DWC_CTLL_INT_EN); - - desc->lli.ctllo |= sconfig->device_fc ? - DWC_CTLL_FC(DW_DMA_FC_P_P2M) : - DWC_CTLL_FC(DW_DMA_FC_D_P2M); - - break; - default: - break; - } - - desc->lli.ctlhi = (period_len >> reg_width); - cdesc->desc[i] = desc; - - if (last) - last->lli.llp = desc->txd.phys; - - last = desc; - } - - /* Let's make a cyclic list */ - last->lli.llp = cdesc->desc[0]->txd.phys; - - dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " - "period %zu periods %d\n", (unsigned long long)buf_addr, - buf_len, period_len, periods); - - cdesc->periods = periods; - dwc->cdesc = cdesc; - - return cdesc; - -out_err_desc_get: - while (i--) - dwc_desc_put(dwc, cdesc->desc[i]); -out_err_alloc: - kfree(cdesc); -out_err: - clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); - return (struct dw_cyclic_desc *)retval; -} -EXPORT_SYMBOL(dw_dma_cyclic_prep); - -/** - * dw_dma_cyclic_free - free a prepared cyclic DMA transfer - * @chan: the DMA channel to free - */ -void dw_dma_cyclic_free(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - struct dw_cyclic_desc *cdesc = dwc->cdesc; - int i; - unsigned long flags; - - dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); - - if (!cdesc) - return; - - spin_lock_irqsave(&dwc->lock, flags); - - dwc_chan_disable(dw, dwc); - - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - - spin_unlock_irqrestore(&dwc->lock, flags); - - for (i = 0; i < cdesc->periods; i++) - dwc_desc_put(dwc, cdesc->desc[i]); - - kfree(cdesc->desc); - kfree(cdesc); - - clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); -} -EXPORT_SYMBOL(dw_dma_cyclic_free); - -/*----------------------------------------------------------------------*/ - -static void dw_dma_off(struct dw_dma *dw) -{ - int i; - - dma_writel(dw, CFG, 0); - - channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); - channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); - channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); - channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); - - while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) - cpu_relax(); - - for (i = 0; i < dw->dma.chancnt; i++) - dw->chan[i].initialized = false; -} - -#ifdef CONFIG_OF -static struct dw_dma_platform_data * -dw_dma_parse_dt(struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - struct dw_dma_platform_data *pdata; - u32 tmp, arr[4]; - - if (!np) { - dev_err(&pdev->dev, "Missing DT data\n"); - return NULL; - } - - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return NULL; - - if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels)) - return NULL; - - if (of_property_read_bool(np, "is_private")) - pdata->is_private = true; - - if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) - pdata->chan_allocation_order = (unsigned char)tmp; - - if (!of_property_read_u32(np, "chan_priority", &tmp)) - pdata->chan_priority = tmp; - - if (!of_property_read_u32(np, "block_size", &tmp)) - pdata->block_size = tmp; - - if (!of_property_read_u32(np, "dma-masters", &tmp)) { - if (tmp > 4) - return NULL; - - pdata->nr_masters = tmp; - } - - if 
(!of_property_read_u32_array(np, "data_width", arr, - pdata->nr_masters)) - for (tmp = 0; tmp < pdata->nr_masters; tmp++) - pdata->data_width[tmp] = arr[tmp]; - - return pdata; -} -#else -static inline struct dw_dma_platform_data * -dw_dma_parse_dt(struct platform_device *pdev) -{ - return NULL; -} -#endif - -static int dw_probe(struct platform_device *pdev) -{ - struct dw_dma_platform_data *pdata; - struct resource *io; - struct dw_dma *dw; - size_t size; - void __iomem *regs; - bool autocfg; - unsigned int dw_params; - unsigned int nr_channels; - unsigned int max_blk_size = 0; - int irq; - int err; - int i; - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(&pdev->dev, io); - if (IS_ERR(regs)) - return PTR_ERR(regs); - - /* Apply default dma_mask if needed */ - if (!pdev->dev.dma_mask) { - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - } - - dw_params = dma_read_byaddr(regs, DW_PARAMS); - autocfg = dw_params >> DW_PARAMS_EN & 0x1; - - dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params); - - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) - pdata = dw_dma_parse_dt(pdev); - - if (!pdata && autocfg) { - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - /* Fill platform data with the default values */ - pdata->is_private = true; - pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; - pdata->chan_priority = CHAN_PRIORITY_ASCENDING; - } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) - return -EINVAL; - - if (autocfg) - nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; - else - nr_channels = pdata->nr_channels; - - size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); - dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!dw) - return -ENOMEM; - - dw->clk = devm_clk_get(&pdev->dev, "hclk"); - if (IS_ERR(dw->clk)) - return PTR_ERR(dw->clk); - clk_prepare_enable(dw->clk); - - dw->regs = regs; - - /* Get hardware configuration parameters */ - if (autocfg) { - max_blk_size = dma_readl(dw, MAX_BLK_SIZE); - - dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; - for (i = 0; i < dw->nr_masters; i++) { - dw->data_width[i] = - (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; - } - } else { - dw->nr_masters = pdata->nr_masters; - memcpy(dw->data_width, pdata->data_width, 4); - } - - /* Calculate all channel mask before DMA setup */ - dw->all_chan_mask = (1 << nr_channels) - 1; - - /* Force dma off, just in case */ - dw_dma_off(dw); - - /* Disable BLOCK interrupts as well */ - channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); - - err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, - "dw_dmac", dw); - if (err) - return err; - - platform_set_drvdata(pdev, dw); - - /* Create a pool of consistent memory blocks for hardware descriptors */ - dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, - sizeof(struct dw_desc), 4, 0); - if (!dw->desc_pool) { - dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); - return -ENOMEM; - } - - tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); - - INIT_LIST_HEAD(&dw->dma.channels); - for (i = 0; i < nr_channels; i++) { - struct dw_dma_chan *dwc = &dw->chan[i]; - int r = nr_channels - i - 1; - - dwc->chan.device = &dw->dma; - dma_cookie_init(&dwc->chan); - if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) - list_add_tail(&dwc->chan.device_node, - 
&dw->dma.channels); - else - list_add(&dwc->chan.device_node, &dw->dma.channels); - - /* 7 is highest priority & 0 is lowest. */ - if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) - dwc->priority = r; - else - dwc->priority = i; - - dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; - spin_lock_init(&dwc->lock); - dwc->mask = 1 << i; - - INIT_LIST_HEAD(&dwc->active_list); - INIT_LIST_HEAD(&dwc->queue); - INIT_LIST_HEAD(&dwc->free_list); - - channel_clear_bit(dw, CH_EN, dwc->mask); - - dwc->direction = DMA_TRANS_NONE; - dwc->request_line = ~0; - - /* Hardware configuration */ - if (autocfg) { - unsigned int dwc_params; - - dwc_params = dma_read_byaddr(regs + r * sizeof(u32), - DWC_PARAMS); - - dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, - dwc_params); - - /* Decode maximum block size for given channel. The - * stored 4 bit value represents blocks from 0x00 for 3 - * up to 0x0a for 4095. */ - dwc->block_size = - (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; - dwc->nollp = - (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; - } else { - dwc->block_size = pdata->block_size; - - /* Check if channel supports multi block transfer */ - channel_writel(dwc, LLP, 0xfffffffc); - dwc->nollp = - (channel_readl(dwc, LLP) & 0xfffffffc) == 0; - channel_writel(dwc, LLP, 0); - } - } - - /* Clear all interrupts on all channels. */ - dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); - dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); - dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); - dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); - dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); - - dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); - dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); - if (pdata->is_private) - dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); - dw->dma.dev = &pdev->dev; - dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; - dw->dma.device_free_chan_resources = dwc_free_chan_resources; - - dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; - - dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; - dw->dma.device_control = dwc_control; - - dw->dma.device_tx_status = dwc_tx_status; - dw->dma.device_issue_pending = dwc_issue_pending; - - dma_writel(dw, CFG, DW_CFG_DMA_EN); - - dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n", - nr_channels); - - dma_async_device_register(&dw->dma); - - if (pdev->dev.of_node) { - err = of_dma_controller_register(pdev->dev.of_node, - dw_dma_of_xlate, dw); - if (err) - dev_err(&pdev->dev, - "could not register of_dma_controller\n"); - } - - if (ACPI_HANDLE(&pdev->dev)) - dw_dma_acpi_controller_register(dw); - - return 0; -} - -static int dw_remove(struct platform_device *pdev) -{ - struct dw_dma *dw = platform_get_drvdata(pdev); - struct dw_dma_chan *dwc, *_dwc; - - if (pdev->dev.of_node) - of_dma_controller_free(pdev->dev.of_node); - dw_dma_off(dw); - dma_async_device_unregister(&dw->dma); - - tasklet_kill(&dw->tasklet); - - list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, - chan.device_node) { - list_del(&dwc->chan.device_node); - channel_clear_bit(dw, CH_EN, dwc->mask); - } - - return 0; -} - -static void dw_shutdown(struct platform_device *pdev) -{ - struct dw_dma *dw = platform_get_drvdata(pdev); - - dw_dma_off(dw); - clk_disable_unprepare(dw->clk); -} - -static int dw_suspend_noirq(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct dw_dma *dw = platform_get_drvdata(pdev); - - dw_dma_off(dw); - clk_disable_unprepare(dw->clk); - - return 0; -} - -static int dw_resume_noirq(struct device *dev) -{ - struct 
platform_device *pdev = to_platform_device(dev); - struct dw_dma *dw = platform_get_drvdata(pdev); - - clk_prepare_enable(dw->clk); - dma_writel(dw, CFG, DW_CFG_DMA_EN); - - return 0; -} - -static const struct dev_pm_ops dw_dev_pm_ops = { - .suspend_noirq = dw_suspend_noirq, - .resume_noirq = dw_resume_noirq, - .freeze_noirq = dw_suspend_noirq, - .thaw_noirq = dw_resume_noirq, - .restore_noirq = dw_resume_noirq, - .poweroff_noirq = dw_suspend_noirq, -}; - -#ifdef CONFIG_OF -static const struct of_device_id dw_dma_of_id_table[] = { - { .compatible = "snps,dma-spear1340" }, - {} -}; -MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); -#endif - -#ifdef CONFIG_ACPI -static const struct acpi_device_id dw_dma_acpi_id_table[] = { - { "INTL9C60", 0 }, - { } -}; -#endif - -static struct platform_driver dw_driver = { - .probe = dw_probe, - .remove = dw_remove, - .shutdown = dw_shutdown, - .driver = { - .name = "dw_dmac", - .pm = &dw_dev_pm_ops, - .of_match_table = of_match_ptr(dw_dma_of_id_table), - .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), - }, -}; - -static int __init dw_init(void) -{ - return platform_driver_register(&dw_driver); -} -subsys_initcall(dw_init); - -static void __exit dw_exit(void) -{ - platform_driver_unregister(&dw_driver); -} -module_exit(dw_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); -MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); -MODULE_AUTHOR("Viresh Kumar "); diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h deleted file mode 100644 index 9d417200bd57..000000000000 --- a/drivers/dma/dw_dmac_regs.h +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Driver for the Synopsys DesignWare AHB DMA Controller - * - * Copyright (C) 2005-2007 Atmel Corporation - * Copyright (C) 2010-2011 ST Microelectronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include - -#define DW_DMA_MAX_NR_CHANNELS 8 -#define DW_DMA_MAX_NR_REQUESTS 16 - -/* flow controller */ -enum dw_dma_fc { - DW_DMA_FC_D_M2M, - DW_DMA_FC_D_M2P, - DW_DMA_FC_D_P2M, - DW_DMA_FC_D_P2P, - DW_DMA_FC_P_P2M, - DW_DMA_FC_SP_P2P, - DW_DMA_FC_P_M2P, - DW_DMA_FC_DP_P2P, -}; - -/* - * Redefine this macro to handle differences between 32- and 64-bit - * addressing, big vs. little endian, etc. - */ -#define DW_REG(name) u32 name; u32 __pad_##name - -/* Hardware register definitions. 
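As a side note, here is a small standalone sketch of why the DW_REG() padding macro above exists: with every register slot widened to 64 bits, the per-channel register struct that follows lines up with the controller's register map (CTL at 0x18, CFG at 0x40, 0x58 bytes per channel); the field layout is copied purely for illustration:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define DW_REG(name)	uint32_t name; uint32_t __pad_##name

struct chan_regs {
	DW_REG(SAR);
	DW_REG(DAR);
	DW_REG(LLP);
	uint32_t CTL_LO;
	uint32_t CTL_HI;
	DW_REG(SSTAT);
	DW_REG(DSTAT);
	DW_REG(SSTATAR);
	DW_REG(DSTATAR);
	uint32_t CFG_LO;
	uint32_t CFG_HI;
	DW_REG(SGR);
	DW_REG(DSR);
};

int main(void)
{
	/* Prints: LLP 0x10 CTL_LO 0x18 CFG_LO 0x40 stride 0x58 */
	printf("LLP 0x%02zx CTL_LO 0x%02zx CFG_LO 0x%02zx stride 0x%02zx\n",
	       offsetof(struct chan_regs, LLP),
	       offsetof(struct chan_regs, CTL_LO),
	       offsetof(struct chan_regs, CFG_LO),
	       sizeof(struct chan_regs));
	return 0;
}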
*/ -struct dw_dma_chan_regs { - DW_REG(SAR); /* Source Address Register */ - DW_REG(DAR); /* Destination Address Register */ - DW_REG(LLP); /* Linked List Pointer */ - u32 CTL_LO; /* Control Register Low */ - u32 CTL_HI; /* Control Register High */ - DW_REG(SSTAT); - DW_REG(DSTAT); - DW_REG(SSTATAR); - DW_REG(DSTATAR); - u32 CFG_LO; /* Configuration Register Low */ - u32 CFG_HI; /* Configuration Register High */ - DW_REG(SGR); - DW_REG(DSR); -}; - -struct dw_dma_irq_regs { - DW_REG(XFER); - DW_REG(BLOCK); - DW_REG(SRC_TRAN); - DW_REG(DST_TRAN); - DW_REG(ERROR); -}; - -struct dw_dma_regs { - /* per-channel registers */ - struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS]; - - /* irq handling */ - struct dw_dma_irq_regs RAW; /* r */ - struct dw_dma_irq_regs STATUS; /* r (raw & mask) */ - struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */ - struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */ - - DW_REG(STATUS_INT); /* r */ - - /* software handshaking */ - DW_REG(REQ_SRC); - DW_REG(REQ_DST); - DW_REG(SGL_REQ_SRC); - DW_REG(SGL_REQ_DST); - DW_REG(LAST_SRC); - DW_REG(LAST_DST); - - /* miscellaneous */ - DW_REG(CFG); - DW_REG(CH_EN); - DW_REG(ID); - DW_REG(TEST); - - /* reserved */ - DW_REG(__reserved0); - DW_REG(__reserved1); - - /* optional encoded params, 0x3c8..0x3f7 */ - u32 __reserved; - - /* per-channel configuration registers */ - u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS]; - u32 MULTI_BLK_TYPE; - u32 MAX_BLK_SIZE; - - /* top-level parameters */ - u32 DW_PARAMS; -}; - -#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO -#define dma_readl_native ioread32be -#define dma_writel_native iowrite32be -#else -#define dma_readl_native readl -#define dma_writel_native writel -#endif - -/* To access the registers in early stage of probe */ -#define dma_read_byaddr(addr, name) \ - dma_readl_native((addr) + offsetof(struct dw_dma_regs, name)) - -/* Bitfields in DW_PARAMS */ -#define DW_PARAMS_NR_CHAN 8 /* number of channels */ -#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ -#define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n)) -#define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */ -#define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */ -#define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */ -#define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */ -#define DW_PARAMS_EN 28 /* encoded parameters */ - -/* Bitfields in DWC_PARAMS */ -#define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */ - -/* Bitfields in CTL_LO */ -#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? 
*/ -#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ -#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4) -#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */ -#define DWC_CTLL_DST_DEC (1<<7) -#define DWC_CTLL_DST_FIX (2<<7) -#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */ -#define DWC_CTLL_SRC_DEC (1<<9) -#define DWC_CTLL_SRC_FIX (2<<9) -#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */ -#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) -#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ -#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ -#define DWC_CTLL_FC(n) ((n) << 20) -#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ -#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ -#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ -#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */ -/* plus 4 transfer types for peripheral-as-flow-controller */ -#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */ -#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */ -#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */ -#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */ - -/* Bitfields in CTL_HI */ -#define DWC_CTLH_DONE 0x00001000 -#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff - -/* Bitfields in CFG_LO. Platform-configurable bits are in */ -#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ -#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ -#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ -#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ -#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ -#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */ -#define DWC_CFGL_MAX_BURST(x) ((x) << 20) -#define DWC_CFGL_RELOAD_SAR (1 << 30) -#define DWC_CFGL_RELOAD_DAR (1 << 31) - -/* Bitfields in CFG_HI. 
Platform-configurable bits are in */ -#define DWC_CFGH_DS_UPD_EN (1 << 5) -#define DWC_CFGH_SS_UPD_EN (1 << 6) - -/* Bitfields in SGR */ -#define DWC_SGR_SGI(x) ((x) << 0) -#define DWC_SGR_SGC(x) ((x) << 20) - -/* Bitfields in DSR */ -#define DWC_DSR_DSI(x) ((x) << 0) -#define DWC_DSR_DSC(x) ((x) << 20) - -/* Bitfields in CFG */ -#define DW_CFG_DMA_EN (1 << 0) - -enum dw_dmac_flags { - DW_DMA_IS_CYCLIC = 0, - DW_DMA_IS_SOFT_LLP = 1, -}; - -struct dw_dma_chan { - struct dma_chan chan; - void __iomem *ch_regs; - u8 mask; - u8 priority; - enum dma_transfer_direction direction; - bool paused; - bool initialized; - - /* software emulation of the LLP transfers */ - struct list_head *tx_node_active; - - spinlock_t lock; - - /* these other elements are all protected by lock */ - unsigned long flags; - struct list_head active_list; - struct list_head queue; - struct list_head free_list; - u32 residue; - struct dw_cyclic_desc *cdesc; - - unsigned int descs_allocated; - - /* hardware configuration */ - unsigned int block_size; - bool nollp; - - /* custom slave configuration */ - unsigned int request_line; - unsigned char src_master; - unsigned char dst_master; - - /* configuration passed via DMA_SLAVE_CONFIG */ - struct dma_slave_config dma_sconfig; -}; - -static inline struct dw_dma_chan_regs __iomem * -__dwc_regs(struct dw_dma_chan *dwc) -{ - return dwc->ch_regs; -} - -#define channel_readl(dwc, name) \ - dma_readl_native(&(__dwc_regs(dwc)->name)) -#define channel_writel(dwc, name, val) \ - dma_writel_native((val), &(__dwc_regs(dwc)->name)) - -static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) -{ - return container_of(chan, struct dw_dma_chan, chan); -} - -struct dw_dma { - struct dma_device dma; - void __iomem *regs; - struct dma_pool *desc_pool; - struct tasklet_struct tasklet; - struct clk *clk; - - u8 all_chan_mask; - - /* hardware configuration */ - unsigned char nr_masters; - unsigned char data_width[4]; - - struct dw_dma_chan chan[0]; -}; - -static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) -{ - return dw->regs; -} - -#define dma_readl(dw, name) \ - dma_readl_native(&(__dw_regs(dw)->name)) -#define dma_writel(dw, name, val) \ - dma_writel_native((val), &(__dw_regs(dw)->name)) - -#define channel_set_bit(dw, reg, mask) \ - dma_writel(dw, reg, ((mask) << 8) | (mask)) -#define channel_clear_bit(dw, reg, mask) \ - dma_writel(dw, reg, ((mask) << 8) | 0) - -static inline struct dw_dma *to_dw_dma(struct dma_device *ddev) -{ - return container_of(ddev, struct dw_dma, dma); -} - -/* LLI == Linked List Item; a.k.a. DMA block descriptor */ -struct dw_lli { - /* values that are not changed by hardware */ - u32 sar; - u32 dar; - u32 llp; /* chain to next lli */ - u32 ctllo; - /* values that may get written back: */ - u32 ctlhi; - /* sstat and dstat can snapshot peripheral register state. - * silicon config may discard either or both... 
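One clarification on the channel_set_bit()/channel_clear_bit() macros a few lines above: the CH_EN and interrupt MASK registers of this controller carry a write-enable field in bits 15:8, so a single write of (mask << 8) | mask sets the selected channel bits and (mask << 8) alone clears them, with no read-modify-write needed. A tiny sketch with a hypothetical channel number:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask = 1u << 2;		/* hypothetical: channel 2 */
	uint32_t set = (mask << 8) | mask;	/* 0x0404: enable channel 2 */
	uint32_t clear = (mask << 8) | 0;	/* 0x0400: disable channel 2 */

	printf("set=0x%04x clear=0x%04x\n", set, clear);
	return 0;
}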
- */ - u32 sstat; - u32 dstat; -}; - -struct dw_desc { - /* FIRST values the hardware uses */ - struct dw_lli lli; - - /* THEN values for driver housekeeping */ - struct list_head desc_node; - struct list_head tx_list; - struct dma_async_tx_descriptor txd; - size_t len; - size_t total_len; -}; - -#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) - -static inline struct dw_desc * -txd_to_dw_desc(struct dma_async_tx_descriptor *txd) -{ - return container_of(txd, struct dw_desc, txd); -} -- cgit v1.2.3-58-ga151 From 9cade1a46c77dfc96d57a3ea6354e95b2a7fcf61 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 5 Jun 2013 15:26:45 +0300 Subject: dma: dw: split driver to library part and platform code To simplify the driver development let's split driver to library and platform code parts. It helps us to add PCI driver in future. Signed-off-by: Andy Shevchenko Acked-by: Arnd Bergmann Acked-by: Viresh Kumar [Fixed compile error and few checkpatch issues] Signed-off-by: Vinod Koul --- drivers/dma/Makefile | 2 +- drivers/dma/dw/Kconfig | 8 +- drivers/dma/dw/Makefile | 6 +- drivers/dma/dw/core.c | 1730 ++++++++++++++++++++++++++++++++++++ drivers/dma/dw/dw_dmac.c | 1969 ----------------------------------------- drivers/dma/dw/dw_dmac_regs.h | 311 ------- drivers/dma/dw/internal.h | 70 ++ drivers/dma/dw/platform.c | 317 +++++++ drivers/dma/dw/regs.h | 312 +++++++ 9 files changed, 2441 insertions(+), 2284 deletions(-) create mode 100644 drivers/dma/dw/core.c delete mode 100644 drivers/dma/dw/dw_dmac.c delete mode 100644 drivers/dma/dw/dw_dmac_regs.h create mode 100644 drivers/dma/dw/internal.h create mode 100644 drivers/dma/dw/platform.c create mode 100644 drivers/dma/dw/regs.h (limited to 'drivers/dma') diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index ac44ca0d468a..6e2a521fbbe3 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -15,7 +15,7 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_MV_XOR) += mv_xor.o -obj-$(CONFIG_DW_DMAC) += dw/ +obj-$(CONFIG_DW_DMAC_CORE) += dw/ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index 38a215af5ccc..efd9e02a58eb 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -2,10 +2,14 @@ # DMA engine configuration for dw # -config DW_DMAC +config DW_DMAC_CORE tristate "Synopsys DesignWare AHB DMA support" depends on GENERIC_HARDIRQS select DMA_ENGINE + +config DW_DMAC + tristate "Synopsys DesignWare AHB DMA platform driver" + select DW_DMAC_CORE default y if CPU_AT32AP7000 help Support the Synopsys DesignWare AHB DMA controller. This @@ -14,7 +18,7 @@ config DW_DMAC config DW_DMAC_BIG_ENDIAN_IO bool "Use big endian I/O register access" default y if AVR32 - depends on DW_DMAC + depends on DW_DMAC_CORE help Say yes here to use big endian I/O access when reading and writing to the DMA controller registers. 
This is needed on some platforms, diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile index dd8d9936beef..47f36746c559 100644 --- a/drivers/dma/dw/Makefile +++ b/drivers/dma/dw/Makefile @@ -1 +1,5 @@ -obj-$(CONFIG_DW_DMAC) += dw_dmac.o +obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o +dw_dmac_core-objs := core.o + +obj-$(CONFIG_DW_DMAC) += dw_dmac.o +dw_dmac-objs := platform.o diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c new file mode 100644 index 000000000000..eea479c12173 --- /dev/null +++ b/drivers/dma/dw/core.c @@ -0,0 +1,1730 @@ +/* + * Core driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2007-2008 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * Copyright (C) 2013 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../dmaengine.h" +#include "internal.h" + +/* + * This supports the Synopsys "DesignWare AHB Central DMA Controller", + * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all + * of which use ARM any more). See the "Databook" from Synopsys for + * information beyond what licensees probably provide. + * + * The driver has currently been tested only with the Atmel AT32AP7000, + * which does not support descriptor writeback. + */ + +static inline void dwc_set_masters(struct dw_dma_chan *dwc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_dma_slave *dws = dwc->chan.private; + unsigned char mmax = dw->nr_masters - 1; + + if (dwc->request_line == ~0) { + dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); + dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); + } +} + +#define DWC_DEFAULT_CTLLO(_chan) ({ \ + struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ + struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ + bool _is_slave = is_slave_direction(_dwc->direction); \ + u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ + DW_DMA_MSIZE_16; \ + u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ + DW_DMA_MSIZE_16; \ + \ + (DWC_CTLL_DST_MSIZE(_dmsize) \ + | DWC_CTLL_SRC_MSIZE(_smsize) \ + | DWC_CTLL_LLP_D_EN \ + | DWC_CTLL_LLP_S_EN \ + | DWC_CTLL_DMS(_dwc->dst_master) \ + | DWC_CTLL_SMS(_dwc->src_master)); \ + }) + +/* + * Number of descriptors to allocate for each channel. This should be + * made configurable somehow; preferably, the clients (at least the + * ones using slave transfers) should be able to give us a hint. 
+ */ +#define NR_DESCS_PER_CHANNEL 64 + +/*----------------------------------------------------------------------*/ + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} +static struct device *chan2parent(struct dma_chan *chan) +{ + return chan->dev->device.parent; +} + +static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) +{ + return to_dw_desc(dwc->active_list.next); +} + +static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) +{ + struct dw_desc *desc, *_desc; + struct dw_desc *ret = NULL; + unsigned int i = 0; + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { + i++; + if (async_tx_test_ack(&desc->txd)) { + list_del(&desc->desc_node); + ret = desc; + break; + } + dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); + } + spin_unlock_irqrestore(&dwc->lock, flags); + + dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); + + return ret; +} + +/* + * Move a descriptor, including any children, to the free list. + * `desc' must not be on any lists. + */ +static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) +{ + unsigned long flags; + + if (desc) { + struct dw_desc *child; + + spin_lock_irqsave(&dwc->lock, flags); + list_for_each_entry(child, &desc->tx_list, desc_node) + dev_vdbg(chan2dev(&dwc->chan), + "moving child desc %p to freelist\n", + child); + list_splice_init(&desc->tx_list, &dwc->free_list); + dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); + list_add(&desc->desc_node, &dwc->free_list); + spin_unlock_irqrestore(&dwc->lock, flags); + } +} + +static void dwc_initialize(struct dw_dma_chan *dwc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_dma_slave *dws = dwc->chan.private; + u32 cfghi = DWC_CFGH_FIFO_MODE; + u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); + + if (dwc->initialized == true) + return; + + if (dws) { + /* + * We need controller-specific data to set up slave + * transfers. + */ + BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); + + cfghi = dws->cfg_hi; + cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; + } else { + if (dwc->direction == DMA_MEM_TO_DEV) + cfghi = DWC_CFGH_DST_PER(dwc->request_line); + else if (dwc->direction == DMA_DEV_TO_MEM) + cfghi = DWC_CFGH_SRC_PER(dwc->request_line); + } + + channel_writel(dwc, CFG_LO, cfglo); + channel_writel(dwc, CFG_HI, cfghi); + + /* Enable interrupts */ + channel_set_bit(dw, MASK.XFER, dwc->mask); + channel_set_bit(dw, MASK.ERROR, dwc->mask); + + dwc->initialized = true; +} + +/*----------------------------------------------------------------------*/ + +static inline unsigned int dwc_fast_fls(unsigned long long v) +{ + /* + * We can be a lot more clever here, but this should take care + * of the most common optimization. 
+ */ + if (!(v & 7)) + return 3; + else if (!(v & 3)) + return 2; + else if (!(v & 1)) + return 1; + return 0; +} + +static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) +{ + dev_err(chan2dev(&dwc->chan), + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", + channel_readl(dwc, SAR), + channel_readl(dwc, DAR), + channel_readl(dwc, LLP), + channel_readl(dwc, CTL_HI), + channel_readl(dwc, CTL_LO)); +} + +static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); +} + +/*----------------------------------------------------------------------*/ + +/* Perform single block transfer */ +static inline void dwc_do_single_block(struct dw_dma_chan *dwc, + struct dw_desc *desc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + u32 ctllo; + + /* Software emulation of LLP mode relies on interrupts to continue + * multi block transfer. */ + ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; + + channel_writel(dwc, SAR, desc->lli.sar); + channel_writel(dwc, DAR, desc->lli.dar); + channel_writel(dwc, CTL_LO, ctllo); + channel_writel(dwc, CTL_HI, desc->lli.ctlhi); + channel_set_bit(dw, CH_EN, dwc->mask); + + /* Move pointer to next descriptor */ + dwc->tx_node_active = dwc->tx_node_active->next; +} + +/* Called with dwc->lock held and bh disabled */ +static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + unsigned long was_soft_llp; + + /* ASSERT: channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(chan2dev(&dwc->chan), + "BUG: Attempted to start non-idle channel\n"); + dwc_dump_chan_regs(dwc); + + /* The tasklet will hopefully advance the queue... 
*/ + return; + } + + if (dwc->nollp) { + was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, + &dwc->flags); + if (was_soft_llp) { + dev_err(chan2dev(&dwc->chan), + "BUG: Attempted to start new LLP transfer " + "inside ongoing one\n"); + return; + } + + dwc_initialize(dwc); + + dwc->residue = first->total_len; + dwc->tx_node_active = &first->tx_list; + + /* Submit first block */ + dwc_do_single_block(dwc, first); + + return; + } + + dwc_initialize(dwc); + + channel_writel(dwc, LLP, first->txd.phys); + channel_writel(dwc, CTL_LO, + DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); + channel_writel(dwc, CTL_HI, 0); + channel_set_bit(dw, CH_EN, dwc->mask); +} + +/*----------------------------------------------------------------------*/ + +static void +dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, + bool callback_required) +{ + dma_async_tx_callback callback = NULL; + void *param = NULL; + struct dma_async_tx_descriptor *txd = &desc->txd; + struct dw_desc *child; + unsigned long flags; + + dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); + + spin_lock_irqsave(&dwc->lock, flags); + dma_cookie_complete(txd); + if (callback_required) { + callback = txd->callback; + param = txd->callback_param; + } + + /* async_tx_ack */ + list_for_each_entry(child, &desc->tx_list, desc_node) + async_tx_ack(&child->txd); + async_tx_ack(&desc->txd); + + list_splice_init(&desc->tx_list, &dwc->free_list); + list_move(&desc->desc_node, &dwc->free_list); + + if (!is_slave_direction(dwc->direction)) { + struct device *parent = chan2parent(&dwc->chan); + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) + dma_unmap_single(parent, desc->lli.dar, + desc->total_len, DMA_FROM_DEVICE); + else + dma_unmap_page(parent, desc->lli.dar, + desc->total_len, DMA_FROM_DEVICE); + } + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) + dma_unmap_single(parent, desc->lli.sar, + desc->total_len, DMA_TO_DEVICE); + else + dma_unmap_page(parent, desc->lli.sar, + desc->total_len, DMA_TO_DEVICE); + } + } + + spin_unlock_irqrestore(&dwc->lock, flags); + + if (callback) + callback(param); +} + +static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + struct dw_desc *desc, *_desc; + LIST_HEAD(list); + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(chan2dev(&dwc->chan), + "BUG: XFER bit set, but channel not idle!\n"); + + /* Try to continue after resetting the channel... */ + dwc_chan_disable(dw, dwc); + } + + /* + * Submit queued descriptors ASAP, i.e. before we go through + * the completed ones. 
+ */ + list_splice_init(&dwc->active_list, &list); + if (!list_empty(&dwc->queue)) { + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); + } + + spin_unlock_irqrestore(&dwc->lock, flags); + + list_for_each_entry_safe(desc, _desc, &list, desc_node) + dwc_descriptor_complete(dwc, desc, true); +} + +/* Returns how many bytes were already received from source */ +static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) +{ + u32 ctlhi = channel_readl(dwc, CTL_HI); + u32 ctllo = channel_readl(dwc, CTL_LO); + + return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); +} + +static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + dma_addr_t llp; + struct dw_desc *desc, *_desc; + struct dw_desc *child; + u32 status_xfer; + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + llp = channel_readl(dwc, LLP); + status_xfer = dma_readl(dw, RAW.XFER); + + if (status_xfer & dwc->mask) { + /* Everything we've submitted is done */ + dma_writel(dw, CLEAR.XFER, dwc->mask); + + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { + struct list_head *head, *active = dwc->tx_node_active; + + /* + * We are inside first active descriptor. + * Otherwise something is really wrong. + */ + desc = dwc_first_active(dwc); + + head = &desc->tx_list; + if (active != head) { + /* Update desc to reflect last sent one */ + if (active != head->next) + desc = to_dw_desc(active->prev); + + dwc->residue -= desc->len; + + child = to_dw_desc(active); + + /* Submit next block */ + dwc_do_single_block(dwc, child); + + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + /* We are done here */ + clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); + } + + dwc->residue = 0; + + spin_unlock_irqrestore(&dwc->lock, flags); + + dwc_complete_all(dw, dwc); + return; + } + + if (list_empty(&dwc->active_list)) { + dwc->residue = 0; + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { + dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, + (unsigned long long)llp); + + list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { + /* Initial residue value */ + dwc->residue = desc->total_len; + + /* Check first descriptors addr */ + if (desc->txd.phys == llp) { + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + /* Check first descriptors llp */ + if (desc->lli.llp == llp) { + /* This one is currently in progress */ + dwc->residue -= dwc_get_sent(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + + dwc->residue -= desc->len; + list_for_each_entry(child, &desc->tx_list, desc_node) { + if (child->lli.llp == llp) { + /* Currently in progress */ + dwc->residue -= dwc_get_sent(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + return; + } + dwc->residue -= child->len; + } + + /* + * No descriptors so far seem to be in progress, i.e. + * this one must be done. + */ + spin_unlock_irqrestore(&dwc->lock, flags); + dwc_descriptor_complete(dwc, desc, true); + spin_lock_irqsave(&dwc->lock, flags); + } + + dev_err(chan2dev(&dwc->chan), + "BUG: All descriptors done, but channel not idle!\n"); + + /* Try to continue after resetting the channel... 
*/ + dwc_chan_disable(dw, dwc); + + if (!list_empty(&dwc->queue)) { + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); + } + spin_unlock_irqrestore(&dwc->lock, flags); +} + +static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) +{ + dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", + lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); +} + +static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) +{ + struct dw_desc *bad_desc; + struct dw_desc *child; + unsigned long flags; + + dwc_scan_descriptors(dw, dwc); + + spin_lock_irqsave(&dwc->lock, flags); + + /* + * The descriptor currently at the head of the active list is + * borked. Since we don't have any way to report errors, we'll + * just have to scream loudly and try to carry on. + */ + bad_desc = dwc_first_active(dwc); + list_del_init(&bad_desc->desc_node); + list_move(dwc->queue.next, dwc->active_list.prev); + + /* Clear the error flag and try to restart the controller */ + dma_writel(dw, CLEAR.ERROR, dwc->mask); + if (!list_empty(&dwc->active_list)) + dwc_dostart(dwc, dwc_first_active(dwc)); + + /* + * WARN may seem harsh, but since this only happens + * when someone submits a bad physical address in a + * descriptor, we should consider ourselves lucky that the + * controller flagged an error instead of scribbling over + * random memory locations. + */ + dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" + " cookie: %d\n", bad_desc->txd.cookie); + dwc_dump_lli(dwc, &bad_desc->lli); + list_for_each_entry(child, &bad_desc->tx_list, desc_node) + dwc_dump_lli(dwc, &child->lli); + + spin_unlock_irqrestore(&dwc->lock, flags); + + /* Pretend the descriptor completed successfully */ + dwc_descriptor_complete(dwc, bad_desc, true); +} + +/* --------------------- Cyclic DMA API extensions -------------------- */ + +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + return channel_readl(dwc, SAR); +} +EXPORT_SYMBOL(dw_dma_get_src_addr); + +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + return channel_readl(dwc, DAR); +} +EXPORT_SYMBOL(dw_dma_get_dst_addr); + +/* Called with dwc->lock held and all DMAC interrupts disabled */ +static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, + u32 status_err, u32 status_xfer) +{ + unsigned long flags; + + if (dwc->mask) { + void (*callback)(void *param); + void *callback_param; + + dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", + channel_readl(dwc, LLP)); + + callback = dwc->cdesc->period_callback; + callback_param = dwc->cdesc->period_callback_param; + + if (callback) + callback(callback_param); + } + + /* + * Error and transfer complete are highly unlikely, and will most + * likely be due to a configuration error by the user. + */ + if (unlikely(status_err & dwc->mask) || + unlikely(status_xfer & dwc->mask)) { + int i; + + dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " + "interrupt, stopping DMA transfer\n", + status_xfer ? 
"xfer" : "error"); + + spin_lock_irqsave(&dwc->lock, flags); + + dwc_dump_chan_regs(dwc); + + dwc_chan_disable(dw, dwc); + + /* Make sure DMA does not restart by loading a new list */ + channel_writel(dwc, LLP, 0); + channel_writel(dwc, CTL_LO, 0); + channel_writel(dwc, CTL_HI, 0); + + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + for (i = 0; i < dwc->cdesc->periods; i++) + dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); + + spin_unlock_irqrestore(&dwc->lock, flags); + } +} + +/* ------------------------------------------------------------------------- */ + +static void dw_dma_tasklet(unsigned long data) +{ + struct dw_dma *dw = (struct dw_dma *)data; + struct dw_dma_chan *dwc; + u32 status_xfer; + u32 status_err; + int i; + + status_xfer = dma_readl(dw, RAW.XFER); + status_err = dma_readl(dw, RAW.ERROR); + + dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); + + for (i = 0; i < dw->dma.chancnt; i++) { + dwc = &dw->chan[i]; + if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) + dwc_handle_cyclic(dw, dwc, status_err, status_xfer); + else if (status_err & (1 << i)) + dwc_handle_error(dw, dwc); + else if (status_xfer & (1 << i)) + dwc_scan_descriptors(dw, dwc); + } + + /* + * Re-enable interrupts. + */ + channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); + channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); +} + +static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) +{ + struct dw_dma *dw = dev_id; + u32 status; + + dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, + dma_readl(dw, STATUS_INT)); + + /* + * Just disable the interrupts. We'll turn them back on in the + * softirq handler. + */ + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); + + status = dma_readl(dw, STATUS_INT); + if (status) { + dev_err(dw->dma.dev, + "BUG: Unexpected interrupts pending: 0x%x\n", + status); + + /* Try to recover */ + channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); + channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); + channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); + channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); + } + + tasklet_schedule(&dw->tasklet); + + return IRQ_HANDLED; +} + +/*----------------------------------------------------------------------*/ + +static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct dw_desc *desc = txd_to_dw_desc(tx); + struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); + dma_cookie_t cookie; + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + cookie = dma_cookie_assign(tx); + + /* + * REVISIT: We should attempt to chain as many descriptors as + * possible, perhaps even appending to those already submitted + * for DMA. But this is hard to do in a race-free manner. 
+ */ + if (list_empty(&dwc->active_list)) { + dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, + desc->txd.cookie); + list_add_tail(&desc->desc_node, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); + } else { + dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, + desc->txd.cookie); + + list_add_tail(&desc->desc_node, &dwc->queue); + } + + spin_unlock_irqrestore(&dwc->lock, flags); + + return cookie; +} + +static struct dma_async_tx_descriptor * +dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc; + struct dw_desc *first; + struct dw_desc *prev; + size_t xfer_count; + size_t offset; + unsigned int src_width; + unsigned int dst_width; + unsigned int data_width; + u32 ctllo; + + dev_vdbg(chan2dev(chan), + "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, + (unsigned long long)dest, (unsigned long long)src, + len, flags); + + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); + return NULL; + } + + dwc->direction = DMA_MEM_TO_MEM; + + data_width = min_t(unsigned int, dw->data_width[dwc->src_master], + dw->data_width[dwc->dst_master]); + + src_width = dst_width = min_t(unsigned int, data_width, + dwc_fast_fls(src | dest | len)); + + ctllo = DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_DST_WIDTH(dst_width) + | DWC_CTLL_SRC_WIDTH(src_width) + | DWC_CTLL_DST_INC + | DWC_CTLL_SRC_INC + | DWC_CTLL_FC_M2M; + prev = first = NULL; + + for (offset = 0; offset < len; offset += xfer_count << src_width) { + xfer_count = min_t(size_t, (len - offset) >> src_width, + dwc->block_size); + + desc = dwc_desc_get(dwc); + if (!desc) + goto err_desc_get; + + desc->lli.sar = src + offset; + desc->lli.dar = dest + offset; + desc->lli.ctllo = ctllo; + desc->lli.ctlhi = xfer_count; + desc->len = xfer_count << src_width; + + if (!first) { + first = desc; + } else { + prev->lli.llp = desc->txd.phys; + list_add_tail(&desc->desc_node, + &first->tx_list); + } + prev = desc; + } + + if (flags & DMA_PREP_INTERRUPT) + /* Trigger interrupt after last block */ + prev->lli.ctllo |= DWC_CTLL_INT_EN; + + prev->lli.llp = 0; + first->txd.flags = flags; + first->total_len = len; + + return &first->txd; + +err_desc_get: + dwc_desc_put(dwc, first); + return NULL; +} + +static struct dma_async_tx_descriptor * +dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dma_slave_config *sconfig = &dwc->dma_sconfig; + struct dw_desc *prev; + struct dw_desc *first; + u32 ctllo; + dma_addr_t reg; + unsigned int reg_width; + unsigned int mem_width; + unsigned int data_width; + unsigned int i; + struct scatterlist *sg; + size_t total_len = 0; + + dev_vdbg(chan2dev(chan), "%s\n", __func__); + + if (unlikely(!is_slave_direction(direction) || !sg_len)) + return NULL; + + dwc->direction = direction; + + prev = first = NULL; + + switch (direction) { + case DMA_MEM_TO_DEV: + reg_width = __fls(sconfig->dst_addr_width); + reg = sconfig->dst_addr; + ctllo = (DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_DST_FIX + | DWC_CTLL_SRC_INC); + + ctllo |= sconfig->device_fc ? 
DWC_CTLL_FC(DW_DMA_FC_P_M2P) : + DWC_CTLL_FC(DW_DMA_FC_D_M2P); + + data_width = dw->data_width[dwc->src_master]; + + for_each_sg(sgl, sg, sg_len, i) { + struct dw_desc *desc; + u32 len, dlen, mem; + + mem = sg_dma_address(sg); + len = sg_dma_len(sg); + + mem_width = min_t(unsigned int, + data_width, dwc_fast_fls(mem | len)); + +slave_sg_todev_fill_desc: + desc = dwc_desc_get(dwc); + if (!desc) { + dev_err(chan2dev(chan), + "not enough descriptors available\n"); + goto err_desc_get; + } + + desc->lli.sar = mem; + desc->lli.dar = reg; + desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); + if ((len >> mem_width) > dwc->block_size) { + dlen = dwc->block_size << mem_width; + mem += dlen; + len -= dlen; + } else { + dlen = len; + len = 0; + } + + desc->lli.ctlhi = dlen >> mem_width; + desc->len = dlen; + + if (!first) { + first = desc; + } else { + prev->lli.llp = desc->txd.phys; + list_add_tail(&desc->desc_node, + &first->tx_list); + } + prev = desc; + total_len += dlen; + + if (len) + goto slave_sg_todev_fill_desc; + } + break; + case DMA_DEV_TO_MEM: + reg_width = __fls(sconfig->src_addr_width); + reg = sconfig->src_addr; + ctllo = (DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_INC + | DWC_CTLL_SRC_FIX); + + ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : + DWC_CTLL_FC(DW_DMA_FC_D_P2M); + + data_width = dw->data_width[dwc->dst_master]; + + for_each_sg(sgl, sg, sg_len, i) { + struct dw_desc *desc; + u32 len, dlen, mem; + + mem = sg_dma_address(sg); + len = sg_dma_len(sg); + + mem_width = min_t(unsigned int, + data_width, dwc_fast_fls(mem | len)); + +slave_sg_fromdev_fill_desc: + desc = dwc_desc_get(dwc); + if (!desc) { + dev_err(chan2dev(chan), + "not enough descriptors available\n"); + goto err_desc_get; + } + + desc->lli.sar = reg; + desc->lli.dar = mem; + desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); + if ((len >> reg_width) > dwc->block_size) { + dlen = dwc->block_size << reg_width; + mem += dlen; + len -= dlen; + } else { + dlen = len; + len = 0; + } + desc->lli.ctlhi = dlen >> reg_width; + desc->len = dlen; + + if (!first) { + first = desc; + } else { + prev->lli.llp = desc->txd.phys; + list_add_tail(&desc->desc_node, + &first->tx_list); + } + prev = desc; + total_len += dlen; + + if (len) + goto slave_sg_fromdev_fill_desc; + } + break; + default: + return NULL; + } + + if (flags & DMA_PREP_INTERRUPT) + /* Trigger interrupt after last block */ + prev->lli.ctllo |= DWC_CTLL_INT_EN; + + prev->lli.llp = 0; + first->total_len = total_len; + + return &first->txd; + +err_desc_get: + dwc_desc_put(dwc, first); + return NULL; +} + +/* + * Fix sconfig's burst size according to dw_dmac. We need to convert them as: + * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. + * + * NOTE: burst size 2 is not supported by controller. 
+ * + * This can be done by finding least significant bit set: n & (n - 1) + */ +static inline void convert_burst(u32 *maxburst) +{ + if (*maxburst > 1) + *maxburst = fls(*maxburst) - 2; + else + *maxburst = 0; +} + +static int +set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + + /* Check if chan will be configured for slave transfers */ + if (!is_slave_direction(sconfig->direction)) + return -EINVAL; + + memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); + dwc->direction = sconfig->direction; + + /* Take the request line from slave_id member */ + if (dwc->request_line == ~0) + dwc->request_line = sconfig->slave_id; + + convert_burst(&dwc->dma_sconfig.src_maxburst); + convert_burst(&dwc->dma_sconfig.dst_maxburst); + + return 0; +} + +static inline void dwc_chan_pause(struct dw_dma_chan *dwc) +{ + u32 cfglo = channel_readl(dwc, CFG_LO); + unsigned int count = 20; /* timeout iterations */ + + channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); + while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) + udelay(2); + + dwc->paused = true; +} + +static inline void dwc_chan_resume(struct dw_dma_chan *dwc) +{ + u32 cfglo = channel_readl(dwc, CFG_LO); + + channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); + + dwc->paused = false; +} + +static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc, *_desc; + unsigned long flags; + LIST_HEAD(list); + + if (cmd == DMA_PAUSE) { + spin_lock_irqsave(&dwc->lock, flags); + + dwc_chan_pause(dwc); + + spin_unlock_irqrestore(&dwc->lock, flags); + } else if (cmd == DMA_RESUME) { + if (!dwc->paused) + return 0; + + spin_lock_irqsave(&dwc->lock, flags); + + dwc_chan_resume(dwc); + + spin_unlock_irqrestore(&dwc->lock, flags); + } else if (cmd == DMA_TERMINATE_ALL) { + spin_lock_irqsave(&dwc->lock, flags); + + clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); + + dwc_chan_disable(dw, dwc); + + dwc_chan_resume(dwc); + + /* active_list entries will end up before queued entries */ + list_splice_init(&dwc->queue, &list); + list_splice_init(&dwc->active_list, &list); + + spin_unlock_irqrestore(&dwc->lock, flags); + + /* Flush all pending and queued descriptors */ + list_for_each_entry_safe(desc, _desc, &list, desc_node) + dwc_descriptor_complete(dwc, desc, false); + } else if (cmd == DMA_SLAVE_CONFIG) { + return set_runtime_config(chan, (struct dma_slave_config *)arg); + } else { + return -ENXIO; + } + + return 0; +} + +static inline u32 dwc_get_residue(struct dw_dma_chan *dwc) +{ + unsigned long flags; + u32 residue; + + spin_lock_irqsave(&dwc->lock, flags); + + residue = dwc->residue; + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) + residue -= dwc_get_sent(dwc); + + spin_unlock_irqrestore(&dwc->lock, flags); + return residue; +} + +static enum dma_status +dwc_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + enum dma_status ret; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret != DMA_SUCCESS) { + dwc_scan_descriptors(to_dw_dma(chan->device), dwc); + + ret = dma_cookie_status(chan, cookie, txstate); + } + + if (ret != DMA_SUCCESS) + dma_set_residue(txstate, dwc_get_residue(dwc)); + + if (dwc->paused) + return DMA_PAUSED; + + return ret; +} + +static void dwc_issue_pending(struct dma_chan *chan) +{ + 
struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + + if (!list_empty(&dwc->queue)) + dwc_scan_descriptors(to_dw_dma(chan->device), dwc); +} + +static int dwc_alloc_chan_resources(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc; + int i; + unsigned long flags; + + dev_vdbg(chan2dev(chan), "%s\n", __func__); + + /* ASSERT: channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); + return -EIO; + } + + dma_cookie_init(chan); + + /* + * NOTE: some controllers may have additional features that we + * need to initialize here, like "scatter-gather" (which + * doesn't mean what you think it means), and status writeback. + */ + + dwc_set_masters(dwc); + + spin_lock_irqsave(&dwc->lock, flags); + i = dwc->descs_allocated; + while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { + dma_addr_t phys; + + spin_unlock_irqrestore(&dwc->lock, flags); + + desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); + if (!desc) + goto err_desc_alloc; + + memset(desc, 0, sizeof(struct dw_desc)); + + INIT_LIST_HEAD(&desc->tx_list); + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.tx_submit = dwc_tx_submit; + desc->txd.flags = DMA_CTRL_ACK; + desc->txd.phys = phys; + + dwc_desc_put(dwc, desc); + + spin_lock_irqsave(&dwc->lock, flags); + i = ++dwc->descs_allocated; + } + + spin_unlock_irqrestore(&dwc->lock, flags); + + dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); + + return i; + +err_desc_alloc: + dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); + + return i; +} + +static void dwc_free_chan_resources(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc, *_desc; + unsigned long flags; + LIST_HEAD(list); + + dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, + dwc->descs_allocated); + + /* ASSERT: channel is idle */ + BUG_ON(!list_empty(&dwc->active_list)); + BUG_ON(!list_empty(&dwc->queue)); + BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); + + spin_lock_irqsave(&dwc->lock, flags); + list_splice_init(&dwc->free_list, &list); + dwc->descs_allocated = 0; + dwc->initialized = false; + dwc->request_line = ~0; + + /* Disable interrupts */ + channel_clear_bit(dw, MASK.XFER, dwc->mask); + channel_clear_bit(dw, MASK.ERROR, dwc->mask); + + spin_unlock_irqrestore(&dwc->lock, flags); + + list_for_each_entry_safe(desc, _desc, &list, desc_node) { + dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); + dma_pool_free(dw->desc_pool, desc, desc->txd.phys); + } + + dev_vdbg(chan2dev(chan), "%s: done\n", __func__); +} + +/* --------------------- Cyclic DMA API extensions -------------------- */ + +/** + * dw_dma_cyclic_start - start the cyclic DMA transfer + * @chan: the DMA channel to start + * + * Must be called with soft interrupts disabled. Returns zero on success or + * -errno on failure. 
+ */ +int dw_dma_cyclic_start(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + unsigned long flags; + + if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { + dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); + return -ENODEV; + } + + spin_lock_irqsave(&dwc->lock, flags); + + /* Assert channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(chan2dev(&dwc->chan), + "BUG: Attempted to start non-idle channel\n"); + dwc_dump_chan_regs(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + return -EBUSY; + } + + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + /* Setup DMAC channel registers */ + channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); + channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); + channel_writel(dwc, CTL_HI, 0); + + channel_set_bit(dw, CH_EN, dwc->mask); + + spin_unlock_irqrestore(&dwc->lock, flags); + + return 0; +} +EXPORT_SYMBOL(dw_dma_cyclic_start); + +/** + * dw_dma_cyclic_stop - stop the cyclic DMA transfer + * @chan: the DMA channel to stop + * + * Must be called with soft interrupts disabled. + */ +void dw_dma_cyclic_stop(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + + dwc_chan_disable(dw, dwc); + + spin_unlock_irqrestore(&dwc->lock, flags); +} +EXPORT_SYMBOL(dw_dma_cyclic_stop); + +/** + * dw_dma_cyclic_prep - prepare the cyclic DMA transfer + * @chan: the DMA channel to prepare + * @buf_addr: physical DMA address where the buffer starts + * @buf_len: total number of bytes for the entire buffer + * @period_len: number of bytes for each period + * @direction: transfer direction, to or from device + * + * Must be called before trying to start the transfer. Returns a valid struct + * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. 
+ */ +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_transfer_direction direction) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dma_slave_config *sconfig = &dwc->dma_sconfig; + struct dw_cyclic_desc *cdesc; + struct dw_cyclic_desc *retval = NULL; + struct dw_desc *desc; + struct dw_desc *last = NULL; + unsigned long was_cyclic; + unsigned int reg_width; + unsigned int periods; + unsigned int i; + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + if (dwc->nollp) { + spin_unlock_irqrestore(&dwc->lock, flags); + dev_dbg(chan2dev(&dwc->chan), + "channel doesn't support LLP transfers\n"); + return ERR_PTR(-EINVAL); + } + + if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { + spin_unlock_irqrestore(&dwc->lock, flags); + dev_dbg(chan2dev(&dwc->chan), + "queue and/or active list are not empty\n"); + return ERR_PTR(-EBUSY); + } + + was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); + spin_unlock_irqrestore(&dwc->lock, flags); + if (was_cyclic) { + dev_dbg(chan2dev(&dwc->chan), + "channel already prepared for cyclic DMA\n"); + return ERR_PTR(-EBUSY); + } + + retval = ERR_PTR(-EINVAL); + + if (unlikely(!is_slave_direction(direction))) + goto out_err; + + dwc->direction = direction; + + if (direction == DMA_MEM_TO_DEV) + reg_width = __ffs(sconfig->dst_addr_width); + else + reg_width = __ffs(sconfig->src_addr_width); + + periods = buf_len / period_len; + + /* Check for too big/unaligned periods and unaligned DMA buffer. */ + if (period_len > (dwc->block_size << reg_width)) + goto out_err; + if (unlikely(period_len & ((1 << reg_width) - 1))) + goto out_err; + if (unlikely(buf_addr & ((1 << reg_width) - 1))) + goto out_err; + + retval = ERR_PTR(-ENOMEM); + + if (periods > NR_DESCS_PER_CHANNEL) + goto out_err; + + cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); + if (!cdesc) + goto out_err; + + cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); + if (!cdesc->desc) + goto out_err_alloc; + + for (i = 0; i < periods; i++) { + desc = dwc_desc_get(dwc); + if (!desc) + goto out_err_desc_get; + + switch (direction) { + case DMA_MEM_TO_DEV: + desc->lli.dar = sconfig->dst_addr; + desc->lli.sar = buf_addr + (period_len * i); + desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_FIX + | DWC_CTLL_SRC_INC + | DWC_CTLL_INT_EN); + + desc->lli.ctllo |= sconfig->device_fc ? + DWC_CTLL_FC(DW_DMA_FC_P_M2P) : + DWC_CTLL_FC(DW_DMA_FC_D_M2P); + + break; + case DMA_DEV_TO_MEM: + desc->lli.dar = buf_addr + (period_len * i); + desc->lli.sar = sconfig->src_addr; + desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_DST_INC + | DWC_CTLL_SRC_FIX + | DWC_CTLL_INT_EN); + + desc->lli.ctllo |= sconfig->device_fc ? 
+ DWC_CTLL_FC(DW_DMA_FC_P_P2M) : + DWC_CTLL_FC(DW_DMA_FC_D_P2M); + + break; + default: + break; + } + + desc->lli.ctlhi = (period_len >> reg_width); + cdesc->desc[i] = desc; + + if (last) + last->lli.llp = desc->txd.phys; + + last = desc; + } + + /* Let's make a cyclic list */ + last->lli.llp = cdesc->desc[0]->txd.phys; + + dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " + "period %zu periods %d\n", (unsigned long long)buf_addr, + buf_len, period_len, periods); + + cdesc->periods = periods; + dwc->cdesc = cdesc; + + return cdesc; + +out_err_desc_get: + while (i--) + dwc_desc_put(dwc, cdesc->desc[i]); +out_err_alloc: + kfree(cdesc); +out_err: + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); + return (struct dw_cyclic_desc *)retval; +} +EXPORT_SYMBOL(dw_dma_cyclic_prep); + +/** + * dw_dma_cyclic_free - free a prepared cyclic DMA transfer + * @chan: the DMA channel to free + */ +void dw_dma_cyclic_free(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_cyclic_desc *cdesc = dwc->cdesc; + int i; + unsigned long flags; + + dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); + + if (!cdesc) + return; + + spin_lock_irqsave(&dwc->lock, flags); + + dwc_chan_disable(dw, dwc); + + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + spin_unlock_irqrestore(&dwc->lock, flags); + + for (i = 0; i < cdesc->periods; i++) + dwc_desc_put(dwc, cdesc->desc[i]); + + kfree(cdesc->desc); + kfree(cdesc); + + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); +} +EXPORT_SYMBOL(dw_dma_cyclic_free); + +/*----------------------------------------------------------------------*/ + +static void dw_dma_off(struct dw_dma *dw) +{ + int i; + + dma_writel(dw, CFG, 0); + + channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); + channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); + channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); + channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); + + while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) + cpu_relax(); + + for (i = 0; i < dw->dma.chancnt; i++) + dw->chan[i].initialized = false; +} + +int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) +{ + struct dw_dma *dw; + size_t size; + bool autocfg; + unsigned int dw_params; + unsigned int nr_channels; + unsigned int max_blk_size = 0; + int err; + int i; + + dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); + autocfg = dw_params >> DW_PARAMS_EN & 0x1; + + dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); + + if (!pdata && autocfg) { + pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + /* Fill platform data with the default values */ + pdata->is_private = true; + pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; + pdata->chan_priority = CHAN_PRIORITY_ASCENDING; + } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) + return -EINVAL; + + if (autocfg) + nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; + else + nr_channels = pdata->nr_channels; + + size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); + dw = devm_kzalloc(chip->dev, size, GFP_KERNEL); + if (!dw) + return -ENOMEM; + + dw->clk = devm_clk_get(chip->dev, "hclk"); + if (IS_ERR(dw->clk)) + return PTR_ERR(dw->clk); + clk_prepare_enable(dw->clk); + + dw->regs = chip->regs; + chip->dw = dw; + + /* Get hardware configuration parameters */ + if (autocfg) { + max_blk_size = dma_readl(dw, MAX_BLK_SIZE); + + dw->nr_masters = (dw_params >> 
DW_PARAMS_NR_MASTER & 3) + 1; + for (i = 0; i < dw->nr_masters; i++) { + dw->data_width[i] = + (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; + } + } else { + dw->nr_masters = pdata->nr_masters; + memcpy(dw->data_width, pdata->data_width, 4); + } + + /* Calculate all channel mask before DMA setup */ + dw->all_chan_mask = (1 << nr_channels) - 1; + + /* Force dma off, just in case */ + dw_dma_off(dw); + + /* Disable BLOCK interrupts as well */ + channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); + + err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0, + "dw_dmac", dw); + if (err) + return err; + + /* Create a pool of consistent memory blocks for hardware descriptors */ + dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, + sizeof(struct dw_desc), 4, 0); + if (!dw->desc_pool) { + dev_err(chip->dev, "No memory for descriptors dma pool\n"); + return -ENOMEM; + } + + tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); + + INIT_LIST_HEAD(&dw->dma.channels); + for (i = 0; i < nr_channels; i++) { + struct dw_dma_chan *dwc = &dw->chan[i]; + int r = nr_channels - i - 1; + + dwc->chan.device = &dw->dma; + dma_cookie_init(&dwc->chan); + if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) + list_add_tail(&dwc->chan.device_node, + &dw->dma.channels); + else + list_add(&dwc->chan.device_node, &dw->dma.channels); + + /* 7 is highest priority & 0 is lowest. */ + if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) + dwc->priority = r; + else + dwc->priority = i; + + dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; + spin_lock_init(&dwc->lock); + dwc->mask = 1 << i; + + INIT_LIST_HEAD(&dwc->active_list); + INIT_LIST_HEAD(&dwc->queue); + INIT_LIST_HEAD(&dwc->free_list); + + channel_clear_bit(dw, CH_EN, dwc->mask); + + dwc->direction = DMA_TRANS_NONE; + dwc->request_line = ~0; + + /* Hardware configuration */ + if (autocfg) { + unsigned int dwc_params; + void __iomem *addr = chip->regs + r * sizeof(u32); + + dwc_params = dma_read_byaddr(addr, DWC_PARAMS); + + dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, + dwc_params); + + /* Decode maximum block size for given channel. The + * stored 4 bit value represents blocks from 0x00 for 3 + * up to 0x0a for 4095. */ + dwc->block_size = + (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; + dwc->nollp = + (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; + } else { + dwc->block_size = pdata->block_size; + + /* Check if channel supports multi block transfer */ + channel_writel(dwc, LLP, 0xfffffffc); + dwc->nollp = + (channel_readl(dwc, LLP) & 0xfffffffc) == 0; + channel_writel(dwc, LLP, 0); + } + } + + /* Clear all interrupts on all channels. 
*/ + dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); + dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); + dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); + dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); + dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); + + dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); + dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); + if (pdata->is_private) + dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); + dw->dma.dev = chip->dev; + dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; + dw->dma.device_free_chan_resources = dwc_free_chan_resources; + + dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; + + dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; + dw->dma.device_control = dwc_control; + + dw->dma.device_tx_status = dwc_tx_status; + dw->dma.device_issue_pending = dwc_issue_pending; + + dma_writel(dw, CFG, DW_CFG_DMA_EN); + + dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", + nr_channels); + + dma_async_device_register(&dw->dma); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_dma_probe); + +int dw_dma_remove(struct dw_dma_chip *chip) +{ + struct dw_dma *dw = chip->dw; + struct dw_dma_chan *dwc, *_dwc; + + dw_dma_off(dw); + dma_async_device_unregister(&dw->dma); + + tasklet_kill(&dw->tasklet); + + list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, + chan.device_node) { + list_del(&dwc->chan.device_node); + channel_clear_bit(dw, CH_EN, dwc->mask); + } + + return 0; +} +EXPORT_SYMBOL_GPL(dw_dma_remove); + +void dw_dma_shutdown(struct dw_dma_chip *chip) +{ + struct dw_dma *dw = chip->dw; + + dw_dma_off(dw); + clk_disable_unprepare(dw->clk); +} +EXPORT_SYMBOL_GPL(dw_dma_shutdown); + +#ifdef CONFIG_PM_SLEEP + +int dw_dma_suspend(struct dw_dma_chip *chip) +{ + struct dw_dma *dw = chip->dw; + + dw_dma_off(dw); + clk_disable_unprepare(dw->clk); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_dma_suspend); + +int dw_dma_resume(struct dw_dma_chip *chip) +{ + struct dw_dma *dw = chip->dw; + + clk_prepare_enable(dw->clk); + dma_writel(dw, CFG, DW_CFG_DMA_EN); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_dma_resume); + +#endif /* CONFIG_PM_SLEEP */ + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); +MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); +MODULE_AUTHOR("Viresh Kumar "); diff --git a/drivers/dma/dw/dw_dmac.c b/drivers/dma/dw/dw_dmac.c deleted file mode 100644 index 15f3f4f79c10..000000000000 --- a/drivers/dma/dw/dw_dmac.c +++ /dev/null @@ -1,1969 +0,0 @@ -/* - * Core driver for the Synopsys DesignWare DMA Controller - * - * Copyright (C) 2007-2008 Atmel Corporation - * Copyright (C) 2010-2011 ST Microelectronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../dmaengine.h" -#include "dw_dmac_regs.h" - -/* - * This supports the Synopsys "DesignWare AHB Central DMA Controller", - * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all - * of which use ARM any more). See the "Databook" from Synopsys for - * information beyond what licensees probably provide. - * - * The driver has currently been tested only with the Atmel AT32AP7000, - * which does not support descriptor writeback. 
- */ - -static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) -{ - return slave ? slave->dst_master : 0; -} - -static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) -{ - return slave ? slave->src_master : 1; -} - -static inline void dwc_set_masters(struct dw_dma_chan *dwc) -{ - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - struct dw_dma_slave *dws = dwc->chan.private; - unsigned char mmax = dw->nr_masters - 1; - - if (dwc->request_line == ~0) { - dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); - dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); - } -} - -#define DWC_DEFAULT_CTLLO(_chan) ({ \ - struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ - struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ - bool _is_slave = is_slave_direction(_dwc->direction); \ - u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ - DW_DMA_MSIZE_16; \ - u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ - DW_DMA_MSIZE_16; \ - \ - (DWC_CTLL_DST_MSIZE(_dmsize) \ - | DWC_CTLL_SRC_MSIZE(_smsize) \ - | DWC_CTLL_LLP_D_EN \ - | DWC_CTLL_LLP_S_EN \ - | DWC_CTLL_DMS(_dwc->dst_master) \ - | DWC_CTLL_SMS(_dwc->src_master)); \ - }) - -/* - * Number of descriptors to allocate for each channel. This should be - * made configurable somehow; preferably, the clients (at least the - * ones using slave transfers) should be able to give us a hint. - */ -#define NR_DESCS_PER_CHANNEL 64 - -/*----------------------------------------------------------------------*/ - -static struct device *chan2dev(struct dma_chan *chan) -{ - return &chan->dev->device; -} -static struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} - -static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) -{ - return to_dw_desc(dwc->active_list.next); -} - -static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) -{ - struct dw_desc *desc, *_desc; - struct dw_desc *ret = NULL; - unsigned int i = 0; - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { - i++; - if (async_tx_test_ack(&desc->txd)) { - list_del(&desc->desc_node); - ret = desc; - break; - } - dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); - } - spin_unlock_irqrestore(&dwc->lock, flags); - - dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); - - return ret; -} - -/* - * Move a descriptor, including any children, to the free list. - * `desc' must not be on any lists. - */ -static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) -{ - unsigned long flags; - - if (desc) { - struct dw_desc *child; - - spin_lock_irqsave(&dwc->lock, flags); - list_for_each_entry(child, &desc->tx_list, desc_node) - dev_vdbg(chan2dev(&dwc->chan), - "moving child desc %p to freelist\n", - child); - list_splice_init(&desc->tx_list, &dwc->free_list); - dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); - list_add(&desc->desc_node, &dwc->free_list); - spin_unlock_irqrestore(&dwc->lock, flags); - } -} - -static void dwc_initialize(struct dw_dma_chan *dwc) -{ - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - struct dw_dma_slave *dws = dwc->chan.private; - u32 cfghi = DWC_CFGH_FIFO_MODE; - u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); - - if (dwc->initialized == true) - return; - - if (dws) { - /* - * We need controller-specific data to set up slave - * transfers. 
- */ - BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); - - cfghi = dws->cfg_hi; - cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; - } else { - if (dwc->direction == DMA_MEM_TO_DEV) - cfghi = DWC_CFGH_DST_PER(dwc->request_line); - else if (dwc->direction == DMA_DEV_TO_MEM) - cfghi = DWC_CFGH_SRC_PER(dwc->request_line); - } - - channel_writel(dwc, CFG_LO, cfglo); - channel_writel(dwc, CFG_HI, cfghi); - - /* Enable interrupts */ - channel_set_bit(dw, MASK.XFER, dwc->mask); - channel_set_bit(dw, MASK.ERROR, dwc->mask); - - dwc->initialized = true; -} - -/*----------------------------------------------------------------------*/ - -static inline unsigned int dwc_fast_fls(unsigned long long v) -{ - /* - * We can be a lot more clever here, but this should take care - * of the most common optimization. - */ - if (!(v & 7)) - return 3; - else if (!(v & 3)) - return 2; - else if (!(v & 1)) - return 1; - return 0; -} - -static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) -{ - dev_err(chan2dev(&dwc->chan), - " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", - channel_readl(dwc, SAR), - channel_readl(dwc, DAR), - channel_readl(dwc, LLP), - channel_readl(dwc, CTL_HI), - channel_readl(dwc, CTL_LO)); -} - -static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) -{ - channel_clear_bit(dw, CH_EN, dwc->mask); - while (dma_readl(dw, CH_EN) & dwc->mask) - cpu_relax(); -} - -/*----------------------------------------------------------------------*/ - -/* Perform single block transfer */ -static inline void dwc_do_single_block(struct dw_dma_chan *dwc, - struct dw_desc *desc) -{ - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - u32 ctllo; - - /* Software emulation of LLP mode relies on interrupts to continue - * multi block transfer. */ - ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; - - channel_writel(dwc, SAR, desc->lli.sar); - channel_writel(dwc, DAR, desc->lli.dar); - channel_writel(dwc, CTL_LO, ctllo); - channel_writel(dwc, CTL_HI, desc->lli.ctlhi); - channel_set_bit(dw, CH_EN, dwc->mask); - - /* Move pointer to next descriptor */ - dwc->tx_node_active = dwc->tx_node_active->next; -} - -/* Called with dwc->lock held and bh disabled */ -static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) -{ - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - unsigned long was_soft_llp; - - /* ASSERT: channel is idle */ - if (dma_readl(dw, CH_EN) & dwc->mask) { - dev_err(chan2dev(&dwc->chan), - "BUG: Attempted to start non-idle channel\n"); - dwc_dump_chan_regs(dwc); - - /* The tasklet will hopefully advance the queue... 
*/ - return; - } - - if (dwc->nollp) { - was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, - &dwc->flags); - if (was_soft_llp) { - dev_err(chan2dev(&dwc->chan), - "BUG: Attempted to start new LLP transfer " - "inside ongoing one\n"); - return; - } - - dwc_initialize(dwc); - - dwc->residue = first->total_len; - dwc->tx_node_active = &first->tx_list; - - /* Submit first block */ - dwc_do_single_block(dwc, first); - - return; - } - - dwc_initialize(dwc); - - channel_writel(dwc, LLP, first->txd.phys); - channel_writel(dwc, CTL_LO, - DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); - channel_writel(dwc, CTL_HI, 0); - channel_set_bit(dw, CH_EN, dwc->mask); -} - -/*----------------------------------------------------------------------*/ - -static void -dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, - bool callback_required) -{ - dma_async_tx_callback callback = NULL; - void *param = NULL; - struct dma_async_tx_descriptor *txd = &desc->txd; - struct dw_desc *child; - unsigned long flags; - - dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); - - spin_lock_irqsave(&dwc->lock, flags); - dma_cookie_complete(txd); - if (callback_required) { - callback = txd->callback; - param = txd->callback_param; - } - - /* async_tx_ack */ - list_for_each_entry(child, &desc->tx_list, desc_node) - async_tx_ack(&child->txd); - async_tx_ack(&desc->txd); - - list_splice_init(&desc->tx_list, &dwc->free_list); - list_move(&desc->desc_node, &dwc->free_list); - - if (!is_slave_direction(dwc->direction)) { - struct device *parent = chan2parent(&dwc->chan); - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - else - dma_unmap_page(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - else - dma_unmap_page(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - } - } - - spin_unlock_irqrestore(&dwc->lock, flags); - - if (callback) - callback(param); -} - -static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) -{ - struct dw_desc *desc, *_desc; - LIST_HEAD(list); - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - if (dma_readl(dw, CH_EN) & dwc->mask) { - dev_err(chan2dev(&dwc->chan), - "BUG: XFER bit set, but channel not idle!\n"); - - /* Try to continue after resetting the channel... */ - dwc_chan_disable(dw, dwc); - } - - /* - * Submit queued descriptors ASAP, i.e. before we go through - * the completed ones. 
- */ - list_splice_init(&dwc->active_list, &list); - if (!list_empty(&dwc->queue)) { - list_move(dwc->queue.next, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } - - spin_unlock_irqrestore(&dwc->lock, flags); - - list_for_each_entry_safe(desc, _desc, &list, desc_node) - dwc_descriptor_complete(dwc, desc, true); -} - -/* Returns how many bytes were already received from source */ -static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) -{ - u32 ctlhi = channel_readl(dwc, CTL_HI); - u32 ctllo = channel_readl(dwc, CTL_LO); - - return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); -} - -static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) -{ - dma_addr_t llp; - struct dw_desc *desc, *_desc; - struct dw_desc *child; - u32 status_xfer; - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - llp = channel_readl(dwc, LLP); - status_xfer = dma_readl(dw, RAW.XFER); - - if (status_xfer & dwc->mask) { - /* Everything we've submitted is done */ - dma_writel(dw, CLEAR.XFER, dwc->mask); - - if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { - struct list_head *head, *active = dwc->tx_node_active; - - /* - * We are inside first active descriptor. - * Otherwise something is really wrong. - */ - desc = dwc_first_active(dwc); - - head = &desc->tx_list; - if (active != head) { - /* Update desc to reflect last sent one */ - if (active != head->next) - desc = to_dw_desc(active->prev); - - dwc->residue -= desc->len; - - child = to_dw_desc(active); - - /* Submit next block */ - dwc_do_single_block(dwc, child); - - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - - /* We are done here */ - clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); - } - - dwc->residue = 0; - - spin_unlock_irqrestore(&dwc->lock, flags); - - dwc_complete_all(dw, dwc); - return; - } - - if (list_empty(&dwc->active_list)) { - dwc->residue = 0; - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - - if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { - dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - - dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, - (unsigned long long)llp); - - list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { - /* Initial residue value */ - dwc->residue = desc->total_len; - - /* Check first descriptors addr */ - if (desc->txd.phys == llp) { - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - - /* Check first descriptors llp */ - if (desc->lli.llp == llp) { - /* This one is currently in progress */ - dwc->residue -= dwc_get_sent(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - - dwc->residue -= desc->len; - list_for_each_entry(child, &desc->tx_list, desc_node) { - if (child->lli.llp == llp) { - /* Currently in progress */ - dwc->residue -= dwc_get_sent(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); - return; - } - dwc->residue -= child->len; - } - - /* - * No descriptors so far seem to be in progress, i.e. - * this one must be done. - */ - spin_unlock_irqrestore(&dwc->lock, flags); - dwc_descriptor_complete(dwc, desc, true); - spin_lock_irqsave(&dwc->lock, flags); - } - - dev_err(chan2dev(&dwc->chan), - "BUG: All descriptors done, but channel not idle!\n"); - - /* Try to continue after resetting the channel... 
*/ - dwc_chan_disable(dw, dwc); - - if (!list_empty(&dwc->queue)) { - list_move(dwc->queue.next, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } - spin_unlock_irqrestore(&dwc->lock, flags); -} - -static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) -{ - dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", - lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); -} - -static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) -{ - struct dw_desc *bad_desc; - struct dw_desc *child; - unsigned long flags; - - dwc_scan_descriptors(dw, dwc); - - spin_lock_irqsave(&dwc->lock, flags); - - /* - * The descriptor currently at the head of the active list is - * borked. Since we don't have any way to report errors, we'll - * just have to scream loudly and try to carry on. - */ - bad_desc = dwc_first_active(dwc); - list_del_init(&bad_desc->desc_node); - list_move(dwc->queue.next, dwc->active_list.prev); - - /* Clear the error flag and try to restart the controller */ - dma_writel(dw, CLEAR.ERROR, dwc->mask); - if (!list_empty(&dwc->active_list)) - dwc_dostart(dwc, dwc_first_active(dwc)); - - /* - * WARN may seem harsh, but since this only happens - * when someone submits a bad physical address in a - * descriptor, we should consider ourselves lucky that the - * controller flagged an error instead of scribbling over - * random memory locations. - */ - dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" - " cookie: %d\n", bad_desc->txd.cookie); - dwc_dump_lli(dwc, &bad_desc->lli); - list_for_each_entry(child, &bad_desc->tx_list, desc_node) - dwc_dump_lli(dwc, &child->lli); - - spin_unlock_irqrestore(&dwc->lock, flags); - - /* Pretend the descriptor completed successfully */ - dwc_descriptor_complete(dwc, bad_desc, true); -} - -/* --------------------- Cyclic DMA API extensions -------------------- */ - -dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - return channel_readl(dwc, SAR); -} -EXPORT_SYMBOL(dw_dma_get_src_addr); - -dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - return channel_readl(dwc, DAR); -} -EXPORT_SYMBOL(dw_dma_get_dst_addr); - -/* Called with dwc->lock held and all DMAC interrupts disabled */ -static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, - u32 status_err, u32 status_xfer) -{ - unsigned long flags; - - if (dwc->mask) { - void (*callback)(void *param); - void *callback_param; - - dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", - channel_readl(dwc, LLP)); - - callback = dwc->cdesc->period_callback; - callback_param = dwc->cdesc->period_callback_param; - - if (callback) - callback(callback_param); - } - - /* - * Error and transfer complete are highly unlikely, and will most - * likely be due to a configuration error by the user. - */ - if (unlikely(status_err & dwc->mask) || - unlikely(status_xfer & dwc->mask)) { - int i; - - dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " - "interrupt, stopping DMA transfer\n", - status_xfer ? 
"xfer" : "error"); - - spin_lock_irqsave(&dwc->lock, flags); - - dwc_dump_chan_regs(dwc); - - dwc_chan_disable(dw, dwc); - - /* Make sure DMA does not restart by loading a new list */ - channel_writel(dwc, LLP, 0); - channel_writel(dwc, CTL_LO, 0); - channel_writel(dwc, CTL_HI, 0); - - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - - for (i = 0; i < dwc->cdesc->periods; i++) - dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); - - spin_unlock_irqrestore(&dwc->lock, flags); - } -} - -/* ------------------------------------------------------------------------- */ - -static void dw_dma_tasklet(unsigned long data) -{ - struct dw_dma *dw = (struct dw_dma *)data; - struct dw_dma_chan *dwc; - u32 status_xfer; - u32 status_err; - int i; - - status_xfer = dma_readl(dw, RAW.XFER); - status_err = dma_readl(dw, RAW.ERROR); - - dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); - - for (i = 0; i < dw->dma.chancnt; i++) { - dwc = &dw->chan[i]; - if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) - dwc_handle_cyclic(dw, dwc, status_err, status_xfer); - else if (status_err & (1 << i)) - dwc_handle_error(dw, dwc); - else if (status_xfer & (1 << i)) - dwc_scan_descriptors(dw, dwc); - } - - /* - * Re-enable interrupts. - */ - channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); - channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); -} - -static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) -{ - struct dw_dma *dw = dev_id; - u32 status; - - dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, - dma_readl(dw, STATUS_INT)); - - /* - * Just disable the interrupts. We'll turn them back on in the - * softirq handler. - */ - channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); - channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); - - status = dma_readl(dw, STATUS_INT); - if (status) { - dev_err(dw->dma.dev, - "BUG: Unexpected interrupts pending: 0x%x\n", - status); - - /* Try to recover */ - channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); - channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); - channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); - channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); - } - - tasklet_schedule(&dw->tasklet); - - return IRQ_HANDLED; -} - -/*----------------------------------------------------------------------*/ - -static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct dw_desc *desc = txd_to_dw_desc(tx); - struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); - dma_cookie_t cookie; - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - cookie = dma_cookie_assign(tx); - - /* - * REVISIT: We should attempt to chain as many descriptors as - * possible, perhaps even appending to those already submitted - * for DMA. But this is hard to do in a race-free manner. 
- */ - if (list_empty(&dwc->active_list)) { - dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, - desc->txd.cookie); - list_add_tail(&desc->desc_node, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } else { - dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, - desc->txd.cookie); - - list_add_tail(&desc->desc_node, &dwc->queue); - } - - spin_unlock_irqrestore(&dwc->lock, flags); - - return cookie; -} - -static struct dma_async_tx_descriptor * -dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, - size_t len, unsigned long flags) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_desc *desc; - struct dw_desc *first; - struct dw_desc *prev; - size_t xfer_count; - size_t offset; - unsigned int src_width; - unsigned int dst_width; - unsigned int data_width; - u32 ctllo; - - dev_vdbg(chan2dev(chan), - "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, - (unsigned long long)dest, (unsigned long long)src, - len, flags); - - if (unlikely(!len)) { - dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); - return NULL; - } - - dwc->direction = DMA_MEM_TO_MEM; - - data_width = min_t(unsigned int, dw->data_width[dwc->src_master], - dw->data_width[dwc->dst_master]); - - src_width = dst_width = min_t(unsigned int, data_width, - dwc_fast_fls(src | dest | len)); - - ctllo = DWC_DEFAULT_CTLLO(chan) - | DWC_CTLL_DST_WIDTH(dst_width) - | DWC_CTLL_SRC_WIDTH(src_width) - | DWC_CTLL_DST_INC - | DWC_CTLL_SRC_INC - | DWC_CTLL_FC_M2M; - prev = first = NULL; - - for (offset = 0; offset < len; offset += xfer_count << src_width) { - xfer_count = min_t(size_t, (len - offset) >> src_width, - dwc->block_size); - - desc = dwc_desc_get(dwc); - if (!desc) - goto err_desc_get; - - desc->lli.sar = src + offset; - desc->lli.dar = dest + offset; - desc->lli.ctllo = ctllo; - desc->lli.ctlhi = xfer_count; - desc->len = xfer_count << src_width; - - if (!first) { - first = desc; - } else { - prev->lli.llp = desc->txd.phys; - list_add_tail(&desc->desc_node, - &first->tx_list); - } - prev = desc; - } - - if (flags & DMA_PREP_INTERRUPT) - /* Trigger interrupt after last block */ - prev->lli.ctllo |= DWC_CTLL_INT_EN; - - prev->lli.llp = 0; - first->txd.flags = flags; - first->total_len = len; - - return &first->txd; - -err_desc_get: - dwc_desc_put(dwc, first); - return NULL; -} - -static struct dma_async_tx_descriptor * -dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dma_slave_config *sconfig = &dwc->dma_sconfig; - struct dw_desc *prev; - struct dw_desc *first; - u32 ctllo; - dma_addr_t reg; - unsigned int reg_width; - unsigned int mem_width; - unsigned int data_width; - unsigned int i; - struct scatterlist *sg; - size_t total_len = 0; - - dev_vdbg(chan2dev(chan), "%s\n", __func__); - - if (unlikely(!is_slave_direction(direction) || !sg_len)) - return NULL; - - dwc->direction = direction; - - prev = first = NULL; - - switch (direction) { - case DMA_MEM_TO_DEV: - reg_width = __fls(sconfig->dst_addr_width); - reg = sconfig->dst_addr; - ctllo = (DWC_DEFAULT_CTLLO(chan) - | DWC_CTLL_DST_WIDTH(reg_width) - | DWC_CTLL_DST_FIX - | DWC_CTLL_SRC_INC); - - ctllo |= sconfig->device_fc ? 
DWC_CTLL_FC(DW_DMA_FC_P_M2P) : - DWC_CTLL_FC(DW_DMA_FC_D_M2P); - - data_width = dw->data_width[dwc->src_master]; - - for_each_sg(sgl, sg, sg_len, i) { - struct dw_desc *desc; - u32 len, dlen, mem; - - mem = sg_dma_address(sg); - len = sg_dma_len(sg); - - mem_width = min_t(unsigned int, - data_width, dwc_fast_fls(mem | len)); - -slave_sg_todev_fill_desc: - desc = dwc_desc_get(dwc); - if (!desc) { - dev_err(chan2dev(chan), - "not enough descriptors available\n"); - goto err_desc_get; - } - - desc->lli.sar = mem; - desc->lli.dar = reg; - desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); - if ((len >> mem_width) > dwc->block_size) { - dlen = dwc->block_size << mem_width; - mem += dlen; - len -= dlen; - } else { - dlen = len; - len = 0; - } - - desc->lli.ctlhi = dlen >> mem_width; - desc->len = dlen; - - if (!first) { - first = desc; - } else { - prev->lli.llp = desc->txd.phys; - list_add_tail(&desc->desc_node, - &first->tx_list); - } - prev = desc; - total_len += dlen; - - if (len) - goto slave_sg_todev_fill_desc; - } - break; - case DMA_DEV_TO_MEM: - reg_width = __fls(sconfig->src_addr_width); - reg = sconfig->src_addr; - ctllo = (DWC_DEFAULT_CTLLO(chan) - | DWC_CTLL_SRC_WIDTH(reg_width) - | DWC_CTLL_DST_INC - | DWC_CTLL_SRC_FIX); - - ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : - DWC_CTLL_FC(DW_DMA_FC_D_P2M); - - data_width = dw->data_width[dwc->dst_master]; - - for_each_sg(sgl, sg, sg_len, i) { - struct dw_desc *desc; - u32 len, dlen, mem; - - mem = sg_dma_address(sg); - len = sg_dma_len(sg); - - mem_width = min_t(unsigned int, - data_width, dwc_fast_fls(mem | len)); - -slave_sg_fromdev_fill_desc: - desc = dwc_desc_get(dwc); - if (!desc) { - dev_err(chan2dev(chan), - "not enough descriptors available\n"); - goto err_desc_get; - } - - desc->lli.sar = reg; - desc->lli.dar = mem; - desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); - if ((len >> reg_width) > dwc->block_size) { - dlen = dwc->block_size << reg_width; - mem += dlen; - len -= dlen; - } else { - dlen = len; - len = 0; - } - desc->lli.ctlhi = dlen >> reg_width; - desc->len = dlen; - - if (!first) { - first = desc; - } else { - prev->lli.llp = desc->txd.phys; - list_add_tail(&desc->desc_node, - &first->tx_list); - } - prev = desc; - total_len += dlen; - - if (len) - goto slave_sg_fromdev_fill_desc; - } - break; - default: - return NULL; - } - - if (flags & DMA_PREP_INTERRUPT) - /* Trigger interrupt after last block */ - prev->lli.ctllo |= DWC_CTLL_INT_EN; - - prev->lli.llp = 0; - first->total_len = total_len; - - return &first->txd; - -err_desc_get: - dwc_desc_put(dwc, first); - return NULL; -} - -/* - * Fix sconfig's burst size according to dw_dmac. We need to convert them as: - * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. - * - * NOTE: burst size 2 is not supported by controller. 
- * - * This can be done by finding least significant bit set: n & (n - 1) - */ -static inline void convert_burst(u32 *maxburst) -{ - if (*maxburst > 1) - *maxburst = fls(*maxburst) - 2; - else - *maxburst = 0; -} - -static int -set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - - /* Check if chan will be configured for slave transfers */ - if (!is_slave_direction(sconfig->direction)) - return -EINVAL; - - memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); - dwc->direction = sconfig->direction; - - /* Take the request line from slave_id member */ - if (dwc->request_line == ~0) - dwc->request_line = sconfig->slave_id; - - convert_burst(&dwc->dma_sconfig.src_maxburst); - convert_burst(&dwc->dma_sconfig.dst_maxburst); - - return 0; -} - -static inline void dwc_chan_pause(struct dw_dma_chan *dwc) -{ - u32 cfglo = channel_readl(dwc, CFG_LO); - unsigned int count = 20; /* timeout iterations */ - - channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); - while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) - udelay(2); - - dwc->paused = true; -} - -static inline void dwc_chan_resume(struct dw_dma_chan *dwc) -{ - u32 cfglo = channel_readl(dwc, CFG_LO); - - channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); - - dwc->paused = false; -} - -static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_desc *desc, *_desc; - unsigned long flags; - LIST_HEAD(list); - - if (cmd == DMA_PAUSE) { - spin_lock_irqsave(&dwc->lock, flags); - - dwc_chan_pause(dwc); - - spin_unlock_irqrestore(&dwc->lock, flags); - } else if (cmd == DMA_RESUME) { - if (!dwc->paused) - return 0; - - spin_lock_irqsave(&dwc->lock, flags); - - dwc_chan_resume(dwc); - - spin_unlock_irqrestore(&dwc->lock, flags); - } else if (cmd == DMA_TERMINATE_ALL) { - spin_lock_irqsave(&dwc->lock, flags); - - clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); - - dwc_chan_disable(dw, dwc); - - dwc_chan_resume(dwc); - - /* active_list entries will end up before queued entries */ - list_splice_init(&dwc->queue, &list); - list_splice_init(&dwc->active_list, &list); - - spin_unlock_irqrestore(&dwc->lock, flags); - - /* Flush all pending and queued descriptors */ - list_for_each_entry_safe(desc, _desc, &list, desc_node) - dwc_descriptor_complete(dwc, desc, false); - } else if (cmd == DMA_SLAVE_CONFIG) { - return set_runtime_config(chan, (struct dma_slave_config *)arg); - } else { - return -ENXIO; - } - - return 0; -} - -static inline u32 dwc_get_residue(struct dw_dma_chan *dwc) -{ - unsigned long flags; - u32 residue; - - spin_lock_irqsave(&dwc->lock, flags); - - residue = dwc->residue; - if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) - residue -= dwc_get_sent(dwc); - - spin_unlock_irqrestore(&dwc->lock, flags); - return residue; -} - -static enum dma_status -dwc_tx_status(struct dma_chan *chan, - dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - enum dma_status ret; - - ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { - dwc_scan_descriptors(to_dw_dma(chan->device), dwc); - - ret = dma_cookie_status(chan, cookie, txstate); - } - - if (ret != DMA_SUCCESS) - dma_set_residue(txstate, dwc_get_residue(dwc)); - - if (dwc->paused) - return DMA_PAUSED; - - return ret; -} - -static void dwc_issue_pending(struct dma_chan *chan) -{ - 
struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - - if (!list_empty(&dwc->queue)) - dwc_scan_descriptors(to_dw_dma(chan->device), dwc); -} - -static int dwc_alloc_chan_resources(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_desc *desc; - int i; - unsigned long flags; - - dev_vdbg(chan2dev(chan), "%s\n", __func__); - - /* ASSERT: channel is idle */ - if (dma_readl(dw, CH_EN) & dwc->mask) { - dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); - return -EIO; - } - - dma_cookie_init(chan); - - /* - * NOTE: some controllers may have additional features that we - * need to initialize here, like "scatter-gather" (which - * doesn't mean what you think it means), and status writeback. - */ - - dwc_set_masters(dwc); - - spin_lock_irqsave(&dwc->lock, flags); - i = dwc->descs_allocated; - while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { - dma_addr_t phys; - - spin_unlock_irqrestore(&dwc->lock, flags); - - desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); - if (!desc) - goto err_desc_alloc; - - memset(desc, 0, sizeof(struct dw_desc)); - - INIT_LIST_HEAD(&desc->tx_list); - dma_async_tx_descriptor_init(&desc->txd, chan); - desc->txd.tx_submit = dwc_tx_submit; - desc->txd.flags = DMA_CTRL_ACK; - desc->txd.phys = phys; - - dwc_desc_put(dwc, desc); - - spin_lock_irqsave(&dwc->lock, flags); - i = ++dwc->descs_allocated; - } - - spin_unlock_irqrestore(&dwc->lock, flags); - - dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); - - return i; - -err_desc_alloc: - dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); - - return i; -} - -static void dwc_free_chan_resources(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_desc *desc, *_desc; - unsigned long flags; - LIST_HEAD(list); - - dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, - dwc->descs_allocated); - - /* ASSERT: channel is idle */ - BUG_ON(!list_empty(&dwc->active_list)); - BUG_ON(!list_empty(&dwc->queue)); - BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); - - spin_lock_irqsave(&dwc->lock, flags); - list_splice_init(&dwc->free_list, &list); - dwc->descs_allocated = 0; - dwc->initialized = false; - dwc->request_line = ~0; - - /* Disable interrupts */ - channel_clear_bit(dw, MASK.XFER, dwc->mask); - channel_clear_bit(dw, MASK.ERROR, dwc->mask); - - spin_unlock_irqrestore(&dwc->lock, flags); - - list_for_each_entry_safe(desc, _desc, &list, desc_node) { - dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); - dma_pool_free(dw->desc_pool, desc, desc->txd.phys); - } - - dev_vdbg(chan2dev(chan), "%s: done\n", __func__); -} - -/*----------------------------------------------------------------------*/ - -struct dw_dma_of_filter_args { - struct dw_dma *dw; - unsigned int req; - unsigned int src; - unsigned int dst; -}; - -static bool dw_dma_of_filter(struct dma_chan *chan, void *param) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma_of_filter_args *fargs = param; - - /* Ensure the device matches our channel */ - if (chan->device != &fargs->dw->dma) - return false; - - dwc->request_line = fargs->req; - dwc->src_master = fargs->src; - dwc->dst_master = fargs->dst; - - return true; -} - -static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, - struct of_dma *ofdma) -{ - struct dw_dma *dw = ofdma->of_dma_data; - struct dw_dma_of_filter_args fargs = { - .dw = dw, - }; - 
dma_cap_mask_t cap; - - if (dma_spec->args_count != 3) - return NULL; - - fargs.req = dma_spec->args[0]; - fargs.src = dma_spec->args[1]; - fargs.dst = dma_spec->args[2]; - - if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || - fargs.src >= dw->nr_masters || - fargs.dst >= dw->nr_masters)) - return NULL; - - dma_cap_zero(cap); - dma_cap_set(DMA_SLAVE, cap); - - /* TODO: there should be a simpler way to do this */ - return dma_request_channel(cap, dw_dma_of_filter, &fargs); -} - -#ifdef CONFIG_ACPI -static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct acpi_dma_spec *dma_spec = param; - - if (chan->device->dev != dma_spec->dev || - chan->chan_id != dma_spec->chan_id) - return false; - - dwc->request_line = dma_spec->slave_id; - dwc->src_master = dwc_get_sms(NULL); - dwc->dst_master = dwc_get_dms(NULL); - - return true; -} - -static void dw_dma_acpi_controller_register(struct dw_dma *dw) -{ - struct device *dev = dw->dma.dev; - struct acpi_dma_filter_info *info; - int ret; - - info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); - if (!info) - return; - - dma_cap_zero(info->dma_cap); - dma_cap_set(DMA_SLAVE, info->dma_cap); - info->filter_fn = dw_dma_acpi_filter; - - ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, - info); - if (ret) - dev_err(dev, "could not register acpi_dma_controller\n"); -} -#else /* !CONFIG_ACPI */ -static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {} -#endif /* !CONFIG_ACPI */ - -/* --------------------- Cyclic DMA API extensions -------------------- */ - -/** - * dw_dma_cyclic_start - start the cyclic DMA transfer - * @chan: the DMA channel to start - * - * Must be called with soft interrupts disabled. Returns zero on success or - * -errno on failure. - */ -int dw_dma_cyclic_start(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - unsigned long flags; - - if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { - dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); - return -ENODEV; - } - - spin_lock_irqsave(&dwc->lock, flags); - - /* Assert channel is idle */ - if (dma_readl(dw, CH_EN) & dwc->mask) { - dev_err(chan2dev(&dwc->chan), - "BUG: Attempted to start non-idle channel\n"); - dwc_dump_chan_regs(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); - return -EBUSY; - } - - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - - /* Setup DMAC channel registers */ - channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); - channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); - channel_writel(dwc, CTL_HI, 0); - - channel_set_bit(dw, CH_EN, dwc->mask); - - spin_unlock_irqrestore(&dwc->lock, flags); - - return 0; -} -EXPORT_SYMBOL(dw_dma_cyclic_start); - -/** - * dw_dma_cyclic_stop - stop the cyclic DMA transfer - * @chan: the DMA channel to stop - * - * Must be called with soft interrupts disabled. 
- */ -void dw_dma_cyclic_stop(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - - dwc_chan_disable(dw, dwc); - - spin_unlock_irqrestore(&dwc->lock, flags); -} -EXPORT_SYMBOL(dw_dma_cyclic_stop); - -/** - * dw_dma_cyclic_prep - prepare the cyclic DMA transfer - * @chan: the DMA channel to prepare - * @buf_addr: physical DMA address where the buffer starts - * @buf_len: total number of bytes for the entire buffer - * @period_len: number of bytes for each period - * @direction: transfer direction, to or from device - * - * Must be called before trying to start the transfer. Returns a valid struct - * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. - */ -struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, - dma_addr_t buf_addr, size_t buf_len, size_t period_len, - enum dma_transfer_direction direction) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dma_slave_config *sconfig = &dwc->dma_sconfig; - struct dw_cyclic_desc *cdesc; - struct dw_cyclic_desc *retval = NULL; - struct dw_desc *desc; - struct dw_desc *last = NULL; - unsigned long was_cyclic; - unsigned int reg_width; - unsigned int periods; - unsigned int i; - unsigned long flags; - - spin_lock_irqsave(&dwc->lock, flags); - if (dwc->nollp) { - spin_unlock_irqrestore(&dwc->lock, flags); - dev_dbg(chan2dev(&dwc->chan), - "channel doesn't support LLP transfers\n"); - return ERR_PTR(-EINVAL); - } - - if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { - spin_unlock_irqrestore(&dwc->lock, flags); - dev_dbg(chan2dev(&dwc->chan), - "queue and/or active list are not empty\n"); - return ERR_PTR(-EBUSY); - } - - was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); - spin_unlock_irqrestore(&dwc->lock, flags); - if (was_cyclic) { - dev_dbg(chan2dev(&dwc->chan), - "channel already prepared for cyclic DMA\n"); - return ERR_PTR(-EBUSY); - } - - retval = ERR_PTR(-EINVAL); - - if (unlikely(!is_slave_direction(direction))) - goto out_err; - - dwc->direction = direction; - - if (direction == DMA_MEM_TO_DEV) - reg_width = __ffs(sconfig->dst_addr_width); - else - reg_width = __ffs(sconfig->src_addr_width); - - periods = buf_len / period_len; - - /* Check for too big/unaligned periods and unaligned DMA buffer. */ - if (period_len > (dwc->block_size << reg_width)) - goto out_err; - if (unlikely(period_len & ((1 << reg_width) - 1))) - goto out_err; - if (unlikely(buf_addr & ((1 << reg_width) - 1))) - goto out_err; - - retval = ERR_PTR(-ENOMEM); - - if (periods > NR_DESCS_PER_CHANNEL) - goto out_err; - - cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); - if (!cdesc) - goto out_err; - - cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); - if (!cdesc->desc) - goto out_err_alloc; - - for (i = 0; i < periods; i++) { - desc = dwc_desc_get(dwc); - if (!desc) - goto out_err_desc_get; - - switch (direction) { - case DMA_MEM_TO_DEV: - desc->lli.dar = sconfig->dst_addr; - desc->lli.sar = buf_addr + (period_len * i); - desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) - | DWC_CTLL_DST_WIDTH(reg_width) - | DWC_CTLL_SRC_WIDTH(reg_width) - | DWC_CTLL_DST_FIX - | DWC_CTLL_SRC_INC - | DWC_CTLL_INT_EN); - - desc->lli.ctllo |= sconfig->device_fc ? 
- DWC_CTLL_FC(DW_DMA_FC_P_M2P) : - DWC_CTLL_FC(DW_DMA_FC_D_M2P); - - break; - case DMA_DEV_TO_MEM: - desc->lli.dar = buf_addr + (period_len * i); - desc->lli.sar = sconfig->src_addr; - desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) - | DWC_CTLL_SRC_WIDTH(reg_width) - | DWC_CTLL_DST_WIDTH(reg_width) - | DWC_CTLL_DST_INC - | DWC_CTLL_SRC_FIX - | DWC_CTLL_INT_EN); - - desc->lli.ctllo |= sconfig->device_fc ? - DWC_CTLL_FC(DW_DMA_FC_P_P2M) : - DWC_CTLL_FC(DW_DMA_FC_D_P2M); - - break; - default: - break; - } - - desc->lli.ctlhi = (period_len >> reg_width); - cdesc->desc[i] = desc; - - if (last) - last->lli.llp = desc->txd.phys; - - last = desc; - } - - /* Let's make a cyclic list */ - last->lli.llp = cdesc->desc[0]->txd.phys; - - dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " - "period %zu periods %d\n", (unsigned long long)buf_addr, - buf_len, period_len, periods); - - cdesc->periods = periods; - dwc->cdesc = cdesc; - - return cdesc; - -out_err_desc_get: - while (i--) - dwc_desc_put(dwc, cdesc->desc[i]); -out_err_alloc: - kfree(cdesc); -out_err: - clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); - return (struct dw_cyclic_desc *)retval; -} -EXPORT_SYMBOL(dw_dma_cyclic_prep); - -/** - * dw_dma_cyclic_free - free a prepared cyclic DMA transfer - * @chan: the DMA channel to free - */ -void dw_dma_cyclic_free(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - struct dw_cyclic_desc *cdesc = dwc->cdesc; - int i; - unsigned long flags; - - dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); - - if (!cdesc) - return; - - spin_lock_irqsave(&dwc->lock, flags); - - dwc_chan_disable(dw, dwc); - - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - - spin_unlock_irqrestore(&dwc->lock, flags); - - for (i = 0; i < cdesc->periods; i++) - dwc_desc_put(dwc, cdesc->desc[i]); - - kfree(cdesc->desc); - kfree(cdesc); - - clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); -} -EXPORT_SYMBOL(dw_dma_cyclic_free); - -/*----------------------------------------------------------------------*/ - -static void dw_dma_off(struct dw_dma *dw) -{ - int i; - - dma_writel(dw, CFG, 0); - - channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); - channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); - channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); - channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); - - while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) - cpu_relax(); - - for (i = 0; i < dw->dma.chancnt; i++) - dw->chan[i].initialized = false; -} - -#ifdef CONFIG_OF -static struct dw_dma_platform_data * -dw_dma_parse_dt(struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - struct dw_dma_platform_data *pdata; - u32 tmp, arr[4]; - - if (!np) { - dev_err(&pdev->dev, "Missing DT data\n"); - return NULL; - } - - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return NULL; - - if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels)) - return NULL; - - if (of_property_read_bool(np, "is_private")) - pdata->is_private = true; - - if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) - pdata->chan_allocation_order = (unsigned char)tmp; - - if (!of_property_read_u32(np, "chan_priority", &tmp)) - pdata->chan_priority = tmp; - - if (!of_property_read_u32(np, "block_size", &tmp)) - pdata->block_size = tmp; - - if (!of_property_read_u32(np, "dma-masters", &tmp)) { - if (tmp > 4) - return NULL; - - pdata->nr_masters = tmp; - } - - if 
(!of_property_read_u32_array(np, "data_width", arr, - pdata->nr_masters)) - for (tmp = 0; tmp < pdata->nr_masters; tmp++) - pdata->data_width[tmp] = arr[tmp]; - - return pdata; -} -#else -static inline struct dw_dma_platform_data * -dw_dma_parse_dt(struct platform_device *pdev) -{ - return NULL; -} -#endif - -static int dw_probe(struct platform_device *pdev) -{ - struct dw_dma_platform_data *pdata; - struct resource *io; - struct dw_dma *dw; - size_t size; - void __iomem *regs; - bool autocfg; - unsigned int dw_params; - unsigned int nr_channels; - unsigned int max_blk_size = 0; - int irq; - int err; - int i; - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(&pdev->dev, io); - if (IS_ERR(regs)) - return PTR_ERR(regs); - - /* Apply default dma_mask if needed */ - if (!pdev->dev.dma_mask) { - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - } - - dw_params = dma_read_byaddr(regs, DW_PARAMS); - autocfg = dw_params >> DW_PARAMS_EN & 0x1; - - dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params); - - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) - pdata = dw_dma_parse_dt(pdev); - - if (!pdata && autocfg) { - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - /* Fill platform data with the default values */ - pdata->is_private = true; - pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; - pdata->chan_priority = CHAN_PRIORITY_ASCENDING; - } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) - return -EINVAL; - - if (autocfg) - nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; - else - nr_channels = pdata->nr_channels; - - size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); - dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!dw) - return -ENOMEM; - - dw->clk = devm_clk_get(&pdev->dev, "hclk"); - if (IS_ERR(dw->clk)) - return PTR_ERR(dw->clk); - clk_prepare_enable(dw->clk); - - dw->regs = regs; - - /* Get hardware configuration parameters */ - if (autocfg) { - max_blk_size = dma_readl(dw, MAX_BLK_SIZE); - - dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; - for (i = 0; i < dw->nr_masters; i++) { - dw->data_width[i] = - (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; - } - } else { - dw->nr_masters = pdata->nr_masters; - memcpy(dw->data_width, pdata->data_width, 4); - } - - /* Calculate all channel mask before DMA setup */ - dw->all_chan_mask = (1 << nr_channels) - 1; - - /* Force dma off, just in case */ - dw_dma_off(dw); - - /* Disable BLOCK interrupts as well */ - channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); - - err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, - "dw_dmac", dw); - if (err) - return err; - - platform_set_drvdata(pdev, dw); - - /* Create a pool of consistent memory blocks for hardware descriptors */ - dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, - sizeof(struct dw_desc), 4, 0); - if (!dw->desc_pool) { - dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); - return -ENOMEM; - } - - tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); - - INIT_LIST_HEAD(&dw->dma.channels); - for (i = 0; i < nr_channels; i++) { - struct dw_dma_chan *dwc = &dw->chan[i]; - int r = nr_channels - i - 1; - - dwc->chan.device = &dw->dma; - dma_cookie_init(&dwc->chan); - if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) - list_add_tail(&dwc->chan.device_node, - 
&dw->dma.channels); - else - list_add(&dwc->chan.device_node, &dw->dma.channels); - - /* 7 is highest priority & 0 is lowest. */ - if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) - dwc->priority = r; - else - dwc->priority = i; - - dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; - spin_lock_init(&dwc->lock); - dwc->mask = 1 << i; - - INIT_LIST_HEAD(&dwc->active_list); - INIT_LIST_HEAD(&dwc->queue); - INIT_LIST_HEAD(&dwc->free_list); - - channel_clear_bit(dw, CH_EN, dwc->mask); - - dwc->direction = DMA_TRANS_NONE; - dwc->request_line = ~0; - - /* Hardware configuration */ - if (autocfg) { - unsigned int dwc_params; - - dwc_params = dma_read_byaddr(regs + r * sizeof(u32), - DWC_PARAMS); - - dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, - dwc_params); - - /* Decode maximum block size for given channel. The - * stored 4 bit value represents blocks from 0x00 for 3 - * up to 0x0a for 4095. */ - dwc->block_size = - (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; - dwc->nollp = - (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; - } else { - dwc->block_size = pdata->block_size; - - /* Check if channel supports multi block transfer */ - channel_writel(dwc, LLP, 0xfffffffc); - dwc->nollp = - (channel_readl(dwc, LLP) & 0xfffffffc) == 0; - channel_writel(dwc, LLP, 0); - } - } - - /* Clear all interrupts on all channels. */ - dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); - dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); - dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); - dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); - dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); - - dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); - dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); - if (pdata->is_private) - dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); - dw->dma.dev = &pdev->dev; - dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; - dw->dma.device_free_chan_resources = dwc_free_chan_resources; - - dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; - - dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; - dw->dma.device_control = dwc_control; - - dw->dma.device_tx_status = dwc_tx_status; - dw->dma.device_issue_pending = dwc_issue_pending; - - dma_writel(dw, CFG, DW_CFG_DMA_EN); - - dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n", - nr_channels); - - dma_async_device_register(&dw->dma); - - if (pdev->dev.of_node) { - err = of_dma_controller_register(pdev->dev.of_node, - dw_dma_of_xlate, dw); - if (err) - dev_err(&pdev->dev, - "could not register of_dma_controller\n"); - } - - if (ACPI_HANDLE(&pdev->dev)) - dw_dma_acpi_controller_register(dw); - - return 0; -} - -static int dw_remove(struct platform_device *pdev) -{ - struct dw_dma *dw = platform_get_drvdata(pdev); - struct dw_dma_chan *dwc, *_dwc; - - if (pdev->dev.of_node) - of_dma_controller_free(pdev->dev.of_node); - dw_dma_off(dw); - dma_async_device_unregister(&dw->dma); - - tasklet_kill(&dw->tasklet); - - list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, - chan.device_node) { - list_del(&dwc->chan.device_node); - channel_clear_bit(dw, CH_EN, dwc->mask); - } - - return 0; -} - -static void dw_shutdown(struct platform_device *pdev) -{ - struct dw_dma *dw = platform_get_drvdata(pdev); - - dw_dma_off(dw); - clk_disable_unprepare(dw->clk); -} - -static int dw_suspend_noirq(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct dw_dma *dw = platform_get_drvdata(pdev); - - dw_dma_off(dw); - clk_disable_unprepare(dw->clk); - - return 0; -} - -static int dw_resume_noirq(struct device *dev) -{ - struct 
platform_device *pdev = to_platform_device(dev); - struct dw_dma *dw = platform_get_drvdata(pdev); - - clk_prepare_enable(dw->clk); - dma_writel(dw, CFG, DW_CFG_DMA_EN); - - return 0; -} - -static const struct dev_pm_ops dw_dev_pm_ops = { - .suspend_noirq = dw_suspend_noirq, - .resume_noirq = dw_resume_noirq, - .freeze_noirq = dw_suspend_noirq, - .thaw_noirq = dw_resume_noirq, - .restore_noirq = dw_resume_noirq, - .poweroff_noirq = dw_suspend_noirq, -}; - -#ifdef CONFIG_OF -static const struct of_device_id dw_dma_of_id_table[] = { - { .compatible = "snps,dma-spear1340" }, - {} -}; -MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); -#endif - -#ifdef CONFIG_ACPI -static const struct acpi_device_id dw_dma_acpi_id_table[] = { - { "INTL9C60", 0 }, - { } -}; -#endif - -static struct platform_driver dw_driver = { - .probe = dw_probe, - .remove = dw_remove, - .shutdown = dw_shutdown, - .driver = { - .name = "dw_dmac", - .pm = &dw_dev_pm_ops, - .of_match_table = of_match_ptr(dw_dma_of_id_table), - .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), - }, -}; - -static int __init dw_init(void) -{ - return platform_driver_register(&dw_driver); -} -subsys_initcall(dw_init); - -static void __exit dw_exit(void) -{ - platform_driver_unregister(&dw_driver); -} -module_exit(dw_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); -MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); -MODULE_AUTHOR("Viresh Kumar "); diff --git a/drivers/dma/dw/dw_dmac_regs.h b/drivers/dma/dw/dw_dmac_regs.h deleted file mode 100644 index 9d417200bd57..000000000000 --- a/drivers/dma/dw/dw_dmac_regs.h +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Driver for the Synopsys DesignWare AHB DMA Controller - * - * Copyright (C) 2005-2007 Atmel Corporation - * Copyright (C) 2010-2011 ST Microelectronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include - -#define DW_DMA_MAX_NR_CHANNELS 8 -#define DW_DMA_MAX_NR_REQUESTS 16 - -/* flow controller */ -enum dw_dma_fc { - DW_DMA_FC_D_M2M, - DW_DMA_FC_D_M2P, - DW_DMA_FC_D_P2M, - DW_DMA_FC_D_P2P, - DW_DMA_FC_P_P2M, - DW_DMA_FC_SP_P2P, - DW_DMA_FC_P_M2P, - DW_DMA_FC_DP_P2P, -}; - -/* - * Redefine this macro to handle differences between 32- and 64-bit - * addressing, big vs. little endian, etc. - */ -#define DW_REG(name) u32 name; u32 __pad_##name - -/* Hardware register definitions. 
*/ -struct dw_dma_chan_regs { - DW_REG(SAR); /* Source Address Register */ - DW_REG(DAR); /* Destination Address Register */ - DW_REG(LLP); /* Linked List Pointer */ - u32 CTL_LO; /* Control Register Low */ - u32 CTL_HI; /* Control Register High */ - DW_REG(SSTAT); - DW_REG(DSTAT); - DW_REG(SSTATAR); - DW_REG(DSTATAR); - u32 CFG_LO; /* Configuration Register Low */ - u32 CFG_HI; /* Configuration Register High */ - DW_REG(SGR); - DW_REG(DSR); -}; - -struct dw_dma_irq_regs { - DW_REG(XFER); - DW_REG(BLOCK); - DW_REG(SRC_TRAN); - DW_REG(DST_TRAN); - DW_REG(ERROR); -}; - -struct dw_dma_regs { - /* per-channel registers */ - struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS]; - - /* irq handling */ - struct dw_dma_irq_regs RAW; /* r */ - struct dw_dma_irq_regs STATUS; /* r (raw & mask) */ - struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */ - struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */ - - DW_REG(STATUS_INT); /* r */ - - /* software handshaking */ - DW_REG(REQ_SRC); - DW_REG(REQ_DST); - DW_REG(SGL_REQ_SRC); - DW_REG(SGL_REQ_DST); - DW_REG(LAST_SRC); - DW_REG(LAST_DST); - - /* miscellaneous */ - DW_REG(CFG); - DW_REG(CH_EN); - DW_REG(ID); - DW_REG(TEST); - - /* reserved */ - DW_REG(__reserved0); - DW_REG(__reserved1); - - /* optional encoded params, 0x3c8..0x3f7 */ - u32 __reserved; - - /* per-channel configuration registers */ - u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS]; - u32 MULTI_BLK_TYPE; - u32 MAX_BLK_SIZE; - - /* top-level parameters */ - u32 DW_PARAMS; -}; - -#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO -#define dma_readl_native ioread32be -#define dma_writel_native iowrite32be -#else -#define dma_readl_native readl -#define dma_writel_native writel -#endif - -/* To access the registers in early stage of probe */ -#define dma_read_byaddr(addr, name) \ - dma_readl_native((addr) + offsetof(struct dw_dma_regs, name)) - -/* Bitfields in DW_PARAMS */ -#define DW_PARAMS_NR_CHAN 8 /* number of channels */ -#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ -#define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n)) -#define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */ -#define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */ -#define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */ -#define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */ -#define DW_PARAMS_EN 28 /* encoded parameters */ - -/* Bitfields in DWC_PARAMS */ -#define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */ - -/* Bitfields in CTL_LO */ -#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? 
*/ -#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ -#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4) -#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */ -#define DWC_CTLL_DST_DEC (1<<7) -#define DWC_CTLL_DST_FIX (2<<7) -#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */ -#define DWC_CTLL_SRC_DEC (1<<9) -#define DWC_CTLL_SRC_FIX (2<<9) -#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */ -#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) -#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ -#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ -#define DWC_CTLL_FC(n) ((n) << 20) -#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ -#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ -#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ -#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */ -/* plus 4 transfer types for peripheral-as-flow-controller */ -#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */ -#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */ -#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */ -#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */ - -/* Bitfields in CTL_HI */ -#define DWC_CTLH_DONE 0x00001000 -#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff - -/* Bitfields in CFG_LO. Platform-configurable bits are in */ -#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ -#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ -#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ -#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ -#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ -#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */ -#define DWC_CFGL_MAX_BURST(x) ((x) << 20) -#define DWC_CFGL_RELOAD_SAR (1 << 30) -#define DWC_CFGL_RELOAD_DAR (1 << 31) - -/* Bitfields in CFG_HI. 
Platform-configurable bits are in */ -#define DWC_CFGH_DS_UPD_EN (1 << 5) -#define DWC_CFGH_SS_UPD_EN (1 << 6) - -/* Bitfields in SGR */ -#define DWC_SGR_SGI(x) ((x) << 0) -#define DWC_SGR_SGC(x) ((x) << 20) - -/* Bitfields in DSR */ -#define DWC_DSR_DSI(x) ((x) << 0) -#define DWC_DSR_DSC(x) ((x) << 20) - -/* Bitfields in CFG */ -#define DW_CFG_DMA_EN (1 << 0) - -enum dw_dmac_flags { - DW_DMA_IS_CYCLIC = 0, - DW_DMA_IS_SOFT_LLP = 1, -}; - -struct dw_dma_chan { - struct dma_chan chan; - void __iomem *ch_regs; - u8 mask; - u8 priority; - enum dma_transfer_direction direction; - bool paused; - bool initialized; - - /* software emulation of the LLP transfers */ - struct list_head *tx_node_active; - - spinlock_t lock; - - /* these other elements are all protected by lock */ - unsigned long flags; - struct list_head active_list; - struct list_head queue; - struct list_head free_list; - u32 residue; - struct dw_cyclic_desc *cdesc; - - unsigned int descs_allocated; - - /* hardware configuration */ - unsigned int block_size; - bool nollp; - - /* custom slave configuration */ - unsigned int request_line; - unsigned char src_master; - unsigned char dst_master; - - /* configuration passed via DMA_SLAVE_CONFIG */ - struct dma_slave_config dma_sconfig; -}; - -static inline struct dw_dma_chan_regs __iomem * -__dwc_regs(struct dw_dma_chan *dwc) -{ - return dwc->ch_regs; -} - -#define channel_readl(dwc, name) \ - dma_readl_native(&(__dwc_regs(dwc)->name)) -#define channel_writel(dwc, name, val) \ - dma_writel_native((val), &(__dwc_regs(dwc)->name)) - -static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) -{ - return container_of(chan, struct dw_dma_chan, chan); -} - -struct dw_dma { - struct dma_device dma; - void __iomem *regs; - struct dma_pool *desc_pool; - struct tasklet_struct tasklet; - struct clk *clk; - - u8 all_chan_mask; - - /* hardware configuration */ - unsigned char nr_masters; - unsigned char data_width[4]; - - struct dw_dma_chan chan[0]; -}; - -static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) -{ - return dw->regs; -} - -#define dma_readl(dw, name) \ - dma_readl_native(&(__dw_regs(dw)->name)) -#define dma_writel(dw, name, val) \ - dma_writel_native((val), &(__dw_regs(dw)->name)) - -#define channel_set_bit(dw, reg, mask) \ - dma_writel(dw, reg, ((mask) << 8) | (mask)) -#define channel_clear_bit(dw, reg, mask) \ - dma_writel(dw, reg, ((mask) << 8) | 0) - -static inline struct dw_dma *to_dw_dma(struct dma_device *ddev) -{ - return container_of(ddev, struct dw_dma, dma); -} - -/* LLI == Linked List Item; a.k.a. DMA block descriptor */ -struct dw_lli { - /* values that are not changed by hardware */ - u32 sar; - u32 dar; - u32 llp; /* chain to next lli */ - u32 ctllo; - /* values that may get written back: */ - u32 ctlhi; - /* sstat and dstat can snapshot peripheral register state. - * silicon config may discard either or both... 
- */ - u32 sstat; - u32 dstat; -}; - -struct dw_desc { - /* FIRST values the hardware uses */ - struct dw_lli lli; - - /* THEN values for driver housekeeping */ - struct list_head desc_node; - struct list_head tx_list; - struct dma_async_tx_descriptor txd; - size_t len; - size_t total_len; -}; - -#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) - -static inline struct dw_desc * -txd_to_dw_desc(struct dma_async_tx_descriptor *txd) -{ - return container_of(txd, struct dw_desc, txd); -} diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h new file mode 100644 index 000000000000..32667f9e0dda --- /dev/null +++ b/drivers/dma/dw/internal.h @@ -0,0 +1,70 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2013 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _DW_DMAC_INTERNAL_H +#define _DW_DMAC_INTERNAL_H + +#include +#include + +#include "regs.h" + +/** + * struct dw_dma_chip - representation of DesignWare DMA controller hardware + * @dev: struct device of the DMA controller + * @irq: irq line + * @regs: memory mapped I/O space + * @dw: struct dw_dma that is filed by dw_dma_probe() + */ +struct dw_dma_chip { + struct device *dev; + int irq; + void __iomem *regs; + struct dw_dma *dw; +}; + +/* Export to the platform drivers */ +int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata); +int dw_dma_remove(struct dw_dma_chip *chip); + +void dw_dma_shutdown(struct dw_dma_chip *chip); + +#ifdef CONFIG_PM_SLEEP + +int dw_dma_suspend(struct dw_dma_chip *chip); +int dw_dma_resume(struct dw_dma_chip *chip); + +#endif /* CONFIG_PM_SLEEP */ + +/** + * dwc_get_dms - get destination master + * @slave: pointer to the custom slave configuration + * + * Returns destination master in the custom slave configuration if defined, or + * default value otherwise. + */ +static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) +{ + return slave ? slave->dst_master : 0; +} + +/** + * dwc_get_sms - get source master + * @slave: pointer to the custom slave configuration + * + * Returns source master in the custom slave configuration if defined, or + * default value otherwise. + */ +static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) +{ + return slave ? slave->src_master : 1; +} + +#endif /* _DW_DMAC_INTERNAL_H */ diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c new file mode 100644 index 000000000000..6c9449cffae8 --- /dev/null +++ b/drivers/dma/dw/platform.c @@ -0,0 +1,317 @@ +/* + * Platform driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2007-2008 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * Copyright (C) 2013 Intel Corporation + * + * Some parts of this driver are derived from the original dw_dmac. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" + +struct dw_dma_of_filter_args { + struct dw_dma *dw; + unsigned int req; + unsigned int src; + unsigned int dst; +}; + +static bool dw_dma_of_filter(struct dma_chan *chan, void *param) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma_of_filter_args *fargs = param; + + /* Ensure the device matches our channel */ + if (chan->device != &fargs->dw->dma) + return false; + + dwc->request_line = fargs->req; + dwc->src_master = fargs->src; + dwc->dst_master = fargs->dst; + + return true; +} + +static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct dw_dma *dw = ofdma->of_dma_data; + struct dw_dma_of_filter_args fargs = { + .dw = dw, + }; + dma_cap_mask_t cap; + + if (dma_spec->args_count != 3) + return NULL; + + fargs.req = dma_spec->args[0]; + fargs.src = dma_spec->args[1]; + fargs.dst = dma_spec->args[2]; + + if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || + fargs.src >= dw->nr_masters || + fargs.dst >= dw->nr_masters)) + return NULL; + + dma_cap_zero(cap); + dma_cap_set(DMA_SLAVE, cap); + + /* TODO: there should be a simpler way to do this */ + return dma_request_channel(cap, dw_dma_of_filter, &fargs); +} + +#ifdef CONFIG_ACPI +static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct acpi_dma_spec *dma_spec = param; + + if (chan->device->dev != dma_spec->dev || + chan->chan_id != dma_spec->chan_id) + return false; + + dwc->request_line = dma_spec->slave_id; + dwc->src_master = dwc_get_sms(NULL); + dwc->dst_master = dwc_get_dms(NULL); + + return true; +} + +static void dw_dma_acpi_controller_register(struct dw_dma *dw) +{ + struct device *dev = dw->dma.dev; + struct acpi_dma_filter_info *info; + int ret; + + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); + if (!info) + return; + + dma_cap_zero(info->dma_cap); + dma_cap_set(DMA_SLAVE, info->dma_cap); + info->filter_fn = dw_dma_acpi_filter; + + ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, + info); + if (ret) + dev_err(dev, "could not register acpi_dma_controller\n"); +} +#else /* !CONFIG_ACPI */ +static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {} +#endif /* !CONFIG_ACPI */ + +#ifdef CONFIG_OF +static struct dw_dma_platform_data * +dw_dma_parse_dt(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct dw_dma_platform_data *pdata; + u32 tmp, arr[4]; + + if (!np) { + dev_err(&pdev->dev, "Missing DT data\n"); + return NULL; + } + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels)) + return NULL; + + if (of_property_read_bool(np, "is_private")) + pdata->is_private = true; + + if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) + pdata->chan_allocation_order = (unsigned char)tmp; + + if (!of_property_read_u32(np, "chan_priority", &tmp)) + pdata->chan_priority = tmp; + + if (!of_property_read_u32(np, "block_size", &tmp)) + pdata->block_size = tmp; + + if (!of_property_read_u32(np, "dma-masters", &tmp)) { + if (tmp > 4) + return NULL; + + pdata->nr_masters = tmp; + } + + if (!of_property_read_u32_array(np, "data_width", arr, + pdata->nr_masters)) + for (tmp = 0; tmp < pdata->nr_masters; tmp++) + pdata->data_width[tmp] = arr[tmp]; + + return pdata; +} +#else +static inline 
struct dw_dma_platform_data * +dw_dma_parse_dt(struct platform_device *pdev) +{ + return NULL; +} +#endif + +static int dw_probe(struct platform_device *pdev) +{ + struct dw_dma_chip *chip; + struct device *dev = &pdev->dev; + struct resource *mem; + struct dw_dma_platform_data *pdata; + int err; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->irq = platform_get_irq(pdev, 0); + if (chip->irq < 0) + return chip->irq; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chip->regs = devm_ioremap_resource(dev, mem); + if (IS_ERR(chip->regs)) + return PTR_ERR(chip->regs); + + /* Apply default dma_mask if needed */ + if (!dev->dma_mask) { + dev->dma_mask = &dev->coherent_dma_mask; + dev->coherent_dma_mask = DMA_BIT_MASK(32); + } + + pdata = dev_get_platdata(dev); + if (!pdata) + pdata = dw_dma_parse_dt(pdev); + + chip->dev = dev; + + err = dw_dma_probe(chip, pdata); + if (err) + return err; + + platform_set_drvdata(pdev, chip); + + if (pdev->dev.of_node) { + err = of_dma_controller_register(pdev->dev.of_node, + dw_dma_of_xlate, chip->dw); + if (err) + dev_err(&pdev->dev, + "could not register of_dma_controller\n"); + } + + if (ACPI_HANDLE(&pdev->dev)) + dw_dma_acpi_controller_register(chip->dw); + + return 0; +} + +static int dw_remove(struct platform_device *pdev) +{ + struct dw_dma_chip *chip = platform_get_drvdata(pdev); + + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); + + return dw_dma_remove(chip); +} + +static void dw_shutdown(struct platform_device *pdev) +{ + struct dw_dma_chip *chip = platform_get_drvdata(pdev); + + dw_dma_shutdown(chip); +} + +#ifdef CONFIG_OF +static const struct of_device_id dw_dma_of_id_table[] = { + { .compatible = "snps,dma-spear1340" }, + {} +}; +MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id dw_dma_acpi_id_table[] = { + { "INTL9C60", 0 }, + { } +}; +#endif + +#ifdef CONFIG_PM_SLEEP + +static int dw_suspend_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dw_dma_chip *chip = platform_get_drvdata(pdev); + + return dw_dma_suspend(chip); +} + +static int dw_resume_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dw_dma_chip *chip = platform_get_drvdata(pdev); + + return dw_dma_resume(chip); +} + +#else /* !CONFIG_PM_SLEEP */ + +#define dw_suspend_noirq NULL +#define dw_resume_noirq NULL + +#endif /* !CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops dw_dev_pm_ops = { + .suspend_noirq = dw_suspend_noirq, + .resume_noirq = dw_resume_noirq, + .freeze_noirq = dw_suspend_noirq, + .thaw_noirq = dw_resume_noirq, + .restore_noirq = dw_resume_noirq, + .poweroff_noirq = dw_suspend_noirq, +}; + +static struct platform_driver dw_driver = { + .probe = dw_probe, + .remove = dw_remove, + .shutdown = dw_shutdown, + .driver = { + .name = "dw_dmac", + .pm = &dw_dev_pm_ops, + .of_match_table = of_match_ptr(dw_dma_of_id_table), + .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), + }, +}; + +static int __init dw_init(void) +{ + return platform_driver_register(&dw_driver); +} +subsys_initcall(dw_init); + +static void __exit dw_exit(void) +{ + platform_driver_unregister(&dw_driver); +} +module_exit(dw_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h new file mode 100644 index 000000000000..07c5a6ecb52b --- /dev/null +++ 
b/drivers/dma/dw/regs.h @@ -0,0 +1,312 @@ +/* + * Driver for the Synopsys DesignWare AHB DMA Controller + * + * Copyright (C) 2005-2007 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#define DW_DMA_MAX_NR_CHANNELS 8 +#define DW_DMA_MAX_NR_REQUESTS 16 + +/* flow controller */ +enum dw_dma_fc { + DW_DMA_FC_D_M2M, + DW_DMA_FC_D_M2P, + DW_DMA_FC_D_P2M, + DW_DMA_FC_D_P2P, + DW_DMA_FC_P_P2M, + DW_DMA_FC_SP_P2P, + DW_DMA_FC_P_M2P, + DW_DMA_FC_DP_P2P, +}; + +/* + * Redefine this macro to handle differences between 32- and 64-bit + * addressing, big vs. little endian, etc. + */ +#define DW_REG(name) u32 name; u32 __pad_##name + +/* Hardware register definitions. */ +struct dw_dma_chan_regs { + DW_REG(SAR); /* Source Address Register */ + DW_REG(DAR); /* Destination Address Register */ + DW_REG(LLP); /* Linked List Pointer */ + u32 CTL_LO; /* Control Register Low */ + u32 CTL_HI; /* Control Register High */ + DW_REG(SSTAT); + DW_REG(DSTAT); + DW_REG(SSTATAR); + DW_REG(DSTATAR); + u32 CFG_LO; /* Configuration Register Low */ + u32 CFG_HI; /* Configuration Register High */ + DW_REG(SGR); + DW_REG(DSR); +}; + +struct dw_dma_irq_regs { + DW_REG(XFER); + DW_REG(BLOCK); + DW_REG(SRC_TRAN); + DW_REG(DST_TRAN); + DW_REG(ERROR); +}; + +struct dw_dma_regs { + /* per-channel registers */ + struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS]; + + /* irq handling */ + struct dw_dma_irq_regs RAW; /* r */ + struct dw_dma_irq_regs STATUS; /* r (raw & mask) */ + struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */ + struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */ + + DW_REG(STATUS_INT); /* r */ + + /* software handshaking */ + DW_REG(REQ_SRC); + DW_REG(REQ_DST); + DW_REG(SGL_REQ_SRC); + DW_REG(SGL_REQ_DST); + DW_REG(LAST_SRC); + DW_REG(LAST_DST); + + /* miscellaneous */ + DW_REG(CFG); + DW_REG(CH_EN); + DW_REG(ID); + DW_REG(TEST); + + /* reserved */ + DW_REG(__reserved0); + DW_REG(__reserved1); + + /* optional encoded params, 0x3c8..0x3f7 */ + u32 __reserved; + + /* per-channel configuration registers */ + u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS]; + u32 MULTI_BLK_TYPE; + u32 MAX_BLK_SIZE; + + /* top-level parameters */ + u32 DW_PARAMS; +}; + +#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO +#define dma_readl_native ioread32be +#define dma_writel_native iowrite32be +#else +#define dma_readl_native readl +#define dma_writel_native writel +#endif + +/* To access the registers in early stage of probe */ +#define dma_read_byaddr(addr, name) \ + dma_readl_native((addr) + offsetof(struct dw_dma_regs, name)) + +/* Bitfields in DW_PARAMS */ +#define DW_PARAMS_NR_CHAN 8 /* number of channels */ +#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ +#define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n)) +#define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */ +#define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */ +#define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */ +#define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */ +#define DW_PARAMS_EN 28 /* encoded parameters */ + +/* Bitfields in DWC_PARAMS */ +#define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */ + +/* Bitfields in CTL_LO */ +#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? 
*/ +#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ +#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4) +#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */ +#define DWC_CTLL_DST_DEC (1<<7) +#define DWC_CTLL_DST_FIX (2<<7) +#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */ +#define DWC_CTLL_SRC_DEC (1<<9) +#define DWC_CTLL_SRC_FIX (2<<9) +#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */ +#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) +#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ +#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ +#define DWC_CTLL_FC(n) ((n) << 20) +#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ +#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ +#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ +#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */ +/* plus 4 transfer types for peripheral-as-flow-controller */ +#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */ +#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */ +#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */ +#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */ + +/* Bitfields in CTL_HI */ +#define DWC_CTLH_DONE 0x00001000 +#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff + +/* Bitfields in CFG_LO. Platform-configurable bits are in */ +#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ +#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ +#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ +#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ +#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ +#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */ +#define DWC_CFGL_MAX_BURST(x) ((x) << 20) +#define DWC_CFGL_RELOAD_SAR (1 << 30) +#define DWC_CFGL_RELOAD_DAR (1 << 31) + +/* Bitfields in CFG_HI. 
Platform-configurable bits are in */ +#define DWC_CFGH_DS_UPD_EN (1 << 5) +#define DWC_CFGH_SS_UPD_EN (1 << 6) + +/* Bitfields in SGR */ +#define DWC_SGR_SGI(x) ((x) << 0) +#define DWC_SGR_SGC(x) ((x) << 20) + +/* Bitfields in DSR */ +#define DWC_DSR_DSI(x) ((x) << 0) +#define DWC_DSR_DSC(x) ((x) << 20) + +/* Bitfields in CFG */ +#define DW_CFG_DMA_EN (1 << 0) + +enum dw_dmac_flags { + DW_DMA_IS_CYCLIC = 0, + DW_DMA_IS_SOFT_LLP = 1, +}; + +struct dw_dma_chan { + struct dma_chan chan; + void __iomem *ch_regs; + u8 mask; + u8 priority; + enum dma_transfer_direction direction; + bool paused; + bool initialized; + + /* software emulation of the LLP transfers */ + struct list_head *tx_node_active; + + spinlock_t lock; + + /* these other elements are all protected by lock */ + unsigned long flags; + struct list_head active_list; + struct list_head queue; + struct list_head free_list; + u32 residue; + struct dw_cyclic_desc *cdesc; + + unsigned int descs_allocated; + + /* hardware configuration */ + unsigned int block_size; + bool nollp; + + /* custom slave configuration */ + unsigned int request_line; + unsigned char src_master; + unsigned char dst_master; + + /* configuration passed via DMA_SLAVE_CONFIG */ + struct dma_slave_config dma_sconfig; +}; + +static inline struct dw_dma_chan_regs __iomem * +__dwc_regs(struct dw_dma_chan *dwc) +{ + return dwc->ch_regs; +} + +#define channel_readl(dwc, name) \ + dma_readl_native(&(__dwc_regs(dwc)->name)) +#define channel_writel(dwc, name, val) \ + dma_writel_native((val), &(__dwc_regs(dwc)->name)) + +static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct dw_dma_chan, chan); +} + +struct dw_dma { + struct dma_device dma; + void __iomem *regs; + struct dma_pool *desc_pool; + struct tasklet_struct tasklet; + struct clk *clk; + + u8 all_chan_mask; + + /* hardware configuration */ + unsigned char nr_masters; + unsigned char data_width[4]; + + struct dw_dma_chan chan[0]; +}; + +static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) +{ + return dw->regs; +} + +#define dma_readl(dw, name) \ + dma_readl_native(&(__dw_regs(dw)->name)) +#define dma_writel(dw, name, val) \ + dma_writel_native((val), &(__dw_regs(dw)->name)) + +#define channel_set_bit(dw, reg, mask) \ + dma_writel(dw, reg, ((mask) << 8) | (mask)) +#define channel_clear_bit(dw, reg, mask) \ + dma_writel(dw, reg, ((mask) << 8) | 0) + +static inline struct dw_dma *to_dw_dma(struct dma_device *ddev) +{ + return container_of(ddev, struct dw_dma, dma); +} + +/* LLI == Linked List Item; a.k.a. DMA block descriptor */ +struct dw_lli { + /* values that are not changed by hardware */ + u32 sar; + u32 dar; + u32 llp; /* chain to next lli */ + u32 ctllo; + /* values that may get written back: */ + u32 ctlhi; + /* sstat and dstat can snapshot peripheral register state. + * silicon config may discard either or both... 
+ */ + u32 sstat; + u32 dstat; +}; + +struct dw_desc { + /* FIRST values the hardware uses */ + struct dw_lli lli; + + /* THEN values for driver housekeeping */ + struct list_head desc_node; + struct list_head tx_list; + struct dma_async_tx_descriptor txd; + size_t len; + size_t total_len; +}; + +#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) + +static inline struct dw_desc * +txd_to_dw_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct dw_desc, txd); +} -- cgit v1.2.3-58-ga151 From fed42c198b45ece0b37eb25d37cbc4a9959c6522 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 5 Jun 2013 15:26:46 +0300 Subject: dma: dw: add PCI part of the driver This is the PCI part of the DesignWare DMAC driver. The controller is usually used in the Intel hardware such as Intel Medfield. Signed-off-by: Andy Shevchenko Acked-by: Arnd Bergmann Acked-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw/Kconfig | 9 +++++ drivers/dma/dw/Makefile | 3 ++ drivers/dma/dw/pci.c | 101 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 113 insertions(+) create mode 100644 drivers/dma/dw/pci.c (limited to 'drivers/dma') diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index efd9e02a58eb..db2b41fab626 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -15,6 +15,15 @@ config DW_DMAC Support the Synopsys DesignWare AHB DMA controller. This can be integrated in chips such as the Atmel AT32ap7000. +config DW_DMAC_PCI + tristate "Synopsys DesignWare AHB DMA PCI driver" + depends on PCI + select DW_DMAC_CORE + help + Support the Synopsys DesignWare AHB DMA controller on the + platfroms that enumerate it as a PCI device. For example, + Intel Medfield has integrated this GPDMA controller. + config DW_DMAC_BIG_ENDIAN_IO bool "Use big endian I/O register access" default y if AVR32 diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile index 47f36746c559..3eebd1ce2c6b 100644 --- a/drivers/dma/dw/Makefile +++ b/drivers/dma/dw/Makefile @@ -3,3 +3,6 @@ dw_dmac_core-objs := core.o obj-$(CONFIG_DW_DMAC) += dw_dmac.o dw_dmac-objs := platform.o + +obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o +dw_dmac_pci-objs := pci.o diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c new file mode 100644 index 000000000000..e89fc24b8293 --- /dev/null +++ b/drivers/dma/dw/pci.c @@ -0,0 +1,101 @@ +/* + * PCI driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2013 Intel Corporation + * Author: Andy Shevchenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include + +#include "internal.h" + +static struct dw_dma_platform_data dw_pci_pdata = { + .is_private = 1, + .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, + .chan_priority = CHAN_PRIORITY_ASCENDING, +}; + +static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct dw_dma_chip *chip; + struct dw_dma_platform_data *pdata = (void *)pid->driver_data; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + return ret; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->dev = &pdev->dev; + chip->regs = pcim_iomap_table(pdev)[0]; + chip->irq = pdev->irq; + + ret = dw_dma_probe(chip, pdata); + if (ret) + return ret; + + pci_set_drvdata(pdev, chip); + + return 0; +} + +static void dw_pci_remove(struct pci_dev *pdev) +{ + struct dw_dma_chip *chip = pci_get_drvdata(pdev); + int ret; + + ret = dw_dma_remove(chip); + if (ret) + dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); +} + +static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = { + /* Medfield */ + { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata }, + { PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata }, + + /* BayTrail */ + { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata }, + { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata }, + { } +}; +MODULE_DEVICE_TABLE(pci, dw_pci_id_table); + +static struct pci_driver dw_pci_driver = { + .name = "dw_dmac_pci", + .id_table = dw_pci_id_table, + .probe = dw_pci_probe, + .remove = dw_pci_remove, +}; + +module_pci_driver(dw_pci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller PCI driver"); +MODULE_AUTHOR("Andy Shevchenko "); -- cgit v1.2.3-58-ga151 From e368b510c01aaf7b2957306836ffdeacc24712a3 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 12 Jun 2013 13:39:57 +0530 Subject: dmaengine: dw: select DW_DMAC_BIG_ENDIAN_IO automagically Signed-off-by: Vinod Koul --- drivers/dma/dw/Kconfig | 11 ++--------- drivers/dma/dw/regs.h | 6 ++++++ 2 files changed, 8 insertions(+), 9 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index db2b41fab626..dde13248b681 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -10,6 +10,7 @@ config DW_DMAC_CORE config DW_DMAC tristate "Synopsys DesignWare AHB DMA platform driver" select DW_DMAC_CORE + select DW_DMAC_BIG_ENDIAN_IO if AVR32 default y if CPU_AT32AP7000 help Support the Synopsys DesignWare AHB DMA controller. This @@ -25,12 +26,4 @@ config DW_DMAC_PCI Intel Medfield has integrated this GPDMA controller. config DW_DMAC_BIG_ENDIAN_IO - bool "Use big endian I/O register access" - default y if AVR32 - depends on DW_DMAC_CORE - help - Say yes here to use big endian I/O access when reading and writing - to the DMA controller registers. This is needed on some platforms, - like the Atmel AVR32 architecture. - - If unsure, use the default setting. 
+ bool diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 07c5a6ecb52b..deb4274f80f4 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -101,6 +101,12 @@ struct dw_dma_regs { u32 DW_PARAMS; }; +/* + * Big endian I/O access when reading and writing to the DMA controller + * registers. This is needed on some platforms, like the Atmel AVR32 + * architecture. + */ + #ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO #define dma_readl_native ioread32be #define dma_writel_native iowrite32be -- cgit v1.2.3-58-ga151 From d7cabeed830b7eb3958cbc084a85649126cd670f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 19 Jun 2013 20:38:28 +0100 Subject: dmaengine: PL08x: Avoid collisions with get_signal() macro As pointed out by Arnd Bergmann there is a get_signal macro definied in linux/signal.h which can conflict with the platform data callback function of the same name leading to confusing errors from the compiler (especially if signal.h manages to get pulled into the driver itself due to header dependencies). Avoid such errors by renaming get_signal and put_signal in the platform data to get_xfer_signal and put_xfer_signal. Signed-off-by: Mark Brown Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- arch/arm/mach-lpc32xx/phy3250.c | 4 ++-- arch/arm/mach-spear/spear3xx.c | 4 ++-- arch/arm/mach-spear/spear6xx.c | 4 ++-- drivers/dma/amba-pl08x.c | 8 ++++---- include/linux/amba/pl08x.h | 8 ++++---- 5 files changed, 14 insertions(+), 14 deletions(-) (limited to 'drivers/dma') diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c index c1cd5a943ab1..e54f87ec2e4a 100644 --- a/arch/arm/mach-lpc32xx/phy3250.c +++ b/arch/arm/mach-lpc32xx/phy3250.c @@ -182,8 +182,8 @@ static void pl08x_put_signal(const struct pl08x_channel_data *cd, int ch) static struct pl08x_platform_data pl08x_pd = { .slave_channels = &pl08x_slave_channels[0], .num_slave_channels = ARRAY_SIZE(pl08x_slave_channels), - .get_signal = pl08x_get_signal, - .put_signal = pl08x_put_signal, + .get_xfer_signal = pl08x_get_signal, + .put_xfer_signal = pl08x_put_signal, .lli_buses = PL08X_AHB1, .mem_buses = PL08X_AHB1, }; diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c index 0227c97797cd..bf3b1fd8cb23 100644 --- a/arch/arm/mach-spear/spear3xx.c +++ b/arch/arm/mach-spear/spear3xx.c @@ -56,8 +56,8 @@ struct pl08x_platform_data pl080_plat_data = { }, .lli_buses = PL08X_AHB1, .mem_buses = PL08X_AHB1, - .get_signal = pl080_get_signal, - .put_signal = pl080_put_signal, + .get_xfer_signal = pl080_get_signal, + .put_xfer_signal = pl080_put_signal, }; /* diff --git a/arch/arm/mach-spear/spear6xx.c b/arch/arm/mach-spear/spear6xx.c index ec8eefbbdfad..cf477c78bb18 100644 --- a/arch/arm/mach-spear/spear6xx.c +++ b/arch/arm/mach-spear/spear6xx.c @@ -335,8 +335,8 @@ static struct pl08x_platform_data spear6xx_pl080_plat_data = { }, .lli_buses = PL08X_AHB1, .mem_buses = PL08X_AHB1, - .get_signal = pl080_get_signal, - .put_signal = pl080_put_signal, + .get_xfer_signal = pl080_get_signal, + .put_xfer_signal = pl080_put_signal, .slave_channels = spear600_dma_info, .num_slave_channels = ARRAY_SIZE(spear600_dma_info), }; diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 8bad254a498d..06fe45c74de5 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -299,8 +299,8 @@ static int pl08x_request_mux(struct pl08x_dma_chan *plchan) const struct pl08x_platform_data *pd = plchan->host->pd; int ret; - if (plchan->mux_use++ == 0 && pd->get_signal) { - ret = 
pd->get_signal(plchan->cd); + if (plchan->mux_use++ == 0 && pd->get_xfer_signal) { + ret = pd->get_xfer_signal(plchan->cd); if (ret < 0) { plchan->mux_use = 0; return ret; @@ -318,8 +318,8 @@ static void pl08x_release_mux(struct pl08x_dma_chan *plchan) if (plchan->signal >= 0) { WARN_ON(plchan->mux_use == 0); - if (--plchan->mux_use == 0 && pd->put_signal) { - pd->put_signal(plchan->cd, plchan->signal); + if (--plchan->mux_use == 0 && pd->put_xfer_signal) { + pd->put_xfer_signal(plchan->cd, plchan->signal); plchan->signal = -1; } } diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 2a5f64a11b77..10fe2a211c2e 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -76,11 +76,11 @@ struct pl08x_channel_data { * platform, all inclusive, including multiplexed channels. The available * physical channels will be multiplexed around these signals as they are * requested, just enumerate all possible channels. - * @get_signal: request a physical signal to be used for a DMA transfer + * @get_xfer_signal: request a physical signal to be used for a DMA transfer * immediately: if there is some multiplexing or similar blocking the use * of the channel the transfer can be denied by returning less than zero, * else it returns the allocated signal number - * @put_signal: indicate to the platform that this physical signal is not + * @put_xfer_signal: indicate to the platform that this physical signal is not * running any DMA transfer and multiplexing can be recycled * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 @@ -89,8 +89,8 @@ struct pl08x_platform_data { const struct pl08x_channel_data *slave_channels; unsigned int num_slave_channels; struct pl08x_channel_data memcpy_channel; - int (*get_signal)(const struct pl08x_channel_data *); - void (*put_signal)(const struct pl08x_channel_data *, int); + int (*get_xfer_signal)(const struct pl08x_channel_data *); + void (*put_xfer_signal)(const struct pl08x_channel_data *, int); u8 lli_buses; u8 mem_buses; }; -- cgit v1.2.3-58-ga151 From 8e3c518fba4f2ddd192171cbd7b23ec26900bf6b Mon Sep 17 00:00:00 2001 From: Qiao Zhou Date: Sat, 15 Jun 2013 12:51:48 +0800 Subject: dma: mmp_tdma: disable irq when disabling dma channel mask dma irq when disabling dma channel, so that interrupt status will not be set and interrupt won't come again. Signed-off-by: Qiao Zhou Acked-by: Zhangfei Gao Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 43d5a6c33297..9b9366537d73 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -154,6 +154,10 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) { writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, tdmac->reg_base + TDCR); + + /* disable irq */ + writel(0, tdmac->reg_base + TDIMR); + tdmac->status = DMA_SUCCESS; } -- cgit v1.2.3-58-ga151 From 7c169a42d961daccf5638ea41d1c76700ad6ca42 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Thu, 30 May 2013 18:25:02 +0200 Subject: dma: Add a jz4740 dmaengine driver This patch adds dmaengine support for the JZ4740 DMA controller. For now the driver will be a wrapper around the custom JZ4740 DMA API. Once all users of the custom JZ4740 DMA API have been converted to the dmaengine API the custom API will be removed and direct hardware access will be added to the dmaengine driver. 
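From a peripheral driver's point of view the conversion means replacing direct jz4740_dma_request()/jz4740_dma_configure() calls with the generic dmaengine slave API. A minimal, illustrative sketch of such a client (the FIFO address, bus width and burst size are made-up example values; the JZ4740 request line is carried in slave_id, which this driver forwards as the hardware request type):

#include <linux/dmaengine.h>
#include <asm/mach-jz4740/dma.h>	/* request type constants */

static struct dma_chan *jz4740_example_request_tx(dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
		.slave_id	= JZ4740_DMA_TYPE_UART_TRANSMIT,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* any free channel can be used here */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return NULL;

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}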
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 6 + drivers/dma/Makefile | 1 + drivers/dma/dma-jz4740.c | 433 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 440 insertions(+) create mode 100644 drivers/dma/dma-jz4740.c (limited to 'drivers/dma') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 146a1d864a71..f2b3f0315ea2 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -294,6 +294,12 @@ config MMP_PDMA help Support the MMP PDMA engine for PXA and MMP platfrom. +config DMA_JZ4740 + tristate "JZ4740 DMA support" + depends on MACH_JZ4740 + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 6e2a521fbbe3..5e0f2ef85614 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -38,3 +38,4 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o obj-$(CONFIG_DMA_OMAP) += omap-dma.o obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o +obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c new file mode 100644 index 000000000000..3d424341d8fe --- /dev/null +++ b/drivers/dma/dma-jz4740.c @@ -0,0 +1,433 @@ +/* + * Copyright (C) 2013, Lars-Peter Clausen + * JZ4740 DMAC support + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "virt-dma.h" + +#define JZ_DMA_NR_CHANS 6 + +struct jz4740_dma_sg { + dma_addr_t addr; + unsigned int len; +}; + +struct jz4740_dma_desc { + struct virt_dma_desc vdesc; + + enum dma_transfer_direction direction; + bool cyclic; + + unsigned int num_sgs; + struct jz4740_dma_sg sg[]; +}; + +struct jz4740_dmaengine_chan { + struct virt_dma_chan vchan; + struct jz4740_dma_chan *jz_chan; + + dma_addr_t fifo_addr; + + struct jz4740_dma_desc *desc; + unsigned int next_sg; +}; + +struct jz4740_dma_dev { + struct dma_device ddev; + + struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS]; +}; + +static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct jz4740_dmaengine_chan, vchan.chan); +} + +static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct jz4740_dma_desc, vdesc); +} + +static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs) +{ + return kzalloc(sizeof(struct jz4740_dma_desc) + + sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC); +} + +static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width) +{ + switch (width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + return JZ4740_DMA_WIDTH_8BIT; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + return JZ4740_DMA_WIDTH_16BIT; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + return JZ4740_DMA_WIDTH_32BIT; + default: + return JZ4740_DMA_WIDTH_32BIT; + } +} + +static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst) +{ + if (maxburst <= 1) + return JZ4740_DMA_TRANSFER_SIZE_1BYTE; + else if (maxburst <= 3) + return JZ4740_DMA_TRANSFER_SIZE_2BYTE; + else if (maxburst 
<= 15) + return JZ4740_DMA_TRANSFER_SIZE_4BYTE; + else if (maxburst <= 31) + return JZ4740_DMA_TRANSFER_SIZE_16BYTE; + + return JZ4740_DMA_TRANSFER_SIZE_32BYTE; +} + +static int jz4740_dma_slave_config(struct dma_chan *c, + const struct dma_slave_config *config) +{ + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); + struct jz4740_dma_config jzcfg; + + switch (config->direction) { + case DMA_MEM_TO_DEV: + jzcfg.flags = JZ4740_DMA_SRC_AUTOINC; + jzcfg.transfer_size = jz4740_dma_maxburst(config->dst_maxburst); + chan->fifo_addr = config->dst_addr; + break; + case DMA_DEV_TO_MEM: + jzcfg.flags = JZ4740_DMA_DST_AUTOINC; + jzcfg.transfer_size = jz4740_dma_maxburst(config->src_maxburst); + chan->fifo_addr = config->src_addr; + break; + default: + return -EINVAL; + } + + + jzcfg.src_width = jz4740_dma_width(config->src_addr_width); + jzcfg.dst_width = jz4740_dma_width(config->dst_addr_width); + jzcfg.mode = JZ4740_DMA_MODE_SINGLE; + jzcfg.request_type = config->slave_id; + + jz4740_dma_configure(chan->jz_chan, &jzcfg); + + return 0; +} + +static int jz4740_dma_terminate_all(struct dma_chan *c) +{ + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->vchan.lock, flags); + jz4740_dma_disable(chan->jz_chan); + chan->desc = NULL; + vchan_get_all_descriptors(&chan->vchan, &head); + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + vchan_dma_desc_free_list(&chan->vchan, &head); + + return 0; +} + +static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct dma_slave_config *config = (struct dma_slave_config *)arg; + + switch (cmd) { + case DMA_SLAVE_CONFIG: + return jz4740_dma_slave_config(chan, config); + case DMA_TERMINATE_ALL: + return jz4740_dma_terminate_all(chan); + default: + return -ENOSYS; + } +} + +static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) +{ + dma_addr_t src_addr, dst_addr; + struct virt_dma_desc *vdesc; + struct jz4740_dma_sg *sg; + + jz4740_dma_disable(chan->jz_chan); + + if (!chan->desc) { + vdesc = vchan_next_desc(&chan->vchan); + if (!vdesc) + return 0; + chan->desc = to_jz4740_dma_desc(vdesc); + chan->next_sg = 0; + } + + if (chan->next_sg == chan->desc->num_sgs) + chan->next_sg = 0; + + sg = &chan->desc->sg[chan->next_sg]; + + if (chan->desc->direction == DMA_MEM_TO_DEV) { + src_addr = sg->addr; + dst_addr = chan->fifo_addr; + } else { + src_addr = chan->fifo_addr; + dst_addr = sg->addr; + } + jz4740_dma_set_src_addr(chan->jz_chan, src_addr); + jz4740_dma_set_dst_addr(chan->jz_chan, dst_addr); + jz4740_dma_set_transfer_count(chan->jz_chan, sg->len); + + chan->next_sg++; + + jz4740_dma_enable(chan->jz_chan); + + return 0; +} + +static void jz4740_dma_complete_cb(struct jz4740_dma_chan *jz_chan, int error, + void *devid) +{ + struct jz4740_dmaengine_chan *chan = devid; + + spin_lock(&chan->vchan.lock); + if (chan->desc) { + if (chan->desc && chan->desc->cyclic) { + vchan_cyclic_callback(&chan->desc->vdesc); + } else { + if (chan->next_sg == chan->desc->num_sgs) { + chan->desc = NULL; + vchan_cookie_complete(&chan->desc->vdesc); + } + } + } + jz4740_dma_start_transfer(chan); + spin_unlock(&chan->vchan.lock); +} + +static void jz4740_dma_issue_pending(struct dma_chan *c) +{ + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); + unsigned long flags; + + spin_lock_irqsave(&chan->vchan.lock, flags); + if (vchan_issue_pending(&chan->vchan) && !chan->desc) + jz4740_dma_start_transfer(chan); + 
spin_unlock_irqrestore(&chan->vchan.lock, flags); +} + +static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg( + struct dma_chan *c, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); + struct jz4740_dma_desc *desc; + struct scatterlist *sg; + unsigned int i; + + desc = jz4740_dma_alloc_desc(sg_len); + if (!desc) + return NULL; + + for_each_sg(sgl, sg, sg_len, i) { + desc->sg[i].addr = sg_dma_address(sg); + desc->sg[i].len = sg_dma_len(sg); + } + + desc->num_sgs = sg_len; + desc->direction = direction; + desc->cyclic = false; + + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); +} + +static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic( + struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); + struct jz4740_dma_desc *desc; + unsigned int num_periods, i; + + if (buf_len % period_len) + return NULL; + + num_periods = buf_len / period_len; + + desc = jz4740_dma_alloc_desc(num_periods); + if (!desc) + return NULL; + + for (i = 0; i < num_periods; i++) { + desc->sg[i].addr = buf_addr; + desc->sg[i].len = period_len; + buf_addr += period_len; + } + + desc->num_sgs = num_periods; + desc->direction = direction; + desc->cyclic = true; + + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); +} + +static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan, + struct jz4740_dma_desc *desc, unsigned int next_sg) +{ + size_t residue = 0; + unsigned int i; + + residue = 0; + + for (i = next_sg; i < desc->num_sgs; i++) + residue += desc->sg[i].len; + + if (next_sg != 0) + residue += jz4740_dma_get_residue(chan->jz_chan); + + return residue; +} + +static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, + dma_cookie_t cookie, struct dma_tx_state *state) +{ + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); + struct virt_dma_desc *vdesc; + enum dma_status status; + unsigned long flags; + + status = dma_cookie_status(c, cookie, state); + if (status == DMA_SUCCESS || !state) + return status; + + spin_lock_irqsave(&chan->vchan.lock, flags); + vdesc = vchan_find_desc(&chan->vchan, cookie); + if (cookie == chan->desc->vdesc.tx.cookie) { + state->residue = jz4740_dma_desc_residue(chan, chan->desc, + chan->next_sg); + } else if (vdesc) { + state->residue = jz4740_dma_desc_residue(chan, + to_jz4740_dma_desc(vdesc), 0); + } else { + state->residue = 0; + } + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + return status; +} + +static int jz4740_dma_alloc_chan_resources(struct dma_chan *c) +{ + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); + + chan->jz_chan = jz4740_dma_request(chan, NULL); + if (!chan->jz_chan) + return -EBUSY; + + jz4740_dma_set_complete_cb(chan->jz_chan, jz4740_dma_complete_cb); + + return 0; +} + +static void jz4740_dma_free_chan_resources(struct dma_chan *c) +{ + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); + + vchan_free_chan_resources(&chan->vchan); + jz4740_dma_free(chan->jz_chan); + chan->jz_chan = NULL; +} + +static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc) +{ + kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc)); +} + +static int jz4740_dma_probe(struct platform_device *pdev) +{ + struct jz4740_dmaengine_chan *chan; + struct jz4740_dma_dev *dmadev; + struct dma_device *dd; 
+ unsigned int i; + int ret; + + dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); + if (!dmadev) + return -EINVAL; + + dd = &dmadev->ddev; + + dma_cap_set(DMA_SLAVE, dd->cap_mask); + dma_cap_set(DMA_CYCLIC, dd->cap_mask); + dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources; + dd->device_free_chan_resources = jz4740_dma_free_chan_resources; + dd->device_tx_status = jz4740_dma_tx_status; + dd->device_issue_pending = jz4740_dma_issue_pending; + dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg; + dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; + dd->device_control = jz4740_dma_control; + dd->dev = &pdev->dev; + dd->chancnt = JZ_DMA_NR_CHANS; + INIT_LIST_HEAD(&dd->channels); + + for (i = 0; i < dd->chancnt; i++) { + chan = &dmadev->chan[i]; + chan->vchan.desc_free = jz4740_dma_desc_free; + vchan_init(&chan->vchan, dd); + } + + ret = dma_async_device_register(dd); + if (ret) + return ret; + + platform_set_drvdata(pdev, dmadev); + + return 0; +} + +static int jz4740_dma_remove(struct platform_device *pdev) +{ + struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev); + + dma_async_device_unregister(&dmadev->ddev); + + return 0; +} + +static struct platform_driver jz4740_dma_driver = { + .probe = jz4740_dma_probe, + .remove = jz4740_dma_remove, + .driver = { + .name = "jz4740-dma", + .owner = THIS_MODULE, + }, +}; +module_platform_driver(jz4740_dma_driver); + +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_DESCRIPTION("JZ4740 DMA driver"); +MODULE_LICENSE("GPLv2"); -- cgit v1.2.3-58-ga151 From 25ce6c35fea0588c7a1b68e55d700c22d9e97ce0 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Thu, 30 May 2013 18:25:05 +0200 Subject: MIPS: jz4740: Remove custom DMA API Now that all users of the custom jz4740 DMA API have been converted to use the dmaengine API instead we can remove the custom API and move all the code talking to the hardware to the dmaengine driver. 
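With the hardware access moved into the dmaengine driver, the SoC code only has to describe the controller as an ordinary platform device. A sketch of what that registration could look like (base address, region size and IRQ are taken from the removed arch code in the diff that follows; the mach-jz4740 constants come from the SoC headers, and the device name matches the platform driver):

#include <linux/platform_device.h>

static struct resource jz4740_dma_resources[] = {
	{
		.start	= JZ4740_DMAC_BASE_ADDR,
		.end	= JZ4740_DMAC_BASE_ADDR + 0x400 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= JZ4740_IRQ_DMAC,
		.end	= JZ4740_IRQ_DMAC,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device jz4740_dma_device = {
	.name		= "jz4740-dma",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(jz4740_dma_resources),
	.resource	= jz4740_dma_resources,
};

The probe routine below also looks up a "dma" clock for this device, so the SoC clock code is expected to provide a matching clkdev entry.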
Signed-off-by: Lars-Peter Clausen Acked-by: Ralf Baechle Signed-off-by: Vinod Koul --- arch/mips/include/asm/mach-jz4740/dma.h | 56 ------ arch/mips/jz4740/Makefile | 2 +- arch/mips/jz4740/dma.c | 307 -------------------------------- drivers/dma/dma-jz4740.c | 258 +++++++++++++++++++++++---- 4 files changed, 222 insertions(+), 401 deletions(-) delete mode 100644 arch/mips/jz4740/dma.c (limited to 'drivers/dma') diff --git a/arch/mips/include/asm/mach-jz4740/dma.h b/arch/mips/include/asm/mach-jz4740/dma.h index 98b4e7c0dbae..509cd5828044 100644 --- a/arch/mips/include/asm/mach-jz4740/dma.h +++ b/arch/mips/include/asm/mach-jz4740/dma.h @@ -16,8 +16,6 @@ #ifndef __ASM_MACH_JZ4740_DMA_H__ #define __ASM_MACH_JZ4740_DMA_H__ -struct jz4740_dma_chan; - enum jz4740_dma_request_type { JZ4740_DMA_TYPE_AUTO_REQUEST = 8, JZ4740_DMA_TYPE_UART_TRANSMIT = 20, @@ -33,58 +31,4 @@ enum jz4740_dma_request_type { JZ4740_DMA_TYPE_SLCD = 30, }; -enum jz4740_dma_width { - JZ4740_DMA_WIDTH_32BIT = 0, - JZ4740_DMA_WIDTH_8BIT = 1, - JZ4740_DMA_WIDTH_16BIT = 2, -}; - -enum jz4740_dma_transfer_size { - JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0, - JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1, - JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2, - JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3, - JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4, -}; - -enum jz4740_dma_flags { - JZ4740_DMA_SRC_AUTOINC = 0x2, - JZ4740_DMA_DST_AUTOINC = 0x1, -}; - -enum jz4740_dma_mode { - JZ4740_DMA_MODE_SINGLE = 0, - JZ4740_DMA_MODE_BLOCK = 1, -}; - -struct jz4740_dma_config { - enum jz4740_dma_width src_width; - enum jz4740_dma_width dst_width; - enum jz4740_dma_transfer_size transfer_size; - enum jz4740_dma_request_type request_type; - enum jz4740_dma_flags flags; - enum jz4740_dma_mode mode; -}; - -typedef void (*jz4740_dma_complete_callback_t)(struct jz4740_dma_chan *, int, void *); - -struct jz4740_dma_chan *jz4740_dma_request(void *dev, const char *name); -void jz4740_dma_free(struct jz4740_dma_chan *dma); - -void jz4740_dma_configure(struct jz4740_dma_chan *dma, - const struct jz4740_dma_config *config); - - -void jz4740_dma_enable(struct jz4740_dma_chan *dma); -void jz4740_dma_disable(struct jz4740_dma_chan *dma); - -void jz4740_dma_set_src_addr(struct jz4740_dma_chan *dma, dma_addr_t src); -void jz4740_dma_set_dst_addr(struct jz4740_dma_chan *dma, dma_addr_t dst); -void jz4740_dma_set_transfer_count(struct jz4740_dma_chan *dma, uint32_t count); - -uint32_t jz4740_dma_get_residue(const struct jz4740_dma_chan *dma); - -void jz4740_dma_set_complete_cb(struct jz4740_dma_chan *dma, - jz4740_dma_complete_callback_t cb); - #endif /* __ASM_JZ4740_DMA_H__ */ diff --git a/arch/mips/jz4740/Makefile b/arch/mips/jz4740/Makefile index 63bad0e491d0..28e5535dfa9e 100644 --- a/arch/mips/jz4740/Makefile +++ b/arch/mips/jz4740/Makefile @@ -4,7 +4,7 @@ # Object file lists. -obj-y += prom.o irq.o time.o reset.o setup.o dma.o \ +obj-y += prom.o irq.o time.o reset.o setup.o \ gpio.o clock.o platform.o timer.o serial.o obj-$(CONFIG_DEBUG_FS) += clock-debugfs.o diff --git a/arch/mips/jz4740/dma.c b/arch/mips/jz4740/dma.c deleted file mode 100644 index 0e34b97efa8c..000000000000 --- a/arch/mips/jz4740/dma.c +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Copyright (C) 2010, Lars-Peter Clausen - * JZ4740 SoC DMA support - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. - * - */ - -#include -#include -#include -#include -#include - -#include -#include -#include - -#define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20) -#define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20) -#define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20) -#define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20) -#define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20) -#define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20) -#define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20) - -#define JZ_REG_DMA_CTRL 0x300 -#define JZ_REG_DMA_IRQ 0x304 -#define JZ_REG_DMA_DOORBELL 0x308 -#define JZ_REG_DMA_DOORBELL_SET 0x30C - -#define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31) -#define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6) -#define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4) -#define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3) -#define JZ_DMA_STATUS_CTRL_HALT BIT(2) -#define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1) -#define JZ_DMA_STATUS_CTRL_ENABLE BIT(0) - -#define JZ_DMA_CMD_SRC_INC BIT(23) -#define JZ_DMA_CMD_DST_INC BIT(22) -#define JZ_DMA_CMD_RDIL_MASK (0xf << 16) -#define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14) -#define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12) -#define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8) -#define JZ_DMA_CMD_BLOCK_MODE BIT(7) -#define JZ_DMA_CMD_DESC_VALID BIT(4) -#define JZ_DMA_CMD_DESC_VALID_MODE BIT(3) -#define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2) -#define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1) -#define JZ_DMA_CMD_LINK_ENABLE BIT(0) - -#define JZ_DMA_CMD_FLAGS_OFFSET 22 -#define JZ_DMA_CMD_RDIL_OFFSET 16 -#define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14 -#define JZ_DMA_CMD_DST_WIDTH_OFFSET 12 -#define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8 -#define JZ_DMA_CMD_MODE_OFFSET 7 - -#define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8) -#define JZ_DMA_CTRL_HALT BIT(3) -#define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2) -#define JZ_DMA_CTRL_ENABLE BIT(0) - - -static void __iomem *jz4740_dma_base; -static spinlock_t jz4740_dma_lock; - -static inline uint32_t jz4740_dma_read(size_t reg) -{ - return readl(jz4740_dma_base + reg); -} - -static inline void jz4740_dma_write(size_t reg, uint32_t val) -{ - writel(val, jz4740_dma_base + reg); -} - -static inline void jz4740_dma_write_mask(size_t reg, uint32_t val, uint32_t mask) -{ - uint32_t val2; - val2 = jz4740_dma_read(reg); - val2 &= ~mask; - val2 |= val; - jz4740_dma_write(reg, val2); -} - -struct jz4740_dma_chan { - unsigned int id; - void *dev; - const char *name; - - enum jz4740_dma_flags flags; - uint32_t transfer_shift; - - jz4740_dma_complete_callback_t complete_cb; - - unsigned used:1; -}; - -#define JZ4740_DMA_CHANNEL(_id) { .id = _id } - -struct jz4740_dma_chan jz4740_dma_channels[] = { - JZ4740_DMA_CHANNEL(0), - JZ4740_DMA_CHANNEL(1), - JZ4740_DMA_CHANNEL(2), - JZ4740_DMA_CHANNEL(3), - JZ4740_DMA_CHANNEL(4), - JZ4740_DMA_CHANNEL(5), -}; - -struct jz4740_dma_chan *jz4740_dma_request(void *dev, const char *name) -{ - unsigned int i; - struct jz4740_dma_chan *dma = NULL; - - spin_lock(&jz4740_dma_lock); - - for (i = 0; i < ARRAY_SIZE(jz4740_dma_channels); ++i) { - if (!jz4740_dma_channels[i].used) { - dma = &jz4740_dma_channels[i]; - dma->used = 1; - break; - } - } - - spin_unlock(&jz4740_dma_lock); - - if (!dma) - return NULL; - - dma->dev = dev; - dma->name = name; - - return dma; -} -EXPORT_SYMBOL_GPL(jz4740_dma_request); - -void jz4740_dma_configure(struct jz4740_dma_chan *dma, - const struct 
jz4740_dma_config *config) -{ - uint32_t cmd; - - switch (config->transfer_size) { - case JZ4740_DMA_TRANSFER_SIZE_2BYTE: - dma->transfer_shift = 1; - break; - case JZ4740_DMA_TRANSFER_SIZE_4BYTE: - dma->transfer_shift = 2; - break; - case JZ4740_DMA_TRANSFER_SIZE_16BYTE: - dma->transfer_shift = 4; - break; - case JZ4740_DMA_TRANSFER_SIZE_32BYTE: - dma->transfer_shift = 5; - break; - default: - dma->transfer_shift = 0; - break; - } - - cmd = config->flags << JZ_DMA_CMD_FLAGS_OFFSET; - cmd |= config->src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET; - cmd |= config->dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET; - cmd |= config->transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET; - cmd |= config->mode << JZ_DMA_CMD_MODE_OFFSET; - cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE; - - jz4740_dma_write(JZ_REG_DMA_CMD(dma->id), cmd); - jz4740_dma_write(JZ_REG_DMA_STATUS_CTRL(dma->id), 0); - jz4740_dma_write(JZ_REG_DMA_REQ_TYPE(dma->id), config->request_type); -} -EXPORT_SYMBOL_GPL(jz4740_dma_configure); - -void jz4740_dma_set_src_addr(struct jz4740_dma_chan *dma, dma_addr_t src) -{ - jz4740_dma_write(JZ_REG_DMA_SRC_ADDR(dma->id), src); -} -EXPORT_SYMBOL_GPL(jz4740_dma_set_src_addr); - -void jz4740_dma_set_dst_addr(struct jz4740_dma_chan *dma, dma_addr_t dst) -{ - jz4740_dma_write(JZ_REG_DMA_DST_ADDR(dma->id), dst); -} -EXPORT_SYMBOL_GPL(jz4740_dma_set_dst_addr); - -void jz4740_dma_set_transfer_count(struct jz4740_dma_chan *dma, uint32_t count) -{ - count >>= dma->transfer_shift; - jz4740_dma_write(JZ_REG_DMA_TRANSFER_COUNT(dma->id), count); -} -EXPORT_SYMBOL_GPL(jz4740_dma_set_transfer_count); - -void jz4740_dma_set_complete_cb(struct jz4740_dma_chan *dma, - jz4740_dma_complete_callback_t cb) -{ - dma->complete_cb = cb; -} -EXPORT_SYMBOL_GPL(jz4740_dma_set_complete_cb); - -void jz4740_dma_free(struct jz4740_dma_chan *dma) -{ - dma->dev = NULL; - dma->complete_cb = NULL; - dma->used = 0; -} -EXPORT_SYMBOL_GPL(jz4740_dma_free); - -void jz4740_dma_enable(struct jz4740_dma_chan *dma) -{ - jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), - JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE, - JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC | - JZ_DMA_STATUS_CTRL_ENABLE); - - jz4740_dma_write_mask(JZ_REG_DMA_CTRL, - JZ_DMA_CTRL_ENABLE, - JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE); -} -EXPORT_SYMBOL_GPL(jz4740_dma_enable); - -void jz4740_dma_disable(struct jz4740_dma_chan *dma) -{ - jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, - JZ_DMA_STATUS_CTRL_ENABLE); -} -EXPORT_SYMBOL_GPL(jz4740_dma_disable); - -uint32_t jz4740_dma_get_residue(const struct jz4740_dma_chan *dma) -{ - uint32_t residue; - residue = jz4740_dma_read(JZ_REG_DMA_TRANSFER_COUNT(dma->id)); - return residue << dma->transfer_shift; -} -EXPORT_SYMBOL_GPL(jz4740_dma_get_residue); - -static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) -{ - (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); - - jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, - JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); - - if (dma->complete_cb) - dma->complete_cb(dma, 0, dma->dev); -} - -static irqreturn_t jz4740_dma_irq(int irq, void *dev_id) -{ - uint32_t irq_status; - unsigned int i; - - irq_status = readl(jz4740_dma_base + JZ_REG_DMA_IRQ); - - for (i = 0; i < 6; ++i) { - if (irq_status & (1 << i)) - jz4740_dma_chan_irq(&jz4740_dma_channels[i]); - } - - return IRQ_HANDLED; -} - -static int jz4740_dma_init(void) -{ - struct clk *clk; - unsigned int ret; - - jz4740_dma_base = ioremap(JZ4740_DMAC_BASE_ADDR, 0x400); - - if 
(!jz4740_dma_base) - return -EBUSY; - - spin_lock_init(&jz4740_dma_lock); - - clk = clk_get(NULL, "dma"); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - printk(KERN_ERR "JZ4740 DMA: Failed to request clock: %d\n", - ret); - goto err_iounmap; - } - - ret = request_irq(JZ4740_IRQ_DMAC, jz4740_dma_irq, 0, "DMA", NULL); - if (ret) { - printk(KERN_ERR "JZ4740 DMA: Failed to request irq: %d\n", ret); - goto err_clkput; - } - - clk_prepare_enable(clk); - - return 0; - -err_clkput: - clk_put(clk); - -err_iounmap: - iounmap(jz4740_dma_base); - return ret; -} -arch_initcall(jz4740_dma_init); diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index 3d424341d8fe..b0c0c8268d42 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include @@ -29,6 +31,76 @@ #define JZ_DMA_NR_CHANS 6 +#define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20) +#define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20) +#define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20) +#define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20) +#define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20) +#define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20) +#define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20) + +#define JZ_REG_DMA_CTRL 0x300 +#define JZ_REG_DMA_IRQ 0x304 +#define JZ_REG_DMA_DOORBELL 0x308 +#define JZ_REG_DMA_DOORBELL_SET 0x30C + +#define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31) +#define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6) +#define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4) +#define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3) +#define JZ_DMA_STATUS_CTRL_HALT BIT(2) +#define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1) +#define JZ_DMA_STATUS_CTRL_ENABLE BIT(0) + +#define JZ_DMA_CMD_SRC_INC BIT(23) +#define JZ_DMA_CMD_DST_INC BIT(22) +#define JZ_DMA_CMD_RDIL_MASK (0xf << 16) +#define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14) +#define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12) +#define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8) +#define JZ_DMA_CMD_BLOCK_MODE BIT(7) +#define JZ_DMA_CMD_DESC_VALID BIT(4) +#define JZ_DMA_CMD_DESC_VALID_MODE BIT(3) +#define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2) +#define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1) +#define JZ_DMA_CMD_LINK_ENABLE BIT(0) + +#define JZ_DMA_CMD_FLAGS_OFFSET 22 +#define JZ_DMA_CMD_RDIL_OFFSET 16 +#define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14 +#define JZ_DMA_CMD_DST_WIDTH_OFFSET 12 +#define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8 +#define JZ_DMA_CMD_MODE_OFFSET 7 + +#define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8) +#define JZ_DMA_CTRL_HALT BIT(3) +#define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2) +#define JZ_DMA_CTRL_ENABLE BIT(0) + +enum jz4740_dma_width { + JZ4740_DMA_WIDTH_32BIT = 0, + JZ4740_DMA_WIDTH_8BIT = 1, + JZ4740_DMA_WIDTH_16BIT = 2, +}; + +enum jz4740_dma_transfer_size { + JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0, + JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1, + JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2, + JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3, + JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4, +}; + +enum jz4740_dma_flags { + JZ4740_DMA_SRC_AUTOINC = 0x2, + JZ4740_DMA_DST_AUTOINC = 0x1, +}; + +enum jz4740_dma_mode { + JZ4740_DMA_MODE_SINGLE = 0, + JZ4740_DMA_MODE_BLOCK = 1, +}; + struct jz4740_dma_sg { dma_addr_t addr; unsigned int len; @@ -46,9 +118,10 @@ struct jz4740_dma_desc { struct jz4740_dmaengine_chan { struct virt_dma_chan vchan; - struct jz4740_dma_chan *jz_chan; + unsigned int id; dma_addr_t fifo_addr; + unsigned int transfer_shift; struct jz4740_dma_desc *desc; unsigned int next_sg; @@ -56,10 +129,19 @@ struct jz4740_dmaengine_chan { struct jz4740_dma_dev { struct dma_device ddev; + 
void __iomem *base; + struct clk *clk; struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS]; }; +static struct jz4740_dma_dev *jz4740_dma_chan_get_dev( + struct jz4740_dmaengine_chan *chan) +{ + return container_of(chan->vchan.chan.device, struct jz4740_dma_dev, + ddev); +} + static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c) { return container_of(c, struct jz4740_dmaengine_chan, vchan.chan); @@ -70,6 +152,29 @@ static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc) return container_of(vdesc, struct jz4740_dma_desc, vdesc); } +static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev, + unsigned int reg) +{ + return readl(dmadev->base + reg); +} + +static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev, + unsigned reg, uint32_t val) +{ + writel(val, dmadev->base + reg); +} + +static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev, + unsigned int reg, uint32_t val, uint32_t mask) +{ + uint32_t tmp; + + tmp = jz4740_dma_read(dmadev, reg); + tmp &= ~mask; + tmp |= val; + jz4740_dma_write(dmadev, reg, tmp); +} + static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs) { return kzalloc(sizeof(struct jz4740_dma_desc) + @@ -108,30 +213,60 @@ static int jz4740_dma_slave_config(struct dma_chan *c, const struct dma_slave_config *config) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); - struct jz4740_dma_config jzcfg; + struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); + enum jz4740_dma_width src_width; + enum jz4740_dma_width dst_width; + enum jz4740_dma_transfer_size transfer_size; + enum jz4740_dma_flags flags; + uint32_t cmd; switch (config->direction) { case DMA_MEM_TO_DEV: - jzcfg.flags = JZ4740_DMA_SRC_AUTOINC; - jzcfg.transfer_size = jz4740_dma_maxburst(config->dst_maxburst); + flags = JZ4740_DMA_SRC_AUTOINC; + transfer_size = jz4740_dma_maxburst(config->dst_maxburst); chan->fifo_addr = config->dst_addr; break; case DMA_DEV_TO_MEM: - jzcfg.flags = JZ4740_DMA_DST_AUTOINC; - jzcfg.transfer_size = jz4740_dma_maxburst(config->src_maxburst); + flags = JZ4740_DMA_DST_AUTOINC; + transfer_size = jz4740_dma_maxburst(config->src_maxburst); chan->fifo_addr = config->src_addr; break; default: return -EINVAL; } + src_width = jz4740_dma_width(config->src_addr_width); + dst_width = jz4740_dma_width(config->dst_addr_width); + + switch (transfer_size) { + case JZ4740_DMA_TRANSFER_SIZE_2BYTE: + chan->transfer_shift = 1; + break; + case JZ4740_DMA_TRANSFER_SIZE_4BYTE: + chan->transfer_shift = 2; + break; + case JZ4740_DMA_TRANSFER_SIZE_16BYTE: + chan->transfer_shift = 4; + break; + case JZ4740_DMA_TRANSFER_SIZE_32BYTE: + chan->transfer_shift = 5; + break; + default: + chan->transfer_shift = 0; + break; + } - jzcfg.src_width = jz4740_dma_width(config->src_addr_width); - jzcfg.dst_width = jz4740_dma_width(config->dst_addr_width); - jzcfg.mode = JZ4740_DMA_MODE_SINGLE; - jzcfg.request_type = config->slave_id; + cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET; + cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET; + cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET; + cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET; + cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET; + cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE; - jz4740_dma_configure(chan->jz_chan, &jzcfg); + jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd); + jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0); + jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id), + config->slave_id); return 0; } @@ -139,11 +274,13 @@ 
static int jz4740_dma_slave_config(struct dma_chan *c, static int jz4740_dma_terminate_all(struct dma_chan *c) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); + struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&chan->vchan.lock, flags); - jz4740_dma_disable(chan->jz_chan); + jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0, + JZ_DMA_STATUS_CTRL_ENABLE); chan->desc = NULL; vchan_get_all_descriptors(&chan->vchan, &head); spin_unlock_irqrestore(&chan->vchan.lock, flags); @@ -170,11 +307,13 @@ static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) { + struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); dma_addr_t src_addr, dst_addr; struct virt_dma_desc *vdesc; struct jz4740_dma_sg *sg; - jz4740_dma_disable(chan->jz_chan); + jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0, + JZ_DMA_STATUS_CTRL_ENABLE); if (!chan->desc) { vdesc = vchan_next_desc(&chan->vchan); @@ -196,22 +335,27 @@ static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) src_addr = chan->fifo_addr; dst_addr = sg->addr; } - jz4740_dma_set_src_addr(chan->jz_chan, src_addr); - jz4740_dma_set_dst_addr(chan->jz_chan, dst_addr); - jz4740_dma_set_transfer_count(chan->jz_chan, sg->len); + jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr); + jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr); + jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id), + sg->len >> chan->transfer_shift); chan->next_sg++; - jz4740_dma_enable(chan->jz_chan); + jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), + JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE, + JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC | + JZ_DMA_STATUS_CTRL_ENABLE); + + jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL, + JZ_DMA_CTRL_ENABLE, + JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE); return 0; } -static void jz4740_dma_complete_cb(struct jz4740_dma_chan *jz_chan, int error, - void *devid) +static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan) { - struct jz4740_dmaengine_chan *chan = devid; - spin_lock(&chan->vchan.lock); if (chan->desc) { if (chan->desc && chan->desc->cyclic) { @@ -227,6 +371,28 @@ static void jz4740_dma_complete_cb(struct jz4740_dma_chan *jz_chan, int error, spin_unlock(&chan->vchan.lock); } +static irqreturn_t jz4740_dma_irq(int irq, void *devid) +{ + struct jz4740_dma_dev *dmadev = devid; + uint32_t irq_status; + unsigned int i; + + irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ); + + for (i = 0; i < 6; ++i) { + if (irq_status & (1 << i)) { + jz4740_dma_write_mask(dmadev, + JZ_REG_DMA_STATUS_CTRL(i), 0, + JZ_DMA_STATUS_CTRL_ENABLE | + JZ_DMA_STATUS_CTRL_TRANSFER_DONE); + + jz4740_dma_chan_irq(&dmadev->chan[i]); + } + } + + return IRQ_HANDLED; +} + static void jz4740_dma_issue_pending(struct dma_chan *c) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); @@ -298,7 +464,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic( static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan, struct jz4740_dma_desc *desc, unsigned int next_sg) { - size_t residue = 0; + struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); + unsigned int residue, count; unsigned int i; residue = 0; @@ -306,8 +473,11 @@ static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan, for (i = next_sg; i < desc->num_sgs; i++) 
residue += desc->sg[i].len; - if (next_sg != 0) - residue += jz4740_dma_get_residue(chan->jz_chan); + if (next_sg != 0) { + count = jz4740_dma_read(dmadev, + JZ_REG_DMA_TRANSFER_COUNT(chan->id)); + residue += count << chan->transfer_shift; + } return residue; } @@ -342,24 +512,12 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, static int jz4740_dma_alloc_chan_resources(struct dma_chan *c) { - struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); - - chan->jz_chan = jz4740_dma_request(chan, NULL); - if (!chan->jz_chan) - return -EBUSY; - - jz4740_dma_set_complete_cb(chan->jz_chan, jz4740_dma_complete_cb); - return 0; } static void jz4740_dma_free_chan_resources(struct dma_chan *c) { - struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); - - vchan_free_chan_resources(&chan->vchan); - jz4740_dma_free(chan->jz_chan); - chan->jz_chan = NULL; + vchan_free_chan_resources(to_virt_chan(c)); } static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc) @@ -373,7 +531,9 @@ static int jz4740_dma_probe(struct platform_device *pdev) struct jz4740_dma_dev *dmadev; struct dma_device *dd; unsigned int i; + struct resource *res; int ret; + int irq; dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); if (!dmadev) @@ -381,6 +541,17 @@ static int jz4740_dma_probe(struct platform_device *pdev) dd = &dmadev->ddev; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dmadev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dmadev->base)) + return PTR_ERR(dmadev->base); + + dmadev->clk = clk_get(&pdev->dev, "dma"); + if (IS_ERR(dmadev->clk)) + return PTR_ERR(dmadev->clk); + + clk_prepare_enable(dmadev->clk); + dma_cap_set(DMA_SLAVE, dd->cap_mask); dma_cap_set(DMA_CYCLIC, dd->cap_mask); dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources; @@ -396,6 +567,7 @@ static int jz4740_dma_probe(struct platform_device *pdev) for (i = 0; i < dd->chancnt; i++) { chan = &dmadev->chan[i]; + chan->id = i; chan->vchan.desc_free = jz4740_dma_desc_free; vchan_init(&chan->vchan, dd); } @@ -404,16 +576,28 @@ static int jz4740_dma_probe(struct platform_device *pdev) if (ret) return ret; + irq = platform_get_irq(pdev, 0); + ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev); + if (ret) + goto err_unregister; + platform_set_drvdata(pdev, dmadev); return 0; + +err_unregister: + dma_async_device_unregister(dd); + return ret; } static int jz4740_dma_remove(struct platform_device *pdev) { struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + free_irq(irq, dmadev); dma_async_device_unregister(&dmadev->ddev); + clk_disable_unprepare(dmadev->clk); return 0; } -- cgit v1.2.3-58-ga151 From 62971b29825adb06908bad81e7679d1f7904b24d Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Thu, 13 Jun 2013 10:39:39 +0200 Subject: dmaengine: at_hdmac: add FIFO configuration parameter to DMA DT binding For most devices the FIFO configuration is the same i.e. when half FIFO size is available/filled, a source/destination request is serviced. But USART devices have to do it when there is enough space/data available to perform a single AHB access so the ASAP configuration. 
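The third specifier cell therefore becomes a small bitfield combining the peripheral identifier (bits 7-0) and the FIFO configuration (bits 11-8). A sketch of how a client value could be composed in C, using the mask/flag names consumed by the driver change below; the concrete values are assumptions (ALAP and ASAP taken as distinct codes 1 and 2), only the field positions come from the binding text:

/* assumed dt-bindings constants; bit positions per the binding text */
#define AT91_DMA_CFG_PER_ID_MASK	0xff		/* bits 7-0: peripheral id */
#define AT91_DMA_CFG_FIFOCFG_MASK	(0xf << 8)	/* bits 11-8: FIFO config */
#define AT91_DMA_CFG_FIFOCFG_HALF	(0x0 << 8)	/* half FIFO (default) */
#define AT91_DMA_CFG_FIFOCFG_ALAP	(0x1 << 8)	/* largest burst */
#define AT91_DMA_CFG_FIFOCFG_ASAP	(0x2 << 8)	/* single AHB access */

/* e.g. a USART TX request on a hypothetical peripheral id 3 */
unsigned int third_cell = AT91_DMA_CFG_FIFOCFG_ASAP | 3;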
Acked-by: Nicolas Ferre Acked-by: Jean-Christophe PLAGNIOL-VILLARD Signed-off-by: Ludovic Desroches Signed-off-by: Nicolas Ferre --- .../devicetree/bindings/dma/atmel-dma.txt | 7 ++++-- drivers/dma/at_hdmac.c | 25 ++++++++++++++++++---- 2 files changed, 26 insertions(+), 6 deletions(-) (limited to 'drivers/dma') diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt index c80e8a3402f0..c280a0e6f42d 100644 --- a/Documentation/devicetree/bindings/dma/atmel-dma.txt +++ b/Documentation/devicetree/bindings/dma/atmel-dma.txt @@ -24,8 +24,11 @@ The three cells in order are: 1. A phandle pointing to the DMA controller. 2. The memory interface (16 most significant bits), the peripheral interface (16 less significant bits). -3. The peripheral identifier for the hardware handshaking interface. The -identifier can be different for tx and rx. +3. Parameters for the at91 DMA configuration register which are device +dependant: + - bit 7-0: peripheral identifier for the hardware handshaking interface. The + identifier can be different for tx and rx. + - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP. Example: diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 6db5228f4134..b7050a46bd87 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -14,6 +14,7 @@ * found on AT91SAM9263. */ +#include #include #include #include @@ -1320,15 +1321,31 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); if (!atslave) return NULL; + + atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW; /* * We can fill both SRC_PER and DST_PER, one of these fields will be * ignored depending on DMA transfer direction. */ - per_id = dma_spec->args[1]; - atslave->cfg = ATC_FIFOCFG_HALFFIFO - | ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW - | ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) + per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK; + atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); + /* + * We have to translate the value we get from the device tree since + * the half FIFO configuration value had to be 0 to keep backward + * compatibility. + */ + switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) { + case AT91_DMA_CFG_FIFOCFG_ALAP: + atslave->cfg |= ATC_FIFOCFG_LARGESTBURST; + break; + case AT91_DMA_CFG_FIFOCFG_ASAP: + atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE; + break; + case AT91_DMA_CFG_FIFOCFG_HALF: + default: + atslave->cfg |= ATC_FIFOCFG_HALFFIFO; + } atslave->dma_dev = &dmac_pdev->dev; chan = dma_request_channel(mask, at_dma_filter, atslave); -- cgit v1.2.3-58-ga151 From 538eea6c7c2387b11fa5ba919743cd36e3a4d42f Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 7 Jun 2013 17:10:53 +0200 Subject: dmaengine: at_hdmac: remove unsuded atc_cleanup_descriptors() Since patch 7c407d3e54dcc0c79119553c8d5ef176c1d5bc3a (DMA: AT91: Get residual bytes in dma buffer), the function atc_cleanup_descriptors() is not used anymore. We remove it to prevent warnings. 
Reported-by: Arnd Bergmann Signed-off-by: Nicolas Ferre Acked-by: Arnd Bergmann --- drivers/dma/at_hdmac.c | 31 ------------------------------- 1 file changed, 31 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index b7050a46bd87..1ee3ed187f4d 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -416,37 +416,6 @@ static void atc_complete_all(struct at_dma_chan *atchan) atc_chain_complete(atchan, desc); } -/** - * atc_cleanup_descriptors - cleanup up finished descriptors in active_list - * @atchan: channel to be cleaned up - * - * Called with atchan->lock held and bh disabled - */ -static void atc_cleanup_descriptors(struct at_dma_chan *atchan) -{ - struct at_desc *desc, *_desc; - struct at_desc *child; - - dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n"); - - list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { - if (!(desc->lli.ctrla & ATC_DONE)) - /* This one is currently in progress */ - return; - - list_for_each_entry(child, &desc->tx_list, desc_node) - if (!(child->lli.ctrla & ATC_DONE)) - /* Currently in progress */ - return; - - /* - * No descriptors so far seem to be in progress, i.e. - * this chain must be done. - */ - atc_chain_complete(atchan, desc); - } -} - /** * atc_advance_work - at the end of a transaction, move forward * @atchan: channel where the transaction ended -- cgit v1.2.3-58-ga151 From c3dbc60c9b2510fee6cea9b77b89a7708bf98bd3 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 7 Jun 2013 17:26:14 +0200 Subject: dmaengine/trivial: at_hdmac: add curly brackets to if/else expressions Correct coding style following the patch: 7c407d3e54dcc0c79119553c8d5ef176c1d5bc3a (DMA: AT91: Get residual bytes in dma buffer). Signed-off-by: Nicolas Ferre --- drivers/dma/at_hdmac.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 1ee3ed187f4d..3e070d2fb6a3 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -299,8 +299,9 @@ static int atc_get_bytes_left(struct dma_chan *chan) if (atchan->remain_desc < 0) { ret = -EINVAL; goto out; - } else + } else { ret = atchan->remain_desc; + } } else { /* * Get residual bytes when current @@ -1120,8 +1121,9 @@ atc_tx_status(struct dma_chan *chan, if (unlikely(bytes < 0)) { dev_vdbg(chan2dev(chan), "get residual bytes error\n"); return DMA_ERROR; - } else + } else { dma_set_residue(txstate, bytes); + } dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n", ret, cookie, bytes); -- cgit v1.2.3-58-ga151 From f784d9c90469d75a9f7a38c1568f47e95ae504ca Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Wed, 19 Jun 2013 13:14:54 +0200 Subject: dmaengine: at_hdmac: prepare clk before calling enable Replace clk_enable/disable with clk_prepare_enable/disable_unprepare to avoid common clk framework warnings. 
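Under the common clock framework a clock must be prepared before it is enabled, and calling clk_enable() on an unprepared clock triggers a warning; the combined helpers keep the two steps paired. The general pattern, as a short sketch (error handling as in the probe path below):

#include <linux/clk.h>

/* before: clk_enable(clk) ... clk_disable(clk) */
static int example_clock_on_off(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* prepare (may sleep), then enable */
	if (ret)
		return ret;

	/* ... hardware is clocked ... */

	clk_disable_unprepare(clk);	/* disable, then unprepare */
	return 0;
}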
Signed-off-by: Boris BREZILLON
[nicolas.ferre@atmel.com: remove return code checking in at_dma_resume_noirq()]
Signed-off-by: Nicolas Ferre
---
 drivers/dma/at_hdmac.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

(limited to 'drivers/dma')

diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 3e070d2fb6a3..c787f38a186a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1460,7 +1460,9 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		err = PTR_ERR(atdma->clk);
 		goto err_clk;
 	}
-	clk_enable(atdma->clk);
+	err = clk_prepare_enable(atdma->clk);
+	if (err)
+		goto err_clk_prepare;
 
 	/* force dma off, just in case */
 	at_dma_off(atdma);
@@ -1560,7 +1562,8 @@ err_of_dma_controller_register:
 err_pool_create:
 	free_irq(platform_get_irq(pdev, 0), atdma);
 err_irq:
-	clk_disable(atdma->clk);
+	clk_disable_unprepare(atdma->clk);
+err_clk_prepare:
 	clk_put(atdma->clk);
 err_clk:
 	iounmap(atdma->regs);
@@ -1596,7 +1599,7 @@ static int at_dma_remove(struct platform_device *pdev)
 		list_del(&chan->device_node);
 	}
 
-	clk_disable(atdma->clk);
+	clk_disable_unprepare(atdma->clk);
 	clk_put(atdma->clk);
 
 	iounmap(atdma->regs);
@@ -1615,7 +1618,7 @@ static void at_dma_shutdown(struct platform_device *pdev)
 	struct at_dma *atdma = platform_get_drvdata(pdev);
 
 	at_dma_off(platform_get_drvdata(pdev));
-	clk_disable(atdma->clk);
+	clk_disable_unprepare(atdma->clk);
 }
 
 static int at_dma_prepare(struct device *dev)
@@ -1672,7 +1675,7 @@ static int at_dma_suspend_noirq(struct device *dev)
 
 	/* disable DMA controller */
 	at_dma_off(atdma);
-	clk_disable(atdma->clk);
+	clk_disable_unprepare(atdma->clk);
 	return 0;
 }
@@ -1702,7 +1705,7 @@ static int at_dma_resume_noirq(struct device *dev)
 	struct dma_chan *chan, *_chan;
 
 	/* bring back DMA controller */
-	clk_enable(atdma->clk);
+	clk_prepare_enable(atdma->clk);
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
 	/* clear any pending interrupt */
-- cgit v1.2.3-58-ga151

From fa74326c44767626a5ae794b29d54103e2259e64 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Thu, 6 Jun 2013 17:37:13 +0200
Subject: DMA: shdma: (cosmetic) don't re-calculate a pointer

Use an existing pointer instead of retrieving it again.

Signed-off-by: Guennadi Liakhovetski
Signed-off-by: Vinod Koul
---
 drivers/dma/sh/shdma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/dma')

diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index b70709b030d8..a5a1887c34b5 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -729,7 +729,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
 		goto eshdma;
 
 	/* platform data */
-	shdev->pdata = pdev->dev.platform_data;
+	shdev->pdata = pdata;
 
 	if (pdata->chcr_offset)
 		shdev->chcr_offset = pdata->chcr_offset;
-- cgit v1.2.3-58-ga151

From 67eacc1583909d0588c8d5d80c16298c899a6382 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Tue, 18 Jun 2013 18:16:57 +0200
Subject: DMA: shdma: add DT support

This patch adds Device Tree support to the shdma driver. No special DT
properties are used, only standard DMA DT bindings are implemented. Since
shdma controllers reside on SoCs, their configuration is SoC-specific and
shall be passed to the driver from the SoC platform data, using the auxdata
procedure.
Signed-off-by: Guennadi Liakhovetski
Acked-by: Arnd Bergmann
Signed-off-by: Vinod Koul
---
 Documentation/devicetree/bindings/dma/shdma.txt | 75 ++++++++++++++++++++++
 drivers/dma/sh/Makefile                         |  2 +-
 drivers/dma/sh/shdma-base.c                     | 26 ++++++--
 drivers/dma/sh/shdma-of.c                       | 82 +++++++++++++++++++++++++
 drivers/dma/sh/shdma.c                          | 31 ++++++++--
 include/linux/shdma-base.h                      |  2 +
 6 files changed, 205 insertions(+), 13 deletions(-)
 create mode 100644 Documentation/devicetree/bindings/dma/shdma.txt
 create mode 100644 drivers/dma/sh/shdma-of.c

(limited to 'drivers/dma')

diff --git a/Documentation/devicetree/bindings/dma/shdma.txt b/Documentation/devicetree/bindings/dma/shdma.txt
new file mode 100644
index 000000000000..c15994aa1939
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/shdma.txt
@@ -0,0 +1,75 @@
+* SHDMA Device Tree bindings
+
+Sh-/r-mobile and r-car systems often have multiple identical DMA controller
+instances, capable of serving any of a common set of DMA slave devices, using
+the same configuration. To describe this topology we require all compatible
+SHDMA DT nodes to be placed under a DMA multiplexer node. All such compatible
+DMAC instances have the same number of channels and use the same DMA
+descriptors. Therefore respective DMA DT bindings can also all be placed in the
+multiplexer node. Even if there is only one such DMAC instance on a system, it
+still has to be placed under such a multiplexer node.
+
+* DMA multiplexer
+
+Required properties:
+- compatible: should be "renesas,shdma-mux"
+- #dma-cells: should be <1>, see "dmas" property below
+
+Optional properties (currently unused):
+- dma-channels: number of DMA channels
+- dma-requests: number of DMA request signals
+
+* DMA controller
+
+Required properties:
+- compatible: should be "renesas,shdma"
+
+Example:
+	dmac: dma-mux0 {
+		compatible = "renesas,shdma-mux";
+		#dma-cells = <1>;
+		dma-channels = <6>;
+		dma-requests = <256>;
+		reg = <0 0>;	/* Needed for AUXDATA */
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		dma0: shdma@fe008020 {
+			compatible = "renesas,shdma";
+			reg = <0xfe008020 0x270>,
+				<0xfe009000 0xc>;
+			interrupt-parent = <&gic>;
+			interrupts = <0 34 4
+					0 28 4
+					0 29 4
+					0 30 4
+					0 31 4
+					0 32 4
+					0 33 4>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5";
+		};
+
+		dma1: shdma@fe018020 {
+			...
+		};
+
+		dma2: shdma@fe028020 {
+			...
+		};
+	};
+
+* DMA client
+
+Required properties:
+- dmas: a list of <[DMA multiplexer phandle] [MID/RID value]> pairs,
+	where MID/RID values are fixed handles, specified in the SoC
+	manual
+- dma-names: a list of DMA channel names, one per "dmas" entry
+
+Example:
+	dmas = <&dmac 0xd1
+		&dmac 0xd2>;
+	dma-names = "tx", "rx";
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index c07ca4612e46..c962138dde96 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,3 +1,3 @@
-obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o
+obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 4acb85a10250..28ca36121631 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -175,7 +175,18 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
 {
 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
 	const struct shdma_ops *ops = sdev->ops;
-	int ret;
+	int ret, match;
+
+	if (schan->dev->of_node) {
+		match = schan->hw_req;
+		ret = ops->set_slave(schan, match, true);
+		if (ret < 0)
+			return ret;
+
+		slave_id = schan->slave_id;
+	} else {
+		match = slave_id;
+	}
 
 	if (slave_id < 0 || slave_id >= slave_num)
 		return -EINVAL;
@@ -183,7 +194,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
 	if (test_and_set_bit(slave_id, shdma_slave_used))
 		return -EBUSY;
 
-	ret = ops->set_slave(schan, slave_id, false);
+	ret = ops->set_slave(schan, match, false);
 	if (ret < 0) {
 		clear_bit(slave_id, shdma_slave_used);
 		return ret;
@@ -206,23 +217,26 @@
  * services would have to provide their own filters, which first would check
  * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
  * this, and only then, in case of a match, call this common filter.
+ * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
+ * In that case the MID-RID value is used for slave channel filtering and is
+ * passed to this function in the "arg" parameter.
  */
 bool shdma_chan_filter(struct dma_chan *chan, void *arg)
 {
 	struct shdma_chan *schan = to_shdma_chan(chan);
 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
 	const struct shdma_ops *ops = sdev->ops;
-	int slave_id = (int)arg;
+	int match = (int)arg;
 	int ret;
 
-	if (slave_id < 0)
+	if (match < 0)
 		/* No slave requested - arbitrary channel */
 		return true;
 
-	if (slave_id >= slave_num)
+	if (!schan->dev->of_node && match >= slave_num)
 		return false;
 
-	ret = ops->set_slave(schan, slave_id, true);
+	ret = ops->set_slave(schan, match, true);
 	if (ret < 0)
 		return false;
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
new file mode 100644
index 000000000000..11bcb05cd79c
--- /dev/null
+++ b/drivers/dma/sh/shdma-of.c
@@ -0,0 +1,82 @@
+/*
+ * SHDMA Device Tree glue
+ *
+ * Copyright (C) 2013 Renesas Electronics Inc.
+ * Author: Guennadi Liakhovetski
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
+
+static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	u32 id = dma_spec->args[0];
+	dma_cap_mask_t mask;
+	struct dma_chan *chan;
+
+	if (dma_spec->args_count != 1)
+		return NULL;
+
+	dma_cap_zero(mask);
+	/* Only slave DMA channels can be allocated via DT */
+	dma_cap_set(DMA_SLAVE, mask);
+
+	chan = dma_request_channel(mask, shdma_chan_filter, (void *)id);
+	if (chan)
+		to_shdma_chan(chan)->hw_req = id;
+
+	return chan;
+}
+
+static int shdma_of_probe(struct platform_device *pdev)
+{
+	const struct of_dev_auxdata *lookup = pdev->dev.platform_data;
+	int ret;
+
+	if (!lookup)
+		return -EINVAL;
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+					 shdma_of_xlate, pdev);
+	if (ret < 0)
+		return ret;
+
+	ret = of_platform_populate(pdev->dev.of_node, NULL, lookup, &pdev->dev);
+	if (ret < 0)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return ret;
+}
+
+static const struct of_device_id shdma_of_match[] = {
+	{ .compatible = "renesas,shdma-mux", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+
+static struct platform_driver shdma_of = {
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "shdma-of",
+		.of_match_table = shdma_of_match,
+	},
+	.probe		= shdma_of_probe,
+};
+
+module_platform_driver(shdma_of);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SH-DMA driver DT glue");
+MODULE_AUTHOR("Guennadi Liakhovetski ");
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index a5a1887c34b5..b67f45f5c271 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -301,20 +301,32 @@ static void sh_dmae_setup_xfer(struct shdma_chan *schan,
 	}
 }
 
+/*
+ * Find a slave channel configuration from the controller list by either a slave
+ * ID in the non-DT case, or by a MID/RID value in the DT case
+ */
 static const struct sh_dmae_slave_config *dmae_find_slave(
-	struct sh_dmae_chan *sh_chan, int slave_id)
+	struct sh_dmae_chan *sh_chan, int match)
 {
 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
 	struct sh_dmae_pdata *pdata = shdev->pdata;
 	const struct sh_dmae_slave_config *cfg;
 	int i;
 
-	if (slave_id >= SH_DMA_SLAVE_NUMBER)
-		return NULL;
+	if (!sh_chan->shdma_chan.dev->of_node) {
+		if (match >= SH_DMA_SLAVE_NUMBER)
+			return NULL;
 
-	for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
-		if (cfg->slave_id == slave_id)
-			return cfg;
+		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+			if (cfg->slave_id == match)
+				return cfg;
+	} else {
+		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+			if (cfg->mid_rid == match) {
+				sh_chan->shdma_chan.slave_id = cfg->slave_id;
+				return cfg;
+			}
+	}
 
 	return NULL;
 }
@@ -920,11 +932,18 @@ static int sh_dmae_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct of_device_id sh_dmae_of_match[] = {
+	{ .compatible = "renesas,shdma", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+
 static struct platform_driver sh_dmae_driver = {
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.pm	= &sh_dmae_pm,
 		.name	= SH_DMAE_DRV_NAME,
+		.of_match_table = sh_dmae_of_match,
 	},
 	.remove		= sh_dmae_remove,
 	.shutdown	= sh_dmae_shutdown,
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 9a938971bc4a..382cf710ca9a 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -68,6 +68,8 @@ struct shdma_chan {
 	int id;				/* Raw id of this channel */
 	int irq;			/* Channel IRQ */
 	int slave_id;			/* Client ID for slave DMA */
+	int hw_req;			/* DMA request line for slave DMA - same
+					 * as MID/RID, used with DT */
 	enum shdma_pm_state pm_state;
 };
-- cgit v1.2.3-58-ga151
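
A few illustrative sketches for the series above follow. They are not part of
the patches; every device name, address and value in them is invented unless
it appears in the bindings themselves.

First, the three-cell "dmas" format from the atmel-dma binding change: the
third cell packs the peripheral identifier into bits 7-0 and the FIFO
configuration into bits 11-8 (0 keeps the backward-compatible half-FIFO
setting). A hypothetical client node could therefore look like this:

	serial@f001c000 {
		/* second cell: memory interface 1 (<< 16) | peripheral interface 2
		 * third cell:  0x00a = half FIFO (0 << 8) | peripheral id 10
		 *              0x10b = ALAP FIFO (1 << 8) | peripheral id 11
		 */
		dmas = <&dma0 0x10002 0x00a>,
		       <&dma0 0x10002 0x10b>;
		dma-names = "tx", "rx";
	};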
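
The clk patch ("prepare clk before calling enable") applies the standard
common clock framework rule: clk_prepare_enable() may sleep and returns an
error code that must be checked, while clk_disable_unprepare() undoes both
steps in reverse order. A minimal sketch of that pattern, not taken from
at_hdmac.c:

	#include <linux/clk.h>
	#include <linux/device.h>

	static int example_clk_on(struct device *dev, struct clk *clk)
	{
		int err;

		/* prepare (may sleep) and enable in one call; check the result */
		err = clk_prepare_enable(clk);
		if (err) {
			dev_err(dev, "failed to enable clock: %d\n", err);
			return err;
		}

		return 0;
	}

	static void example_clk_off(struct clk *clk)
	{
		/* disable, then unprepare - the reverse of clk_prepare_enable() */
		clk_disable_unprepare(clk);
	}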
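
The "auxdata procedure" mentioned in the shdma DT commit works roughly like
this: the SoC setup code hands an of_dev_auxdata lookup table to
of_platform_populate(), which attaches the listed platform_data to matching
DT-created devices. For shdma the inner table itself becomes the platform_data
of the "renesas,shdma-mux" node, and shdma_of_probe() then passes it on when
populating the child "renesas,shdma" instances. A rough sketch of the SoC side
(struct contents, addresses and device names are placeholders):

	#include <linux/of_platform.h>
	#include <linux/sh_dma.h>

	/* SoC-specific channel/slave configuration, as in the non-DT case */
	static struct sh_dmae_pdata dma_pdata = {
		/* .slave, .slave_num, .channel, .channel_num, ... */
	};

	/* consumed by shdma_of_probe() when it populates the DMAC child nodes */
	static struct of_dev_auxdata shdma_auxdata[] = {
		OF_DEV_AUXDATA("renesas,shdma", 0xfe008020, "sh-dma-engine.0",
			       &dma_pdata),
		{},
	};

	/* attaches the table above as platform_data of the multiplexer node */
	static struct of_dev_auxdata soc_auxdata[] = {
		OF_DEV_AUXDATA("renesas,shdma-mux", 0, "shdma-of", shdma_auxdata),
		{},
	};

	static void __init soc_init_machine(void)
	{
		of_platform_populate(NULL, of_default_bus_match_table,
				     soc_auxdata, NULL);
	}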
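
Finally, on the consumer side nothing shdma-specific is required once the
"dmas"/"dma-names" properties from the shdma binding are present: the generic
dmaengine helper resolves the name, of_dma calls shdma_of_xlate(), which
stores the MID/RID value in the new hw_req field and lets
shdma_chan_filter()/set_slave() pick a matching channel. A hypothetical slave
driver would simply do:

	#include <linux/dmaengine.h>
	#include <linux/errno.h>

	static int example_request_channels(struct device *dev)
	{
		struct dma_chan *tx, *rx;

		/* "tx"/"rx" index into dma-names; the MID/RID comes from dmas */
		tx = dma_request_slave_channel(dev, "tx");
		rx = dma_request_slave_channel(dev, "rx");
		if (!tx || !rx) {
			if (tx)
				dma_release_channel(tx);
			if (rx)
				dma_release_channel(rx);
			return -ENODEV;
		}

		/* ... dmaengine_slave_config(), prep_slave_sg(), issue_pending() ... */

		dma_release_channel(tx);
		dma_release_channel(rx);
		return 0;
	}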